diff --git a/runs/Experiment2/mapping_stats_mp_at.xlsx b/runs/Experiment2/mapping_stats_mp_at.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..de5eb4d7dde707a922acb4c175e73806d1725931
Binary files /dev/null and b/runs/Experiment2/mapping_stats_mp_at.xlsx differ
diff --git a/workflows/Experiment1/3d_pca_plot_schippers_20211021.r b/workflows/Experiment1/3d_pca_plot_schippers_20211021.r
new file mode 100644
index 0000000000000000000000000000000000000000..2408e58fb81a901d75e164a00ac4943d66c02c89
--- /dev/null
+++ b/workflows/Experiment1/3d_pca_plot_schippers_20211021.r
@@ -0,0 +1,175 @@
+# 3d PCA plot of RPKM-normalized RNA-seq samples (rgl 3D + ggplot2 2D).
+# Output directory for all generated figures/movies.
+#saveat <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers"
+saveat <- "R:/haasf/madland_RNA-seq_Schippers"
+
+# Input: tab-separated RPKM table, gene IDs in the first column (row.names = 1).
+#file.rpkm <- '/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/Mp.p.sort.wotRNA.rpkm'
+file.rpkm <- "S:/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/Mp.p.sort.wotRNA.rpkm"
+# file.rpkm <- "S:/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/31315_all.p.sort.rpkm"
+
+# header = TRUE spelled out: T is an ordinary variable and can be reassigned.
+data.rpkm <- read.table(file.rpkm, header = TRUE, sep = "\t", row.names = 1)
+
+
+# sort columns alphabetically by sample name so they line up with the
+# sorted header.ori/header.new mapping below
+data.rpkm <- data.rpkm[, order(colnames(data.rpkm))]
+
+
+# Lookup table: condition name -> c(label, plot colour).
+librariesName <- list(
+  control_6h  = c("control_6h", "red"),
+  control_24h = c("control_24h", "blue"),
+  LCS_6h      = c("LCS_6h", "green"),
+  LCS_24h     = c("LCS_24h", "yellow")
+)
+
+# Sequencing library IDs and the sample names they correspond to
+# (position i of header.new renames library header.ori[i]).
+header.ori <- c("MF82", "MF81", "MF80", "MF76", "MF75", "MF74", "MF68", "MF67", "MF66", "MF62", "MF61", "MF60")
+header.new <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "control_6h.2", "LCS_6h.1", "LCS_6h", "LCS_6h.2", "control_6h.1", "control_6h")
+# header.ori <- c("P97", "P96", "P95", "P91", "P90", "P89", "P111", "P110", "P109", "P105", "P104", "P103")
+# header.new <- c("LCS_6h.2", "LCS_6h.1", "LCS_6h", "control_6h.2", "control_6h.1", "control_6h", "LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h")
+
+
+# Sort both vectors by library ID once, so the new names match the
+# alphabetically sorted columns of data.rpkm.
+id.order <- order(header.ori)
+header.new <- header.new[id.order]
+header.ori <- header.ori[id.order]
+
+col.header <- header.new
+
+colnames(data.rpkm) <- col.header
+
+# Package setup.
+library("DESeq2")        # RNA-seq toolkit (attached for the wider workflow)
+library("ggplot2")       # 2D PCA scatter plot below
+library("RColorBrewer")  # colour palettes
+library("pheatmap")      # heatmaps (attached for the wider workflow)
+library("BiocGenerics")  # Bioconductor generics used by DESeq2
+library("rgl")           # interactive 3D plotting (plot3d, movie3d, ...)
+library("magick")        # image handling; movie3d frame assembly
+library("sjmisc")        # str_contains() used in the sample-name loop
+
+
+################### running ######################
+### PCA RPKM ###
+
+set.seed(0)
+
+# Samples as rows, genes as columns; clustering/PCA is done on the
+# sample-vs-sample Euclidean distance matrix.
+data.inv <- t(data.rpkm)
+data.dist <- dist(data.inv, method = "euclidean") # "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski"
+data.dist.hc <- hclust(data.dist, method = "ward.D2")
+# cor = TRUE spelled out (T is reassignable); PCA on the distance matrix.
+data.dist.pca <- princomp(data.dist, cor = TRUE)
+
+pc1 <- data.dist.pca$scores[, 1]
+pc2 <- data.dist.pca$scores[, 2]
+pc3 <- data.dist.pca$scores[, 3]
+
+# create data frame for pc1 pc2 pc3
+data.dist.pca.frame <- data.frame(pc1, pc2, pc3)
+rownames(data.dist.pca.frame) <- names(data.dist.pca$scale)
+colnames(data.dist.pca.frame) <- c("pc1", "pc2", "pc3")
+
+# Look up condition label and colour for every sample column.
+# Replicate suffixes like ".1"/".2" are stripped with a single regex; the
+# previous nested substr()/str_contains() logic chopped two characters off
+# any name ending in a digit, which breaks for names without a "." suffix.
+condition.values <- character(0)
+condition.values.color <- character(0)
+
+for (a in colnames(data.rpkm)) {
+  n <- sub("\\.[0-9]+$", "", a)  # "control_6h.2" -> "control_6h"
+  condition.values <- c(condition.values, librariesName[[n]][1])
+  condition.values.color <- c(condition.values.color, librariesName[[n]][2])
+}
+
+
+data.dist.pca.frame["tissue"] <- condition.values
+data.dist.pca.frame["color"] <- condition.values.color
+data.dist.pca.frame["name"] <- names(data.dist.pca$scale)
+# Fraction of total variance per component (used for axis labels below).
+attr(data.dist.pca.frame, "percentVar") <- (data.dist.pca$sdev)^2 / sum(data.dist.pca$sdev^2) # cumsum()
+
+# simple plot
+# Hierarchical-clustering dendrogram of the sample distance matrix.
+png(filename=paste0(saveat, "/HC_RPKM_normalized.png"))
+plot(data.dist.hc) # hc plot
+dev.off()
+# Scree plot: variance per principal component.
+png(filename=paste0(saveat, "/PCA_variance_RPKM_normalized.png"))
+plot(data.dist.pca) # variances; var(data.dist.pca$sdev[1:9])
+dev.off()
+# get the percent variation (rounded, for axis labels)
+percentVar <- round(100 * attr(data.dist.pca.frame, "percentVar"))
+
+# 3d plot — opens an interactive rgl device; one sphere per sample,
+# coloured by condition (condition.values.color built above).
+plot3d(pc1, pc2, pc3,
+       type = "s", # p, s, l, h, n
+       #pch = c(1:3),
+       col = condition.values.color,
+       size = 1,
+       xlab = paste0("PC1: ",percentVar[1],"% variance"),
+       ylab = paste0("PC2: ",percentVar[2],"% variance"),
+       zlab = paste0("PC3: ",percentVar[3],"% variance"),
+       cex = 2,
+       main = "", # -> princomp",
+)
+
+# shift <- matrix(4, 4, 4, byrow = TRUE)
+# text3d(shift,texts=1:3)
+grid3d(c("x", "y", "z"))
+
+## add legend
+# NOTE(review): unique() preserves first-appearance order, so labels and
+# colours stay paired because both vectors were filled in the same loop.
+legend3d("right", unique(condition.values), pch = 19, col = unique(condition.values.color))
+
+#### video #####
+# Short interpolated camera rotation on the open rgl device...
+M <- par3d("userMatrix")
+play3d( par3dinterp( userMatrix=list(M,
+                                     rotate3d(M, pi/2, 1, 0, 0),
+                                     rotate3d(M, pi/2, 0, 1, 0) ) ),
+        duration=2 )
+
+# ...then a spinning movie written into saveat (frame assembly uses magick).
+movie3d(spin3d(axis = c(1, 2, 1)), duration = 5,
+        dir = saveat)
+
+#### video end ####
+
+# pc1, pc2
+# 2D PCA scatter plot (PC1 vs PC2), coloured by condition, saved as PNG.
+png(filename=paste0(saveat, "/PCA_RPKM_normalized.png"))
+ggplot(
+  data.dist.pca.frame,
+  aes(
+    pc1,
+    pc2,
+    color=tissue)
+) +
+  geom_point(size=2.5) +
+  xlab(
+    paste0("PC1: ",percentVar[1],"% variance")
+  ) +
+  ylab(
+    paste0("PC2: ",percentVar[2],"% variance")
+  ) +
+  #theme() + #, face="bold"
+  # NOTE(review): colours are assigned to the factor levels of 'tissue' in
+  # alphabetical order — verify this ordering still matches librariesName
+  # if conditions are renamed.
+  scale_colour_manual(
+    values= c("blue", "red", "yellow", "green") # dodgerblue3
+  ) +
+  ggtitle("PCA of all samples (RPKM normalized)") +
+  theme(
+    plot.title = element_text(lineheight=.8),
+    panel.background = element_rect(fill = "gray95")
+  )
+dev.off()
+
+
+
+
+
+######## Some experiments ######
+# for(i in colnames(data)) {
+#     colnames(data)[colnames(data)==grep(i, names(data), value = TRUE)] <- librariesName[[i]]
+# }
+#
+# for(i in colnames(data)) {
+#     colnames(data)[colnames(data)==grep(i, names(data), value = TRUE)] <- names(librariesName[librariesName==grep(i, librariesName, value = TRUE)])
+# }
+
+
diff --git a/workflows/Experiment1/SNP_pipe.sh b/workflows/Experiment1/SNP_pipe.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1daee78c885a6d7c0c60c785b5ac089d0baddc4e
--- /dev/null
+++ b/workflows/Experiment1/SNP_pipe.sh
@@ -0,0 +1,478 @@
+#!/bin/bash
+
+#. /etc/profile.d/modules.sh
+
+export PATH=$PATH:/vol/biotools/bin
+export MANPATH=$MANPATH:/vol/biotools/man
+export PATH=$PATH:/vol/software/bin
+
+
+# install important R package - lummerland cluster does not have it ...
+#echo "install.packages(\"gplots\", repos=\"https://cran.rstudio.com\")" | R --vanilla --no-save
+#R CMD INSTALL /vol/agrensing/tools/R_packages/bitops_1.0-6.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/caTools_1.17.1.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gtools_3.8.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gdata_2.18.0.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gplots_3.0.1.tar.gz
+
+#functions
+
+startle ()
+{
+	# Count raw reads in the forward FASTQ: 4 lines per read.
+	# Sets the global $number_row_reads used by later statistics.
+	number_row_reads=$(wc -l "$fw" | awk '{print $1/4}')
+	echo "## Start log ##"
+	echo "Number of row reads: "$number_row_reads
+}
+
+# trimmo: adapter/quality trimming with Trimmomatic 0.39.
+# PE mode when the reverse-read file ($rv) exists, SE mode otherwise.
+# Writes trimmed FASTQ files next to $trimmo_out, prints read counts,
+# and runs FastQC on the trimmed output.
+trimmo ()
+ {
+   # Remove leading low quality or N bases (below quality 3) (LEADING:3)
+   # Remove trailing low quality or N bases (below quality 3) (TRAILING:3)
+   # Scan the read with a 4-base wide sliding window, cutting when the average quality per base drops below 15 (SLIDINGWINDOW:4:15)
+   # HEADCROP: Cut the specified Number of bases from the start of the read
+   # MINLEN was first set to 50 bp. But there are libraries with read start length of 50 bp.
+
+   # paired-end branch
+   if [ -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar PE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $rv $trimmo_out".PE.fw" $trimmo_out".SE.fw" $trimmo_out".PE.rv" $trimmo_out".SE.rv" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats: count FASTQ separator lines ("+") as a proxy for read count
+	echo "## Trimmo statistics ##"
+	printf "trim\t"$trimmo_out".PE.fw\t"; grep -c "^\+$" $trimmo_out".PE.fw"
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".PE."*
+    fi
+    # single-end branch
+    if [ ! -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $trimmo_out".SE" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats: wc -l / 4 is robust against "+" appearing in quality strings
+	echo "## Trimmo statistics ##"
+	# printf "trim\t"$trimmo_out".SE\t"; grep -c "^\+$" $trimmo_out".SE"
+        printf "trim\t"$trimmo_out".SE\t"; wc -l $trimmo_out".SE" | awk '{print $1/4}'
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".SE"
+    fi
+
+ }
+
+# polyA: poly-A/T tail trimming with prinseq-lite on the Trimmomatic output.
+# Trims up to 5 A/T from both ends, drops reads shorter than 30 bp; good
+# reads go to $inpA, failed reads to $infA. Runs FastQC on the passed reads.
+polyA ()
+ {
+   # min_len was first set to 50 bp. But there are libraries with read start length of 50 bp.
+   # If you specify any statistic option, no other output will be generated. To preprocess data, do not specify a statistics option.
+   # -stats_dupl -stats_all
+
+   inpA=$polyA_out".polyA.passed"
+   infA=$polyA_out".polyA.failed"
+
+   # paired-end branch: prinseq writes $inpA_1.fastq / $inpA_2.fastq
+   if [ -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".PE.fw" -fastq2 $trimmo_out".PE.rv" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA"_1.fastq\t"; wc -l $inpA"_1.fastq" | awk '{print $1/4}'
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA"_"[12]".fastq"
+   fi
+   # single-end branch: prinseq writes $inpA.fastq
+   if [ ! -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".SE" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA".fastq\t"; grep -c "^\+$" $inpA".fastq"
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA".fastq"
+   fi
+ }
+
+# duple: count 100%-identical duplicate reads in the poly-A-passed FASTQ.
+# Builds per-file duplication histograms (*_dupl_histogram) via awk and
+# prints duplicate counts plus their fraction of the raw read count.
+# Expects $number_row_reads to have been set by startle().
+duple ()
+{
+	# number of 100% identical duplicate reads.
+	echo "Duplication:"
+
+        if [ -e $rv ]; then
+
+                #SE 1: histogram of identical sequences in mate 1
+                IN=$polyA_out".polyA.passed"
+
+                num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_1_dupl_histogram"}}' $IN"_1.fastq" | wc -l`
+                printf $num_dup_se1"\t"; printf %.5f\\t "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                #SE 2: same for mate 2
+                num_dup_se2=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_2_dupl_histogram"}}' $IN"_2.fastq" | wc -l`
+                printf $num_dup_se2"\t"; printf %.5f\\t "$(( 100000 * num_dup_se2 / number_row_reads ))e-5"
+
+
+                #PE: fragment-level duplicates — concatenates mate-1 + mate-2
+                # sequence per read name, then histograms the combined string
+                num_dup_pe=`awk -v nof=$IN 'NR % 4 == 1 || NR % 4 == 2 {if(n==1){if(foo[name]==""){c=0}; foo[name]=foo[name]$1; if(c==1){seq[foo[name]]++} else {c=1}; n=0} else if($1~/^@/){split($1,w,"/"); name=w[1]; foo[name]; n=1}} END {for(a in seq){print seq[a]; p[seq[a]]++} for(l in p){print l"\t"p[l] >> nof"_1_2_dupl_histogram"}}' $IN"_1.fastq" $IN"_2.fastq" | wc -l`
+                printf $num_dup_pe"\t"; printf %.5f\\n "$(( 100000 * num_dup_pe / number_row_reads ))e-5"
+
+
+        fi
+        if [ ! -e $rv ]; then
+                # single-end: one histogram per passed FASTQ file
+                for IN in $polyA_out".polyA.passed"*;
+                do
+                  num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_dupl_histogram"}}' $IN | wc -l`
+                  printf $IN"\t"$num_dup_se1"\t"; printf %.5f\\n "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                done
+        fi
+
+
+}
+
+# mappi: splice-aware genome mapping with GSNAP (-N 1 = look for novel
+# splicing) against the V3 database; SAM output split by category
+# ($mappi_out".genome.*"), unmapped reads captured separately.
+# NOTE(review): the PE branch calls /homes/fhaas/src/gsnap while the SE
+# branch calls the /ceph/... gmap-2021-03-08 build — confirm both are
+# intended to be the same gsnap version.
+mappi()
+{
+    if [ -e $rv ]; then
+	/homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 7 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+
+	# count unmapped mate-1 reads via FASTQ "+" separator lines
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+
+    fi
+    if [ ! -e $rv ]; then
+	/ceph/agrensing/tools/gmap-gsnap/gmap-2021-03-08/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 5 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+    fi
+
+
+
+    # mapping statistics
+        echo "## Pp genome version 3 ##"
+        echo "Unmapped reads: "$unmapped_reads
+
+}
+
+organelli()
+{
+        if [ -e $rv ]; then
+                for IN in $mappi_out".genome.concordant_"*;
+                do
+                  print "Fragments:"
+                  printf $IN"\t";
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ;
+                done
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $mappi_out".genome"*"uniq"*;
+                do
+                  print "SE reads:"
+                  printf $IN"\t";
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ;
+                done
+
+        fi
+
+
+}
+
+# mappi_gDNA: same GSNAP mapping as mappi() but WITHOUT -N 1 (no novel
+# splice detection), for genomic-DNA libraries.
+mappi_gDNA()
+{
+     if [ -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 7 -A sam --npaths=5 \
+            --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+            --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+            $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+
+          # count unmapped mate-1 reads via FASTQ "+" separator lines
+          unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+     fi
+
+     if [ ! -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 5 -A sam --npaths=5 \
+           --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+           --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+           $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+         unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+     fi
+
+  # mapping statistics
+   echo "## Pp genome version 3 ##"
+   echo "Unmapped reads: "$unmapped_reads
+}
+
+
+# sample: post-process the split SAM outputs of mappi():
+#  1. sort each SAM into <file>.sort.bam,
+#  2. build a duplicate-removed BAM (<file>.filter.fixmate.sort.markdup_r.bam)
+#     with organelle/ERCC alignments filtered out beforehand,
+#  3. print per-file mapped-read counts, delete the SAM,
+#  4. index everything and merge the *uniq* BAMs into
+#     $mappi_out.uniq.merged(.rmdup).bam.
+sample ()
+{
+     # if PE use ".genome"*[tcqg]
+     for IN in $mappi_out".genome"*[tcqg]
+     do
+	  printf $IN"\tsort.bam\n" >&2
+	   /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $IN".sort.bam"
+
+	  # markdup -r drops duplicates; fixmate -m adds the mate score it needs
+	  printf $IN"\trmdup.bam\n" >&2
+	   grep -vE "chloro|ribo|mito|ERCC" $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -n -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools fixmate -@ $cpu -m - - | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools markdup -@ $cpu -s -r - \
+	   $IN".filter.fixmate.sort.markdup_r.bam"
+
+	  # samtools statistics: unique read names, header lines excluded
+	  v_tmp=`awk '$1!~"@" {print $1}' $IN | sort | uniq | wc -l`
+	  printf "Number of mapped reads: "$IN"\t"$v_tmp"\n"
+
+          # compress the sam text files
+          # gzip $IN
+	  # rm sam file (BAMs above replace it)
+          rm $IN
+
+
+	  # bam index
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN*"_r.bam"
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN".sort.bam"
+
+     done
+
+     ls -l $mappi_out*"bam" >&2
+
+     # merge all unique bam files
+     echo "merging all .uniq bam files" >&2
+     echo "merging all .uniq bam files"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.rmdup.bam" $mappi_out*"uniq"*"_r.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.rmdup.bam"
+}
+
+# counti: exon-level read counting with featureCounts against two
+# annotation versions (32458 and 31315 gene models, both with ERCC).
+# PE runs use -p (count fragments); -B additionally requires both ends
+# mapped. Counts are produced for the rmdup BAM and (31315 only) for the
+# non-deduplicated merged BAM; assignment stats are grepped from the logs.
+counti()
+{
+     if [ -e $rv ]; then
+        # read count v3.3 and ERCC combined
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.p.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.p.log"
+
+
+       # same 31315 annotation on the non-deduplicated BAM ("_all_" outputs)
+       /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+         -t exon -g Parent -o $counti_out"_all_v3.3_31315_ercc.exon.pB.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v3.3_31315_ercc.exon.pB.log"
+       /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+         -t exon -g Parent -o $counti_out"_all_v3.3_31315_ercc.exon.p.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v3.3_31315_ercc.exon.p.log"
+
+        # statistics
+        printf "\n### FeatureCount statisitcs v3.3 ###\n"
+        # 32458
+        echo "Fragment counts on gene models exon PE 32458:"
+        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE 32458:"
+        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.p.log"
+        # 31315
+        echo "Fragment counts on gene models exon PE 31315:"
+        grep "Successfully assigned fragments" $counti_out"_v3.3_31315_ercc.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE 31315:"
+        grep "Successfully assigned fragments" $counti_out"_v3.3_31315_ercc.exon.p.log"
+        # 31315 no rmdup
+        echo "Fragment counts on gene models exon PE 31315 (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v3.3_31315_ercc.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE 31315 (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v3.3_31315_ercc.exon.p.log"
+
+     fi
+     if [ ! -e $rv ]; then
+
+        # single-end: no -p/-B, reads (not fragments) are counted
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.SE.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.SE.log"
+
+        # statistics
+        printf "\n### FeatureCount statisitcs v3.3 ###"
+        # 32458
+        echo "Fragment counts on gene models exon SE 32458:"
+        grep "Successfully assigned " $counti_out"_v3.3_32458_ercc.exon.SE.log"
+        # 31315
+        echo "Fragment counts on gene models exon SE 31315:"
+        grep "Successfully assigned " $counti_out"_v3.3_31315_ercc.exon.SE.log"
+
+     fi
+}
+
+depthi()
+{
+  # Per-base coverage of the deduplicated merged BAM.
+  # -aa: output every position, including zero coverage.
+  local samtools_bin=/vol/agrensing/tools/samtools/samtools-1.12/samtools
+  "$samtools_bin" depth -aa "$mappi_out.uniq.merged.rmdup.bam" > "$mappi_out.uniq.merged.rmdup.depth"
+}
+
+
+# snpi: GATK4 variant-calling chain (haploid, -ploidy 1):
+#   HaplotypeCaller -> SelectVariants (INDELs) -> GetHighQualVcfs.py (HQ
+#   known-sites) -> BQSR (BaseRecalibrator/ApplyBQSR + covariate report)
+#   -> PrintReads -> re-call with HaplotypeCaller -> VariantFiltration,
+# run both with and without the BQSR-recalibrated BAM.
+snpi()
+{
+  gatkrun='/vol/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  # V3
+   # start the SNP detection
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -ploidy 1 -O $snpi_out".HaplotypeCaller.vcf" 2> $snpi_out".HaplotypeCaller.err"
+
+   $gatkrun SelectVariants -R $reffna --variant $snpi_out".HaplotypeCaller.vcf" -select-type INDEL -O $snpi_out".SelectVariants.vcf" 2> $snpi_out".SelectVariants.err"
+
+   # HQ SNPs: top-decile QUAL, GQ >= 90 — used as known-sites for BQSR
+   python2.7 /vol/agrensing/fhaas/GeneAtlas/SNP/GetHighQualVcfs.py -i $snpi_out".HaplotypeCaller.vcf" -o $output_dir"/05_SNP_calling/"$species"/"$prefix"/" --ploidy 1 --percentile 90 --GQ 90 2> $snpi_out".GetHighQualVcfs.HQ.err"
+
+#   mv $snpi_out".GetHighQualVcfs.HQ.vcf" $snpi_out".GetHighQualVcfs.V3.HQ.vcf"
+
+   $gatkrun IndexFeatureFile -I  $snpi_out".GetHighQualVcfs.HQ.vcf" 2> $snpi_out".GetHighQualVcfs.HQ.idx.err"
+
+   $gatkrun BaseRecalibrator -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.table" 2> $snpi_out".BaseRecalibrator.err"
+
+   $gatkrun ApplyBQSR -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" --bqsr-recal-file $snpi_out".BaseRecalibrator.table" -O $snpi_out".BaseRecalibrator.ApplyBQSR.bam" \
+	2> $snpi_out".BaseRecalibrator.ApplyBQSR.err"
+
+   # second-pass table on the recalibrated BAM, for the before/after report
+   $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+	2> $snpi_out".BaseRecalibrator.recal.err"
+
+   $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+     -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+   $gatkrun PrintReads -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -O $snpi_out".PrintReads.bam" 2> $snpi_out".PrintReads.err"
+
+   $gatkrun PrintReads -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -O $snpi_out".PrintReads.ApplyBQSR.bam" 2> $snpi_out".PrintReads.ApplyBQSR.err"
+
+   # HaplotypeCaller and VariantFiltration w/o ApplyBQSR BAM file
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.bam" -ploidy 1 -O $snpi_out".PrintReads.HaplotypeCaller.vcf" 2> $snpi_out".PrintReads.HaplotypeCaller.err"
+
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.ApplyBQSR.bam" -ploidy 1 -O $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+	2> $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.err"
+
+   #  filter flags ## NOTE: it is important to look for float and integer numbers (e.g. FS must be 60.0 not 60) ##
+   # --filter-expression "FS > 60" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0"
+   # --filter-name "LowMQ" --filter-expression "QUAL > 30.0 && QUAL < 60.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage"
+   #
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".VariantFiltration.vcf" 2> $snpi_out".VariantFiltration.err"
+
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".ApplyBQSR.VariantFiltration.vcf" 2> $snpi_out".ApplyBQSR.VariantFiltration.err"
+}
+
+# corri: recovery helper for a partially finished run — rebuilds sorted,
+# indexed BAMs from the gzipped SAMs, re-merges the uniq BAM, then redoes
+# the second-pass BQSR table and the AnalyzeCovariates report.
+# Expects snpi() outputs (ApplyBQSR BAM, HQ VCF, first recal table) to exist.
+corri()
+{
+  for IN in $mappi_out*.gz
+  do
+    gunzip $IN
+    # strip the trailing ".gz" to get the SAM filename
+    file=`echo $IN | awk '{print substr($1, 1, length($1)-3)}'`
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $file | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $file".sort.bam"
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools index $file".sort.bam"
+    gzip $file
+
+  done
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+
+
+  # NOTE(review): gatk path here is /ceph/... while snpi() uses /vol/... —
+  # confirm both point to the same 4.2.0.0 install.
+  gatkrun='/ceph/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+         2> $snpi_out".BaseRecalibrator.recal.err"
+
+  $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+    -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+
+}
+
+
+# script input variables
+
+cpu=$1
+output_dir=$2
+prefix=$3
+species=$4
+infile_path=$5
+
+# set variables
+java_ram="-Xmx50G -Xms25G"
+id=$prefix
+
+fw=$infile_path"/fw."$prefix
+rv=$infile_path"/rv."$prefix
+
+db_path_V3="/vol/agrensing/fhaas/DB/Pp_core_chloro_mito_ribo_ercc_sNone/Pp_core_chloro_mito_ribo_ercc_sNone"
+db_name_V3="Pp_core_chloro_mito_ribo_ercc_sNone"
+
+reffna="/vol/agrensing/fhaas/DB/Pp_core_chloro_mito_ribo_ercc/Pp_core_chloro_mito_ribo_ercc.fna"
+
+
+
+# outdir variables
+# output=$output_dir"/"$prefix
+trimmo_out=$output_dir"/02_trimmomatic/"$species"/"$prefix"/"$prefix
+polyA_out=$output_dir"/03_poly-A/"$species"/"$prefix"/"$prefix
+mappi_out=$output_dir"/04_genome_mapping/"$species"/"$prefix"/"$prefix
+snpi_out=$output_dir"/05_SNP_calling/"$species"/"$prefix"/"$prefix
+counti_out=$output_dir"/06_counts/"$species"/"$prefix"/"$prefix
+
+# create result folder
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix
+mkdir -p $output_dir"/04_genome_mapping/"$species"/"$prefix
+mkdir -p $output_dir"/05_SNP_calling/"$species"/"$prefix
+mkdir -p $output_dir"/06_counts/"$species"/"$prefix
+
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc"
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc"
+
+
+
+
+
+# print running machine and start date
+hostname
+date
+
+# print all variables
+printf $cpu"\n"$output_dir"\n"$prefix"\n"$species"\n"$fw"\n"
+
+if [ -e $rv ]; then
+  printf $rv"\n"
+fi
+
+printf "\'$java_ram\'""\n"$id"\n"$db_path_V3"\n"$db_name_V3"\n"
+
+echo ""
+
+echo "Phase 0 assamble data"
+# startle
+
+date
+echo "Phase 1 run trimming"
+# trimmo
+
+date
+echo "Phase 2 run polyA"
+# polyA
+# duple
+date
+echo "Phase 3 run mapping"
+# mappi
+#-# mappi_gDNA
+# organelli
+# sample
+# counti
+# depthi
+date
+echo "Phase 4 run SNP calling"
+snpi
+#-# corri
+date
+
diff --git a/workflows/Experiment1/SNP_pipe_mp.sh b/workflows/Experiment1/SNP_pipe_mp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a01f28b4046b8130a5720181f66074e81ddfd112
--- /dev/null
+++ b/workflows/Experiment1/SNP_pipe_mp.sh
@@ -0,0 +1,481 @@
+#!/bin/bash
+
+# Optional cluster module system init (disabled).
+#. /etc/profile.d/modules.sh
+
+# Make shared cluster tool installations visible on PATH/MANPATH.
+export PATH=$PATH:/vol/biotools/bin
+export MANPATH=$MANPATH:/vol/biotools/man
+export PATH=$PATH:/vol/software/bin
+
+
+# install important R package - lummerland cluster does not have it ...
+# (one-time setup, kept for reference; disabled)
+#echo "install.packages(\"gplots\", repos=\"https://cran.rstudio.com\")" | R --vanilla --no-save
+#R CMD INSTALL /vol/agrensing/tools/R_packages/bitops_1.0-6.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/caTools_1.17.1.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gtools_3.8.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gdata_2.18.0.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gplots_3.0.1.tar.gz
+
+#functions
+
+startle ()
+{
+	number_row_reads=`wc -l $fw | awk '{print $1/4}'`
+	echo "## Start log ##"
+	echo "Number of row reads: "$number_row_reads
+}
+
+# Adapter/quality trimming with Trimmomatic 0.39.
+# PE mode when the reverse file $rv exists, otherwise SE mode.
+# Writes trimmed FASTQ plus a .trimmo.stats summary, then runs FastQC.
+trimmo ()
+ {
+   # Remove leading low quality or N bases (below quality 3) (LEADING:3)
+   # Remove trailing low quality or N bases (below quality 3) (TRAILING:3)
+   # Scan the read with a 4-base wide sliding window, cutting when the average quality per base drops below 15 (SLIDINGWINDOW:4:15)
+   # HEADCROP: Cut the specified Number of bases from the start of the read
+   # MINLEN was first set to 50 bp. But there are libraries with read start length of 50 bp.
+
+   # PE branch: produces paired (.PE.*) and orphaned single (.SE.*) outputs.
+   if [ -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar PE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $rv $trimmo_out".PE.fw" $trimmo_out".SE.fw" $trimmo_out".PE.rv" $trimmo_out".SE.rv" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats: count FASTQ records by counting "+" separator lines
+	echo "## Trimmo statistics ##"
+	printf "trim\t"$trimmo_out".PE.fw\t"; grep -c "^\+$" $trimmo_out".PE.fw"
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".PE."*
+    fi
+    # SE branch: single trimmed output file.
+    if [ ! -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $trimmo_out".SE" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats
+	echo "## Trimmo statistics ##"
+	# printf "trim\t"$trimmo_out".SE\t"; grep -c "^\+$" $trimmo_out".SE"
+        printf "trim\t"$trimmo_out".SE\t"; wc -l $trimmo_out".SE" | awk '{print $1/4}'
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".SE"
+    fi
+
+ }
+
+# Poly-A/T tail trimming with prinseq-lite on the Trimmomatic output.
+# Passing reads go to *.polyA.passed*, failing reads to *.polyA.failed*;
+# FastQC is run on the passed reads.
+polyA ()
+ {
+   # min_len was first set to 50 bp. But there are libraries with read start length of 50 bp.
+   # If you specify any statistic option, no other output will be generated. To preprocess data, do not specify a statistics option.
+   # -stats_dupl -stats_all
+
+   inpA=$polyA_out".polyA.passed"
+   infA=$polyA_out".polyA.failed"
+
+   # PE branch: prinseq writes paired outputs with _1/_2 suffixes.
+   if [ -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".PE.fw" -fastq2 $trimmo_out".PE.rv" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA"_1.fastq\t"; wc -l $inpA"_1.fastq" | awk '{print $1/4}'
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA"_"[12]".fastq"
+   fi
+   # SE branch: single output file.
+   if [ ! -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".SE" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA".fastq\t"; grep -c "^\+$" $inpA".fastq"
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA".fastq"
+   fi
+ }
+
+# Estimate read duplication on the polyA-passed FASTQ files.
+# Counts 100% identical sequences (awk hash on the sequence line, NR%4==2),
+# writes per-count histograms next to the inputs, and prints the number of
+# distinct sequences plus its fraction of number_row_reads.
+# NOTE(review): relies on the global number_row_reads set by startle() —
+# division fails if startle was not run first; confirm call order.
+duple ()
+{
+	# number of 100% identical duplicate reads.
+	echo "Duplication:"
+
+        if [ -e $rv ]; then
+
+                #SE 1
+                IN=$polyA_out".polyA.passed"
+
+                num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_1_dupl_histogram"}}' $IN"_1.fastq" | wc -l`
+                printf $num_dup_se1"\t"; printf %.5f\\t "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                #SE 2
+                num_dup_se2=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_2_dupl_histogram"}}' $IN"_2.fastq" | wc -l`
+                printf $num_dup_se2"\t"; printf %.5f\\t "$(( 100000 * num_dup_se2 / number_row_reads ))e-5"
+
+
+                #PE: pair fragments by read name (strip /1 /2), concatenate both
+                # mate sequences, and count identical fragment pairs.
+                num_dup_pe=`awk -v nof=$IN 'NR % 4 == 1 || NR % 4 == 2 {if(n==1){if(foo[name]==""){c=0}; foo[name]=foo[name]$1; if(c==1){seq[foo[name]]++} else {c=1}; n=0} else if($1~/^@/){split($1,w,"/"); name=w[1]; foo[name]; n=1}} END {for(a in seq){print seq[a]; p[seq[a]]++} for(l in p){print l"\t"p[l] >> nof"_1_2_dupl_histogram"}}' $IN"_1.fastq" $IN"_2.fastq" | wc -l`
+                printf $num_dup_pe"\t"; printf %.5f\\n "$(( 100000 * num_dup_pe / number_row_reads ))e-5"
+
+
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $polyA_out".polyA.passed"*;
+                do
+                  num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_dupl_histogram"}}' $IN | wc -l`
+                  printf $IN"\t"$num_dup_se1"\t"; printf %.5f\\n "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                done
+        fi
+
+
+}
+
+# Genome mapping with GSNAP (-N 1: look for novel splicing; SAM output,
+# split by category via --split-output; failed reads to *.unmapped).
+# NOTE(review): the PE branch calls /homes/fhaas/src/gsnap while the SE
+# branch calls /ceph/agrensing/tools/.../gsnap — confirm both resolve to the
+# same GSNAP version on this cluster.
+mappi()
+{
+    if [ -e $rv ]; then
+	/homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 7 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+
+	# PE: unmapped pairs are written as .unmapped.1/.unmapped.2
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+
+    fi
+    if [ ! -e $rv ]; then
+	/ceph/agrensing/tools/gmap-gsnap/gmap-2021-03-08/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 5 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+    fi
+
+
+
+    # mapping statistics
+        echo "## Pp genome version 3 ##"
+        echo "Unmapped reads: "$unmapped_reads
+
+}
+
+organelli()
+{
+        if [ -e $rv ]; then
+                for IN in $mappi_out".genome.concordant_"*;
+                do
+                  print "Fragments:"
+                  printf $IN"\t";
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ;
+                done
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $mappi_out".genome"*"uniq"*;
+                do
+                  print "SE reads:"
+                  printf $IN"\t";
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`;
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ;
+                done
+
+        fi
+
+
+}
+
+# Alternative mapping for genomic DNA reads: same as mappi() but WITHOUT
+# -N 1 (no novel splice detection). Currently disabled in the phase driver
+# ("#-# mappi_gDNA"); kept for gDNA libraries.
+mappi_gDNA()
+{
+     if [ -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 7 -A sam --npaths=5 \
+            --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+            --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+            $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+
+          unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+     fi
+
+     if [ ! -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 5 -A sam --npaths=5 \
+           --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+           --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+           $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+         unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+     fi
+
+  # mapping statistics
+   echo "## Pp genome version 3 ##"
+   echo "Unmapped reads: "$unmapped_reads
+}
+
+
+# Post-process every GSNAP split-output SAM file (glob *[tcqg] matches the
+# category suffixes): sort to BAM, and build a filtered (no organelle/ERCC),
+# fixmate+markdup-deduplicated BAM; then merge the *uniq* BAMs.
+# Side effect: the source SAM files are DELETED after conversion.
+sample ()
+{
+     # if PE use ".genome"*[tcqg]
+     for IN in $mappi_out".genome"*[tcqg]
+     do
+	  printf $IN"\tsort.bam\n" >&2
+	   /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $IN".sort.bam"
+
+	  printf $IN"\trmdup.bam\n" >&2
+	   # drop organelle/spike-in alignments, then name-sort -> fixmate -m
+	   # -> coordinate-sort -> markdup -r (remove duplicates)
+	   grep -vE "chloro|ribo|mito|ERCC" $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -n -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools fixmate -@ $cpu -m - - | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools markdup -@ $cpu -s -r - \
+	   $IN".filter.fixmate.sort.markdup_r.bam"
+
+	  # samtools statistics: distinct read names in the SAM
+	  v_tmp=`awk '$1!~"@" {print $1}' $IN | sort | uniq | wc -l`
+	  printf "Number of mapped reads: "$IN"\t"$v_tmp"\n"
+
+          # compress the sam text files
+          # gzip $IN
+	  # rm sam file
+          rm $IN
+
+
+	  # bam index
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN*"_r.bam"
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN".sort.bam"
+
+     done
+
+     ls -l $mappi_out*"bam" >&2
+
+     # merge all unique bam files
+     echo "merging all .uniq bam files" >&2
+     echo "merging all .uniq bam files"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.rmdup.bam" $mappi_out*"uniq"*"_r.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.rmdup.bam"
+}
+
+# Count fragments per gene model with featureCounts against the Marchantia
+# MpTak v6.1 annotation, on both the deduplicated and the raw merged BAMs.
+# -p: paired; -B: require both ends mapped; -C: exclude chimeric fragments.
+# SE counting is not implemented (older Physcomitrella code kept commented).
+counti()
+{
+     if [ -e $rv ]; then
+        # read count v3.3 and ERCC combined
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.pB.log"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.p.log"
+        # deduplicated BAM, with (-B) and without both-ends requirement
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_v6.1.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v6.1.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_v6.1.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v6.1.exon.p.log"
+
+
+        # same counts on the non-deduplicated merged BAM ("_all_" outputs)
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_all_v6.1.exon.pB.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v6.1.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_all_v6.1.exon.p.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v6.1.exon.p.log"
+
+
+
+        # statistics
+        printf "\n### FeatureCount statisitcs v6.1 ###\n"
+        # 32458
+#        echo "Fragment counts on gene models exon PE 32458:"
+#        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.pB.log"
+#        echo "Fragment counts on gene models exon PE+SE 32458:"
+#        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.p.log"
+        # results
+        echo "Fragment counts on gene models exon PE:"
+        grep "Successfully assigned fragments" $counti_out"_v6.1.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE:"
+        grep "Successfully assigned fragments" $counti_out"_v6.1.exon.p.log"
+        # results no rmdup
+        echo "Fragment counts on gene models exon PE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v6.1.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v6.1.exon.p.log"
+
+     fi
+     if [ ! -e $rv ]; then
+        echo "This part is not working yet"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.SE.log"
+#
+#        # statistics
+#        printf "\n### FeatureCount statisitcs v3.3 ###"
+#        # 32458
+#        echo "Fragment counts on gene models exon SE 32458:"
+#        grep "Successfully assigned " $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#        # 31315
+#        echo "Fragment counts on gene models exon SE 31315:"
+#        grep "Successfully assigned " $counti_out"_v3.3_31315_ercc.exon.SE.log"
+
+     fi
+}
+
+# Write per-base coverage for the merged, duplicate-removed unique BAM to a
+# .depth text file. -aa reports absolutely all positions, including
+# zero-coverage and unmapped-contig positions.
+depthi()
+{
+  local samtools="/vol/agrensing/tools/samtools/samtools-1.12/samtools"
+  $samtools depth -aa $mappi_out".uniq.merged.rmdup.bam" > $mappi_out".uniq.merged.rmdup.depth"
+}
+
+
+# SNP calling with GATK 4.2 on the deduplicated merged BAM (haploid, -ploidy 1):
+# HaplotypeCaller -> high-quality SNP selection -> BQSR (two rounds +
+# covariate report) -> re-call on recalibrated reads -> hard filtering.
+snpi()
+{
+  gatkrun='/vol/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  # V3
+   # start the SNP detection
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -ploidy 1 -O $snpi_out".HaplotypeCaller.vcf" 2> $snpi_out".HaplotypeCaller.err"
+
+   $gatkrun SelectVariants -R $reffna --variant $snpi_out".HaplotypeCaller.vcf" -select-type INDEL -O $snpi_out".SelectVariants.vcf" 2> $snpi_out".SelectVariants.err"
+
+   # HQ SNPs (external helper script; requires python2.7 on PATH)
+   python2.7 /vol/agrensing/fhaas/GeneAtlas/SNP/GetHighQualVcfs.py -i $snpi_out".HaplotypeCaller.vcf" -o $output_dir"/05_SNP_calling/"$species"/"$prefix"/" --ploidy 1 --percentile 90 --GQ 90 2> $snpi_out".GetHighQualVcfs.HQ.err"
+
+#   mv $snpi_out".GetHighQualVcfs.HQ.vcf" $snpi_out".GetHighQualVcfs.V3.HQ.vcf"
+
+   $gatkrun IndexFeatureFile -I  $snpi_out".GetHighQualVcfs.HQ.vcf" 2> $snpi_out".GetHighQualVcfs.HQ.idx.err"
+
+   # BQSR round 1: model from HQ SNPs, then apply to the BAM
+   $gatkrun BaseRecalibrator -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.table" 2> $snpi_out".BaseRecalibrator.err"
+
+   $gatkrun ApplyBQSR -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" --bqsr-recal-file $snpi_out".BaseRecalibrator.table" -O $snpi_out".BaseRecalibrator.ApplyBQSR.bam" \
+	2> $snpi_out".BaseRecalibrator.ApplyBQSR.err"
+
+   # BQSR round 2 on the recalibrated BAM, for the before/after report
+   $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+	2> $snpi_out".BaseRecalibrator.recal.err"
+
+   $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+     -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+   $gatkrun PrintReads -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -O $snpi_out".PrintReads.bam" 2> $snpi_out".PrintReads.err"
+
+   $gatkrun PrintReads -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -O $snpi_out".PrintReads.ApplyBQSR.bam" 2> $snpi_out".PrintReads.ApplyBQSR.err"
+
+   # HaplotypeCaller and VariantFiltration w/o ApplyBQSR BAM file
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.bam" -ploidy 1 -O $snpi_out".PrintReads.HaplotypeCaller.vcf" 2> $snpi_out".PrintReads.HaplotypeCaller.err"
+
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.ApplyBQSR.bam" -ploidy 1 -O $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+	2> $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.err"
+
+   #  filter flags ## NOTE: it is important to look for float and integer numbers (e.g. FS must be 60.0 not 60) ##
+   # --filter-expression "FS > 60" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0"
+   # --filter-name "LowMQ" --filter-expression "QUAL > 30.0 && QUAL < 60.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage"
+   #
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".VariantFiltration.vcf" 2> $snpi_out".VariantFiltration.err"
+
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".ApplyBQSR.VariantFiltration.vcf" 2> $snpi_out".ApplyBQSR.VariantFiltration.err"
+}
+
+# Repair/re-run helper (disabled in the driver): rebuild sorted+indexed BAMs
+# from gzipped mapping output, re-merge the uniq BAMs, then redo the second
+# BQSR round and the AnalyzeCovariates report.
+# NOTE(review): uses the /ceph gatk path while snpi() uses /vol — confirm
+# both installations are the same GATK 4.2.0.0.
+corri()
+{
+  for IN in $mappi_out*.gz
+  do
+    gunzip $IN
+    # strip the trailing ".gz" to get the uncompressed file name
+    file=`echo $IN | awk '{print substr($1, 1, length($1)-3)}'`
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $file | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $file".sort.bam"
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools index $file".sort.bam"
+    gzip $file
+
+  done
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+
+
+  gatkrun='/ceph/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+         2> $snpi_out".BaseRecalibrator.recal.err"
+
+  $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+    -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+
+}
+
+
+# script input variables
+# Usage: SNP_pipe_mp.sh <cpu> <output_dir> <prefix> <species> <infile_path>
+
+cpu=$1
+output_dir=$2
+prefix=$3
+species=$4
+infile_path=$5
+
+# set variables
+java_ram="-Xmx50G -Xms25G"
+id=$prefix
+
+# expected input FASTQ files (reverse file optional -> SE mode)
+fw=$infile_path"/fw."$prefix
+rv=$infile_path"/rv."$prefix
+
+# GSNAP database: Marchantia v6.1 main genome + organelle/rRNA references
+db_path_V3="/ceph/agrensing/fhaas/DB/Marchantia_polymorpha/Marchantia_v6.1_maingenome_mito_ribo_chloro/Marchantia_v6.1_maingenome_mito_ribo_chloro"
+db_name_V3="Marchantia_v6.1_maingenome_mito_ribo_chloro"
+
+# reference FASTA for the GATK steps
+reffna="/ceph/agrensing/fhaas/DB/Marchantia_polymorpha/Marchantia_v6.1.fna"
+
+
+
+# outdir variables
+# output=$output_dir"/"$prefix
+trimmo_out=$output_dir"/02_trimmomatic/"$species"/"$prefix"/"$prefix
+polyA_out=$output_dir"/03_poly-A/"$species"/"$prefix"/"$prefix
+mappi_out=$output_dir"/04_genome_mapping/"$species"/"$prefix"/"$prefix
+snpi_out=$output_dir"/05_SNP_calling/"$species"/"$prefix"/"$prefix
+counti_out=$output_dir"/06_counts/"$species"/"$prefix"/"$prefix
+
+# create result folder
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix
+mkdir -p $output_dir"/04_genome_mapping/"$species"/"$prefix
+mkdir -p $output_dir"/05_SNP_calling/"$species"/"$prefix
+mkdir -p $output_dir"/06_counts/"$species"/"$prefix
+
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc"
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc"
+
+
+
+
+
+# print running machine and start date
+hostname
+date
+
+# print all variables
+# NOTE(review): variables are used as the printf FORMAT string; a "%" in any
+# argument would be interpreted as a format specifier.
+printf $cpu"\n"$output_dir"\n"$prefix"\n"$species"\n"$fw"\n"
+
+if [ -e $rv ]; then
+  printf $rv"\n"
+fi
+
+printf "\'$java_ram\'""\n"$id"\n"$db_path_V3"\n"$db_name_V3"\n"
+
+echo ""
+
+# Phase driver: run the full pipeline in order; mappi_gDNA and corri are
+# deliberately disabled ("#-#").
+echo "Phase 0 assamble data"
+startle
+
+date
+echo "Phase 1 run trimming"
+trimmo
+
+date
+echo "Phase 2 run polyA"
+polyA
+duple
+date
+echo "Phase 3 run mapping"
+mappi
+#-# mappi_gDNA
+organelli
+sample
+counti
+depthi
+date
+echo "Phase 4 run SNP calling"
+snpi
+#-# corri
+date
diff --git a/workflows/Experiment1/new_deg_pipe_four_methods_noiseqbio_schippers_20211021.r b/workflows/Experiment1/new_deg_pipe_four_methods_noiseqbio_schippers_20211021.r
new file mode 100644
index 0000000000000000000000000000000000000000..1703784dcdee3f5874815a8bad5639aa56939f60
--- /dev/null
+++ b/workflows/Experiment1/new_deg_pipe_four_methods_noiseqbio_schippers_20211021.r
@@ -0,0 +1,923 @@
+###############################################################
+## DEG pipeline with edgeR, DESeq2 and NOISeq                 #
+## Modified from the template of Dr. Kristian Ullrich 2014    #
+## @fabianhaas, AG Rensing, Marburg, Jan 2016                 #
+###############################################################
+# The pipeline expects sample triplicates.
+#   If you don't have three samples of one condition, use fake files with zero counts.
+# RPKM calculation is optional.
+# Additional program to sum up the tsv files: /mnt/nas_temp/home/haasf/Program/sumup_htseq_counts.pl folder file_prefix
+
+###########################################
+## please define your variables           #
+###########################################
+
+# Output directory for all DEG results (one subfolder per comparison).
+saveat <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/Mp_all_alternative_DEGs_0.9"
+
+# Raw count matrix (genes x libraries, tab-separated, gene IDs in column 1).
+file <- '/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/Mp_all.p.sort.wotRNA.counts'
+# file <- '/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/31315_all.p.sort.counts'
+
+# Table with the length and the GC content
+LenGCTable <- "/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/MpTak_v6.1r1.mrna.fasta.GC.length"
+# LenGCTable <- "/mnt/NAS_coruscant_datashare/haasf/GeneAtlas_RNASeq/DEG_2nd_round/cosmoss_V3.3.release_2016-07-22.IsoformV31.fasta.length_GC_Contig_Daten_for_34840_1nd.csv"
+
+# Column position of the gene name and the width/gc content (first column is gene name!)
+myLenPos <- 1
+myGCPos <- 2
+
+# Generator for all pairwise comparisons (disabled; pairs listed explicitly below).
+#
+# hoo <- c("","")
+# sampleLists <- c()
+# for(a in hoo) {
+#   for(b in hoo) {
+#     if(a!=b) {
+#       tmp <- c(a,b)
+#       sampleLists <- c(sampleLists, list(tmp))
+#     }
+#   }
+# }
+
+# Pairwise condition comparisons to run (each pair -> one workflow() call).
+sampleLists <- list(c("control_6h", "control_24h"),
+                    c("control_6h", "LCS_6h"),
+                    c("control_6h", "LCS_24h"),
+                    c("control_24h", "LCS_6h"),
+                    c("control_24h", "LCS_24h"),
+                    c("LCS_6h", "LCS_24h")
+)
+
+# Do you want a RPKM value table (1/0)
+# Be warned, it takes a while to do this. approx. 10 minutes for 35000 genes and 6 samples.
+rpkm <- 0
+# Are the data original HTSeq files? yes <- 1 ; no <- 0
+htseq <- 0
+
+# threshold settings for the R packages
+#edgeR
+pvtedgeR <- 0.001
+#DESeq2
+pvtDESeq2 <- 0.001
+#NOISeq (probability threshold, not a p-value)
+pvtNOISeq <- 0.9
+
+# Output folder names
+edgeRDir <- "edgeR"
+DESeq2Dir <- "DESeq2"
+NOISeqDir <- "NOISeq"
+
+#########################
+## stop editing here!! ##
+#########################
+
+
+###########################################
+## executive function                     #
+###########################################
+
+# Run one pairwise DEG comparison end to end: rename libraries, subset the
+# count matrix to the two conditions, filter low-count genes, run edgeR,
+# DESeq2 and NOISeq, and write the method-intersection tables. All console
+# output is redirected to a per-comparison log file via sink().
+#
+# file        path to the raw count matrix (genes x libraries, tab-separated)
+# saveat      output root; a subfolder per comparison is created inside it
+# SampleNames character vector of the two condition names to compare
+# (remaining arguments are forwarded thresholds/paths; see config section)
+workflow <- function(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
+{
+    # setwd(workingFolder) # set workspace
+
+            sampleNameI <- SampleNames[1]
+            sampleNameII <- SampleNames[2]
+
+            # Comparison name
+            cName <- paste(sampleNameI, sampleNameII, sep = "_vs_")
+
+            filename <- strsplit(file, "/")
+            filename <- filename[[1]][length(filename[[1]])] # long but it's working
+            sampleName <- gsub(", ","_",toString(SampleNames))
+            samples <- SampleNames
+            # create output dir
+            dir.create(saveat, showWarnings = FALSE)
+            outdir <- paste0(saveat,"/",sampleName)
+            dir.create(outdir)
+
+            # Start the log file
+            sink(paste0(outdir, "/DEGpipe_",cName, ".log"))
+
+            # print parameter into the log file
+            print("set parameters:")
+            print(paste0("Counts file:", file))
+            print(paste0("Results stored at:", saveat))
+            print(paste0("SampleNames: ",SampleNames))
+            print(paste0("LenGCTable: ",LenGCTable))
+            print(paste0("myLenPos: ",myLenPos))
+            print(paste0("myGCPos: ",myGCPos))
+            print(paste0("rpkm: ",rpkm))
+            print(paste0("htseq: ",htseq))
+            print(paste0("pvtedgeR: ",pvtedgeR))
+            print(paste0("pvtDESeq2: ",pvtDESeq2))
+            print(paste0("pvtNOISeq: ",pvtNOISeq))
+            print(paste0("edgeRDir: ",edgeRDir))
+            print(paste0("DESeq2Dir: ",DESeq2Dir))
+            print(paste0("NOISeqDir: ",NOISeqDir))
+            # session and package infos for the log file
+            print(sessionInfo())
+            print(Sys.info())
+
+            # load spike-in count tables
+            data0 <- read.table(file, header=T, sep="\t", row.names=1)
+            # just if the gene ids do not end with .1 (necessary for the NOISeqbio iteration step)
+            #row.names(data0) <- paste0(row.names(data0), ".1")
+
+            # remove HTSeq rows like "__no_feature"
+            data0 <- data0[ with(data0,  !grepl('__', rownames(data0))) , ]
+            data0 <- data0[ with(data0,  !grepl('ERCC', rownames(data0))) , ]
+            #data0 <- data0[ , with(data0,  !grepl('X.1', colnames(data0)))]
+            # rename col names (library name to condition name)
+
+            librariesName <- c("MF82", "MF81", "MF80", "MF76", "MF75", "MF74", "MF68", "MF67", "MF66", "MF62", "MF61", "MF60")
+#            librariesName <- c("P97", "P96", "P95", "P91", "P90", "P89", "P111", "P110", "P109", "P105", "P104", "P103")
+            romanNumber <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "control_6h.2", "LCS_6h.1", "LCS_6h", "LCS_6h.2", "control_6h.1", "control_6h")
+#            romanNumber <- c("LCS_6h.2", "LCS_6h.1", "LCS_6h", "control_6h.2", "control_6h.1", "control_6h", "LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h")
+
+            for(i in 1:length(librariesName)) {
+                colnames(data0)[colnames(data0)==grep(librariesName[i], names(data0), value = TRUE)] <- romanNumber[i]
+            }
+
+            # creates a list of the column names/samples you would like to compare
+            SampleNames <- makeSampleList(sampleNameI, length(grep(paste0("^",sampleNameI,"[^A-Z]"), colnames(data0))) + 1,
+                                          sampleNameII, length(grep(paste0("^",sampleNameII,"[^A-Z]"), colnames(data0))) + 1)
+            print(SampleNames)
+
+            ## consider as a replacement for the renaming loop above ##
+            ## newenw <- table.new[, grepl(paste(LIST,collapse="|"), colnames(table.new))]
+
+
+            # extract the samples from the data0 if needed
+            if(length(SampleNames) > 0) {
+                RTableInFiles <- data0[,SampleNames]
+                data <- data0[,SampleNames]
+            }
+            print(head(RTableInFiles))
+
+            # sort by column name
+            # data <- data[ , order(names(data))] # dangerous, the order of query and reference might not be the same
+            # see how many rows and columns are in the tables
+            print(paste("Raw data row number: ",nrow(data)))
+            print(paste("Raw data column number: ",ncol(data)))
+
+            # 2) A typical differential expression analysis workflow
+            # 2.1) Filtering and exploratory data analysis
+            # filter: One single gene/spike-in must have at least 5 counts in minimum 2 libraries
+            filter <- apply(data, 1, function(x) length(x[x>5])>=2)
+            dataFiltered <- data[filter,]
+            # see how many rows and columns are left
+            print(paste("Filtered data row number: ",nrow(dataFiltered)))
+            # bugfix: this line reports ncol() but previously said "row number"
+            print(paste("Filtered data column number: ",ncol(dataFiltered)))
+            # filter gene names
+            # NOTE(review): "^Pp3" is the Physcomitrella gene-ID prefix, but this
+            # run uses Marchantia (Mp*) counts — confirm whether this grep should
+            # match the Mp prefix instead.
+            genes <- rownames(dataFiltered)[grep("^Pp3", rownames(dataFiltered))]
+            # filter spike-in names
+            spikes <- rownames(dataFiltered)[grep("^ERCC", rownames(dataFiltered))]
+            # get the column name list as.factors
+            xtmp <- colnames(dataFiltered)
+            x <- split2list(xtmp)
+
+            sampleNameList <- gsub(".[1234567890]+$", "", SampleNames)
+            columnPos <- makeIndexList(SampleNames)
+
+            # run ...
+            ### edgeR ###
+            print(paste0("edgeR is running with ", cName))
+            packageDescription("edgeR")
+            edgeRFunction(outdir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)
+
+            #### DESeq2 ####
+            print(paste0("DESeq2 is running with ", cName))
+            packageDescription("DESeq2")
+            DESeq2Function(outdir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)
+
+            ### NOISeq ###
+            print(paste0("NOISeq is running with ", cName))
+            packageDescription("NOISeq")
+            NOISeqFunction(outdir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)
+
+            ### Overlap of all three methods ###
+            # intersection with DEG direction check => 1, without => 0
+            intersection_4.file <- intersection(outdir,1,4)
+            write.table(intersection_4.file, file = paste0(outdir, "/", cName,"_intersection_four_methods_",nrow(intersection_4.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+            # edgeR, DESeq2, NOISeq
+            intersection_3.file <- intersection(outdir,1,3)
+            write.table(intersection_3.file, file = paste0(outdir, "/", cName,"_intersection_three_methods_",nrow(intersection_3.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+            sink() # End of log file
+
+}
+
+
+
+
+
+####################
+## main functions  #
+####################
+
+# plotGraphics: DESeq2-normalize the count table, then draw a PCA of all
+# samples and a heatmap of the most variable genes.
+#
+#   data          raw count table (genes x samples); values must be integers
+#                 for DESeqDataSetFromMatrix()
+#   librariesName sample identifiers, used as rownames of the colData frame
+#   cName         comparison label (unused here; kept so the main functions
+#                 share a uniform signature)
+#   outdir        output path prefix for the two PNG files
+#   romanNumber   per-sample labels whose trailing ".1"/".2" replicate suffix
+#                 is stripped to obtain each column's condition
+#
+# Side effects: writes "PCA_all_samples_DEseq2_normalized.png" and
+# "HC_heatmap_all_samples_DESeq2_normalized.png" under outdir.
+plotGraphics <- function(data, librariesName, cName, outdir, romanNumber)
+{
+    library("DESeq2")
+    library("ggplot2")
+    library("RColorBrewer")
+    library("pheatmap")
+    library("genefilter")
+
+    # create sample annotation lists
+    # BUGFIX: the previous pattern ".[12]" had an unescaped dot and no anchor,
+    # so it also removed digit pairs inside names (e.g. "control_24h" became
+    # "control4h"). Only a literal ".1"/".2" suffix at the end is stripped now.
+    sample.name.list <- gsub("\\.[12]$", "", romanNumber)
+    sample.name.uniq <- unique(unlist(sample.name.list, use.names = FALSE))
+
+    col.data.condition = data.frame(row.names = librariesName, condition = sample.name.list)
+
+    # run DESeq2 functions
+    dds.matrix <- DESeqDataSetFromMatrix(countData = data, colData = col.data.condition, design = ~condition) # data must be integer (whole numbers)!
+    colData(dds.matrix)$condition <- factor(colData(dds.matrix)$condition, levels = sample.name.uniq)
+    dds.matrix.deseq <- DESeq(dds.matrix)
+    dds.matrix.res <- results(dds.matrix.deseq)
+    dds.matrix.res <- dds.matrix.res[order(dds.matrix.res$padj), ]
+    # genes with a defined adjusted p-value below 1e-4; computed for
+    # inspection, not used by the plots below
+    dds.matrix.res.fdr <- dds.matrix.res[!is.na(dds.matrix.res$padj), ]
+    dds.matrix.res.fdr <- dds.matrix.res.fdr[dds.matrix.res.fdr$padj < 0.0001, ]
+    # regularized-log transform for PCA / heatmap
+    dds.matrix.rld <- rlogTransformation(dds.matrix, blind = TRUE)
+
+    # data for PCA plot
+    pcaData <- plotPCA(dds.matrix.rld, intgroup = "condition", returnData=TRUE)
+    percentVar <- round(100 * attr(pcaData, "percentVar"))
+    # color for samples (one entry per condition, recycled by position)
+    sample.colors <- c("gray60","#D73027","#4575B4", "purple", "orange","#113FF7","green", "black")
+
+    # plot the PCA
+    # https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html
+    png(filename=paste0(outdir,"PCA_all_samples_DEseq2_normalized.png"))
+      print(
+          ggplot(
+              pcaData,
+              aes(
+                  PC1,
+                  PC2,
+                  color=condition,
+                  shape=condition)
+              ) +
+              scale_shape_manual(values = c(4, 1, 2, 3, 0, 5, 6, 7)) +
+              geom_point(size=2.5) +
+              xlab(
+                  paste0("PC1: ",percentVar[1],"% variance")
+                  ) +
+              ylab(
+                  paste0("PC2: ",percentVar[2],"% variance")
+                  ) +
+              theme() + #, face="bold"
+              scale_colour_manual(
+                  values=sample.colors  # dodgerblue3
+                  ) +
+              ggtitle("PCA of all samples (DESeq2 normalized)") +
+              theme(
+                  plot.title = element_text(lineheight=.8),
+                  panel.background = element_rect(fill = "gray95")
+              )
+      ) # BUGFIX: inside a function a bare ggplot object is not auto-printed,
+        # which left the PNG empty; print() forces the render onto the device.
+    dev.off()
+
+    # how many genes will be involved in the heatmap calculation
+    numbGenes <- 100
+    # pheatmap data: the numbGenes genes with the highest rlog variance,
+    # centered per gene so colors show deviation from the gene mean
+    topVarGenes <- head(order(rowVars(assay(dds.matrix.rld)),decreasing=TRUE),numbGenes)
+    mat <- assay(dds.matrix.rld)[ topVarGenes, ]
+    mat <- mat - rowMeans(mat)
+    # pheatmap data annotation
+    anno <-as.data.frame(colData(dds.matrix.rld)[,c("condition")])
+    rownames(anno) <- colnames(data) # check out the row names of annotation
+    colnames(anno) <- "condition"
+    # pheatmap data annotation color
+    condition <- sample.colors
+    names(condition) <- sample.name.uniq
+    anno_colors <- list(condition = condition)
+
+    #heatmap(mat)
+    #pheatmap(mat)
+    # ftp://cran.r-project.org/pub/R/web/packages/pheatmap/pheatmap.pdf
+    png(filename=paste0(outdir,"HC_heatmap_all_samples_DESeq2_normalized.png"))
+      pheatmap(mat ,
+               annotation = anno,
+               annotation_colors = anno_colors,
+               fontsize = 10,
+               fontsize_row = 6,
+               fontsize_col = 10,
+               main = paste0("Heatmap of all samples (DESeq2 normalized) ", numbGenes," genes\n"),
+               annotation_names_col = F,
+               show_rownames = F,
+               cluster_rows = F,
+               border_color = F,
+               treeheight_col = 20
+      )
+    dev.off()
+}
+
+
+# edgeRFunction: classic ("exact test") plus GLM edgeR run for one pairwise
+# comparison.
+#   odir/edgeRDir    output root and subfolder; plots go to <outdir>/plots
+#   cName            comparison label used in file and plot names
+#   RTableInFiles    raw count table (genes x samples)
+#   pvtedgeR         BH-adjusted p-value cutoff for the DEG table
+#   sampleNameI/II   the two group labels compared by exactTest()
+#   sampleNameList   per-column group assignment
+# Side effects: writes "<cName>_<n>_edgeR_fdr.tsv" plus MDS/BCV/Smear PNGs.
+# NOTE(review): the png filenames contain spaces and no ".png" extension —
+# the files are still valid PNGs, kept as-is.
+edgeRFunction <- function(odir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)
+{
+    library(edgeR)
+
+    outdir <- paste0(odir, "/",edgeRDir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE) # create output folder
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+    # comparison is currently unused beyond this alias
+    comparison <- cName
+    counts <- RTableInFiles
+
+    pvt <- pvtedgeR
+
+    group <- factor(sampleNameList)
+
+    # edgeR 'normal'
+
+    y.normal <- DGEList(counts = counts, group = group)
+
+    # NOTE(review): value is discarded (no auto-printing inside a function);
+    # dead statement kept as-is in this documentation-only pass
+    y.normal[,1]
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotMDS")))
+        plotMDS(y.normal, method = "bcv", main = paste0(cName, "\nedgeR - plotMDS"))
+    dev.off()
+
+    # TMM normalization followed by common and tagwise dispersion estimates
+    y.normal <- calcNormFactors(y.normal)
+    y.normal <- estimateCommonDisp(y.normal)
+    y.normal <- estimateTagwiseDisp(y.normal)
+
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotBCV")))
+        plotBCV(y.normal, main = paste0(cName, "\nedgeR - plotBCV"))
+    dev.off()
+
+    et.normal <- exactTest(y.normal, pair = c(sampleNameI, sampleNameII))
+
+    # NOTE(review): result discarded inside a function; kept as-is
+    topTags(et.normal)
+    # adjust.method   ==>  character string specifying p-value adjustment method. Possible values are "none", "BH", "fdr" (equivalent to "BH"), "BY" and "holm". See p.adjust for details.
+    summary(de.normal <- decideTestsDGE(et.normal, p = pvt, adjust = "BH"))
+    # column 3 of the exact-test table is the raw p-value; column 4 (appended
+    # here) becomes the BH-adjusted value used for the cutoff
+    et.normal.fdr <- cbind(et.normal$table, p.adjust(et.normal$table[, 3], method = "BH"))
+    et.normal.fdr <- et.normal.fdr[et.normal.fdr[, 4] < pvt, ]
+
+    write.table(et.normal.fdr, file = paste0(outdir, cName, "_", nrow(et.normal.fdr), "_edgeR_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # highlight the significant tags in the smear plot
+    detags.normal <- rownames(de.normal)[as.logical(de.normal)]
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotSmear")))
+    plotSmear(et.normal, de.normal, tags = detags.normal, main = paste0(cName, "\nedgeR - plotSmear"))
+    abline(h = c(-2, 2), col = "blue")
+    dev.off()
+
+    # edgeR 'GLM'
+    # NOTE(review): the GLM branch below is diagnostic only — nothing from it
+    # is written to disk; its discarded expressions are kept as-is
+
+    y.glm <- DGEList(counts = counts, group = group)
+
+    design <- model.matrix(~0 + group, data = y.glm$samples)
+    design
+
+    y.glm <- estimateGLMCommonDisp(y.glm, design)
+    y.glm <- estimateGLMTrendedDisp(y.glm, design)
+
+    ## Loading required package: splines
+    y.glm <- estimateGLMTagwiseDisp(y.glm, design)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotBCV - GLM")))
+    plotBCV(y.glm, main = paste0(cName, "\nedgeR - plotBCV - GLM"))
+    dev.off()
+
+    y.fit <- glmFit(y.glm, design)
+    y.lrt <- glmLRT(y.fit)
+
+    FDR <- p.adjust(y.lrt$table$PValue, method = "BH")
+    sum(FDR < pvt)
+
+    topTags(y.lrt)
+
+    summary(de.glm <- decideTestsDGE(y.lrt, p = pvt, adjust = "BH"))
+
+}
+
+
+# DESeq2Function: DESeq2 run for one pairwise comparison.
+#   odir/DESeq2Dir   output root and subfolder; plots go to <outdir>/plots
+#   cName            comparison label used in file and plot names
+#   RTableInFiles    raw count table (genes x samples)
+#   pvtDESeq2        padj (BH/FDR) cutoff for the DEG table
+#   sampleNameI/II   the two condition levels (order fixes the contrast)
+#   sampleNameList   per-column condition assignment
+# Side effects: writes "<cName>_<n>_DESeq2_fdr.tsv" plus SdPlot/PCA/DispEsts
+# PNGs.
+DESeq2Function <- function(odir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)
+{
+    library(DESeq2)
+    library(RColorBrewer)
+    library(gplots)
+    library(vsn)
+
+    outdir <- paste0(odir, "/",DESeq2Dir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE)
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+
+    pvt <- pvtDESeq2
+
+
+
+    # DESeq2 constructors, for reference:
+    #   DESeqDataSet(se, design, ignoreRank = FALSE)
+    #   DESeqDataSetFromMatrix(countData, colData, design, tidy = FALSE, ignoreRank = FALSE, ...)
+    #   DESeqDataSetFromHTSeqCount(sampleTable, directory = ".", design, ignoreRank = FALSE, ...)
+    #   DESeqDataSetFromTximport(txi, colData, design, ...)
+    coldata = data.frame(row.names = colnames(RTableInFiles), condition = sampleNameList)
+
+    ddsHTSeq <- DESeqDataSetFromMatrix(countData = RTableInFiles, colData = coldata, design = ~ condition)
+
+    # fix the factor level order so the contrast direction is deterministic
+    colData(ddsHTSeq)$condition <- factor(colData(ddsHTSeq)$condition, levels = c(sampleNameI, sampleNameII))
+
+    ddsHTSeq <- DESeq(ddsHTSeq)
+
+    ddsHTSeq.res <- results(ddsHTSeq)
+    ddsHTSeq.res <- ddsHTSeq.res[order(ddsHTSeq.res$padj), ]
+    head(ddsHTSeq.res)
+
+    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - plotMA")))
+    #     plotMA(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotMA"), ylim = c(-2, 2))
+    # dev.off()
+
+    # keep genes with a defined padj below the cutoff and write them out
+    ddsHTSeq.res.fdr <- ddsHTSeq.res[!is.na(ddsHTSeq.res$padj), ]
+    ddsHTSeq.res.fdr <- ddsHTSeq.res.fdr[ddsHTSeq.res.fdr$padj < pvt, ]
+
+    write.table(as.matrix(ddsHTSeq.res.fdr), file = paste0(outdir, cName, "_", nrow(as.matrix(ddsHTSeq.res.fdr)), "_DESeq2_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # two variance-stabilizing transforms, compared in the SdPlots below
+    ddsHTSeq.rld <- rlogTransformation(ddsHTSeq, blind = TRUE)
+    ddsHTSeq.vsd <- varianceStabilizingTransformation(ddsHTSeq, blind = TRUE)
+
+    # NOTE(review): par(mfrow=...) has no effect on the separate png devices
+    # opened below; kept as-is
+    par(mfrow = c(1, 3))
+    notAllZero <- (rowSums(counts(ddsHTSeq)) > 0)
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.1")))
+        meanSdPlot(log2(counts(ddsHTSeq, normalized = TRUE)[notAllZero, ] + 1)) # , ylim = c(0, 2.5)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.2"))) # , ylim = c(0, 2.5)
+        meanSdPlot(assay(ddsHTSeq.rld[notAllZero, ]))
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.3"))) # , ylim = c(0, 2.5)
+        meanSdPlot(assay(ddsHTSeq.vsd[notAllZero, ]))
+    dev.off()
+
+    # sample-to-sample distance matrix on the rlog values (computed for the
+    # commented-out heatmap below)
+    hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
+    distRL <- dist(t(assay(ddsHTSeq.rld)))
+    mat <- as.matrix(distRL)
+    rownames(mat) <- colnames(mat) <- with(colData(ddsHTSeq), paste0(condition))
+
+    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - heatmap")))
+    #     heatmap(mat, trace = "none", col = rev(hmcol), margin = c(13, 13))
+    # dev.off()
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - PCA")))
+        print(plotPCA(ddsHTSeq.rld, intgroup = "condition"))
+    dev.off()
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - DispEsts")))
+        plotDispEsts(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotDispEsts"))
+    dev.off()
+}
+
+# NOISeqFunction: NOISeq / NOISeqbio run for one pairwise comparison,
+# including GC- and length-bias diagnostics.
+#   odir/NOISeqDir    output root and subfolder; plots go to <outdir>/plots
+#   cName             comparison label used in file and plot names
+#   RTableInFiles     raw count table (genes x samples)
+#   pvtNOISeq         probability cutoff for the classic noiseq() DEG tables
+#   sampleNameI/II    group labels (unused in this function; kept so all main
+#                     functions share a uniform signature)
+#   myLenPos/myGCPos  column positions of length and GC content in LenGCTable
+#   LenGCTable        path to the per-gene length/GC annotation table
+#   columnPos         column indices of the samples to use
+#   sampleNameList    per-column group assignment ("Treatment")
+#   SampleNames       per-column replicate names ("TreatmentRun")
+# Side effects: writes *_NOISeq_{rpkm,nnorm,tmm}_prob.tsv and
+# *_NOISeqbio_{rpkm,tmm}_prob_seed_12345.tsv tables plus diagnostic PNGs.
+NOISeqFunction <- function(odir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)
+{
+    library(NOISeq)
+
+    outdir <- paste0(odir, "/",NOISeqDir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE)
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+    pvt <- pvtNOISeq
+
+    counts <- RTableInFiles[, columnPos]
+    head(counts)
+
+    # must be changed if there are no triplicates
+    # !! Please change this setting if needed !!
+    myfactors <- data.frame(Treatment = sampleNameList, TreatmentRun = SampleNames)
+    # sort counts by row names
+    mycounts <- counts[order(rownames(counts)), ]
+    # filter out low counts
+    mycounts.filter <- filtered.data(mycounts, factor = myfactors$Treatment, norm = FALSE, depth = NULL, method = 1, cv.cutoff = 100, cpm = 1)
+    mycounts.rowname <- rownames(mycounts.filter)
+
+    # Extra file with length and GC content; restricted to the genes that
+    # survived the count filter above
+    myLengthTable <- read.table(LenGCTable, sep = "\t", header = TRUE, stringsAsFactor = FALSE, row.names=1)
+    myLengthTable <- myLengthTable[order(rownames(myLengthTable)),] # sort by rownames
+    myLengthTable.filter <- myLengthTable[mycounts.rowname,]
+
+    # named length vector, as expected by readData()
+    mylength <- myLengthTable.filter[,myLenPos]
+    names(mylength) <- rownames(myLengthTable.filter)
+
+    # named GC-content vector, as expected by readData()
+    mygc <- myLengthTable.filter[,myGCPos]
+    names(mygc) <- rownames(myLengthTable.filter)
+
+
+
+
+
+    # assemble the NOISeq input object
+    mydata <- readData(data = mycounts.filter, length = mylength, gc = mygc, factors = myfactors)
+    mydata
+
+    # bias diagnostics; the "#-#" plot blocks below are intentionally disabled
+    mydata.GCbias <- dat(mydata, type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.1")))
+#-#        explo.plot(mydata.GCbias)
+#-#    dev.off()
+
+    mydata.GCbias.group <- dat(mydata, factor = "Treatment", type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.2")))
+#-#      explo.plot(mydata.GCbias.group)
+#-#    dev.off()
+
+    show(mydata.GCbias.group)
+
+    mydata.lengthbias <- dat(mydata, type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.1")))
+#-#        explo.plot(mydata.lengthbias)
+#-#    dev.off()
+
+    mydata.lengthbias.group <- dat(mydata, factor = "Treatment", type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.2")))
+#-#        explo.plot(mydata.lengthbias.group)
+#-#    dev.off()
+
+    show(mydata.lengthbias.group)
+
+    # looks different
+    mydata.cd <- dat(mydata, type = "cd")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - cd")))
+#-#        explo.plot(mydata.cd)
+#-#    dev.off()
+    # NOTE(review): bare expression, discarded inside a function; kept as-is
+    mylength
+
+    # NOTE(review): myRPKM/myUQUA/myTMM are computed but never used below;
+    # kept as-is in this documentation-only pass
+    myRPKM <- NOISeq::rpkm(assayData(mydata)$exprs)
+    myUQUA <- NOISeq::uqua(assayData(mydata)$exprs, long = mylength, lc = 0.5, k = 0)
+    myTMM <- NOISeq::tmm(assayData(mydata)$exprs, long = 1000, lc = 0)
+
+    # classic NOISeq under three normalizations
+    mynoiseq.rpkm <- noiseq(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.nnorm <- noiseq(mydata, k = 0.5, norm = "n", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.tmm <- noiseq(mydata, k = 0.5, norm = "tmm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+
+    # RPKM: keep genes with prob above the cutoff and write the table
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm@results[[1]][mynoiseq.rpkm@results[[1]]$prob > pvt, ]
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm.prob[!is.na(mynoiseq.rpkm.prob$prob), ]
+    write.table(mynoiseq.rpkm.prob, file = paste0(outdir, cName, "_", nrow(mynoiseq.rpkm.prob), "_NOISeq_rpkm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # none
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm@results[[1]][mynoiseq.nnorm@results[[1]]$prob > pvt, ]
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm.prob[!is.na(mynoiseq.nnorm.prob$prob), ]
+    write.table(mynoiseq.nnorm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.nnorm.prob), "_NOISeq_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # TMM
+    mynoiseq.tmm.prob <- mynoiseq.tmm@results[[1]][mynoiseq.tmm@results[[1]]$prob > pvt, ]
+    mynoiseq.tmm.prob <- mynoiseq.tmm.prob[!is.na(mynoiseq.tmm.prob$prob), ]
+    write.table(mynoiseq.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.tmm.prob), "_NOISeq_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    ############ NOISeq bio ################
+
+    # stricter probability cutoff for noiseqbio (its prob is FDR-like)
+    pvtnsb <- 0.995
+    a0per_value <- 0.99
+
+    # NOTE(review): iterations/x5 feed only the commented-out multi-seed loop
+    # below; the active runs use the fixed seed 12345. Kept as-is.
+    iterations <- 5
+    x5 <- c(sample(10000:99999, iterations, replace=F), 12345)
+    x5
+
+    collector.rpkm <- NULL
+    collector.tmm <- NULL
+
+
+    # for (iter in x5) {
+    #   # RPKM
+    #   mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    #   collector.rpkm <- rbind(collector.rpkm, mynoiseqbio.rpkm.prob)
+    #   #collector <- rbind(collector, mynoiseqbio.rpkm.prob[rownames(mynoiseqbio.rpkm.prob) %in% rownames(collector),])
+    #   # TMM
+    #   mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    #   collector.tmm <- rbind(collector.tmm, mynoiseqbio.tmm.prob)
+    # }
+    #
+    # # RPKM
+    # print(sort(row.names(collector.rpkm)))
+    # library(stringr)
+    # mynoiseqbio.rpkm.prob.overlap <- collector.rpkm[str_split_fixed(row.names(collector.rpkm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.rpkm.prob.overlap) <- substr(row.names(mynoiseqbio.rpkm.prob.overlap),1,str_length(row.names(mynoiseqbio.rpkm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.rpkm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob.overlap),"_NOISeqbio_rpkm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    #
+    # #TMM
+    # print(sort(row.names(collector.tmm)))
+    # library(stringr)
+    # mynoiseqbio.tmm.prob.overlap <- collector.tmm[str_split_fixed(row.names(collector.tmm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.tmm.prob.overlap) <- substr(row.names(mynoiseqbio.tmm.prob.overlap),1,str_length(row.names(mynoiseqbio.tmm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.tmm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob.overlap),"_NOISeqbio_tmm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # # none
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm@results[[1]][mynoiseqbio.nnorm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm.prob[!is.na(mynoiseqbio.nnorm.prob$prob), ]
+    # write.table(mynoiseqbio.nnorm.prob, file = paste0(outdir, cName, "_NOISeqbio_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # # TMM
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    # write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName, "_NOISeqbio_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+
+
+
+    # NOISeqbio with a fixed seed for reproducibility
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiorpkm")))
+        mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    write.table(mynoiseqbio.rpkm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob),"_NOISeqbio_rpkm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiontmm")))
+        mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob),"_NOISeqbio_tmm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbionnorm")))
+        mynoiseqbio.nnorm <- noiseqbio(mydata, k = 0.5, norm = "n", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    head(mynoiseqbio.rpkm@results[[1]])
+    head(mynoiseqbio.nnorm@results[[1]])
+
+    # NOTE(review): the degenes() results below are discarded inside a
+    # function (no auto-printing); kept as-is
+    mynoiseqbio.rpkm.deg <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = NULL)
+
+    mynoiseqbio.rpkm.deg.up <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "up")
+    mynoiseqbio.rpkm.deg.down <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "down")
+    mynoiseqbio.nnorm.deg <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = NULL)
+    mynoiseqbio.nnorm.deg.up <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "up")
+    mynoiseqbio.nnorm.deg.down <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "down")
+
+    # NOTE(review): par(mfrow=...) has no effect on the separate png devices
+    # opened below; kept as-is
+    par(mfrow = c(3, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.8")))
+        DE.plot(mynoiseq.rpkm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.9")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.8")))
+        DE.plot(mynoiseq.nnorm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.9")))
+        DE.plot(mynoiseq.tmm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.8")))
+        DE.plot(mynoiseq.tmm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(2, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvt)))
+        DE.plot(mynoiseqbio.rpkm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvtnsb)))
+        DE.plot(mynoiseqbio.rpkm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvt)))
+        DE.plot(mynoiseqbio.nnorm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvtnsb)))
+        DE.plot(mynoiseqbio.nnorm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(1, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9_MD")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorm_0.9_MD")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+}
+
+###########################################
+## additional functions for usability      #
+###########################################
+
+# Compute RPKM (reads per kilobase of transcript per million mapped reads):
+#   RPKM = 10^9 * c / (n * l)
+# where c = read count, n = library size (total mapped reads) and
+# l = feature length in bp. Fully vectorized over its arguments.
+calcRPKM <- function(c,n,l)
+{
+    reads <- as.numeric(c)
+    lib.size <- as.numeric(n)
+    feature.len <- as.numeric(l)
+    (10^9 * reads) / (lib.size * feature.len)
+}
+
+# RPKMMatrix converts a raw count table into an RPKM matrix.
+#   InTable     count table (genes x samples) with gene row names
+#   directory   prefix that is concatenated (no separator added!) with
+#               LenGCTable to locate the length annotation file
+#   LenGCTable  tab-separated file with gene ids as row names
+#   myLenPos    column position of the feature length in that file
+# Returns a numeric matrix with the same dimnames as InTable.
+RPKMMatrix <- function(InTable,directory,LenGCTable,myLenPos)
+{
+    myLengthTable <- read.table(paste0(directory,LenGCTable), sep = "\t", header = TRUE, stringsAsFactors = FALSE, row.names = 1)
+
+    counts <- as.matrix(InTable)
+    # per-sample library sizes and per-gene lengths, aligned by name
+    lib.sizes <- colSums(counts)
+    feature.len <- as.numeric(myLengthTable[rownames(counts), myLenPos])
+
+    # RPKM = 10^9 * count / (libSize * length), computed for the whole matrix
+    # at once; replaces the previous per-cell double loop (O(genes*samples)
+    # data-frame lookups) with one vectorized expression.
+    newtable <- (10^9 * counts) / outer(feature.len, lib.sizes)
+    dimnames(newtable) <- dimnames(InTable)
+
+    return(newtable)
+}
+
+
+# rmHTSeqLines removes the last five rows
+# rmHTSeqLines drops the trailing five rows of a count table (presumably the
+# "__no_feature"-style summary rows HTSeq appends — confirm against the
+# upstream counter's output).
+rmHTSeqLines <- function(counts)
+{
+    head(counts, n = -5L)
+}
+
+
+###############
+## functions ##
+###############
+
+# split list to a factor list
+# split2list collapses replicate sample names to their base condition and
+# returns them as a factor: everything from the first "." onward is dropped,
+# e.g. "control_6h.1" -> "control_6h"; names without a dot pass through.
+split2list <- function(tmp)
+{
+    # Vectorized replacement for the old element-wise grow-in-a-loop version;
+    # "\\." is escaped so only a literal dot (the replicate-suffix separator)
+    # starts the removed part.
+    return(as.factor(sub("\\..*$", "", tmp)))
+}
+
+# makeGroups
+# https://github.com/drisso/RUVSeq/blob/master/R/helper.R
+# makeGroups builds a replicate-index matrix for RUVSeq-style usage: one row
+# per factor level, holding the positions of that level's samples and padded
+# with -1 up to the size of the largest group.
+# (adapted from https://github.com/drisso/RUVSeq/blob/master/R/helper.R)
+makeGroups <- function(xs)
+{
+    xs <- factor(xs)
+    lvls <- levels(xs)
+    groups <- matrix(-1, nrow = length(lvls), ncol = max(table(xs)))
+    for (row in seq_along(lvls)) {
+        members <- which(xs == lvls[row])
+        groups[row, seq_along(members)] <- members
+    }
+    groups
+}
+
+# The function makeSampleList creates a list of the columns(samples) you would like to compare
+# makeSampleList builds the column (sample) names for a pairwise comparison:
+# for each condition it emits the base name plus numbered replicates, e.g.
+# makeSampleList("ctrl", 3, "trt", 2) -> c("ctrl","ctrl.1","ctrl.2","trt","trt.1").
+#   A, B    base sample names of the two conditions
+#   Al, Bl  replicate counts (numeric)
+# Returns NULL when Al is missing or not numeric (previous behavior kept).
+makeSampleList <- function(A, Al, B, Bl)
+{
+    l <- c()
+    # BUGFIX: the old check `exists("Al", mode = "numeric")` also searched
+    # enclosing environments and could match an unrelated global named "Al";
+    # test the argument itself instead.
+    if(!missing(Al) && is.numeric(Al)) {
+        l <- makeSampleList.helper(A, Al, l)
+        l <- makeSampleList.helper(B, Bl, l)
+    }
+    return(l)
+}
+# Append v, v.1, ..., v.(k-1) to l and return the grown vector.
+# seq_len() avoids the old `1:k` footgun, which produced a bogus "v.-1"
+# entry for k == 0.
+makeSampleList.helper <- function(v,k,l)
+{
+    return(c(l, v, if (k > 1) paste0(v, ".", seq_len(k - 1))))
+}
+
+# function creates a list of numbers out of another list. The List is the index of the given list.
+# makeIndexList returns the positional indices 1..length(l) of a vector/list.
+# seq_along() replaces the old counter loop; for empty input it yields
+# integer(0) where the old code returned NULL — both index identically.
+makeIndexList <- function(l)
+{
+    return(seq_along(l))
+}
+
+# intersection: read the per-method DEG tables previously written below the
+# output directory and intersect their gene ids.
+#   l      directory holding the *_fdr.tsv / *_prob.tsv result files
+#   check  1 -> intersect with fold-change direction check (via interDirect),
+#          anything else -> plain id intersection
+#   t      number of methods to overlap: 4 (edgeR, DESeq2, NOISeq, NOISeqbio)
+#          or 3 (edgeR, DESeq2, NOISeq)
+# Returns the NOISeq rows of the intersecting genes.
+# NOTE(review): this relies on list.files() finding exactly four matching
+# files in alphabetical order (DESeq2 first, edgeR second, the two NOISeq
+# flavours third/fourth) — verify when additional result files appear.
+intersection <- function(l,check,t)
+{
+  l.list <- list.files(path = l, pattern = "*(rpkm|edgeR|DESeq2).*tsv", recursive = TRUE)
+  # files 3/4 are the two NOISeq flavours; the "bio" test sorts out which
+  if(grepl("bio", l.list[4],fixed = TRUE)) {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+  } else {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+  }
+  l.list.edgeR <- read.table(paste0(l,"/",l.list[2]), header=T, sep="\t", row.names=1)
+  l.list.deseq2 <- read.table(paste0(l,"/",l.list[1]), header=T, sep="\t", row.names=1)
+
+
+  print("DEGs edgeR")
+  print(nrow(l.list.edgeR))
+  print("DEGs deseq2")
+  print(nrow(l.list.deseq2))
+  print("DEGs noiseq")
+  print(nrow(l.list.noiseq))
+  print("DEGs noiseqbio")
+  print(nrow(l.list.noiseqbio))
+
+  interect.list <- ""
+  interect.list.direct <- ""
+
+  if(t==4) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), intersect(rownames(l.list.noiseqbio), rownames(l.list.deseq2))))
+
+    print("DEGs intersection without direction check (four methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, l.list.noiseqbio, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+  if(t==3) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), rownames(l.list.deseq2)))
+
+    print("DEGs intersection without direction check  (three methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      # fake data frame with 0 rows => nrow == 0; tells interDirect to fall
+      # back to NOISeq in place of NOISeqbio
+      df_fake <- data.frame(Date=as.Date(character()),
+                       File=character(),
+                       User=character(),
+                       stringsAsFactors=FALSE)
+
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, df_fake, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+
+
+  # NOTE(review): if no overlap was computed, interect.list(.direct) is still
+  # "" here and the row subset yields an NA row; kept as-is
+  if(check == 1){
+    return(l.list.noiseq[interect.list.direct,])
+  } else {
+    return(l.list.noiseq[interect.list,])
+  }
+}
+
+# interDirect: keep only those candidate DEGs whose fold-change DIRECTION
+# agrees across all methods. Each method's table is reduced to a "+"/"-"
+# sign per gene, the sign is glued onto the gene id (e.g. "+Pp3c1_100"), and
+# a plain string intersection then enforces identical direction everywhere.
+#   e  edgeR table ("logFC" column)
+#   d  DESeq2 table ("log2FoldChange" column)
+#   n  NOISeq table ("M" column; opposite sign convention, hence x > 0)
+#   nb NOISeqbio table ("log2FC" column) or a 0-row frame to reuse n instead
+#   i  gene ids to check (intersection without direction check)
+# Returns the ids (sign stripped again via substr) passing the check.
+interDirect <- function(e,d,n,nb,i)
+{
+  lil <- length(i)
+
+  if(nrow(nb) == 0) { # use noiseq twice to simulate noiseqbio
+    nbx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")})
+    nby <- paste0(nbx[,"M"],rownames(nbx))
+  } else {
+    nbx <- apply(nb[i,], 2, function(x) {ifelse(x > 0, "-", "+")})
+    nby <- paste0(nbx[,"log2FC"],rownames(nbx))
+  }
+
+  ex <- apply(e[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# here < and >
+  dx <- apply(d[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# here < and >
+  nx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) # twisted, because edgeR and DESeq2 are calculating vice versa
+
+  ey <- paste0(ex[,"logFC"],rownames(ex))
+  dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+  ny <- paste0(nx[,"M"],rownames(nx))
+
+  interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+
+  # NOTE(review): if nothing survives, the edgeR/DESeq2 signs are flipped
+  # once and the intersection retried — presumably to tolerate comparisons
+  # that were run with the groups in reverse order; confirm this convention.
+  if(lil>0 && length(interect.list.direct)<=0) {
+    ex <- apply(e[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# here < and >
+    dx <- apply(d[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# here < and >
+
+    ey <- paste0(ex[,"logFC"],rownames(ex))
+    dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+
+    interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+  }
+
+  # strip the leading sign character, leaving the bare gene ids
+  return(substr(interect.list.direct,2,100))
+}
+
+###########################################
+## executive part of the script           #
+###########################################
+# Driver loop: run the complete DEG workflow once per sample-name set. All
+# other arguments (input file, cutoffs, tool output dirs, ...) are globals
+# defined in the configuration section earlier in this script.
+for(SampleNames in sampleLists) {
+   workflow(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
+}
+# NOTE(review): print() shows the "\n" literally; cat() would interpret it
+print("Thank you for using the DEG pipeline :) \n")
+
+# fin
+
+### compare DEGs analysis ###
+# the method overlaps were compared manually with Venny and Excel
+
+
diff --git a/workflows/Experiment2/3d_pca_plot_schippers_second_20211209.r b/workflows/Experiment2/3d_pca_plot_schippers_second_20211209.r
new file mode 100644
index 0000000000000000000000000000000000000000..c4aeea758c29d8b97e6e7ba87e32af34ac9ec18f
--- /dev/null
+++ b/workflows/Experiment2/3d_pca_plot_schippers_second_20211209.r
@@ -0,0 +1,176 @@
+# ---------------------------------------------------------------------------
+# 3d PCA / hierarchical-clustering plots for the second (Arabidopsis) RNA-seq
+# experiment. This section loads the RPKM matrix and renames the raw,
+# fastq-derived library columns to biological condition names.
+# ---------------------------------------------------------------------------
+# 3d R plot
+saveat <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/At_all_DEGs_0.9"
+# saveat <- "R:/haasf/madland_RNA-seq_Schippers"
+
+file.rpkm <- '/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/At_all.p.sort.wo_ncRNA_miRNA.rpkm'
+# file.rpkm <- "S:/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/Mp.p.sort.wotRNA.rpkm"
+# file.rpkm <- "S:/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/31315_all.p.sort.rpkm"
+
+# genes x libraries table of RPKM values; column 1 of the file = gene ids
+data.rpkm <- read.table(file.rpkm, header=T, sep="\t", row.names=1)
+
+
+# sort by colnames
+data.rpkm <- data.rpkm[,order(colnames(data.rpkm))]
+
+
+# condition name -> c(condition label, plot colour)
+librariesName <- list(
+  control_6h = c("control_6h", "red"),
+  control_24h = c("control_24h", "blue"),
+  LCS_6h = c("LCS_6h", "green"),
+  LCS_24h = c("LCS_24h", "yellow")
+)
+
+# header mappings of earlier experiments, kept for reference:
+# header.ori <- c("M54.fq.M54.fq_all.p.count", "M53.fq.M53.fq_all.p.count", "M52.fq.M52.fq_all.p.count", "M48.fq.M48.fq_all.p.count", "M47.fq.M47.fq_all.p.count", "M46.fq.M46.fq_all.p.count", "M40.fq.M40.fq_all.p.count", "M39.fq.M39.fq_all.p.count", "M38.fq.M38.fq_all.p.count", "M34.fq.M34.fq_all.p.count", "M33.fq.M33.fq_all.p.count", "M32.fq.M32.fq_all.p.count")
+# header.new <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "LCS_6h.2", "LCS_6h.1", "LCS_6h", "control_6h.2", "control_6h.1", "control_6h")
+# header.ori <- c("MF82", "MF81", "MF80", "MF76", "MF75", "MF74", "MF68", "MF67", "MF66", "MF62", "MF61", "MF60")
+# header.new <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "control_6h.2", "LCS_6h.1", "LCS_6h", "LCS_6h.2", "control_6h.1", "control_6h")
+# raw count-file column names and their condition labels (index-aligned)
+header.ori <- c("A6.fq.A6.fq.single.p.count", "A5.fq.A5.fq.single.p.count", "A4.fq.A4.fq.single.p.count", "A26.fq.A26.fq.single.p.count", "A25.fq.A25.fq.single.p.count", "A24.fq.A24.fq.single.p.count", "A20.fq.A20.fq.single.p.count", "A19.fq.A19.fq.single.p.count", "A18.fq.A18.fq.single.p.count", "A12.fq.A12.fq.single.p.count", "A11.fq.A11.fq.single.p.count", "A10.fq.A10.fq.single.p.count")
+header.new <- c("control_6h.2", "control_6h.1", "control_6h", "LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "LCS_6h.2", "LCS_6h.1", "LCS_6h")
+
+
+# Bring both vectors into the same alphabetical order as the sorted columns of
+# data.rpkm. header.new must be permuted BEFORE header.ori is overwritten,
+# because the permutation is computed from the original header.ori.
+header.new <- header.new[order(header.ori)]
+header.ori <- header.ori[order(header.ori)]
+
+col.header <- header.new
+
+# NOTE(review): this assumes sort(colnames(data.rpkm)) lines up one-to-one
+# with sort(header.ori) — confirm against the actual RPKM file header.
+colnames(data.rpkm) <- col.header
+
+library("DESeq2")
+library("ggplot2")
+library("RColorBrewer")
+library("pheatmap")
+library("BiocGenerics")
+library("rgl")
+library("magick")
+library("sjmisc")
+
+
+################### running ######################
+### PCA RPKM ###
+
+# fixed seed for reproducibility of anything stochastic below
+set.seed(0)
+
+# transpose to samples x genes: dist() computes distances between rows
+data.inv <- t(data.rpkm)
+data.dist <-dist(data.inv, method="euclidean") # "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski"
+data.dist.hc <- hclust(data.dist,method="ward.D2")
+# NOTE(review): the PCA is run on the sample-distance matrix (princomp
+# coerces the 'dist' object), not on the expression values themselves —
+# confirm this is intended.
+data.dist.pca <-princomp(data.dist,cor=T)
+
+# scores of the first three principal components, one value per sample
+pc1 <- data.dist.pca$scores[,1]
+pc2 <- data.dist.pca$scores[,2]
+pc3 <- data.dist.pca$scores[,3]
+
+# create data frame for pc1 pc2 pc3
+data.dist.pca.frame = data.frame(pc1,pc2,pc3)
+# presumably names(data.dist.pca$scale) carries the sample names — verify
+rownames(data.dist.pca.frame) <- names(data.dist.pca$scale)
+colnames(data.dist.pca.frame) <- c("pc1","pc2","pc3")
+
# Map every RPKM column to its condition label and plot colour.
# Column names are either a bare condition ("control_6h") or a condition plus
# a replicate suffix ("control_6h.1", "control_6h.2"); the suffix is stripped
# before looking the condition up in `librariesName`.
#
# Replaces a loop that grew both vectors with c() (quadratic appends), tested
# for a digit via sjmisc::str_contains on a single character, and derived the
# base name through nested substr() arithmetic. An unknown column name now
# fails fast via [[ ]] instead of being silently dropped, which previously
# could misalign colours against columns.
base.names <- sub("\\.[0-9]+$", "", colnames(data.rpkm))
condition.values <- vapply(base.names,
                           function(n) librariesName[[n]][1],
                           character(1), USE.NAMES = FALSE)
condition.values.color <- vapply(base.names,
                                 function(n) librariesName[[n]][2],
                                 character(1), USE.NAMES = FALSE)
+
+
# Attach the per-sample metadata to the PCA score frame.
data.dist.pca.frame$tissue <- condition.values
data.dist.pca.frame$color <- condition.values.color
data.dist.pca.frame$name <- names(data.dist.pca$scale)
# proportion of the total variance explained by each principal component
attr(data.dist.pca.frame, "percentVar") <-
  data.dist.pca$sdev^2 / sum(data.dist.pca$sdev^2) # cumsum()

# Simple diagnostic plots: hierarchical clustering and the PCA scree plot.
png(filename = paste0(saveat, "/HC_RPKM_normalized.png"))
plot(data.dist.hc) # hc plot
dev.off()
png(filename = paste0(saveat, "/PCA_variance_RPKM_normalized.png"))
plot(data.dist.pca) # variances; var(data.dist.pca$sdev[1:9])
dev.off()
# percent variation per component, rounded for the axis labels
percentVar <- round(100 * attr(data.dist.pca.frame, "percentVar"))
+
+# Interactive 3d scatter plot of the first three PCs (opens an rgl device);
+# one sphere per sample, coloured by condition.
+# 3d plot
+plot3d(pc1, pc2, pc3,
+       type = "s", # p, s, l, h, n
+       #pch = c(1:3),
+       col = condition.values.color,
+       size = 1,
+       xlab = paste0("PC1: ",percentVar[1],"% variance"),
+       ylab = paste0("PC2: ",percentVar[2],"% variance"),
+       zlab = paste0("PC3: ",percentVar[3],"% variance"),
+       cex = 2,
+       main = "", # -> princomp",
+       # NOTE(review): the trailing comma above leaves an empty argument;
+       # some functions error on this ("argument is missing") — confirm
+       # plot3d tolerates it.
+)
+
+# shift <- matrix(4, 4, 4, byrow = TRUE)
+# text3d(shift,texts=1:3)
+# reference grids on all three axis planes
+grid3d(c("x", "y", "z"))
+
+## add legend
+# NOTE(review): relies on unique() returning conditions and colours in the
+# same first-appearance order — holds because both vectors were built in
+# parallel, one entry per column.
+legend3d("right", unique(condition.values), pch = 19, col = unique(condition.values.color))
+
+#### video #####
+# short interpolated camera rotation on the open rgl device
+M <- par3d("userMatrix")
+play3d( par3dinterp( userMatrix=list(M,
+                                     rotate3d(M, pi/2, 1, 0, 0),
+                                     rotate3d(M, pi/2, 0, 1, 0) ) ),
+        duration=2 )
+
+# records a spinning animation into `saveat` (presumably movie.gif via the
+# magick package loaded above — verify the expected output format)
+movie3d(spin3d(axis = c(1, 2, 1)), duration = 5,
+        dir = saveat)
+
+#### video end ####
+
# 2d PCA scatter (PC1 vs PC2) of the score frame, coloured by condition.
png(filename=paste0(saveat, "/PCA_RPKM_normalized.png"))
# print() is required so the plot is also rendered when this script is
# source()d — a bare ggplot object is only auto-printed at the top level
# of an interactive session or Rscript.
print(
  ggplot(
    data.dist.pca.frame, 
    aes(
      pc1, 
      pc2, 
      color=tissue)
  ) + 
    geom_point(size=2.5) +
    xlab(
      paste0("PC1: ",percentVar[1],"% variance")
    ) +
    ylab(
      paste0("PC2: ",percentVar[2],"% variance")
    ) +
    #theme() + #, face="bold"
    # NOTE(review): these colours bind to the alphabetical factor levels of
    # `tissue`, not to condition.values.color as used in the 3d plot —
    # confirm the intended condition-to-colour mapping.
    scale_colour_manual(
      values= c("blue", "red", "yellow", "green") # dodgerblue3
    ) +
    ggtitle("PCA of all samples (RPKM normalized)") +
    theme(
      plot.title = element_text(lineheight=.8),
      panel.background = element_rect(fill = "gray95")
    )
)
dev.off()
+
+
+
+
+
+######## Some experiments ######
+# for(i in colnames(data)) {
+#     colnames(data)[colnames(data)==grep(i, names(data), value = TRUE)] <- librariesName[[i]]
+# }
+# 
+# for(i in colnames(data)) {
+#     colnames(data)[colnames(data)==grep(i, names(data), value = TRUE)] <- names(librariesName[librariesName==grep(i, librariesName, value = TRUE)])
+# }
+
diff --git a/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_at_20211209.r b/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_at_20211209.r
new file mode 100644
index 0000000000000000000000000000000000000000..a57bbb7c0306bac69c21ea19219b7f570b4d109b
--- /dev/null
+++ b/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_at_20211209.r
@@ -0,0 +1,923 @@
+###############################################################
+## DEG pipeline with edgeR, DESeq2 and NOISeq                 #
+## Modified from the template of Dr. Kristian Ullrich 2014    #
+## @fabianhaas, AG Rensing, Marburg, Jan 2016                 #
+###############################################################
+# The pipeline expects sample triplicates.
+#   If you don't have three samples of one condition, use fake (zero-count) files.
+# RPKM calculation is optional.
+# Additional program to sum up the tsv files: /mnt/nas_temp/home/haasf/Program/sumup_htseq_counts.pl folder file_prefix
+
+###########################################
+## please define your variables           #
+###########################################
+
+# base output directory; one subfolder is created per comparison
+saveat <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/At_all_DEGs_0.9"
+
+# tab-separated count table (genes x libraries)
+file <- '/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/At_all.p.sort.wo_ncRNA_miRNA.counts'
+# file <- '/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/31315_all.p.sort.counts'
+
+# Table with the length and the GC content
+LenGCTable <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/Arabidopsis_thaliana.TAIR10.cdna.all.GC.length"
+# LenGCTable <- "/mnt/NAS_coruscant_datashare/haasf/GeneAtlas_RNASeq/DEG_2nd_round/cosmoss_V3.3.release_2016-07-22.IsoformV31.fasta.length_GC_Contig_Daten_for_34840_1nd.csv"
+
+# Column position of the gene name and the width/gc content (first column is gene name!)
+myLenPos <- 1
+myGCPos <- 2
+
+# generator for all pairwise comparisons, kept for reference:
+# 
+# hoo <- c("","")
+# sampleLists <- c()
+# for(a in hoo) {
+#   for(b in hoo) {
+#     if(a!=b) {
+#       tmp <- c(a,b)
+#       sampleLists <- c(sampleLists, list(tmp))
+#     }
+#   }
+# }
+
+# pairs of conditions to compare: c(query, reference)
+sampleLists <- list(c("LCS_24h", "control_24h"),
+                    c("LCS_24h", "control_6h"),
+                    c("LCS_24h", "LCS_6h"),
+                    c("control_24h", "LCS_6h"),
+                    c("control_24h", "control_6h"),
+                    c("LCS_6h", "control_6h")
+)
+
+# Do you want a RPKM value table (1/0)
+# Be warned, it takes a while to do this. approx. 10 minutes for 35000 genes and 6 samples.
+rpkm <- 0
+# Are the data original HTSeq files? yes <- 1 ; no <- 0
+htseq <- 0
+
+# threshold settings for the R packages
+#edgeR (adjusted p-value cut-off)
+pvtedgeR <- 0.001
+#DESeq2 (adjusted p-value cut-off)
+pvtDESeq2 <- 0.001
+#NOISeq (probability threshold: genes with prob > pvtNOISeq are kept)
+pvtNOISeq <- 0.9
+
+# Output folder names
+edgeRDir <- "edgeR"
+DESeq2Dir <- "DESeq2"
+NOISeqDir <- "NOISeq"
+
+#########################
+## stop editing here!! ##
+#########################
+
+
+###########################################
+## executive function                     #
+###########################################
+
# Run one pairwise DEG comparison (edgeR + DESeq2 + NOISeq) and write all
# result tables, diagnostic plots and a log file below `saveat`.
#
# file        : tab-separated count table (genes x libraries)
# saveat      : output base directory (created if missing)
# SampleNames : character vector with the two condition names to compare
# LenGCTable  : per-gene length/GC table, used by NOISeq
# myLenPos    : column position of the gene length in LenGCTable
# myGCPos     : column position of the GC content in LenGCTable
# rpkm, htseq : flags, see the configuration header of this script
# pvtedgeR, pvtDESeq2, pvtNOISeq : per-method significance thresholds
# edgeRDir, DESeq2Dir, NOISeqDir : per-method output folder names
workflow <- function(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
{
    # setwd(workingFolder) # set workspace

    sampleNameI <- SampleNames[1]
    sampleNameII <- SampleNames[2]

    # Comparison name, e.g. "LCS_24h_vs_control_24h"
    cName <- paste(sampleNameI, sampleNameII, sep = "_vs_")

    filename <- strsplit(file, "/")
    filename <- filename[[1]][length(filename[[1]])] # basename of the count file
    sampleName <- gsub(", ", "_", toString(SampleNames))
    samples <- SampleNames
    # create the output directories
    dir.create(saveat, showWarnings = FALSE)
    outdir <- paste0(saveat, "/", sampleName)
    dir.create(outdir, showWarnings = FALSE)

    # Start the log file. on.exit() guarantees the sink is closed even when a
    # step below fails; otherwise all later console output would be swallowed.
    sink(paste0(outdir, "/DEGpipe_", cName, ".log"))
    on.exit(sink(), add = TRUE)

    # print the parameters into the log file
    print("set parameters:")
    print(paste0("Counts file:", file))
    print(paste0("Results stored at:", saveat))
    print(paste0("SampleNames: ", SampleNames))
    print(paste0("LenGCTable: ", LenGCTable))
    print(paste0("myLenPos: ", myLenPos))
    print(paste0("myGCPos: ", myGCPos))
    print(paste0("rpkm: ", rpkm))
    print(paste0("htseq: ", htseq))
    print(paste0("pvtedgeR: ", pvtedgeR))
    print(paste0("pvtDESeq2: ", pvtDESeq2))
    print(paste0("pvtNOISeq: ", pvtNOISeq))
    print(paste0("edgeRDir: ", edgeRDir))
    print(paste0("DESeq2Dir: ", DESeq2Dir))
    print(paste0("NOISeqDir: ", NOISeqDir))
    # session and package infos for the log file
    print(sessionInfo())
    print(Sys.info())

    # load the count table
    data0 <- read.table(file, header = T, sep = "\t", row.names = 1)
    # only needed if the gene ids do not end with .1 (necessary for the NOISeqbio iteration step)
    #row.names(data0) <- paste0(row.names(data0), ".1")

    # remove HTSeq summary rows like "__no_feature" and ERCC spike-in rows
    data0 <- data0[ with(data0,  !grepl('__', rownames(data0))) , ]
    data0 <- data0[ with(data0,  !grepl('ERCC', rownames(data0))) , ]
    #data0 <- data0[ , with(data0,  !grepl('X.1', colnames(data0)))]

    # rename the raw library columns to condition names
    # librariesName <- c("M54.fq.M54.fq.single.p.count", "M53.fq.M53.fq.single.p.count", "M52.fq.M52.fq.single.p.count", "M48.fq.M48.fq.single.p.count", "M47.fq.M47.fq.single.p.count", "M46.fq.M46.fq.single.p.count", "M40.fq.M40.fq.single.p.count", "M39.fq.M39.fq.single.p.count", "M38.fq.M38.fq.single.p.count", "M34.fq.M34.fq.single.p.count", "M33.fq.M33.fq.single.p.count", "M32.fq.M32.fq.single.p.count")
    # romanNumber <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "LCS_6h.2", "LCS_6h.1", "LCS_6h", "control_6h.2", "control_6h.1", "control_6h")
    librariesName <- c("A6.fq.A6.fq.all.p.count", "A5.fq.A5.fq.all.p.count", "A4.fq.A4.fq.all.p.count", "A26.fq.A26.fq.all.p.count", "A25.fq.A25.fq.all.p.count", "A24.fq.A24.fq.all.p.count", "A20.fq.A20.fq.all.p.count", "A19.fq.A19.fq.all.p.count", "A18.fq.A18.fq.all.p.count", "A12.fq.A12.fq.all.p.count", "A11.fq.A11.fq.all.p.count", "A10.fq.A10.fq.all.p.count")
    romanNumber <- c("control_6h.2", "control_6h.1", "control_6h", "LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "LCS_6h.2", "LCS_6h.1", "LCS_6h")

    for(i in seq_along(librariesName)) {
        colnames(data0)[colnames(data0)==grep(librariesName[i], names(data0), value = TRUE)] <- romanNumber[i]
    }

    # create the list of the column names/samples to compare
    SampleNames <- makeSampleList(sampleNameI, length(grep(paste0("^",sampleNameI,"[^A-Z]"), colnames(data0))) + 1,
                                  sampleNameII, length(grep(paste0("^",sampleNameII,"[^A-Z]"), colnames(data0))) + 1)
    print(SampleNames)

    ## TODO(review): candidate replacement for the renaming loop above:
    ## newenw <- table.new[, grepl(paste(LIST,collapse="|"), colnames(table.new))]


    # extract the requested samples from data0 if needed
    if(length(SampleNames) > 0) {
        RTableInFiles <- data0[,SampleNames]
        data <- data0[,SampleNames]
    }
    print(head(RTableInFiles))

    # sort by column name
    # data <- data[ , order(names(data))] # dangerous, the order of query and reference might not be the same
    # see how many rows and columns are in the tables
    print(paste("Raw data row number: ",nrow(data)))
    print(paste("Raw data column number: ",ncol(data)))

    # 2) A typical differential expression analysis workflow
    # 2.1) Filtering and exploratory data analysis
    # filter: one single gene/spike-in must have at least 5 counts in a minimum of 2 libraries
    filter <- apply(data, 1, function(x) length(x[x>5])>=2)
    dataFiltered <- data[filter,]
    # see how many rows and columns are left
    print(paste("Filtered data row number: ",nrow(dataFiltered)))
    # was mislabelled "row number" before; this logs the column count
    print(paste("Filtered data column number: ",ncol(dataFiltered)))
    # filter gene names
    # NOTE(review): "^Pp3" matches Physcomitrella gene ids; this run uses
    # Arabidopsis (AT...) ids, so `genes` stays empty — confirm (it is not
    # used further below in this function).
    genes <- rownames(dataFiltered)[grep("^Pp3", rownames(dataFiltered))]
    # filter spike-in names
    spikes <- rownames(dataFiltered)[grep("^ERCC", rownames(dataFiltered))]
    # get the column name list as.factors
    xtmp <- colnames(dataFiltered)
    x <- split2list(xtmp)

    # strip replicate suffixes; the unescaped dot is harmless here because the
    # pattern is anchored at the end of the name
    sampleNameList <- gsub(".[1234567890]+$", "", SampleNames)
    columnPos <- makeIndexList(SampleNames)

    # run ...
    ### edgeR ###
    print(paste0("edgeR is running with ", cName))
    print(packageDescription("edgeR")) # print() so the version reaches the log
    edgeRFunction(outdir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)

    #### DESeq2 ####
    print(paste0("DESeq2 is running with ", cName))
    print(packageDescription("DESeq2"))
    DESeq2Function(outdir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)

    ### NOISeq ###
    print(paste0("NOISeq is running with ", cName))
    print(packageDescription("NOISeq"))
    NOISeqFunction(outdir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)

    ### Overlap of all three methods ###
    # intersection with DEG direction check => 1, without => 0
    intersection_4.file <- intersection(outdir,1,4)
    write.table(intersection_4.file, file = paste0(outdir, "/", cName,"_intersection_four_methods_",nrow(intersection_4.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
    # edgeR, DESeq2, NOISeq
    intersection_3.file <- intersection(outdir,1,3)
    write.table(intersection_3.file, file = paste0(outdir, "/", cName,"_intersection_three_methods_",nrow(intersection_3.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)

    # log file is closed by the on.exit(sink()) handler registered above
}
+
+
+
+
+
+####################
+## main functions  #
+####################
+
# Exploratory plots over all samples: a PCA of the rlog-transformed counts and
# a heatmap of the most variable genes, both DESeq2 normalized.
#
# data          : integer count table (genes x libraries)
# librariesName : original library column names (become rownames of colData)
# cName         : comparison name (currently unused in this function)
# outdir        : output path prefix for the png files
#                 NOTE(review): used as paste0(outdir, "PCA_...") below, so it
#                 must end with a path separator — confirm the caller does this.
# romanNumber   : condition label per library, replicates suffixed ".1"/".2"
plotGraphics <- function(data, librariesName, cName, outdir, romanNumber)
{
    library("DESeq2")
    library("ggplot2")
    library("RColorBrewer")
    library("pheatmap")
    library("genefilter")

    # create sample annotation lists
    # Strip the replicate suffix (".1"/".2") to get one condition per library.
    # The dot is escaped and anchored: the former pattern ".[12]" also matched
    # e.g. "_2" inside "control_24h" and produced labels like "control4h".
    # sample.name.list <- gsub("\\.[12]$", "", librariesName[colnames(data)])
    sample.name.list <- gsub("\\.[12]$", "", romanNumber)
    sample.name.uniq <- unique(unlist(sample.name.list, use.names = FALSE))

    col.data.condition <- data.frame(row.names = librariesName, condition = sample.name.list)

    # run DESeq2 functions
    dds.matrix <- DESeqDataSetFromMatrix(countData = data, colData = col.data.condition, design = ~condition) # data must contain integers (whole numbers)
    colData(dds.matrix)$condition <- factor(colData(dds.matrix)$condition, levels = sample.name.uniq)
    dds.matrix.deseq <- DESeq(dds.matrix)
    dds.matrix.res <- results(dds.matrix.deseq)
    dds.matrix.res <- dds.matrix.res[order(dds.matrix.res$padj), ]
    dds.matrix.res.fdr <- dds.matrix.res[!is.na(dds.matrix.res$padj), ]
    dds.matrix.res.fdr <- dds.matrix.res.fdr[dds.matrix.res.fdr$padj < 0.0001, ]
    dds.matrix.rld <- rlogTransformation(dds.matrix, blind = TRUE)

    # data for PCA plot
    pcaData <- plotPCA(dds.matrix.rld, intgroup = "condition", returnData=TRUE)
    percentVar <- round(100 * attr(pcaData, "percentVar"))
    # color for samples
    sample.colors <- c("gray60","#D73027","#4575B4", "purple", "orange","#113FF7","green", "black")

    # plot the PCA
    # https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html
    png(filename=paste0(outdir,"PCA_all_samples_DEseq2_normalized.png"))
      # print() is required: a bare ggplot object inside a function is not
      # auto-printed, which left this png device empty before.
      print(
        ggplot(
            pcaData,
            aes(
                PC1,
                PC2,
                color=condition,
                shape=condition)
            ) +
            scale_shape_manual(values = c(4, 1, 2, 3, 0, 5, 6, 7)) +
            geom_point(size=2.5) +
            xlab(
                paste0("PC1: ",percentVar[1],"% variance")
                ) +
            ylab(
                paste0("PC2: ",percentVar[2],"% variance")
                ) +
            theme() + #, face="bold"
            scale_colour_manual(
                values=sample.colors  # dodgerblue3
                ) +
            ggtitle("PCA of all samples (DESeq2 normalized)") +
            theme(
                plot.title = element_text(lineheight=.8),
                panel.background = element_rect(fill = "gray95")
            )
      )
    dev.off()

    # how many genes will be involved in the heatmap calculation
    numbGenes <- 100
    # pheatmap data: the genes with the highest rlog variance, row-centred
    topVarGenes <- head(order(rowVars(assay(dds.matrix.rld)),decreasing=TRUE),numbGenes)
    mat <- assay(dds.matrix.rld)[ topVarGenes, ]
    mat <- mat - rowMeans(mat)
    # pheatmap data annotation
    anno <- as.data.frame(colData(dds.matrix.rld)[,c("condition")])
    rownames(anno) <- colnames(data) # check out the row names of annotation
    colnames(anno) <- "condition"
    # pheatmap data annotation color
    condition <- sample.colors
    names(condition) <- sample.name.uniq
    anno_colors <- list(condition = condition)

    #heatmap(mat)
    #pheatmap(mat)
    # ftp://cran.r-project.org/pub/R/web/packages/pheatmap/pheatmap.pdf
    png(filename=paste0(outdir,"HC_heatmap_all_samples_DESeq2_normalized.png"))
      # pheatmap draws itself via grid, no print() needed here
      pheatmap(mat ,
               annotation = anno,
               annotation_colors = anno_colors,
               fontsize = 10,
               fontsize_row = 6,
               fontsize_col = 10,
               main = paste0("Heatmap of all samples (DESeq2 normalized) ", numbGenes," genes\n"),
               annotation_names_col = F,
               show_rownames = F,
               cluster_rows = F,
               border_color = F,
               treeheight_col = 20
      )
    dev.off()
}
+
+
# edgeR run for one comparison: classic exact test plus a GLM/LRT pass.
# Writes the FDR-filtered DEG table and diagnostic plots into
# <odir>/<edgeRDir>/; informational objects are print()ed into the active
# sink() log (bare expressions are not auto-printed inside a function).
#
# odir, edgeRDir : output directory parts
# cName          : comparison name used in file names and plot titles
# RTableInFiles  : count table restricted to the compared samples
# pvtedgeR       : adjusted p-value threshold
# sampleNameI/II : the two condition names (pair for exactTest)
# sampleNameList : condition label per column of RTableInFiles
edgeRFunction <- function(odir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)
{
    library(edgeR)

    outdir <- paste0(odir, "/",edgeRDir,"/") # set outdir

    dir.create(outdir, showWarnings = FALSE) # create output folder
    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"

    comparison <- cName
    counts <- RTableInFiles

    pvt <- pvtedgeR

    group <- factor(sampleNameList)

    # edgeR 'normal' (classic exact test)

    y.normal <- DGEList(counts = counts, group = group)

    # ".png" extension added so the plot files are recognised by image viewers
    png(filename=paste0(outdir,"plots/",cName, " edgeR - plotMDS.png"))
        plotMDS(y.normal, method = "bcv", main = paste0(cName, "\nedgeR - plotMDS"))
    dev.off()

    y.normal <- calcNormFactors(y.normal)
    y.normal <- estimateCommonDisp(y.normal)
    y.normal <- estimateTagwiseDisp(y.normal)

    png(filename=paste0(outdir,"plots/",cName, " edgeR - plotBCV.png"))
        plotBCV(y.normal, main = paste0(cName, "\nedgeR - plotBCV"))
    dev.off()

    et.normal <- exactTest(y.normal, pair = c(sampleNameI, sampleNameII))

    print(topTags(et.normal)) # log the top hits
    # adjust.method: "none", "BH", "fdr" (equivalent to "BH"), "BY" or "holm";
    # see p.adjust. Argument names written out in full instead of relying on
    # partial matching (p= / adjust=).
    print(summary(de.normal <- decideTestsDGE(et.normal, p.value = pvt, adjust.method = "BH")))
    # FDR column named explicitly (previously the deparsed p.adjust() call)
    et.normal.fdr <- cbind(et.normal$table, FDR = p.adjust(et.normal$table[, 3], method = "BH"))
    et.normal.fdr <- et.normal.fdr[et.normal.fdr[, 4] < pvt, ]

    write.table(et.normal.fdr, file = paste0(outdir, cName, "_", nrow(et.normal.fdr), "_edgeR_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)

    detags.normal <- rownames(de.normal)[as.logical(de.normal)]
    png(filename=paste0(outdir,"plots/",cName, " edgeR - plotSmear.png"))
    plotSmear(et.normal, de.normal, tags = detags.normal, main = paste0(cName, "\nedgeR - plotSmear"))
    abline(h = c(-2, 2), col = "blue")
    dev.off()

    # edgeR 'GLM'

    y.glm <- DGEList(counts = counts, group = group)

    design <- model.matrix(~0 + group, data = y.glm$samples)
    print(design) # log the design matrix

    y.glm <- estimateGLMCommonDisp(y.glm, design)
    y.glm <- estimateGLMTrendedDisp(y.glm, design)

    ## Loading required package: splines
    y.glm <- estimateGLMTagwiseDisp(y.glm, design)

    png(filename=paste0(outdir,"plots/",cName, " edgeR - plotBCV - GLM.png"))
    plotBCV(y.glm, main = paste0(cName, "\nedgeR - plotBCV - GLM"))
    dev.off()

    y.fit <- glmFit(y.glm, design)
    y.lrt <- glmLRT(y.fit)

    FDR <- p.adjust(y.lrt$table$PValue, method = "BH")
    print(sum(FDR < pvt)) # number of GLM DEGs below the threshold (log only)

    print(topTags(y.lrt))

    # NOTE(review): the GLM results are only logged, never written to a file —
    # confirm that this is intended.
    print(summary(de.glm <- decideTestsDGE(y.lrt, p.value = pvt, adjust.method = "BH")))

}
+
+
# DESeq2 run for one comparison: writes the FDR-filtered DEG table and
# diagnostic plots (mean-sd, PCA, dispersion) into <odir>/<DESeq2Dir>/.
#
# odir, DESeq2Dir : output directory parts
# cName           : comparison name used in file names and plot titles
# RTableInFiles   : count table restricted to the compared samples
# pvtDESeq2       : adjusted p-value (padj) threshold
# sampleNameI/II  : the two condition names (factor level order)
# sampleNameList  : condition label per column of RTableInFiles
DESeq2Function <- function(odir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)
{
    library(DESeq2)
    library(RColorBrewer)
    library(gplots)
    library(vsn)

    outdir <- paste0(odir, "/",DESeq2Dir,"/") # set outdir

    dir.create(outdir, showWarnings = FALSE)
    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"


    pvt <- pvtDESeq2

    # Build the DESeq2 data set directly from the count matrix; `condition`
    # carries the per-library condition labels.
    #   DESeqDataSet(se, design, ignoreRank = FALSE)
    #   DESeqDataSetFromMatrix(countData, colData, design, tidy = FALSE, ignoreRank = FALSE, ...)
    #   DESeqDataSetFromHTSeqCount(sampleTable, directory = ".", design, ignoreRank = FALSE, ...)
    #   DESeqDataSetFromTximport(txi, colData, design, ...)
    coldata <- data.frame(row.names = colnames(RTableInFiles), condition = sampleNameList)

    ddsHTSeq <- DESeqDataSetFromMatrix(countData = RTableInFiles, colData = coldata, design = ~ condition)

    colData(ddsHTSeq)$condition <- factor(colData(ddsHTSeq)$condition, levels = c(sampleNameI, sampleNameII))

    ddsHTSeq <- DESeq(ddsHTSeq)

    ddsHTSeq.res <- results(ddsHTSeq)
    ddsHTSeq.res <- ddsHTSeq.res[order(ddsHTSeq.res$padj), ]
    # print() so the head of the result table reaches the sink() log; a bare
    # head() is not auto-printed inside a function.
    print(head(ddsHTSeq.res))

    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - plotMA")))
    #     plotMA(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotMA"), ylim = c(-2, 2))
    # dev.off()

    # keep only genes with a defined padj below the threshold
    ddsHTSeq.res.fdr <- ddsHTSeq.res[!is.na(ddsHTSeq.res$padj), ]
    ddsHTSeq.res.fdr <- ddsHTSeq.res.fdr[ddsHTSeq.res.fdr$padj < pvt, ]

    write.table(as.matrix(ddsHTSeq.res.fdr), file = paste0(outdir, cName, "_", nrow(as.matrix(ddsHTSeq.res.fdr)), "_DESeq2_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)

    ddsHTSeq.rld <- rlogTransformation(ddsHTSeq, blind = TRUE)
    ddsHTSeq.vsd <- varianceStabilizingTransformation(ddsHTSeq, blind = TRUE)

    # NOTE(review): par(mfrow=...) has no effect on the separate png devices
    # opened below; kept to avoid changing side effects — consider removing.
    par(mfrow = c(1, 3))
    notAllZero <- (rowSums(counts(ddsHTSeq)) > 0)
    # ".png" extension added so the plot files are recognised by image viewers
    png(filename=paste0(outdir,"plots/",cName, " DESeq2 - SdPlot.1.png"))
        meanSdPlot(log2(counts(ddsHTSeq, normalized = TRUE)[notAllZero, ] + 1)) # , ylim = c(0, 2.5)
    dev.off()
    png(filename=paste0(outdir,"plots/",cName, " DESeq2 - SdPlot.2.png")) # , ylim = c(0, 2.5)
        meanSdPlot(assay(ddsHTSeq.rld[notAllZero, ]))
    dev.off()
    png(filename=paste0(outdir,"plots/",cName, " DESeq2 - SdPlot.3.png")) # , ylim = c(0, 2.5)
        meanSdPlot(assay(ddsHTSeq.vsd[notAllZero, ]))
    dev.off()

    # sample-to-sample distances of the rlog values; hmcol/mat feed the
    # commented-out heatmap below, kept for reference
    hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
    distRL <- dist(t(assay(ddsHTSeq.rld)))
    mat <- as.matrix(distRL)
    rownames(mat) <- colnames(mat) <- with(colData(ddsHTSeq), paste0(condition))

    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - heatmap")))
    #     heatmap(mat, trace = "none", col = rev(hmcol), margin = c(13, 13))
    # dev.off()

    png(filename=paste0(outdir,"plots/",cName, " DESeq2 - PCA.png"))
        print(plotPCA(ddsHTSeq.rld, intgroup = "condition"))
    dev.off()

    png(filename=paste0(outdir,"plots/",cName, " DESeq2 - DispEsts.png"))
        plotDispEsts(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotDispEsts"))
    dev.off()
}
+
+NOISeqFunction <- function(odir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)
+{
+    library(NOISeq)
+
+    outdir <- paste0(odir, "/",NOISeqDir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE)
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+    pvt <- pvtNOISeq
+
+    counts <- RTableInFiles[, columnPos]
+    head(counts)
+
+    # must be changed if there are no triplicates
+    # !! Please change this setting if needed !!
+    myfactors <- data.frame(Treatment = sampleNameList, TreatmentRun = SampleNames)
+    # sort counts by row names
+    mycounts <- counts[order(rownames(counts)), ]
+    # filter out low counts
+    mycounts.filter <- filtered.data(mycounts, factor = myfactors$Treatment, norm = FALSE, depth = NULL, method = 1, cv.cutoff = 100, cpm = 1)
+    mycounts.rowname <- rownames(mycounts.filter)
+
+    # Extra file with length and GC content
+    myLengthTable <- read.table(LenGCTable, sep = "\t", header = TRUE, stringsAsFactor = FALSE, row.names=1)
+    myLengthTable <- myLengthTable[order(rownames(myLengthTable)),] # sort by rownames
+    myLengthTable.filter <- myLengthTable[mycounts.rowname,]
+
+    mylength <- myLengthTable.filter[,myLenPos]
+    names(mylength) <- rownames(myLengthTable.filter)
+
+    mygc <- myLengthTable.filter[,myGCPos]
+    names(mygc) <- rownames(myLengthTable.filter)
+
+
+
+
+
+    mydata <- readData(data = mycounts.filter, length = mylength, gc = mygc, factors = myfactors)
+    mydata
+
+    mydata.GCbias <- dat(mydata, type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.1")))
+#-#        explo.plot(mydata.GCbias)
+#-#    dev.off()
+
+    mydata.GCbias.group <- dat(mydata, factor = "Treatment", type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.2")))
+#-#      explo.plot(mydata.GCbias.group)
+#-#    dev.off()
+
+    show(mydata.GCbias.group)
+
+    mydata.lengthbias <- dat(mydata, type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.1")))
+#-#        explo.plot(mydata.lengthbias)
+#-#    dev.off()
+
+    mydata.lengthbias.group <- dat(mydata, factor = "Treatment", type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.2")))
+#-#        explo.plot(mydata.lengthbias.group)
+#-#    dev.off()
+
+    show(mydata.lengthbias.group)
+
+    # looks different
+    mydata.cd <- dat(mydata, type = "cd")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - cd")))
+#-#        explo.plot(mydata.cd)
+#-#    dev.off()
+    mylength
+
+    myRPKM <- NOISeq::rpkm(assayData(mydata)$exprs)
+    myUQUA <- NOISeq::uqua(assayData(mydata)$exprs, long = mylength, lc = 0.5, k = 0)
+    myTMM <- NOISeq::tmm(assayData(mydata)$exprs, long = 1000, lc = 0)
+
+    mynoiseq.rpkm <- noiseq(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.nnorm <- noiseq(mydata, k = 0.5, norm = "n", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.tmm <- noiseq(mydata, k = 0.5, norm = "tmm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+
+    # RPKM
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm@results[[1]][mynoiseq.rpkm@results[[1]]$prob > pvt, ]
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm.prob[!is.na(mynoiseq.rpkm.prob$prob), ]
+    write.table(mynoiseq.rpkm.prob, file = paste0(outdir, cName, "_", nrow(mynoiseq.rpkm.prob), "_NOISeq_rpkm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # none
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm@results[[1]][mynoiseq.nnorm@results[[1]]$prob > pvt, ]
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm.prob[!is.na(mynoiseq.nnorm.prob$prob), ]
+    write.table(mynoiseq.nnorm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.nnorm.prob), "_NOISeq_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # TMM
+    mynoiseq.tmm.prob <- mynoiseq.tmm@results[[1]][mynoiseq.tmm@results[[1]]$prob > pvt, ]
+    mynoiseq.tmm.prob <- mynoiseq.tmm.prob[!is.na(mynoiseq.tmm.prob$prob), ]
+    write.table(mynoiseq.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.tmm.prob), "_NOISeq_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    ############ NOISeq bio ################
+
+    pvtnsb <- 0.995
+    a0per_value <- 0.99
+
+    iterations <- 5
+    x5 <- c(sample(10000:99999, iterations, replace=F), 12345)
+    x5
+
+    collector.rpkm <- NULL
+    collector.tmm <- NULL
+
+
+    # for (iter in x5) {
+    #   # RPKM
+    #   mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    #   collector.rpkm <- rbind(collector.rpkm, mynoiseqbio.rpkm.prob)
+    #   #collector <- rbind(collector, mynoiseqbio.rpkm.prob[rownames(mynoiseqbio.rpkm.prob) %in% rownames(collector),])
+    #   # TMM
+    #   mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    #   collector.tmm <- rbind(collector.tmm, mynoiseqbio.tmm.prob)
+    # }
+    #
+    # # RPKM
+    # print(sort(row.names(collector.rpkm)))
+    # library(stringr)
+    # mynoiseqbio.rpkm.prob.overlap <- collector.rpkm[str_split_fixed(row.names(collector.rpkm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.rpkm.prob.overlap) <- substr(row.names(mynoiseqbio.rpkm.prob.overlap),1,str_length(row.names(mynoiseqbio.rpkm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.rpkm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob.overlap),"_NOISeqbio_rpkm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    #
+    # #TMM
+    # print(sort(row.names(collector.tmm)))
+    # library(stringr)
+    # mynoiseqbio.tmm.prob.overlap <- collector.tmm[str_split_fixed(row.names(collector.tmm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.tmm.prob.overlap) <- substr(row.names(mynoiseqbio.tmm.prob.overlap),1,str_length(row.names(mynoiseqbio.tmm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.tmm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob.overlap),"_NOISeqbio_tmm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # # none
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm@results[[1]][mynoiseqbio.nnorm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm.prob[!is.na(mynoiseqbio.nnorm.prob$prob), ]
+    # write.table(mynoiseqbio.nnorm.prob, file = paste0(outdir, cName, "_NOISeqbio_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # # TMM
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    # write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName, "_NOISeqbio_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+
+
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiorpkm")))
+        mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    write.table(mynoiseqbio.rpkm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob),"_NOISeqbio_rpkm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiontmm")))
+        mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob),"_NOISeqbio_tmm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbionnorm")))
+        mynoiseqbio.nnorm <- noiseqbio(mydata, k = 0.5, norm = "n", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    head(mynoiseqbio.rpkm@results[[1]])
+    head(mynoiseqbio.nnorm@results[[1]])
+
+    mynoiseqbio.rpkm.deg <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = NULL)
+
+    mynoiseqbio.rpkm.deg.up <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "up")
+    mynoiseqbio.rpkm.deg.down <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "down")
+    mynoiseqbio.nnorm.deg <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = NULL)
+    mynoiseqbio.nnorm.deg.up <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "up")
+    mynoiseqbio.nnorm.deg.down <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "down")
+
+    par(mfrow = c(3, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.8")))
+        DE.plot(mynoiseq.rpkm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.9")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.8")))
+        DE.plot(mynoiseq.nnorm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.9")))
+        DE.plot(mynoiseq.tmm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.8")))
+        DE.plot(mynoiseq.tmm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(2, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvt)))
+        DE.plot(mynoiseqbio.rpkm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvtnsb)))
+        DE.plot(mynoiseqbio.rpkm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvt)))
+        DE.plot(mynoiseqbio.nnorm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvtnsb)))
+        DE.plot(mynoiseqbio.nnorm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(1, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9_MD")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorm_0.9_MD")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+}
+
+###########################################
+## additional functions for more usability#
+###########################################
+
+# Compute RPKM (reads per kilobase of transcript per million mapped reads).
+#   c - raw read count(s) for the gene
+#   n - total mapped reads of the library (column sum)
+#   l - transcript length in bp
+# Arithmetic is vectorized, so any argument may be a vector.
+calcRPKM <- function(c,n,l)
+{
+    (10^9 * as.numeric(c)) / (as.numeric(n) * as.numeric(l))
+}
+
+# Build a gene x sample RPKM matrix from a raw count table.
+#   InTable    - counts (genes x samples) with gene ids as rownames
+#   directory  - folder holding the length/GC table (including trailing "/")
+#   LenGCTable - file name of the length/GC table (tab separated, gene ids
+#                in the first column)
+#   myLenPos   - column of the length table holding the transcript length
+# Returns a numeric matrix with the same dimnames as InTable.
+RPKMMatrix <- function(InTable,directory,LenGCTable,myLenPos)
+{
+    # spelled-out argument name (was partially matched "stringsAsFactor")
+    myLengthTable <- read.table(paste0(directory,LenGCTable), sep = "\t", header = TRUE, stringsAsFactors = FALSE, row.names = 1)
+
+    # per-library totals and per-gene lengths, aligned to InTable's row order
+    sumCol <- apply(InTable, 2, sum)
+    geneLen <- as.numeric(myLengthTable[rownames(InTable), myLenPos])
+
+    # Vectorized RPKM = 10^9 * count / (librarySize * length).
+    # Replaces the original per-cell double loop: identical formula, but
+    # whole-matrix arithmetic instead of ~genes*samples scalar calls.
+    newtable <- as.matrix(InTable)
+    newtable <- sweep(newtable, 2, as.numeric(sumCol), "/")
+    newtable <- sweep(newtable, 1, geneLen, "/")
+    return(newtable * 10^9)
+}
+
+
+# Drop the five summary rows (e.g. "__no_feature", "__ambiguous", ...) that
+# HTSeq-count appends at the bottom of its output table.
+rmHTSeqLines <- function(counts)
+{
+    utils::head(counts, n = -5L)
+}
+
+
+###############
+## functions ##
+###############
+
+# Collapse sample column names to bare condition labels and return them as
+# a factor: everything after the first "." (the replicate suffix) is
+# dropped; the remaining token is additionally split on spaces.
+#   e.g. c("control_6h", "control_6h.1") -> factor("control_6h", "control_6h")
+split2list <- function(tmp)
+{
+    xx <- c()
+    # iterate over the elements themselves; the original 1:length(tmp)
+    # evaluated to c(1, 0) and crashed on empty input
+    for(name in tmp) {
+        if(grepl("\\.", name)) {
+            # keep only the part before the first dot, then split on spaces
+            foo <- strsplit(name, '\\.')[[1]][1]
+            xx <- c(xx, strsplit(foo, " ")[[1]])
+        } else {
+            xx <- c(xx, name)
+        }
+    }
+    return(as.factor(xx))
+}
+
+# Build the replicate-grouping matrix used by RUVSeq-style helpers: one row
+# per factor level, each row holding the column indices of that level's
+# members, padded with -1 up to the largest group size.
+# (After https://github.com/drisso/RUVSeq/blob/master/R/helper.R)
+makeGroups <- function(xs)
+{
+    xs <- factor(xs)
+    groups <- matrix(-1, nrow = length(levels(xs)), ncol = max(table(xs)))
+    for (i in seq_along(levels(xs))) {
+        idxs <- which(xs == levels(xs)[i])
+        # seq_along() (not 1:length()) so a level with zero members leaves
+        # its row untouched instead of erroring on the 1:0 sequence
+        groups[i, seq_along(idxs)] <- idxs
+    }
+    return(groups)
+}
+
+# Build the column (sample) name list for the two conditions being
+# compared: each condition expands to "<name>", "<name>.1", ... via
+# makeSampleList.helper.
+#   A, B   - condition base names
+#   Al, Bl - number of columns per condition (base + replicates)
+# Returns NULL when Al is not numeric (nothing to expand).
+makeSampleList <- function(A, Al, B, Bl)
+{
+    l <- c()
+    # direct type test instead of the original exists("Al", mode = "numeric")
+    # string lookup -- same outcome for supplied arguments, less fragile
+    if(is.numeric(Al)) {
+        l <- makeSampleList.helper(A, Al, l)
+        l <- makeSampleList.helper(B, Bl, l)
+    }
+    return(l)
+}
+# Expand one condition name into its replicate column names and append them
+# to l:  v = "A", k = 3  ->  c(l, "A", "A.1", "A.2")
+#   v - base sample name, k - replicate count, l - vector to append to
+makeSampleList.helper <- function(v,k,l)
+{
+    # seq_len() instead of 1:k so k = 0 appends nothing
+    # (1:0 iterated twice and produced a bogus "v.-1" entry)
+    for(h in seq_len(k)) {
+        if(h == 1) {
+            l <- c(l, v)
+        } else {
+            l <- c(l, paste0(v,".",h-1))
+        }
+    }
+    return(l)
+}
+
+# Return the 1-based positions of the elements of l as numbers
+# (1, 2, ..., n); NULL for empty input.
+makeIndexList <- function(l)
+{
+    if (length(l) == 0) {
+        return(NULL)
+    }
+    return(as.numeric(seq_along(l)))
+}
+
+# Read the per-method DEG tables previously written into the comparison
+# folder l and report/return their overlap.
+#   l     - comparison output folder (searched recursively for *.tsv)
+#   check - 1: return only genes whose fold-change direction also agrees in
+#              all methods (via interDirect), otherwise plain intersection
+#   t     - number of methods to intersect: 4 (edgeR, DESeq2, NOISeq,
+#           NOISeqbio) or 3 (without NOISeqbio)
+# Returns the NOISeq result rows of the selected genes.
+# NOTE(review): the method <-> index mapping (1 = DESeq2, 2 = edgeR,
+# 3/4 = the two NOISeq flavours) relies on the alphabetical order of
+# list.files() -- verify if the output naming scheme ever changes.
+intersection <- function(l,check,t)
+{
+  l.list <- list.files(path = l, pattern = "*(rpkm|edgeR|DESeq2).*tsv", recursive = TRUE)
+  # the two NOISeq files may sort either way; probe for "bio" to tell
+  # NOISeq and NOISeqbio apart
+  if(grepl("bio", l.list[4],fixed = TRUE)) {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+  } else {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+  }
+  l.list.edgeR <- read.table(paste0(l,"/",l.list[2]), header=T, sep="\t", row.names=1)
+  l.list.deseq2 <- read.table(paste0(l,"/",l.list[1]), header=T, sep="\t", row.names=1)
+  
+  
+  # per-method DEG counts (go to the active sink/log)
+  print("DEGs edgeR")
+  print(nrow(l.list.edgeR))
+  print("DEGs deseq2")
+  print(nrow(l.list.deseq2))
+  print("DEGs noiseq")
+  print(nrow(l.list.noiseq))
+  print("DEGs noiseqbio")
+  print(nrow(l.list.noiseqbio))
+  
+  interect.list <- ""
+  interect.list.direct <- ""
+  
+  if(t==4) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), intersect(rownames(l.list.noiseqbio), rownames(l.list.deseq2))))
+    
+    print("DEGs intersection without direction check (four methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+    
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, l.list.noiseqbio, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+  if(t==3) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), rownames(l.list.deseq2)))
+    
+    print("DEGs intersection without direction check  (three methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+    
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      # fake data frame with 0 rows => nrow == 0
+      df_fake <- data.frame(Date=as.Date(character()), 
+                       File=character(), 
+                       User=character(), 
+                       stringsAsFactors=FALSE) 
+      
+      # a 0-row nb argument makes interDirect fall back to the NOISeq table
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, df_fake, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+  
+  
+  # row-subset the NOISeq table by the chosen gene list
+  if(check == 1){
+    return(l.list.noiseq[interect.list.direct,])
+  } else {
+    return(l.list.noiseq[interect.list,])
+  }
+}
+
+# Direction check for the intersected DEG list i: encode every gene per
+# method as "<sign><name>" and intersect the encodings, so only genes whose
+# fold-change direction agrees in every method survive.
+#   e  - edgeR table (logFC),  d - DESeq2 table (log2FoldChange)
+#   n  - NOISeq table (M),     nb - NOISeqbio table (log2FC); pass a 0-row
+#        data frame as nb to substitute NOISeq for NOISeqbio
+#   i  - gene names present in all tables
+# Returns the agreeing gene names with the sign prefix stripped.
+interDirect <- function(e,d,n,nb,i)
+{
+  lil <- length(i)
+  
+  if(nrow(nb) == 0) { # use noiseq twice to simulate noiseqbio
+    nbx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) 
+    nby <- paste0(nbx[,"M"],rownames(nbx))
+  } else {
+    nbx <- apply(nb[i,], 2, function(x) {ifelse(x > 0, "-", "+")})
+    nby <- paste0(nbx[,"log2FC"],rownames(nbx))
+  }
+  
+  ex <- apply(e[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# here < and >
+  dx <- apply(d[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# here < and >
+  nx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) # twisted, because edgeR and DESeq2 are calculating vice versa   
+  
+  ey <- paste0(ex[,"logFC"],rownames(ex))
+  dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+  ny <- paste0(nx[,"M"],rownames(nx))
+  
+  interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+  
+  # NOTE(review): if nothing agrees, the edgeR/DESeq2 signs are flipped once
+  # and the intersection retried -- presumably to absorb a reversed contrast
+  # order; confirm this is still intended.
+  if(lil>0 && length(interect.list.direct)<=0) {
+    ex <- apply(e[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# here < and >
+    dx <- apply(d[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# here < and >
+    
+    ey <- paste0(ex[,"logFC"],rownames(ex))
+    dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+    
+    interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+  }
+  
+  # drop the leading sign character (gene names assumed shorter than 99 chars)
+  return(substr(interect.list.direct,2,100))
+}
+
+###########################################
+## executive part of the script           #
+###########################################
+# run every configured pairwise comparison through the pipeline
+for(SampleNames in sampleLists) {
+   workflow(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
+}
+# NOTE(review): print() shows the "\n" literally; cat() would interpret it
+print("Thank you for using the DEG pipeline :) \n")
+
+# fin
+
+### compare DEGs analysis ###
+# used venny and Excel
+
diff --git a/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_mp_20211209.r b/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_mp_20211209.r
new file mode 100644
index 0000000000000000000000000000000000000000..086ba3c03bb228d5f7d1f33bdd46190f4e49ff74
--- /dev/null
+++ b/workflows/Experiment2/new_deg_pipe_four_methods_noiseqbio_schippers_second_mp_20211209.r
@@ -0,0 +1,922 @@
+###############################################################
+## DEG pipeline with edgeR, DESeq2 and NOISeq                 #
+## Modified from the template of Dr. Kristian Ullrich 2014    #
+## @fabianhaas, AG Rensing, Marburg, Jan 2016                 #
+###############################################################
+# The pipeline expects sample triplicates.
+#   If you don't have three samples of one condition, use fake files with zero counts.
+# RPKM calculation is optional.
+# Additional program to sum up the tsv files: /mnt/nas_temp/home/haasf/Program/sumup_htseq_counts.pl folder file_prefix
+
+###########################################
+## please define your variables           #
+###########################################
+
+# Folder where all comparison results (one subfolder per comparison) are written
+saveat <- "/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/Mp_rmdup_DEGs_0.9"
+
+# Raw count table (genes x samples, tab separated, first column = gene id)
+file <- '/mnt/NAS_coruscant_datashare/haasf/madland_RNA-seq_Schippers/second_experiment/Mp.p.sort.wotRNA.counts'
+# file <- '/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/31315_all.p.sort.counts'
+
+# Table with the length and the GC content
+LenGCTable <- "/mnt/Teamdrive/projects/MAdLand/cooperations/Schippers_Lab_RNA-seq_analyses/MpTak_v6.1r1.mrna.fasta.GC.length"
+# LenGCTable <- "/mnt/NAS_coruscant_datashare/haasf/GeneAtlas_RNASeq/DEG_2nd_round/cosmoss_V3.3.release_2016-07-22.IsoformV31.fasta.length_GC_Contig_Daten_for_34840_1nd.csv"
+
+# Column position of the gene name and the width/gc content (first column is gene name!)
+myLenPos <- 1
+myGCPos <- 2
+
+# helper loop to generate every pairwise comparison from a list of names
+# hoo <- c("","")
+# sampleLists <- c()
+# for(a in hoo) {
+#   for(b in hoo) {
+#     if(a!=b) {
+#       tmp <- c(a,b)
+#       sampleLists <- c(sampleLists, list(tmp))
+#     }
+#   }
+# }
+
+# Each pair is one comparison: c(query, reference)
+sampleLists <- list(c("LCS_24h", "control_24h"),
+                    c("LCS_24h", "control_6h"),
+                    c("LCS_24h", "LCS_6h"),
+                    c("control_24h", "LCS_6h"),
+                    c("control_24h", "control_6h"),
+                    c("LCS_6h", "control_6h")
+)
+
+# Do you want a RPKM value table (1/0)
+# Be warned, it takes a while to do this. approx. 10 minutes for 35000 genes and 6 samples.
+rpkm <- 0
+# Are the data original HTSeq files? yes <- 1 ; no <- 0
+htseq <- 0
+
+# threshold settings for the R packages
+# edgeR (FDR cutoff)
+pvtedgeR <- 0.001
+# DESeq2 (adjusted p-value cutoff)
+pvtDESeq2 <- 0.001
+# NOISeq (probability threshold; higher = stricter)
+pvtNOISeq <- 0.9
+
+# Output folder names
+edgeRDir <- "edgeR"
+DESeq2Dir <- "DESeq2"
+NOISeqDir <- "NOISeq"
+
+#########################
+## stop editing here!! ##
+#########################
+
+
+###########################################
+## executive function                     #
+###########################################
+
+# Run one complete DEG comparison (SampleNames = c(query, reference)):
+# creates the output folder and log, loads and renames the count table,
+# selects the relevant columns, filters low-count genes, runs edgeR, DESeq2
+# and NOISeq, and writes the 3- and 4-method intersections.
+# The remaining arguments are the user settings from the top of the script
+# and are passed through to the method functions unchanged.
+workflow <- function(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
+{
+    # setwd(workingFolder) # set workspace
+
+            sampleNameI <- SampleNames[1]
+            sampleNameII <- SampleNames[2]
+
+            # Comparison name
+            cName <- paste(sampleNameI, sampleNameII, sep = "_vs_")
+
+            # basename of the counts file (kept for reference)
+            filename <- strsplit(file, "/")
+            filename <- filename[[1]][length(filename[[1]])] # long but it's working
+            sampleName <- gsub(", ","_",toString(SampleNames))
+            samples <- SampleNames
+            # create output dir
+            dir.create(saveat, showWarnings = FALSE)
+            outdir <- paste0(saveat,"/",sampleName)
+            dir.create(outdir)
+
+            # Start the log file (all print() output below lands there)
+            sink(paste0(outdir, "/DEGpipe_",cName, ".log"))
+
+            # print parameter into the log file
+            print("set parameters:")
+            print(paste0("Counts file:", file))
+            print(paste0("Results stored at:", saveat))
+            print(paste0("SampleNames: ",SampleNames))
+            print(paste0("LenGCTable: ",LenGCTable))
+            print(paste0("myLenPos: ",myLenPos))
+            print(paste0("myGCPos: ",myGCPos))
+            print(paste0("rpkm: ",rpkm))
+            print(paste0("htseq: ",htseq))
+            print(paste0("pvtedgeR: ",pvtedgeR))
+            print(paste0("pvtDESeq2: ",pvtDESeq2))
+            print(paste0("pvtNOISeq: ",pvtNOISeq))
+            print(paste0("edgeRDir: ",edgeRDir))
+            print(paste0("DESeq2Dir: ",DESeq2Dir))
+            print(paste0("NOISeqDir: ",NOISeqDir))
+            # session and package infos for the log file
+            print(sessionInfo())
+            print(Sys.info())
+
+            # load spike-in count tables
+            data0 <- read.table(file, header=T, sep="\t", row.names=1)
+            # only if the gene ids do not end with .1 (necessary for the NOISeqbio iteration step)
+            #row.names(data0) <- paste0(row.names(data0), ".1")
+
+            # remove HTSeq rows like "__no_feature"
+            data0 <- data0[ with(data0,  !grepl('__', rownames(data0))) , ]
+            data0 <- data0[ with(data0,  !grepl('ERCC', rownames(data0))) , ]
+            #data0 <- data0[ , with(data0,  !grepl('X.1', colnames(data0)))]
+            #rename col names (library name to roman number)
+
+            # map raw library file names to readable condition.replicate names
+            librariesName <- c("M54.fq.M54.fq.single.p.count", "M53.fq.M53.fq.single.p.count", "M52.fq.M52.fq.single.p.count", "M48.fq.M48.fq.single.p.count", "M47.fq.M47.fq.single.p.count", "M46.fq.M46.fq.single.p.count", "M40.fq.M40.fq.single.p.count", "M39.fq.M39.fq.single.p.count", "M38.fq.M38.fq.single.p.count", "M34.fq.M34.fq.single.p.count", "M33.fq.M33.fq.single.p.count", "M32.fq.M32.fq.single.p.count")
+#            librariesName <- c("P97", "P96", "P95", "P91", "P90", "P89", "P111", "P110", "P109", "P105", "P104", "P103")
+            romanNumber <- c("LCS_24h.2", "LCS_24h.1", "LCS_24h", "control_24h.2", "control_24h.1", "control_24h", "LCS_6h.2", "LCS_6h.1", "LCS_6h", "control_6h.2", "control_6h.1", "control_6h")
+#            romanNumber <- c("24h_LCS.2", "24h_LCS.1", "24h_LCS", "DMSO24_control.2", "DMSO24_control.1", "DMSO24_control", "6h_LCS.2", "6h_LCS.1", "6h_LCS", "DMSO6_control.2", "DMSO6_control.1", "DMSO6_control")
+            
+            for(i in 1:length(librariesName)) {
+                colnames(data0)[colnames(data0)==grep(librariesName[i], names(data0), value = TRUE)] <- romanNumber[i]
+            }
+
+            # creates a list of the column names/samples you would like to compare
+            SampleNames <- makeSampleList(sampleNameI, length(grep(paste0("^",sampleNameI,"[^A-Z]"), colnames(data0))) + 1,
+                                          sampleNameII, length(grep(paste0("^",sampleNameII,"[^A-Z]"), colnames(data0))) + 1)
+            print(SampleNames)
+
+            ## have a look at this as a replacement for the block above ##
+            ## newenw <- table.new[, grepl(paste(LIST,collapse="|"), colnames(table.new))]
+
+
+            # extract the samples from the data0 if needed
+            if(length(SampleNames) > 0) {
+                RTableInFiles <- data0[,SampleNames]
+                data <- data0[,SampleNames]
+            }
+            print(head(RTableInFiles))
+
+            # sort by column name
+            # data <- data[ , order(names(data))] # dangerous, the order of query and reference might not be the same
+            # see how many rows and columns are in the tables
+            print(paste("Raw data row number: ",nrow(data)))
+            print(paste("Raw data column number: ",ncol(data)))
+
+            # 2) A typical differential expression analysis workflow
+            # 2.1) Filtering and exploratory data analysis
+            # filter: One single gene/spike-in must have at least 5 counts in minimum 2 libraries
+            filter <- apply(data, 1, function(x) length(x[x>5])>=2)
+            dataFiltered <- data[filter,]
+            # see how many rows and columns are left
+            print(paste("Filtered data row number: ",nrow(dataFiltered)))
+            # NOTE(review): label says "row number" but this prints the column count
+            print(paste("Filtered data row number: ",ncol(dataFiltered)))
+            # filter gene names
+            # NOTE(review): "^Pp3" is the P. patens id prefix, but this run uses
+            # MpTak (Marchantia) ids -- 'genes' is probably empty here; confirm
+            genes <- rownames(dataFiltered)[grep("^Pp3", rownames(dataFiltered))]
+            # filter spike-in names (ERCC rows were already removed above)
+            spikes <- rownames(dataFiltered)[grep("^ERCC", rownames(dataFiltered))]
+            # get the column name list as.factors
+            xtmp <- colnames(dataFiltered)
+            x <- split2list(xtmp)
+
+            # condition label per selected column (replicate suffix stripped)
+            sampleNameList <- gsub(".[1234567890]+$", "", SampleNames)
+            columnPos <- makeIndexList(SampleNames)
+
+            # run ...
+            ### edgeR ###
+            print(paste0("edgeR is running with ", cName))
+            packageDescription("edgeR")
+            edgeRFunction(outdir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)
+
+            #### DESeq2 ####
+            print(paste0("DESeq2 is running with ", cName))
+            packageDescription("DESeq2")
+            DESeq2Function(outdir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)
+
+            ### NOISeq ###
+            print(paste0("NOISeq is running with ", cName))
+            packageDescription("NOISeq")
+           NOISeqFunction(outdir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)
+
+            ### Overlap of all three methods ###
+            # intersection with DEG direction check => 1, without => 0
+            intersection_4.file <- intersection(outdir,1,4)
+            write.table(intersection_4.file, file = paste0(outdir, "/", cName,"_intersection_four_methods_",nrow(intersection_4.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+            # edgeR, DESeq2, NOISeq
+            intersection_3.file <- intersection(outdir,1,3)
+            write.table(intersection_3.file, file = paste0(outdir, "/", cName,"_intersection_three_methods_",nrow(intersection_3.file),".tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+            
+            sink() # End of log file
+
+}
+
+
+
+
+
+####################
+## main functions  #
+####################
+
+# DESeq2-normalize the complete count table and write overview graphics
+# (PCA of all samples, heatmap of the 100 most variable genes) as PNGs.
+#   data          - integer count table (genes x samples)
+#   librariesName - raw library column names (rownames of the colData)
+#   cName         - comparison name (context only)
+#   outdir        - output path prefix for the PNG files
+#   romanNumber   - readable sample names; ".1"/".2" suffixes mark replicates
+# NOTE(review): not called from the code visible in this file -- presumably
+# run manually for the all-sample overview.
+plotGraphics <- function(data, librariesName, cName, outdir, romanNumber)
+{
+    library("DESeq2")
+    library("ggplot2")
+    library("RColorBrewer")
+    library("pheatmap")
+    library("genefilter")
+
+    # create sample annotation lists (condition = name without replicate suffix)
+    # sample.name.list <- gsub(".[12]", "", librariesName[colnames(data)])
+    sample.name.list <- gsub(".[12]", "", romanNumber)
+    sample.name.uniq <- unique(unlist(sample.name.list, use.names = FALSE))
+
+    col.data.condition = data.frame(row.names = librariesName, condition = sample.name.list)
+
+    # run DESeq2 functions
+    dds.matrix <- DESeqDataSetFromMatrix(countData = data, colData = col.data.condition, design = ~condition) # data must be integer! (whole numbers)
+    colData(dds.matrix)$condition <- factor(colData(dds.matrix)$condition, levels = sample.name.uniq)
+    dds.matrix.deseq <- DESeq(dds.matrix)
+    dds.matrix.res <- results(dds.matrix.deseq)
+    dds.matrix.res <- dds.matrix.res[order(dds.matrix.res$padj), ]
+    # keep genes with a defined, very small adjusted p-value
+    dds.matrix.res.fdr <- dds.matrix.res[!is.na(dds.matrix.res$padj), ]
+    dds.matrix.res.fdr <- dds.matrix.res.fdr[dds.matrix.res.fdr$padj < 0.0001, ]
+    # regularized-log transform used for both PCA and heatmap
+    dds.matrix.rld <- rlogTransformation(dds.matrix, blind = TRUE)
+
+    # data for PCA plot
+    pcaData <- plotPCA(dds.matrix.rld, intgroup = "condition", returnData=TRUE)
+    percentVar <- round(100 * attr(pcaData, "percentVar"))
+    # color for samples
+    sample.colors <- c("gray60","#D73027","#4575B4", "purple", "orange","#113FF7","green", "black")
+
+    # plot the PCA
+    # https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html
+    # NOTE(review): the ggplot object below is not print()ed; inside a
+    # function ggplot does not auto-draw, so this PNG is likely blank --
+    # wrap the ggplot(...) call in print() to fix.
+    png(filename=paste0(outdir,"PCA_all_samples_DEseq2_normalized.png"))
+      ggplot(
+          pcaData,
+          aes(
+              PC1,
+              PC2,
+              color=condition,
+              shape=condition)
+          ) +
+          scale_shape_manual(values = c(4, 1, 2, 3, 0, 5, 6, 7)) +
+          geom_point(size=2.5) +
+          xlab(
+              paste0("PC1: ",percentVar[1],"% variance")
+              ) +
+          ylab(
+              paste0("PC2: ",percentVar[2],"% variance")
+              ) +
+          theme() + #, face="bold"
+          scale_colour_manual(
+              values=sample.colors  # dodgerblue3
+              ) +
+          ggtitle("PCA of all samples (DESeq2 normalized)") +
+          theme(
+              plot.title = element_text(lineheight=.8),
+              panel.background = element_rect(fill = "gray95")
+          )
+    dev.off()
+
+    # how many genes will be involved in the heatmap calculation
+    numbGenes <- 100
+    # pheatmap data: top variable genes, centered per gene
+    topVarGenes <- head(order(rowVars(assay(dds.matrix.rld)),decreasing=TRUE),numbGenes)
+    mat <- assay(dds.matrix.rld)[ topVarGenes, ]
+    mat <- mat - rowMeans(mat)
+    # pheatmap data annotation
+    anno <-as.data.frame(colData(dds.matrix.rld)[,c("condition")])
+    rownames(anno) <- colnames(data) # check out the row names of annotation
+    colnames(anno) <- "condition"
+    # pheatmap data annotation color
+    condition <- sample.colors
+    names(condition) <- sample.name.uniq
+    anno_colors <- list(condition = condition)
+
+    #heatmap(mat)
+    #pheatmap(mat)
+    # ftp://cran.r-project.org/pub/R/web/packages/pheatmap/pheatmap.pdf
+    png(filename=paste0(outdir,"HC_heatmap_all_samples_DESeq2_normalized.png"))
+      pheatmap(mat ,
+               annotation = anno,
+               annotation_colors = anno_colors,
+               fontsize = 10,
+               fontsize_row = 6,
+               fontsize_col = 10,
+               main = paste0("Heatmap of all samples (DESeq2 normalized) ", numbGenes," genes\n"),
+               annotation_names_col = F,
+               show_rownames = F,
+               cluster_rows = F,
+               border_color = F,
+               treeheight_col = 20
+      )
+    dev.off()
+}
+
+
+# edgeR DEG analysis for one comparison.
+#   odir/edgeRDir  - output folder parts; cName - comparison name
+#   RTableInFiles  - count table restricted to the compared samples
+#   pvtedgeR       - FDR cutoff
+#   sampleNameI/II - the two condition names (contrast pair)
+#   sampleNameList - condition label per column (defines the groups)
+# Writes <cName>_<n>_edgeR_fdr.tsv plus diagnostic plots into plots/.
+edgeRFunction <- function(odir, edgeRDir, cName, RTableInFiles, pvtedgeR, sampleNameI, sampleNameII, sampleNameList)
+{
+    library(edgeR)
+
+    outdir <- paste0(odir, "/",edgeRDir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE) # create output folder
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+    comparison <- cName
+    counts <- RTableInFiles
+
+    pvt <- pvtedgeR
+
+    group <- factor(sampleNameList)
+
+    # edgeR 'normal' (classic exact test)
+
+    y.normal <- DGEList(counts = counts, group = group)
+
+    y.normal[,1]
+
+    # NOTE(review): none of the png filenames in this function carry a
+    # ".png" extension -- confirm this is intended
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotMDS")))
+        plotMDS(y.normal, method = "bcv", main = paste0(cName, "\nedgeR - plotMDS"))
+    dev.off()
+
+    y.normal <- calcNormFactors(y.normal)
+    y.normal <- estimateCommonDisp(y.normal)
+    y.normal <- estimateTagwiseDisp(y.normal)
+
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotBCV")))
+        plotBCV(y.normal, main = paste0(cName, "\nedgeR - plotBCV"))
+    dev.off()
+
+    et.normal <- exactTest(y.normal, pair = c(sampleNameI, sampleNameII))
+
+    topTags(et.normal)
+    # adjust.method   ==>  character string specifying p-value adjustment method. Possible values are "none", "BH", "fdr" (equivalent to "BH"), "BY" and "holm". See p.adjust for details.
+    summary(de.normal <- decideTestsDGE(et.normal, p = pvt, adjust = "BH"))
+    # BH-adjust the raw p-values (table column 3) and keep rows below the cutoff
+    et.normal.fdr <- cbind(et.normal$table, p.adjust(et.normal$table[, 3], method = "BH"))
+    et.normal.fdr <- et.normal.fdr[et.normal.fdr[, 4] < pvt, ]
+
+    write.table(et.normal.fdr, file = paste0(outdir, cName, "_", nrow(et.normal.fdr), "_edgeR_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # highlight the significant tags in the smear plot
+    detags.normal <- rownames(de.normal)[as.logical(de.normal)]
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotSmear")))
+    plotSmear(et.normal, de.normal, tags = detags.normal, main = paste0(cName, "\nedgeR - plotSmear"))
+    abline(h = c(-2, 2), col = "blue")
+    dev.off()
+
+    # edgeR 'GLM'
+
+    y.glm <- DGEList(counts = counts, group = group)
+
+    design <- model.matrix(~0 + group, data = y.glm$samples)
+    design
+
+    y.glm <- estimateGLMCommonDisp(y.glm, design)
+    y.glm <- estimateGLMTrendedDisp(y.glm, design)
+
+    ## Loading required package: splines
+    y.glm <- estimateGLMTagwiseDisp(y.glm, design)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " edgeR - plotBCV - GLM")))
+    plotBCV(y.glm, main = paste0(cName, "\nedgeR - plotBCV - GLM"))
+    dev.off()
+
+    y.fit <- glmFit(y.glm, design)
+    y.lrt <- glmLRT(y.fit)
+
+    FDR <- p.adjust(y.lrt$table$PValue, method = "BH")
+    sum(FDR < pvt)
+
+    topTags(y.lrt)
+
+    # NOTE(review): the GLM branch results are only summarised into the log,
+    # never written to a file -- confirm this is intended
+    summary(de.glm <- decideTestsDGE(y.lrt, p = pvt, adjust = "BH"))
+
+}
+
+
+# DESeq2Function: pairwise differential-expression analysis with DESeq2.
+#
+#   odir / DESeq2Dir - output base directory and result subdirectory name
+#   cName            - comparison label; prefixes all output files
+#   RTableInFiles    - raw count matrix (features x samples)
+#   pvtDESeq2        - adjusted-p (padj / FDR) cutoff for the DEG table
+#   sampleNameI/II   - the two condition names; set as factor levels so the
+#                      contrast direction is fixed
+#   sampleNameList   - condition assignment for each count column
+#
+# Side effects: creates <odir>/<DESeq2Dir>/ and its plots/ subfolder, writes
+# the filtered DEG table "<cName>_<n>_DESeq2_fdr.tsv" and diagnostic PNGs.
+DESeq2Function <- function(odir, DESeq2Dir, cName, RTableInFiles, pvtDESeq2, sampleNameI, sampleNameII, sampleNameList)
+{
+    library(DESeq2)
+    library(RColorBrewer)
+    library(gplots)
+    library(vsn)
+
+    outdir <- paste0(odir, "/",DESeq2Dir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE)
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+
+    pvt <- pvtDESeq2
+
+
+
+    #   DESeqDataSet(se, design, ignoreRank = FALSE)
+    #   DESeqDataSetFromMatrix(countData, colData, design, tidy = FALSE, ignoreRank = FALSE, ...)
+    #   DESeqDataSetFromHTSeqCount(sampleTable, directory = ".", design, ignoreRank = FALSE, ...)
+    #   DESeqDataSetFromTximport(txi, colData, design, ...)
+    # sample sheet: one row per count column, a single "condition" covariate
+    coldata = data.frame(row.names = colnames(RTableInFiles), condition = sampleNameList)
+
+    ddsHTSeq <- DESeqDataSetFromMatrix(countData = RTableInFiles, colData = coldata, design = ~ condition)
+
+    # fix the factor level order so fold changes are relative to sampleNameI
+    colData(ddsHTSeq)$condition <- factor(colData(ddsHTSeq)$condition, levels = c(sampleNameI, sampleNameII))
+
+    # size factors, dispersions and Wald test in one call
+    ddsHTSeq <- DESeq(ddsHTSeq)
+
+    ddsHTSeq.res <- results(ddsHTSeq)
+    ddsHTSeq.res <- ddsHTSeq.res[order(ddsHTSeq.res$padj), ]
+    head(ddsHTSeq.res)
+
+    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - plotMA")))
+    #     plotMA(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotMA"), ylim = c(-2, 2))
+    # dev.off()
+
+    # keep only features with a defined padj below the cutoff
+    ddsHTSeq.res.fdr <- ddsHTSeq.res[!is.na(ddsHTSeq.res$padj), ]
+    ddsHTSeq.res.fdr <- ddsHTSeq.res.fdr[ddsHTSeq.res.fdr$padj < pvt, ]
+
+    write.table(as.matrix(ddsHTSeq.res.fdr), file = paste0(outdir, cName, "_", nrow(as.matrix(ddsHTSeq.res.fdr)), "_DESeq2_fdr.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # two variance-stabilising transforms, used by the QC plots below
+    ddsHTSeq.rld <- rlogTransformation(ddsHTSeq, blind = TRUE)
+    ddsHTSeq.vsd <- varianceStabilizingTransformation(ddsHTSeq, blind = TRUE)
+
+    # NOTE(review): each png() below opens a fresh device, so this mfrow
+    # setting does not combine the three SdPlots into one image.
+    par(mfrow = c(1, 3))
+    notAllZero <- (rowSums(counts(ddsHTSeq)) > 0)
+    # NOTE(review): the png filenames carry no ".png" extension.
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.1")))
+        meanSdPlot(log2(counts(ddsHTSeq, normalized = TRUE)[notAllZero, ] + 1)) # , ylim = c(0, 2.5)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.2"))) # , ylim = c(0, 2.5)
+        meanSdPlot(assay(ddsHTSeq.rld[notAllZero, ]))
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - SdPlot.3"))) # , ylim = c(0, 2.5)
+        meanSdPlot(assay(ddsHTSeq.vsd[notAllZero, ]))
+    dev.off()
+
+    # NOTE(review): hmcol/distRL/mat feed only the commented-out heatmap.
+    hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
+    distRL <- dist(t(assay(ddsHTSeq.rld)))
+    mat <- as.matrix(distRL)
+    rownames(mat) <- colnames(mat) <- with(colData(ddsHTSeq), paste0(condition))
+
+    # png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - heatmap")))
+    #     heatmap(mat, trace = "none", col = rev(hmcol), margin = c(13, 13))
+    # dev.off()
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - PCA")))
+        print(plotPCA(ddsHTSeq.rld, intgroup = "condition"))
+    dev.off()
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " DESeq2 - DispEsts")))
+        plotDispEsts(ddsHTSeq, main = paste0(cName, "\nDESeq2 - plotDispEsts"))
+    dev.off()
+}
+
+# NOISeqFunction: pairwise differential expression with NOISeq / NOISeqBIO.
+#
+#   odir / NOISeqDir   - output base directory and result subdirectory name
+#   cName              - comparison label; prefixes all output files
+#   RTableInFiles      - raw count table; columnPos selects the compared columns
+#   pvtNOISeq          - probability cutoff (features with prob > cutoff are DEGs)
+#   sampleNameI/II     - condition names (not used below; kept so the signature
+#                        matches the edgeR/DESeq2 sister functions)
+#   myLenPos / myGCPos - column positions of feature length and GC content
+#                        inside LenGCTable
+#   LenGCTable         - path to the per-feature length/GC annotation table
+#   columnPos          - column indices of the samples to compare
+#   sampleNameList     - condition per selected column
+#   SampleNames        - per-replicate run labels
+#
+# Side effects: writes *_prob.tsv DEG tables (rpkm / n / tmm normalisations,
+# NOISeq and NOISeqBIO) and diagnostic PNGs under <odir>/<NOISeqDir>/.
+NOISeqFunction <- function(odir, NOISeqDir, cName, RTableInFiles, pvtNOISeq, sampleNameI, sampleNameII, myLenPos, myGCPos, LenGCTable, columnPos, sampleNameList, SampleNames)
+{
+    library(NOISeq)
+
+    outdir <- paste0(odir, "/",NOISeqDir,"/") # set outdir
+
+    dir.create(outdir, showWarnings = FALSE)
+    dir.create(paste0(outdir,"plots"), showWarnings = FALSE) # create subfolder "plots"
+
+    pvt <- pvtNOISeq
+
+    counts <- RTableInFiles[, columnPos]
+    head(counts)
+
+    # must be changed if there are no triplicates
+    # !! Please change this setting if needed !!
+    myfactors <- data.frame(Treatment = sampleNameList, TreatmentRun = SampleNames)
+    # sort counts by row names
+    mycounts <- counts[order(rownames(counts)), ]
+    # filter out low counts
+    # (method = 1 is NOISeq's CPM filter; see the NOISeq vignette)
+    mycounts.filter <- filtered.data(mycounts, factor = myfactors$Treatment, norm = FALSE, depth = NULL, method = 1, cv.cutoff = 100, cpm = 1)
+    mycounts.rowname <- rownames(mycounts.filter)
+
+    # Extra file with length and GC content
+    myLengthTable <- read.table(LenGCTable, sep = "\t", header = TRUE, stringsAsFactor = FALSE, row.names=1)
+    myLengthTable <- myLengthTable[order(rownames(myLengthTable)),] # sort by rownames
+    myLengthTable.filter <- myLengthTable[mycounts.rowname,]
+
+    # named length / GC vectors aligned to the filtered count matrix
+    mylength <- myLengthTable.filter[,myLenPos]
+    names(mylength) <- rownames(myLengthTable.filter)
+
+    mygc <- myLengthTable.filter[,myGCPos]
+    names(mygc) <- rownames(myLengthTable.filter)
+
+
+
+
+
+    # bundle counts and annotations into the NOISeq input object
+    mydata <- readData(data = mycounts.filter, length = mylength, gc = mygc, factors = myfactors)
+    mydata
+
+    # GC-content / length bias diagnostics (plot output currently disabled)
+    mydata.GCbias <- dat(mydata, type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.1")))
+#-#        explo.plot(mydata.GCbias)
+#-#    dev.off()
+
+    mydata.GCbias.group <- dat(mydata, factor = "Treatment", type = "GCbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - GCbias.2")))
+#-#      explo.plot(mydata.GCbias.group)
+#-#    dev.off()
+
+    show(mydata.GCbias.group)
+
+    mydata.lengthbias <- dat(mydata, type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.1")))
+#-#        explo.plot(mydata.lengthbias)
+#-#    dev.off()
+
+    mydata.lengthbias.group <- dat(mydata, factor = "Treatment", type = "lengthbias")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - Lenbias.2")))
+#-#        explo.plot(mydata.lengthbias.group)
+#-#    dev.off()
+
+    show(mydata.lengthbias.group)
+
+    # looks different
+    mydata.cd <- dat(mydata, type = "cd")
+#-#    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - cd")))
+#-#        explo.plot(mydata.cd)
+#-#    dev.off()
+    mylength
+
+    # NOTE(review): these three normalised matrices are computed but never
+    # used again inside this function.
+    myRPKM <- NOISeq::rpkm(assayData(mydata)$exprs)
+    myUQUA <- NOISeq::uqua(assayData(mydata)$exprs, long = mylength, lc = 0.5, k = 0)
+    myTMM <- NOISeq::tmm(assayData(mydata)$exprs, long = 1000, lc = 0)
+
+    # NOISeq under three normalisations; k = 0.5 replaces zero counts
+    # (see the NOISeq vignette for the remaining parameter semantics)
+    mynoiseq.rpkm <- noiseq(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.nnorm <- noiseq(mydata, k = 0.5, norm = "n", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+    mynoiseq.tmm <- noiseq(mydata, k = 0.5, norm = "tmm", factor = "Treatment", pnr = 0.2, nss = 5, v = 0.02, lc = 1, replicates = "biological")
+
+    # RPKM
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm@results[[1]][mynoiseq.rpkm@results[[1]]$prob > pvt, ]
+    mynoiseq.rpkm.prob <- mynoiseq.rpkm.prob[!is.na(mynoiseq.rpkm.prob$prob), ]
+    write.table(mynoiseq.rpkm.prob, file = paste0(outdir, cName, "_", nrow(mynoiseq.rpkm.prob), "_NOISeq_rpkm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # none
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm@results[[1]][mynoiseq.nnorm@results[[1]]$prob > pvt, ]
+    mynoiseq.nnorm.prob <- mynoiseq.nnorm.prob[!is.na(mynoiseq.nnorm.prob$prob), ]
+    write.table(mynoiseq.nnorm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.nnorm.prob), "_NOISeq_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # TMM
+    mynoiseq.tmm.prob <- mynoiseq.tmm@results[[1]][mynoiseq.tmm@results[[1]]$prob > pvt, ]
+    mynoiseq.tmm.prob <- mynoiseq.tmm.prob[!is.na(mynoiseq.tmm.prob$prob), ]
+    write.table(mynoiseq.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseq.tmm.prob), "_NOISeq_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    ############ NOISeq bio ################
+
+    # NOISeqBIO uses its own, stricter probability cutoff
+    pvtnsb <- 0.995
+    a0per_value <- 0.99
+
+    # random seeds for the (disabled) multi-seed overlap experiment below
+    iterations <- 5
+    x5 <- c(sample(10000:99999, iterations, replace=F), 12345)
+    x5
+
+    collector.rpkm <- NULL
+    collector.tmm <- NULL
+
+
+    # for (iter in x5) {
+    #   # RPKM
+    #   mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    #   collector.rpkm <- rbind(collector.rpkm, mynoiseqbio.rpkm.prob)
+    #   #collector <- rbind(collector, mynoiseqbio.rpkm.prob[rownames(mynoiseqbio.rpkm.prob) %in% rownames(collector),])
+    #   # TMM
+    #   mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = iter, filter = 1)#, plot = TRUE)
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    #   mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    #   collector.tmm <- rbind(collector.tmm, mynoiseqbio.tmm.prob)
+    # }
+    #
+    # # RPKM
+    # print(sort(row.names(collector.rpkm)))
+    # library(stringr)
+    # mynoiseqbio.rpkm.prob.overlap <- collector.rpkm[str_split_fixed(row.names(collector.rpkm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.rpkm.prob.overlap) <- substr(row.names(mynoiseqbio.rpkm.prob.overlap),1,str_length(row.names(mynoiseqbio.rpkm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.rpkm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob.overlap),"_NOISeqbio_rpkm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    #
+    # #TMM
+    # print(sort(row.names(collector.tmm)))
+    # library(stringr)
+    # mynoiseqbio.tmm.prob.overlap <- collector.tmm[str_split_fixed(row.names(collector.tmm), "\\.",2)[,2]>=iterations+10,]
+    # row.names(mynoiseqbio.tmm.prob.overlap) <- substr(row.names(mynoiseqbio.tmm.prob.overlap),1,str_length(row.names(mynoiseqbio.tmm.prob.overlap))-1)
+    #
+    # write.table(mynoiseqbio.tmm.prob.overlap, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob.overlap),"_NOISeqbio_tmm_prob_seed_iteration_overlap.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    # # none
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm@results[[1]][mynoiseqbio.nnorm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.nnorm.prob <- mynoiseqbio.nnorm.prob[!is.na(mynoiseqbio.nnorm.prob$prob), ]
+    # write.table(mynoiseqbio.nnorm.prob, file = paste0(outdir, cName, "_NOISeqbio_nnorm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+    # # TMM
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    # mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    # write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName, "_NOISeqbio_tmm_prob.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+
+
+
+    # NOISeqBIO runs with a fixed seed (12345) for reproducibility
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiorpkm")))
+        mynoiseqbio.rpkm <- noiseqbio(mydata, k = 0.5, norm = "rpkm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm@results[[1]][mynoiseqbio.rpkm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.rpkm.prob <- mynoiseqbio.rpkm.prob[!is.na(mynoiseqbio.rpkm.prob$prob), ]
+    write.table(mynoiseqbio.rpkm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.rpkm.prob),"_NOISeqbio_rpkm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbiontmm")))
+        mynoiseqbio.tmm <- noiseqbio(mydata, k = 0.5, norm = "tmm", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm@results[[1]][mynoiseqbio.tmm@results[[1]]$prob > pvtnsb, ]
+    mynoiseqbio.tmm.prob <- mynoiseqbio.tmm.prob[!is.na(mynoiseqbio.tmm.prob$prob), ]
+    write.table(mynoiseqbio.tmm.prob, file = paste0(outdir, cName,"_", nrow(mynoiseqbio.tmm.prob),"_NOISeqbio_tmm_prob_seed_12345.tsv"), sep = "\t", col.names = TRUE, row.names = TRUE, quote = FALSE)
+
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - noiseqbionnorm")))
+        mynoiseqbio.nnorm <- noiseqbio(mydata, k = 0.5, norm = "n", factor = "Treatment", lc = 1, r = 20, adj = 1.5, a0per = a0per_value, random.seed = 12345, filter = 1, plot = TRUE)
+    dev.off()
+
+    head(mynoiseqbio.rpkm@results[[1]])
+    head(mynoiseqbio.nnorm@results[[1]])
+
+    # NOTE(review): the degenes() summaries below are computed but unused.
+    mynoiseqbio.rpkm.deg <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = NULL)
+
+    mynoiseqbio.rpkm.deg.up <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "up")
+    mynoiseqbio.rpkm.deg.down <- degenes(mynoiseqbio.rpkm, q = pvtnsb, M = "down")
+    mynoiseqbio.nnorm.deg <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = NULL)
+    mynoiseqbio.nnorm.deg.up <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "up")
+    mynoiseqbio.nnorm.deg.down <- degenes(mynoiseqbio.nnorm, q = pvtnsb, M = "down")
+
+    # NOTE(review): each png() opens a new device, so the par(mfrow) calls
+    # below do not produce multi-panel files; also "nnorn" in two of the
+    # filenames is a typo for "nnorm" (kept -- it is a runtime string).
+    par(mfrow = c(3, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.8")))
+        DE.plot(mynoiseq.rpkm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.9")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorn_0.8")))
+        DE.plot(mynoiseq.nnorm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.9")))
+        DE.plot(mynoiseq.tmm, q = 0.9, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.tmm_0.8")))
+        DE.plot(mynoiseq.tmm, q = 0.8, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(2, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvt)))
+        DE.plot(mynoiseqbio.rpkm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.rpkm_",pvtnsb)))
+        DE.plot(mynoiseqbio.rpkm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvt)))
+        DE.plot(mynoiseqbio.nnorm, q = pvt, graphic = "expr", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeqbio - DE.nnorm_",pvtnsb)))
+        DE.plot(mynoiseqbio.nnorm, q = pvtnsb, graphic = "expr", log.scale = TRUE)
+    dev.off()
+
+    par(mfrow = c(1, 2))
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.rpkm_0.9_MD")))
+        DE.plot(mynoiseq.rpkm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+    png(filename=paste0(outdir,"plots/",paste0(cName, " NOISeq - DE.nnorm_0.9_MD")))
+        DE.plot(mynoiseq.nnorm, q = 0.9, graphic = "MD", log.scale = TRUE)
+    dev.off()
+}
+
+############################################
+## additional functions for more usability #
+############################################
+
# Compute RPKM (reads per kilobase per million mapped reads) for one cell.
#   c - raw read count for the feature
#   n - total mapped reads of the library (column sum)
#   l - feature length in bp
# Inputs may be character; they are coerced to numeric first.
calcRPKM <- function(c,n,l)
{
    reads <- as.numeric(c)
    lib_size <- as.numeric(n)
    feature_len <- as.numeric(l)
    return((10^9 * reads) / (lib_size * feature_len))
}
+
# Convert a raw count table into an RPKM matrix.
#   InTable    - count table (features x samples), rownames = feature IDs
#   directory  - directory prefix for the length/GC table
#   LenGCTable - filename of the tab-separated length/GC table; its first
#                column (feature ID) becomes the row names
#   myLenPos   - column position of the feature length inside that table
# Returns a numeric matrix with the same dimnames as InTable.
#
# Improvements over the previous version: the per-cell double loop is
# replaced by one vectorized matrix expression (was O(rows x cols) scalar
# R calls), the builtin `sum` is no longer shadowed, and the partially
# matched `stringsAsFactor` argument is spelled out.
RPKMMatrix <- function(InTable,directory,LenGCTable,myLenPos)
{
    myLengthTable <- read.table(paste0(directory,LenGCTable), sep = "\t", header = TRUE, stringsAsFactors = FALSE, row.names = 1)

    # library size per sample (column sums of the raw counts)
    libSizes <- as.numeric(colSums(InTable))
    # feature lengths aligned to InTable's rows by name
    featLens <- as.numeric(myLengthTable[rownames(InTable), myLenPos])

    # RPKM = 10^9 * count / (library size * feature length), whole matrix
    # at once: outer(featLens, libSizes)[r, s] == featLens[r] * libSizes[s]
    newtable <- (10^9 * as.matrix(InTable)) / outer(featLens, libSizes)
    dimnames(newtable) <- list(rownames(InTable), colnames(InTable))
    return(newtable)
}
+
+
# Strip the five trailing summary entries that htseq-count appends to its
# output (e.g. "__no_feature" -- TODO confirm against the actual input).
# Works on vectors (drops last 5 elements) and tables (drops last 5 rows).
rmHTSeqLines <- function(counts)
{
    return(head(counts, n = -5L))
}
+
+
+###############
+## functions ##
+###############
+
# Collapse replicate sample names to their group name and return a factor:
# anything after the first "." is dropped ("control_6h.1" -> "control_6h"),
# names without a dot pass through unchanged. The first dot-free chunk is
# additionally split on spaces, mirroring the original behaviour.
split2list <- function(tmp)
{
    parts <- c()
    for (item in tmp) {
        if (grepl(".", item, fixed = TRUE)) {
            chunks <- strsplit(item, ".", fixed = TRUE)[[1]]
            parts <- c(parts, strsplit(chunks, " ")[[1]])
        } else {
            parts <- c(parts, item)
        }
    }
    return(as.factor(parts))
}
+
# makeGroups: build the replicate-group index matrix used by RUVSeq's RUVs
# (one row per factor level, columns = observation indices, -1 padding).
# Adapted from https://github.com/drisso/RUVSeq/blob/master/R/helper.R
makeGroups <- function(xs)
{
    xs <- factor(xs)
    lvl_names <- levels(xs)
    # widest row = size of the largest group; unused slots stay -1
    groups <- matrix(-1, nrow = length(lvl_names), ncol = max(table(xs)))
    for (row in seq_along(lvl_names)) {
        members <- which(xs == lvl_names[row])
        groups[row, seq_along(members)] <- members
    }
    return(groups)
}
+
# The function makeSampleList creates a list of the columns (samples) you
# would like to compare: for each group the bare name for the first
# replicate, then "name.1", "name.2", ... for the remaining ones.
#   A, B   - base names of the two groups
#   Al, Bl - number of replicates per group
# Returns e.g. c("ctrl", "ctrl.1", "ctrl.2", "trt", "trt.1"); returns an
# empty list when Al is not numeric.
makeSampleList <- function(A, Al, B, Bl)
{
    l <- c()
    # Guard directly on the argument instead of the previous
    # exists("Al", mode = "numeric"), which looks a binding up by *name*
    # and is therefore fragile with respect to enclosing environments.
    if (is.numeric(Al)) {
        l <- makeSampleList.helper(A, Al, l)
        l <- makeSampleList.helper(B, Bl, l)
    }
    return(l)
}
# Append the k replicate names derived from base name v to list l.
makeSampleList.helper <- function(v,k,l)
{
    for (h in seq_len(k)) {
        # first replicate keeps the bare name, later ones get ".1", ".2", ...
        l <- c(l, if (h == 1) v else paste0(v, ".", h - 1))
    }
    return(l)
}
+
# Return the positional indices 1..length(l) for the elements of `l`.
# Replaces the previous grow-with-c() loop (O(n^2) from repeated copying)
# with the idiomatic seq_along(). Difference for callers: the result is an
# integer vector, and an empty input yields integer(0) instead of NULL --
# both behave identically in indexing and comparison contexts.
makeIndexList <- function(l)
{
    return(seq_along(l))
}
+
+# intersection: load the per-method DEG tables found under directory l and
+# print/return their overlap.
+#
+#   l     - result directory scanned for the *_fdr.tsv / *_prob.tsv tables
+#   check - 1: return NOISeq rows for the direction-aware overlap,
+#           otherwise rows for the plain by-name overlap
+#   t     - number of methods to intersect: 4 (incl. NOISeqBIO) or 3
+#
+# NOTE(review): the code assumes list.files() returns exactly four matches
+# in the order 1=DESeq2, 2=edgeR, 3/4=NOISeq(BIO) (alphabetical by path);
+# verify this against the actual result-directory layout before reuse.
+intersection <- function(l,check,t)
+{
+  l.list <- list.files(path = l, pattern = "*(rpkm|edgeR|DESeq2).*tsv", recursive = TRUE)
+  # the "bio" substring decides which of entries 3/4 is NOISeqBIO
+  if(grepl("bio", l.list[4],fixed = TRUE)) {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+  } else {
+    l.list.noiseq <- read.table(paste0(l,"/",l.list[4]), header=T, sep="\t", row.names=1)
+    l.list.noiseqbio <- read.table(paste0(l,"/",l.list[3]), header=T, sep="\t", row.names=1)
+  }
+  l.list.edgeR <- read.table(paste0(l,"/",l.list[2]), header=T, sep="\t", row.names=1)
+  l.list.deseq2 <- read.table(paste0(l,"/",l.list[1]), header=T, sep="\t", row.names=1)
+  
+  
+  print("DEGs edgeR")
+  print(nrow(l.list.edgeR))
+  print("DEGs deseq2")
+  print(nrow(l.list.deseq2))
+  print("DEGs noiseq")
+  print(nrow(l.list.noiseq))
+  print("DEGs noiseqbio")
+  print(nrow(l.list.noiseqbio))
+  
+  interect.list <- ""
+  interect.list.direct <- ""
+  
+  if(t==4) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), intersect(rownames(l.list.noiseqbio), rownames(l.list.deseq2))))
+    
+    print("DEGs intersection without direction check (four methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+    
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, l.list.noiseqbio, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+  if(t==3) {
+    # List of overlaps without direction check
+    interect.list <- intersect(rownames(l.list.edgeR), intersect(rownames(l.list.noiseq), rownames(l.list.deseq2)))
+    
+    print("DEGs intersection without direction check  (three methods)")
+    print(length(interect.list))
+    print(interect.list)
+    print("DEGs intersection with direction check")
+    
+    # List of overlaps with direction check
+    if(length(interect.list)!=0) {
+      # fake data frame with 0 rows => nrow == 0
+      # (signals "no NOISeqBIO table" to interDirect)
+      df_fake <- data.frame(Date=as.Date(character()), 
+                       File=character(), 
+                       User=character(), 
+                       stringsAsFactors=FALSE) 
+      
+      interect.list.direct <- interDirect(l.list.edgeR, l.list.deseq2, l.list.noiseq, df_fake, interect.list)
+      print(length(interect.list.direct))
+      print(interect.list.direct)
+    } else {
+      print("0")
+    }
+  }
+  
+  
+  # rows are always taken from the NOISeq table
+  if(check == 1){
+    return(l.list.noiseq[interect.list.direct,])
+  } else {
+    return(l.list.noiseq[interect.list,])
+  }
+}
+
+# interDirect: direction-aware intersection of DEG name lists.
+#   e / d / n / nb - edgeR / DESeq2 / NOISeq / NOISeqBIO result tables
+#   i              - gene names already shared by name only
+# Each gene is encoded as "<sign><name>" from the per-method fold-change
+# column (logFC / log2FoldChange / M). The NOISeq/NOISeqBIO sign is
+# inverted because their contrast direction is opposite to edgeR/DESeq2
+# (see the inline comment below). An nb with 0 rows means "no NOISeqBIO
+# table": NOISeq stands in for it. Returns the matching names with the
+# sign prefix stripped.
+# NOTE(review): substr(..., 2, 100) assumes gene names <= 99 characters.
+interDirect <- function(e,d,n,nb,i)
+{
+  lil <- length(i)
+  
+  if(nrow(nb) == 0) { # use noiseq twice to simulate noiseqbio
+    nbx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) 
+    nby <- paste0(nbx[,"M"],rownames(nbx))
+  } else {
+    nbx <- apply(nb[i,], 2, function(x) {ifelse(x > 0, "-", "+")})
+    nby <- paste0(nbx[,"log2FC"],rownames(nbx))
+  }
+  
+  ex <- apply(e[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# hier < und >
+  dx <- apply(d[i,], 2, function(x) {ifelse(x < 0, "-", "+")}) #-#-#-# hier < und >
+  nx <- apply(n[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) # twisted, because edgeR and DESeq2 are calculating vice versa   
+  
+  ey <- paste0(ex[,"logFC"],rownames(ex))
+  dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+  ny <- paste0(nx[,"M"],rownames(nx))
+  
+  interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+  
+  # fallback: if the strict match is empty, flip the edgeR/DESeq2 signs
+  # once and retry (covers the opposite contrast orientation)
+  if(lil>0 && length(interect.list.direct)<=0) {
+    ex <- apply(e[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# hier < und >
+    dx <- apply(d[i,], 2, function(x) {ifelse(x > 0, "-", "+")}) #-#-#-# hier < und >
+    
+    ey <- paste0(ex[,"logFC"],rownames(ex))
+    dy <- paste0(dx[,"log2FoldChange"],rownames(dx))
+    
+    interect.list.direct <- intersect(ey, intersect(dy, intersect(ny, nby)))
+  }
+  
+  # strip the leading sign character from the encoded names
+  return(substr(interect.list.direct,2,100))
+}
+
+###########################################
+## executive part of the script           #
+###########################################
+# Run the full DEG workflow once per sample comparison defined in
+# sampleLists; all other arguments are script-level globals set above.
+for(SampleNames in sampleLists) {
+   workflow(file, saveat, SampleNames, LenGCTable, myLenPos, myGCPos, rpkm, pvtedgeR, pvtDESeq2, pvtNOISeq, edgeRDir, DESeq2Dir, NOISeqDir, htseq)
+}
+# NOTE(review): print() shows the "\n" literally; cat() would interpret it.
+print("Thank you for using the DEG pipeline :) \n")
+
+# fin
+
+### compare DEGs analysis ###
+# used venny and Excel
+
diff --git a/workflows/Experiment2/snp_pipe_at.sge b/workflows/Experiment2/snp_pipe_at.sge
new file mode 100644
index 0000000000000000000000000000000000000000..c0c5cd5e01f53c5ca8db6855509c6566772335ae
--- /dev/null
+++ b/workflows/Experiment2/snp_pipe_at.sge
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Submit one SNP-pipeline job to the SGE scheduler.
+# Positional arguments:
+#   1 prefix   - sample/job name prefix (job is named "run_<prefix>")
+#   2 job_mem  - h_vmem memory request (e.g. "8G")
+#   3 out_path - directory for SGE stdout/stderr
+#   4 nthreads - slot count for the "multislot" parallel environment
+#   5 species  - species identifier, passed through to the pipeline
+#   6 inpath   - input data directory, passed through to the pipeline
+
+prefix=$1
+job_mem=$2
+out_path=$3
+nthreads=$4
+species=$5
+inpath=$6
+
+
+
+# NOTE(review): the pipeline script path is hard-coded; verify it exists on
+# the target cluster.
+qsub -N "run_"$prefix -l h_vmem=$job_mem -pe multislot $nthreads -S /bin/bash -e $out_path -o $out_path /vol/agrensing/fhaas/RNA-seq_Schippers/second_samples/no_backup/SNP_pipe_At.sh $nthreads $out_path $prefix $species $inpath
diff --git a/workflows/Experiment2/snp_pipe_at.sh b/workflows/Experiment2/snp_pipe_at.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1d09557f660ac8550a3facc4df5b835520495417
--- /dev/null
+++ b/workflows/Experiment2/snp_pipe_at.sh
@@ -0,0 +1,469 @@
+#!/bin/bash
+
+#. /etc/profile.d/modules.sh
+
+# make the cluster-wide tool installations visible on PATH/MANPATH
+export PATH=$PATH:/vol/biotools/bin
+export MANPATH=$MANPATH:/vol/biotools/man
+export PATH=$PATH:/vol/software/bin
+
+
+# install important R package - lummerland cluster does not have it ...
+# (kept for reference: one-off package installs, run manually when needed)
+#echo "install.packages(\"gplots\", repos=\"https://cran.rstudio.com\")" | R --vanilla --no-save
+#R CMD INSTALL /vol/agrensing/tools/R_packages/bitops_1.0-6.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/caTools_1.17.1.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gtools_3.8.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gdata_2.18.0.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gplots_3.0.1.tar.gz
+
+#functions
+
+# startle: log the raw-read count of the forward FASTQ ($fw, global).
+# Sets the global number_row_reads (FASTQ lines / 4), which the later
+# duplication statistics in duple() divide by.
+startle ()
+{
+	number_row_reads=`wc -l $fw | awk '{print $1/4}'`
+	echo "## Start log ##"
+	echo "Number of row reads: "$number_row_reads
+}
+
+# trimmo: adapter/quality trimming with Trimmomatic 0.39; PE mode when the
+# reverse-read file $rv exists, SE mode otherwise. Writes trimmed FASTQs to
+# $trimmo_out.*, prints read counts ("^+$" matches the FASTQ separator
+# line, so the grep -c counts reads) and runs FastQC on the results.
+# Globals used: $fw $rv $cpu $java_ram $trimmo_out $output_dir $species $prefix.
+trimmo ()
+ {
+   # Remove leading low quality or N bases (below quality 3) (LEADING:3)
+   # Remove trailing low quality or N bases (below quality 3) (TRAILING:3)
+   # Scan the read with a 4-base wide sliding window, cutting when the average quality per base drops below 15 (SLIDINGWINDOW:4:15)
+   # HEADCROP: Cut the specified Number of bases from the start of the read
+   # MINLEN was first set to 50 bp. But there are libraries with read start length of 50 bp.
+
+   if [ -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar PE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $rv $trimmo_out".PE.fw" $trimmo_out".SE.fw" $trimmo_out".PE.rv" $trimmo_out".SE.rv" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+ 
+	# stats
+	echo "## Trimmo statistics ##"
+	printf "trim\t"$trimmo_out".PE.fw\t"; grep -c "^\+$" $trimmo_out".PE.fw"
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".PE."*
+    fi
+    if [ ! -e $rv ]; then
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $trimmo_out".SE" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+ 
+	# stats
+	echo "## Trimmo statistics ##"
+	# printf "trim\t"$trimmo_out".SE\t"; grep -c "^\+$" $trimmo_out".SE"
+        printf "trim\t"$trimmo_out".SE\t"; wc -l $trimmo_out".SE" | awk '{print $1/4}'
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".SE"
+    fi
+ 
+ }
+ 
+# polyA: trim poly-A/T tails (5 bp from both read ends) with prinseq-lite,
+# keeping reads >= 30 bp. PE mode when $rv exists, SE mode otherwise.
+# Passing reads go to $polyA_out.polyA.passed*, failing ones to *.failed*;
+# read counts are printed and FastQC is run on the passed output.
+# Globals used: $rv $cpu $trimmo_out $polyA_out $output_dir $species $prefix.
+polyA ()
+ {
+   # min_len was first set to 50 bp. But there are libraries with read start length of 50 bp.
+   # If you specify any statistic option, no other ouput will be generated. To preprocess data, do not specify a statistics option.
+   # -stats_dupl -stats_all
+ 
+   inpA=$polyA_out".polyA.passed"
+   infA=$polyA_out".polyA.failed"
+
+   if [ -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".PE.fw" -fastq2 $trimmo_out".PE.rv" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA"_1.fastq\t"; wc -l $inpA"_1.fastq" | awk '{print $1/4}'
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA"_"[12]".fastq"
+   fi
+   if [ ! -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".SE" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA".fastq\t"; grep -c "^\+$" $inpA".fastq"
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA".fastq"
+   fi  
+ }
+
+# duple: count 100% identical duplicate reads via awk hash tables and write
+# *_dupl_histogram files next to the poly-A output. The printed second
+# column is the duplicate count divided by $number_row_reads (set by
+# startle); the $((...*100000))e-5 printf trick emulates 5-decimal
+# fixed-point division with integer shell arithmetic.
+# Globals used: $rv $polyA_out $number_row_reads.
+duple ()
+{
+	# number of 100% identical duplicate reads.
+	echo "Duplication:"
+
+        if [ -e $rv ]; then
+
+                #SE 1
+                IN=$polyA_out".polyA.passed"
+
+                # per-mate duplicate counts (sequence text as hash key)
+                num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_1_dupl_histogram"}}' $IN"_1.fastq" | wc -l`
+                printf $num_dup_se1"\t"; printf %.5f\\t "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                #SE 2
+                num_dup_se2=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_2_dupl_histogram"}}' $IN"_2.fastq" | wc -l`
+                printf $num_dup_se2"\t"; printf %.5f\\t "$(( 100000 * num_dup_se2 / number_row_reads ))e-5"
+
+
+                #PE
+                # fragment-level duplicates: mate sequences are concatenated
+                # per read name before hashing
+                num_dup_pe=`awk -v nof=$IN 'NR % 4 == 1 || NR % 4 == 2 {if(n==1){if(foo[name]==""){c=0}; foo[name]=foo[name]$1; if(c==1){seq[foo[name]]++} else {c=1}; n=0} else if($1~/^@/){split($1,w,"/"); name=w[1]; foo[name]; n=1}} END {for(a in seq){print seq[a]; p[seq[a]]++} for(l in p){print l"\t"p[l] >> nof"_1_2_dupl_histogram"}}' $IN"_1.fastq" $IN"_2.fastq" | wc -l`
+                printf $num_dup_pe"\t"; printf %.5f\\n "$(( 100000 * num_dup_pe / number_row_reads ))e-5"
+
+
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $polyA_out".polyA.passed"*;
+                do
+                  num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_dupl_histogram"}}' $IN | wc -l`
+                  printf $IN"\t"$num_dup_se1"\t"; printf %.5f\\n "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                done
+        fi
+
+
+}
+
+# mappi: map the poly-A-trimmed reads with GSNAP (-N 1: look for novel
+# splicing) against the $db_name_V3 genome index; PE vs SE chosen by the
+# existence of $rv. Unmapped reads are captured via --failed-input and
+# counted ("^+$" matches the FASTQ separator line).
+# Globals used: $rv $cpu $id $db_path_V3 $db_name_V3 $polyA_out $mappi_out.
+# NOTE(review): the PE branch calls /homes/fhaas/src/gsnap while the SE
+# branch calls the /ceph/... build -- confirm both point to the same
+# GSNAP version.
+mappi()
+{ 
+    if [ -e $rv ]; then
+	/homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 7 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+	
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+
+    fi
+    if [ ! -e $rv ]; then
+	/ceph/agrensing/tools/gmap-gsnap/gmap-2021-03-08/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 5 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+    fi
+
+    
+
+    # mapping statistics
+        echo "## Pp genome version 3 ##"
+        echo "Unmapped reads: "$unmapped_reads
+        
+}
+
# organelli: for each mapping-output file, count how many distinct read
# (fragment) IDs map to lines matching chloro|mito|ribo (presumably
# organellar / ribosomal reference names -- verify against the genome's
# sequence naming) versus the rest, and print both counts plus the
# percentage of organellar hits.
# Globals used: $rv (PE/SE switch), $mappi_out; samtools path is hard-coded.
organelli()
{
        if [ -e $rv ]; then
                # paired-end: inspect the concordant alignment files
                for IN in $mappi_out".genome.concordant_"*; 
                do 
                  # bugfix: 'print' is a ksh builtin, not bash -- use echo
                  echo "Fragments:"
                  printf $IN"\t"; 
                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ; 
                done
        fi
        if [ ! -e $rv ]; then
                # single-end: inspect the uniquely-mapping output files
                for IN in $mappi_out".genome"*"uniq"*; 
                do 
                  echo "SE reads:"
                  printf $IN"\t"; 
                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ; 
                done

        fi


}
+
+mappi_gDNA()
+{
+     if [ -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 7 -A sam --npaths=5 \
+            --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+            --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+            $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+  
+          unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+     fi
+
+     if [ ! -e $rv ]; then
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 5 -A sam --npaths=5 \
+           --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+           --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+           $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+ 
+         unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+     fi
+
+  # mapping statistics
+   echo "## Pp genome version 3 ##"
+   echo "Unmapped reads: "$unmapped_reads
+}
+
+
+# Convert each split SAM output into (a) a coordinate-sorted BAM and (b) a
+# duplicate-removed BAM with organelle/rRNA/ERCC reads filtered out first,
+# index both, then merge all ".uniq" BAMs into combined and rmdup BAMs.
+# NOTE: the pipe order (grep -> name-sort -> fixmate -> coord-sort ->
+# markdup) is required by samtools markdup and must not be rearranged.
+sample ()
+{
+     # if PE use ".genome"*[tcqg]
+     for IN in $mappi_out".genome"*[tcqg]
+     do
+	  printf $IN"\tsort.bam\n" >&2
+	   /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $IN".sort.bam"
+
+	  printf $IN"\trmdup.bam\n" >&2
+	   # drop organellar/rRNA/ERCC lines (also removes their @SQ headers),
+	   # then fixmate+markdup to remove PCR duplicates (-r)
+	   grep -vE "chloro|ribo|mito|ERCC" $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -n -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools fixmate -@ $cpu -m - - | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools markdup -@ $cpu -s -r - \
+	   $IN".filter.fixmate.sort.markdup_r.bam"
+
+	  # samtools statistics
+	  # unique read IDs in the SAM (header lines starting with @ excluded)
+	  v_tmp=`awk '$1!~"@" {print $1}' $IN | sort | uniq | wc -l`
+	  printf "Number of mapped reads: "$IN"\t"$v_tmp"\n"
+
+          # compress the sam text files
+          # gzip $IN
+	  # rm sam file (safe here: both BAMs have already been written)
+          rm $IN
+
+
+	  # bam index
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN*"_r.bam"
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN".sort.bam"
+
+     done
+
+     ls -l $mappi_out*"bam" >&2
+
+     # merge all unique bam files
+     echo "merging all .uniq bam files" >&2
+     echo "merging all .uniq bam files"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.rmdup.bam" $mappi_out*"uniq"*"_r.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.rmdup.bam"
+}
+
+# featureCounts fragment counting on exons grouped by Parent against
+# $gff_file, run on both the rmdup and the non-rmdup merged BAMs
+# (-p count fragments, -C exclude chimeras; -B additionally requires both
+# mates mapped), then grep the assignment totals out of the logs.
+# NOTE(review): the output names still carry "v3.3_32458_ercc" tags while
+# $gff_file points at TAIR10.51 — tags look stale; confirm.
+counti()
+{
+     if [ -e $rv ]; then
+        # read count v3.3 and ERCC combined
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a $gff_file \
+          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v51_rmdup.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a $gff_file \
+          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v51_rmdup.exon.p.log"
+
+       /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a $gff_file \
+         -t exon -g Parent -o $counti_out"_all_v3.3_31315_ercc.exon.pB.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_v51_all.exon.pB.log"
+       /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a $gff_file \
+         -t exon -g Parent -o $counti_out"_all_v3.3_31315_ercc.exon.p.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_v51_all.exon.p.log"
+
+        # statistics
+        printf "\n### FeatureCount statisitcs v3.3 ###\n"
+        # 32458
+        echo "Fragment counts on gene models exon PE rmdup:"
+        grep "Successfully assigned fragments" $counti_out"_v51_rmdup.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE rmdup:"
+        grep "Successfully assigned fragments" $counti_out"_v51_rmdup.exon.p.log"
+
+        # 31315 no rmdup
+        echo "Fragment counts on gene models exon PE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_v51_all.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_v51_all.exon.p.log"
+
+     fi
+     if [ ! -e $rv ]; then
+        # SE counting has never been wired up for this species
+        echo "This part is not working yet"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.SE.log"
+#
+#      # statistics
+#      printf "\n### FeatureCount statisitcs v3.3 ###"
+#      # 32458
+#      echo "Fragment counts on gene models exon SE 32458:"
+#      grep "Successfully assigned " $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#      # 31315
+#      echo "Fragment counts on gene models exon SE 31315:"
+#      grep "Successfully assigned " $counti_out"_v3.3_31315_ercc.exon.SE.log"
+
+     fi
+}
+
+# Per-base coverage of the merged duplicate-removed BAM; -aa keeps
+# zero-coverage positions so every reference base appears in the output.
+depthi()
+{
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools depth -aa "${mappi_out}.uniq.merged.rmdup.bam" > "${mappi_out}.uniq.merged.rmdup.depth"
+}
+
+
+# GATK4 haploid SNP-calling workflow on the merged rmdup BAM:
+#   1. initial HaplotypeCaller round (+ INDEL extraction)
+#   2. high-quality SNP extraction (--percentile 90 / --GQ 90) used as
+#      known sites for BQSR
+#   3. BQSR round-trip: recalibrate -> ApplyBQSR -> recalibrate again ->
+#      AnalyzeCovariates before/after report
+#   4. second HaplotypeCaller round on both the non-recalibrated and the
+#      BQSR-recalibrated reads, followed by hard filtering.
+snpi()
+{
+  gatkrun='/vol/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  # V3
+   # start the SNP detection
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -ploidy 1 -O $snpi_out".HaplotypeCaller.vcf" 2> $snpi_out".HaplotypeCaller.err"
+
+   # extract INDELs into their own VCF
+   $gatkrun SelectVariants -R $reffna --variant $snpi_out".HaplotypeCaller.vcf" -select-type INDEL -O $snpi_out".SelectVariants.vcf" 2> $snpi_out".SelectVariants.err"
+
+   # HQ SNPs
+   python2.7 /vol/agrensing/fhaas/GeneAtlas/SNP/GetHighQualVcfs.py -i $snpi_out".HaplotypeCaller.vcf" -o $output_dir"/05_SNP_calling/"$species"/"$prefix"/" --ploidy 1 --percentile 90 --GQ 90 2> $snpi_out".GetHighQualVcfs.HQ.err"
+
+#   mv $snpi_out".GetHighQualVcfs.HQ.vcf" $snpi_out".GetHighQualVcfs.V3.HQ.vcf"
+
+   # index the HQ VCF so it can be used as -known-sites below
+   $gatkrun IndexFeatureFile -I  $snpi_out".GetHighQualVcfs.HQ.vcf" 2> $snpi_out".GetHighQualVcfs.HQ.idx.err"
+
+   $gatkrun BaseRecalibrator -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.table" 2> $snpi_out".BaseRecalibrator.err"
+
+   $gatkrun ApplyBQSR -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" --bqsr-recal-file $snpi_out".BaseRecalibrator.table" -O $snpi_out".BaseRecalibrator.ApplyBQSR.bam" \
+	2> $snpi_out".BaseRecalibrator.ApplyBQSR.err"
+
+   # second recalibration pass on the recalibrated BAM (for the QC report)
+   $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+	2> $snpi_out".BaseRecalibrator.recal.err"
+
+   $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+     -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+   $gatkrun PrintReads -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -O $snpi_out".PrintReads.bam" 2> $snpi_out".PrintReads.err"
+
+   $gatkrun PrintReads -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -O $snpi_out".PrintReads.ApplyBQSR.bam" 2> $snpi_out".PrintReads.ApplyBQSR.err"
+
+   # HaplotypeCaller and VariantFiltration w/o ApplyBQSR BAM file
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.bam" -ploidy 1 -O $snpi_out".PrintReads.HaplotypeCaller.vcf" 2> $snpi_out".PrintReads.HaplotypeCaller.err"
+
+   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.ApplyBQSR.bam" -ploidy 1 -O $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+	2> $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.err"
+
+   #  filter flags ## NOTE: it is important to look for float and integer numbers (e.g. FS must be 60.0 not 60) ##
+   # --filter-expression "FS > 60" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0"
+   # --filter-name "LowMQ" --filter-expression "QUAL > 30.0 && QUAL < 60.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage"
+   #
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".VariantFiltration.vcf" 2> $snpi_out".VariantFiltration.err"
+
+   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
+     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
+     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
+     -O $snpi_out".ApplyBQSR.VariantFiltration.vcf" 2> $snpi_out".ApplyBQSR.VariantFiltration.err"
+}
+
+corri()
+{
+  for IN in $mappi_out*.gz
+  do
+    gunzip $IN
+    file=`echo $IN | awk '{print substr($1, 1, length($1)-3)}'`
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $file | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $file".sort.bam"
+    /vol/agrensing/tools/samtools/samtools-1.12/samtools index $file".sort.bam"
+    gzip $file
+
+  done
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+
+
+  gatkrun='/ceph/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
+  threading_option='--native-pair-hmm-threads 8'
+  $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
+         2> $snpi_out".BaseRecalibrator.recal.err"
+
+  $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
+    -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"
+
+
+}
+
+
+# script input variables
+
+cpu=$1
+output_dir=$2
+prefix=$3
+species=$4
+infile_path=$5
+
+# set variables
+java_ram="-Xmx50G -Xms25G"
+id=$prefix
+
+fw=$infile_path"/fw."$prefix
+rv=$infile_path"/rv."$prefix
+
+db_path_V3="/vol/agrensing/fhaas/DB/Arabidopsis_TAIR10_chloro_mito/Arabidopsis_TAIR10_chloro_mito"
+db_name_V3="Arabidopsis_TAIR10_chloro_mito"
+
+reffna="/ceph/agrensing/fhaas/DB/Arabidopsis_TAIR10_chloro_mito/Arabidopsis_thaliana.TAIR10.dna.toplevel.fa"
+gff_file="/ceph/agrensing/fhaas/DB/Arabidopsis_TAIR10_chloro_mito/Arabidopsis_thaliana.TAIR10.51.rmiso.gff3"
+
+
+# outdir variables
+# output=$output_dir"/"$prefix
+trimmo_out=$output_dir"/02_trimmomatic/"$species"/"$prefix"/"$prefix
+polyA_out=$output_dir"/03_poly-A/"$species"/"$prefix"/"$prefix
+mappi_out=$output_dir"/04_genome_mapping/"$species"/"$prefix"/"$prefix
+snpi_out=$output_dir"/05_SNP_calling/"$species"/"$prefix"/"$prefix
+counti_out=$output_dir"/06_counts/"$species"/"$prefix"/"$prefix
+
+# create result folder
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix
+mkdir -p $output_dir"/04_genome_mapping/"$species"/"$prefix
+mkdir -p $output_dir"/05_SNP_calling/"$species"/"$prefix
+mkdir -p $output_dir"/06_counts/"$species"/"$prefix
+
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc"
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc"
+
+
+
+
+
+# print running machine and start date
+hostname
+date
+
+# print all variables
+printf $cpu"\n"$output_dir"\n"$prefix"\n"$species"\n"$fw"\n"
+
+if [ -e $rv ]; then
+  printf $rv"\n"
+fi
+
+printf "\'$java_ram\'""\n"$id"\n"$db_path_V3"\n"$db_name_V3"\n"
+
+echo ""
+
+echo "Phase 0 assamble data"
+startle
+
+date
+echo "Phase 1 run trimming"
+trimmo
+
+date
+echo "Phase 2 run polyA"
+polyA
+duple
+date
+echo "Phase 3 run mapping"
+mappi
+#-# mappi_gDNA
+organelli
+sample
+counti
+depthi
+date
+echo "Phase 4 run SNP calling"
+snpi
+#-# corri
+date
+
diff --git a/workflows/Experiment2/snp_pipe_mp.sge b/workflows/Experiment2/snp_pipe_mp.sge
new file mode 100644
index 0000000000000000000000000000000000000000..9bb78136bab059a1f33de19051482cc6eeb0ec38
--- /dev/null
+++ b/workflows/Experiment2/snp_pipe_mp.sge
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+
+prefix=$1
+job_mem=$2
+out_path=$3
+nthreads=$4
+species=$5
+inpath=$6
+
+
+
+qsub -N "run_"$prefix -l h_vmem=$job_mem -pe multislot $nthreads -S /bin/bash -e $out_path -o $out_path /vol/agrensing/fhaas/RNA-seq_Schippers/second_samples/no_backup/SNP_pipe_Mp.sh $nthreads $out_path $prefix $species $inpath
diff --git a/workflows/Experiment2/snp_pipe_mp.sh b/workflows/Experiment2/snp_pipe_mp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8fda4f695ca04c6e640419208806fca16c8835d3
--- /dev/null
+++ b/workflows/Experiment2/snp_pipe_mp.sh
@@ -0,0 +1,480 @@
+#!/bin/bash
+
+#. /etc/profile.d/modules.sh
+
+export PATH=$PATH:/vol/biotools/bin
+export MANPATH=$MANPATH:/vol/biotools/man
+export PATH=$PATH:/vol/software/bin
+
+
+# install important R package - lummerland cluster does not have it ...
+#echo "install.packages(\"gplots\", repos=\"https://cran.rstudio.com\")" | R --vanilla --no-save
+#R CMD INSTALL /vol/agrensing/tools/R_packages/bitops_1.0-6.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/caTools_1.17.1.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gtools_3.8.1.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gdata_2.18.0.tar.gz
+#R CMD INSTALL /vol/agrensing/tools/R_packages/gplots_3.0.1.tar.gz
+
+#functions
+
+startle ()
+{
+	number_row_reads=`wc -l $fw | awk '{print $1/4}'`
+	echo "## Start log ##"
+	echo "Number of row reads: "$number_row_reads
+}
+
+# Adapter/quality trimming with Trimmomatic 0.39 (PE or SE depending on
+# whether $rv exists), followed by a read-count log line and FastQC on the
+# trimmed output.
+trimmo ()
+ {
+   # Remove leading low quality or N bases (below quality 3) (LEADING:3)
+   # Remove trailing low quality or N bases (below quality 3) (TRAILING:3)
+   # Scan the read with a 4-base wide sliding window, cutting when the average quality per base drops below 15 (SLIDINGWINDOW:4:15)
+   # HEADCROP: Cut the specified Number of bases from the start of the read
+   # MINLEN was first set to 50 bp. But there are libraries with read start length of 50 bp.
+
+   if [ -e $rv ]; then
+	# paired-end mode: produces .PE.* (both mates kept) and .SE.* (orphans)
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar PE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $rv $trimmo_out".PE.fw" $trimmo_out".SE.fw" $trimmo_out".PE.rv" $trimmo_out".SE.rv" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats
+	echo "## Trimmo statistics ##"
+	printf "trim\t"$trimmo_out".PE.fw\t"; grep -c "^\+$" $trimmo_out".PE.fw"
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".PE."*
+    fi
+    if [ ! -e $rv ]; then
+	# single-end mode
+	java $java_ram -jar /nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads $cpu -phred33 -summary $trimmo_out".trimmo.stats" \
+	  $fw $trimmo_out".SE" \
+	  ILLUMINACLIP:/nfs/agrensing/tools/trimmomatic/Trimmomatic-0.39/adapters/TruSeq-PE_all.fna:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 HEADCROP:10 MINLEN:30
+
+	# stats
+	echo "## Trimmo statistics ##"
+	# printf "trim\t"$trimmo_out".SE\t"; grep -c "^\+$" $trimmo_out".SE"
+        printf "trim\t"$trimmo_out".SE\t"; wc -l $trimmo_out".SE" | awk '{print $1/4}'
+	# fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc" $trimmo_out".SE"
+    fi
+
+ }
+ 
+# Poly-A/T tail trimming with prinseq-lite: clips >=5-base terminal A/T
+# tails, drops reads shorter than 30 bp, then logs surviving read counts
+# and runs FastQC on the passed reads.
+polyA ()
+ {
+   # min_len was first set to 50 bp. But there are libraries with read start length of 50 bp.
+   # If you specify any statistic option, no other ouput will be generated. To preprocess data, do not specify a statistics option.
+   # -stats_dupl -stats_all
+
+   # output basenames: passed reads vs. rejected reads
+   inpA=$polyA_out".polyA.passed"
+   infA=$polyA_out".polyA.failed"
+
+   if [ -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".PE.fw" -fastq2 $trimmo_out".PE.rv" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA"_1.fastq\t"; wc -l $inpA"_1.fastq" | awk '{print $1/4}'
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA"_"[12]".fastq"
+   fi
+   if [ ! -e $rv ]; then
+	/nfs/agrensing/tools/prinseq/prinseq-lite-0.20.4/./prinseq-lite.pl -verbose -no_qual_header -fastq $trimmo_out".SE" \
+	  -trim_tail_left 5 -trim_tail_right 5 -min_len 30 -out_good $inpA -out_bad $infA \
+	  -out_format 3 -log $polyA_out"_polyA.log"
+
+	# stats
+	echo "## PolyA statistics ##"
+	printf "polyA\t"$inpA".fastq\t"; grep -c "^\+$" $inpA".fastq"
+	#fastqc
+	/nfs/agrensing/tools/FastQC/FastQC_v0.11.7/fastqc -f fastq -t $cpu -o $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc" $inpA".fastq"
+   fi
+ }
+
+# Count 100%-identical duplicate reads among the poly-A-passed FASTQs and
+# print duplication fractions relative to $number_row_reads (set by
+# startle). Side effect: appends duplication-level histograms to the
+# *_dupl_histogram files next to the input FASTQs.
+duple ()
+{
+	# number of 100% identical duplicat reads.
+	echo "Duplication:"
+
+        if [ -e $rv ]; then
+
+                #SE 1
+                IN=$polyA_out".polyA.passed"
+
+                # distinct mate-1 sequences: awk prints one line per unique
+                # sequence (NR%4==2 = FASTQ sequence lines); wc -l counts them
+                num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_1_dupl_histogram"}}' $IN"_1.fastq" | wc -l`
+                # shell arithmetic is integer-only: scale by 1e5, then let
+                # printf re-interpret the "...e-5" suffix as scientific notation
+                printf $num_dup_se1"\t"; printf %.5f\\t "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                #SE 2
+                num_dup_se2=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_2_dupl_histogram"}}' $IN"_2.fastq" | wc -l`
+                printf $num_dup_se2"\t"; printf %.5f\\t "$(( 100000 * num_dup_se2 / number_row_reads ))e-5"
+
+
+                #PE
+                # appears to concatenate mate-1 and mate-2 sequences per read
+                # name across both files and count distinct pair sequences —
+                # TODO(review): confirm against the /-suffixed read-name format
+                num_dup_pe=`awk -v nof=$IN 'NR % 4 == 1 || NR % 4 == 2 {if(n==1){if(foo[name]==""){c=0}; foo[name]=foo[name]$1; if(c==1){seq[foo[name]]++} else {c=1}; n=0} else if($1~/^@/){split($1,w,"/"); name=w[1]; foo[name]; n=1}} END {for(a in seq){print seq[a]; p[seq[a]]++} for(l in p){print l"\t"p[l] >> nof"_1_2_dupl_histogram"}}' $IN"_1.fastq" $IN"_2.fastq" | wc -l`
+                printf $num_dup_pe"\t"; printf %.5f\\n "$(( 100000 * num_dup_pe / number_row_reads ))e-5"
+
+
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $polyA_out".polyA.passed"*;
+                do
+                  num_dup_se1=`awk -v name=$IN 'NR % 4 == 2 {foo[$1]++} END {for(o in foo){print o"\t"foo[o]; p[foo[o]]++} for(l in p){print l"\t"p[l] >> name"_dupl_histogram"}}' $IN | wc -l`
+                  printf $IN"\t"$num_dup_se1"\t"; printf %.5f\\n "$(( 100000 * num_dup_se1 / number_row_reads ))e-5"
+                done
+        fi
+
+
+}
+
+mappi()
+{ 
+    if [ -e $rv ]; then
+	/homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 7 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+	
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+
+    fi
+    if [ ! -e $rv ]; then
+	/ceph/agrensing/tools/gmap-gsnap/gmap-2021-03-08/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -N 1 -n 5 -A sam --npaths=5 \
+          --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+          --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+          $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+	unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+    fi
+
+    
+
+    # mapping statistics
+        echo "## Pp genome version 3 ##"
+        echo "Unmapped reads: "$unmapped_reads
+        
+}
+
+organelli()
+{
+        if [ -e $rv ]; then
+                for IN in $mappi_out".genome.concordant_"*; 
+                do 
+                  print "Fragments:"
+                  printf $IN"\t"; 
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ; 
+                done
+        fi
+        if [ ! -e $rv ]; then
+                for IN in $mappi_out".genome"*"uniq"*; 
+                do 
+                  print "SE reads:"
+                  printf $IN"\t"; 
+                  with=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Ei "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
+                  wo=`/ceph/agrensing/tools/samtools/samtools-1.12/samtools view $IN | grep -Eiv "chloro|mito|ribo" | awk '{print $1}' | sort | uniq | wc -l`; 
+                  awk -v with=$with -v wo=$wo 'BEGIN {print with"\t"wo"\t"with/(wo+with)*100" %"}' ; 
+                done
+
+        fi
+
+
+}
+
+# gDNA variant of mappi(): identical gsnap invocation but without -N 1
+# (novel splicing off), i.e. unspliced alignment for genomic DNA reads.
+# Currently disabled in the main section ("#-# mappi_gDNA").
+mappi_gDNA()
+{
+     if [ -e $rv ]; then
+          # paired-end, up to 7 reported alignments per read
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 7 -A sam --npaths=5 \
+            --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+            --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+            $polyA_out".polyA.passed_1.fastq" $polyA_out".polyA.passed_2.fastq" 2> $mappi_out".mappi.err"
+
+          unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped.1"`
+     fi
+
+     if [ ! -e $rv ]; then
+          # single-end, up to 5 reported alignments per read
+          /homes/fhaas/src/gsnap -D $db_path_V3 -d $db_name_V3 -t $cpu -n 5 -A sam --npaths=5 \
+           --read-group-platform=illumina --read-group-id=RGUNKNOWN --read-group-name=$id --read-group-library=LBUNKNOWN  \
+           --failed-input=$mappi_out".unmapped" --split-output=$mappi_out".genome" \
+           $polyA_out".polyA.passed.fastq" 2> $mappi_out".mappi.err"
+
+         unmapped_reads=`grep -c "^\+$" $mappi_out".unmapped"`
+     fi
+
+  # mapping statistics
+   echo "## Pp genome version 3 ##"
+   echo "Unmapped reads: "$unmapped_reads
+}
+
+
+# Convert each split SAM output into (a) a coordinate-sorted BAM and (b) a
+# duplicate-removed BAM with organelle/rRNA/ERCC reads filtered out first,
+# index both, then merge all ".uniq" BAMs into combined and rmdup BAMs.
+# NOTE: the pipe order (grep -> name-sort -> fixmate -> coord-sort ->
+# markdup) is required by samtools markdup and must not be rearranged.
+sample ()
+{
+     # if PE use ".genome"*[tcqg]
+     for IN in $mappi_out".genome"*[tcqg]
+     do
+	  printf $IN"\tsort.bam\n" >&2
+	   /vol/agrensing/tools/samtools/samtools-1.12/samtools view -@ $cpu -bhS $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G - -o $IN".sort.bam"
+
+	  printf $IN"\trmdup.bam\n" >&2
+	   # drop organellar/rRNA/ERCC lines (also removes their @SQ headers),
+	   # then fixmate+markdup to remove PCR duplicates (-r)
+	   grep -vE "chloro|ribo|mito|ERCC" $IN | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -n -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools fixmate -@ $cpu -m - - | /vol/agrensing/tools/samtools/samtools-1.12/samtools sort -@ $cpu -m 2G -O BAM - | \
+           /vol/agrensing/tools/samtools/samtools-1.12/samtools markdup -@ $cpu -s -r - \
+	   $IN".filter.fixmate.sort.markdup_r.bam"
+
+	  # samtools statistics
+	  # unique read IDs in the SAM (header lines starting with @ excluded)
+	  v_tmp=`awk '$1!~"@" {print $1}' $IN | sort | uniq | wc -l`
+	  printf "Number of mapped reads: "$IN"\t"$v_tmp"\n"
+
+          # compress the sam text files
+          # gzip $IN
+	  # rm sam file (safe here: both BAMs have already been written)
+          rm $IN
+
+
+	  # bam index
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN*"_r.bam"
+          /vol/agrensing/tools/samtools/samtools-1.12/samtools index $IN".sort.bam"
+
+     done
+
+     ls -l $mappi_out*"bam" >&2
+
+     # merge all unique bam files
+     echo "merging all .uniq bam files" >&2
+     echo "merging all .uniq bam files"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.bam" $mappi_out*"uniq"*".sort.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.bam"
+
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools merge -@ $cpu $mappi_out".uniq.merged.rmdup.bam" $mappi_out*"uniq"*"_r.bam"
+     /vol/agrensing/tools/samtools/samtools-1.12/samtools index $mappi_out".uniq.merged.rmdup.bam"
+}
+
+# featureCounts fragment counting on exons grouped by Parent against the
+# MpTak v6.1r1 annotation, run on both the rmdup and the non-rmdup merged
+# BAMs (-p count fragments, -C exclude chimeras; -B additionally requires
+# both mates mapped), then grep the assignment totals out of the logs.
+counti()
+{
+     if [ -e $rv ]; then
+        # read count v3.3 and ERCC combined
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.pB.log"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.p.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_v6.1.exon.pB.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v6.1.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_v6.1.exon.p.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v6.1.exon.p.log"
+
+
+        # same counts on the merged BAM that still contains duplicates
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -B -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_all_v6.1.exon.pB.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v6.1.exon.pB.log"
+        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -p -C -T $cpu -a /vol/agrensing/fhaas/DB/Marchantia_polymorpha/MpTak_v6.1r1_iso1.gff \
+          -t exon -g Parent -o $counti_out"_all_v6.1.exon.p.txt" $mappi_out".uniq.merged.bam" 2> $counti_out"_all_v6.1.exon.p.log"
+
+
+
+        # statistics
+        printf "\n### FeatureCount statisitcs v6.1 ###\n"
+        # 32458
+#        echo "Fragment counts on gene models exon PE 32458:"
+#        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.pB.log"
+#        echo "Fragment counts on gene models exon PE+SE 32458:"
+#        grep "Successfully assigned fragments" $counti_out"_v3.3_32458_ercc.exon.p.log"
+        # results
+        echo "Fragment counts on gene models exon PE:"
+        grep "Successfully assigned fragments" $counti_out"_v6.1.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE:"
+        grep "Successfully assigned fragments" $counti_out"_v6.1.exon.p.log"
+        # results no rmdup
+        echo "Fragment counts on gene models exon PE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v6.1.exon.pB.log"
+        echo "Fragment counts on gene models exon PE+SE (no rmdup):"
+        grep "Successfully assigned fragments" $counti_out"_all_v6.1.exon.p.log"
+
+     fi
+     if [ ! -e $rv ]; then
+        # SE counting has never been wired up for this species
+        echo "This part is not working yet"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_32458_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#        /vol/agrensing/tools/Subread/subread-1.6.2-Linux-x86_64/bin/featureCounts -T $cpu -a /vol/agrensing/fhaas/GeneAtlas/Pp_v3.3_rm.double_clean.N_ercc_gt.gff \
+#          -t exon -g Parent -o $counti_out"_v3.3_31315_ercc.exon.SE.txt" $mappi_out".uniq.merged.rmdup.bam" 2> $counti_out"_v3.3_31315_ercc.exon.SE.log"
+#
+#        # statistics
+#        printf "\n### FeatureCount statisitcs v3.3 ###"
+#        # 32458
+#        echo "Fragment counts on gene models exon SE 32458:"
+#        grep "Successfully assigned " $counti_out"_v3.3_32458_ercc.exon.SE.log"
+#        # 31315
+#        echo "Fragment counts on gene models exon SE 31315:"
+#        grep "Successfully assigned " $counti_out"_v3.3_31315_ercc.exon.SE.log"
+
+     fi
+}
+
+# Per-base coverage of the merged duplicate-removed BAM; -aa keeps
+# zero-coverage positions so every reference base appears in the output.
+depthi()
+{
+  /vol/agrensing/tools/samtools/samtools-1.12/samtools depth -aa "${mappi_out}.uniq.merged.rmdup.bam" > "${mappi_out}.uniq.merged.rmdup.depth"
+}
+
+
# snpi: GATK4 variant-calling + base-quality-score-recalibration (BQSR)
# pipeline on the merged, deduplicated unique-mapper BAM.
# Haploid calling (-ploidy 1).  Steps are strictly order-dependent:
# HaplotypeCaller -> high-quality SNP subset -> BaseRecalibrator/ApplyBQSR
# -> re-call and filter both the recalibrated and non-recalibrated BAMs.
# Requires: $reffna, $mappi_out, $snpi_out, $output_dir, $species, $prefix.
snpi()
{
  gatkrun='/vol/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
  threading_option='--native-pair-hmm-threads 8'
  # V3
   # start the SNP detection
   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -ploidy 1 -O $snpi_out".HaplotypeCaller.vcf" 2> $snpi_out".HaplotypeCaller.err"

   # extract INDELs from the raw call set (kept as a side product)
   $gatkrun SelectVariants -R $reffna --variant $snpi_out".HaplotypeCaller.vcf" -select-type INDEL -O $snpi_out".SelectVariants.vcf" 2> $snpi_out".SelectVariants.err"

   # HQ SNPs
   # Select high-quality variants (top 10% by QUAL, GQ >= 90) to serve as
   # the known-sites set for BQSR below.
   python2.7 /vol/agrensing/fhaas/GeneAtlas/SNP/GetHighQualVcfs.py -i $snpi_out".HaplotypeCaller.vcf" -o $output_dir"/05_SNP_calling/"$species"/"$prefix"/" --ploidy 1 --percentile 90 --GQ 90 2> $snpi_out".GetHighQualVcfs.HQ.err"

#   mv $snpi_out".GetHighQualVcfs.HQ.vcf" $snpi_out".GetHighQualVcfs.V3.HQ.vcf"

   # BQSR round 1: build recalibration table from the HQ known sites, then
   # apply it to produce a recalibrated BAM.
   $gatkrun IndexFeatureFile -I  $snpi_out".GetHighQualVcfs.HQ.vcf" 2> $snpi_out".GetHighQualVcfs.HQ.idx.err"

   $gatkrun BaseRecalibrator -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.table" 2> $snpi_out".BaseRecalibrator.err"

   $gatkrun ApplyBQSR -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" --bqsr-recal-file $snpi_out".BaseRecalibrator.table" -O $snpi_out".BaseRecalibrator.ApplyBQSR.bam" \
	2> $snpi_out".BaseRecalibrator.ApplyBQSR.err"

   # BQSR round 2: re-tabulate on the recalibrated BAM so before/after
   # covariates can be compared.
   $gatkrun BaseRecalibrator -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -known-sites $snpi_out".GetHighQualVcfs.HQ.vcf" -O $snpi_out".BaseRecalibrator.recal.table" \
	2> $snpi_out".BaseRecalibrator.recal.err"

   # QC report (CSV + PDF plots) comparing pre- and post-recalibration tables
   $gatkrun AnalyzeCovariates -before $snpi_out".BaseRecalibrator.table" -after $snpi_out".BaseRecalibrator.recal.table" -verbosity DEBUG -csv $snpi_out".AnalyzeCovariates.calibration-report.csv" \
     -plots $snpi_out".AnalyzeCovariates.calibration-report.QC.pdf" 2> $snpi_out".AnalyzeCovariates.calibration-report.err"

   # Materialize both BAMs (original and recalibrated) via PrintReads
   $gatkrun PrintReads -R $reffna -I $mappi_out".uniq.merged.rmdup.bam" -O $snpi_out".PrintReads.bam" 2> $snpi_out".PrintReads.err"

   $gatkrun PrintReads -R $reffna -I $snpi_out".BaseRecalibrator.ApplyBQSR.bam" -O $snpi_out".PrintReads.ApplyBQSR.bam" 2> $snpi_out".PrintReads.ApplyBQSR.err"

   # HaplotypeCaller and VariantFiltration w/o ApplyBQSR BAM file
   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.bam" -ploidy 1 -O $snpi_out".PrintReads.HaplotypeCaller.vcf" 2> $snpi_out".PrintReads.HaplotypeCaller.err"

   $gatkrun HaplotypeCaller $threading_option -R $reffna -I $snpi_out".PrintReads.ApplyBQSR.bam" -ploidy 1 -O $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
	2> $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.err"

   #  filter flags ## NOTE: it is important to look for float and integer numbers (e.g. FS must be 60.0 not 60) ##
   # --filter-expression "FS > 60" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0"
   # --filter-name "LowMQ" --filter-expression "QUAL > 30.0 && QUAL < 60.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage"
   #
   # Annotate (not remove) low-confidence calls on the non-recalibrated VCF
   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.HaplotypeCaller.vcf" \
     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
     -O $snpi_out".VariantFiltration.vcf" 2> $snpi_out".VariantFiltration.err"

   # Same filters on the recalibrated (ApplyBQSR) VCF
   $gatkrun VariantFiltration -R $reffna --variant $snpi_out".PrintReads.ApplyBQSR.HaplotypeCaller.vcf" \
     --filter-expression "FS > 60.0" --filter-name "StrandBias" --filter-expression "QD < 2.0" --filter-name "LowQD" --filter-expression "MQ < 40.0" \
     --filter-name "LowMQ" --filter-expression "QUAL > 30.0" --filter-name "LowQual" --filter-expression "DP < 5" --filter-name "LowCoverage" \
     -O $snpi_out".ApplyBQSR.VariantFiltration.vcf" 2> $snpi_out".ApplyBQSR.VariantFiltration.err"
}
+
# corri: recovery/correction pass -- re-create sorted/indexed BAMs from the
# gzipped SAM outputs, merge the unique-mapper BAMs, then redo the second
# BQSR table and the covariates QC report.
# Requires: $mappi_out, $snpi_out, $reffna, $cpu set by the caller script.
corri()
{
  local samtools=/vol/agrensing/tools/samtools/samtools-1.12/samtools

  for IN in "$mappi_out"*.gz
  do
    gunzip "$IN"
    # Strip the trailing ".gz" suffix -- parameter expansion replaces the
    # former `echo | awk substr(...)` subshell.
    file=${IN%.gz}
    "$samtools" view -@ "$cpu" -bhS "$file" | "$samtools" sort -@ "$cpu" -m 2G - -o "$file.sort.bam"
    "$samtools" index "$file.sort.bam"
    gzip "$file"
  done
  "$samtools" merge -@ "$cpu" "$mappi_out.uniq.merged.bam" "$mappi_out"*uniq*.sort.bam
  "$samtools" index "$mappi_out.uniq.merged.bam"

  # NOTE(review): this gatk lives under /ceph/... while snpi() uses the
  # /vol/... mount -- confirm which path is valid on the target host.
  # $gatkrun stays unquoted below: the word splitting into
  # "gatk --java-options -Xmx50G" is intentional.
  gatkrun='/ceph/agrensing/tools/gatk/gatk-4.2.0.0/gatk --java-options -Xmx50G'
  threading_option='--native-pair-hmm-threads 8'  # set for parity with snpi(); unused here
  $gatkrun BaseRecalibrator -R "$reffna" -I "$snpi_out.BaseRecalibrator.ApplyBQSR.bam" -known-sites "$snpi_out.GetHighQualVcfs.HQ.vcf" -O "$snpi_out.BaseRecalibrator.recal.table" \
         2> "$snpi_out.BaseRecalibrator.recal.err"

  $gatkrun AnalyzeCovariates -before "$snpi_out.BaseRecalibrator.table" -after "$snpi_out.BaseRecalibrator.recal.table" -verbosity DEBUG -csv "$snpi_out.AnalyzeCovariates.calibration-report.csv" \
    -plots "$snpi_out.AnalyzeCovariates.calibration-report.QC.pdf" 2> "$snpi_out.AnalyzeCovariates.calibration-report.err"
}
+
+
# ---- script input variables (positional CLI arguments) ----
cpu="$1"          # threads passed to mappers/samtools/featureCounts
output_dir="$2"   # root of the result tree
prefix="$3"       # sample identifier, used in every output path
species="$4"
infile_path="$5"  # directory holding the fw./rv. read files

# ---- derived settings ----
java_ram="-Xmx50G -Xms25G"
id="$prefix"

# Forward / reverse read files; rv may be absent for single-end samples.
fw="$infile_path/fw.$prefix"
rv="$infile_path/rv.$prefix"

# Reference database: Marchantia polymorpha v6.1 (main genome + organelles)
db_path_V3="/ceph/agrensing/fhaas/DB/Marchantia_polymorpha/Marchantia_v6.1_maingenome_mito_ribo_chloro/Marchantia_v6.1_maingenome_mito_ribo_chloro"
db_name_V3="Marchantia_v6.1_maingenome_mito_ribo_chloro"

reffna="/ceph/agrensing/fhaas/DB/Marchantia_polymorpha/Marchantia_v6.1.fna"
+
+
+
+# outdir variables
+# output=$output_dir"/"$prefix
+trimmo_out=$output_dir"/02_trimmomatic/"$species"/"$prefix"/"$prefix
+polyA_out=$output_dir"/03_poly-A/"$species"/"$prefix"/"$prefix
+mappi_out=$output_dir"/04_genome_mapping/"$species"/"$prefix"/"$prefix
+snpi_out=$output_dir"/05_SNP_calling/"$species"/"$prefix"/"$prefix
+counti_out=$output_dir"/06_counts/"$species"/"$prefix"/"$prefix
+
+# create result folder
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix
+mkdir -p $output_dir"/04_genome_mapping/"$species"/"$prefix
+mkdir -p $output_dir"/05_SNP_calling/"$species"/"$prefix
+mkdir -p $output_dir"/06_counts/"$species"/"$prefix
+
+mkdir -p $output_dir"/02_trimmomatic/"$species"/"$prefix"/fastqc"
+mkdir -p $output_dir"/03_poly-A/"$species"/"$prefix"/fastqc"
+
+
+
+
+
# ---- main driver: log context, then run the pipeline phases in order ----

# print running machine and start date
hostname
date

# Print all input variables. An explicit '%s' format is used so values
# containing '%' or backslashes are never interpreted as printf directives
# (the original passed the data itself as the format string).
printf '%s\n' "$cpu" "$output_dir" "$prefix" "$species" "$fw"

# rv only exists for paired-end samples
if [ -e "$rv" ]; then
  printf '%s\n' "$rv"
fi

# java_ram is wrapped in single quotes in the log for readability
printf "'%s'\n%s\n%s\n%s\n" "$java_ram" "$id" "$db_path_V3" "$db_name_V3"

echo ""

# Phase functions below (startle, trimmo, polyA, ...) are defined earlier
# in this script; the order is significant.
echo "Phase 0 assemble data"
startle

date
echo "Phase 1 run trimming"
trimmo

date
echo "Phase 2 run polyA"
polyA
duple
date
echo "Phase 3 run mapping"
mappi
#-# mappi_gDNA
organelli
sample
counti
depthi
date
echo "Phase 4 run SNP calling"
snpi
#-# corri
date