bf2raw.nf
// Convert one image file to OME-Zarr with bioformats2raw, writing the
// result into a per-dataset subdirectory and publishing it to params.pubDir.
process convert {
    conda 'bf2raw_env.yml'
    publishDir params.pubDir
    maxForks params.maxConvJobs
    disk params.maxConvJobDisk

    input:
    tuple val(dataset), path(imgfile)

    output:
    tuple path(dataset), path("${dataset}/${imgfile.baseName}.zarr")

    script:
    def outfile = "${dataset}/${imgfile.baseName}.zarr"
    """
    mkdir "${dataset}"
    bioformats2raw --memo-directory /tmp ${imgfile} "${outfile}"
    """
}
// Sync a converted Zarr to S3 under its dataset prefix, then pass the
// same tuple through so downstream steps can act on the local copy.
process upload {
    conda 'bf2raw_env.yml'

    input:
    tuple path(dataset), path(img)

    output:
    tuple path(dataset), path(img)

    script:
    """
    aws --profile ${params.awsProfile} s3 sync ${img} s3://${params.bucket}/${dataset}/${img}
    """
}
// After upload, delete the local Zarr to reclaim disk space.
process remove {
    input:
    tuple path(dataset), path(img)

    script:
    def pubdir = "${params.pubDir}"
    """
    # Resolve the publishDir symlink to the real dataset directory before deleting.
    dspath=`readlink -f ${pubdir}/${dataset}`
    rm -rf "\$dspath"/${img}
    """
}
// Parse the input TSV into (dataset, image file) pairs, convert each image
// to OME-Zarr, upload it to S3, and optionally remove the local copy.
workflow {
    image_paths = Channel
        .fromPath(params.input)
        .splitCsv(header: false, sep: "\t")
        .map { tuple( it[0].replaceAll('Dataset:name:', ''), file(it[params.column]) ) }

    convert(image_paths)
    upload(convert.out)

    if ( params.removeZarrs ) {
        remove(upload.out)
    }
}
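
/*
 * A minimal usage sketch (not from the source): the parameter names below are
 * taken from the params.* references in this script, but every value, the
 * file names, and the TSV layout shown are assumptions for illustration only.
 *
 *   nextflow run bf2raw.nf \
 *       --input images.tsv \
 *       --column 1 \
 *       --pubDir results \
 *       --maxConvJobs 4 \
 *       --maxConvJobDisk '100 GB' \
 *       --awsProfile default \
 *       --bucket my-zarr-bucket \
 *       --removeZarrs false
 *
 * Each tab-separated row of images.tsv would pair a dataset name (column 0,
 * carrying the 'Dataset:name:' prefix that the workflow strips) with an
 * image path in the column selected by --column, e.g.:
 *
 *   Dataset:name:experiment1<TAB>/data/experiment1/image01.tiff
 */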