/*
* CÉCI NIC5 CLUSTER
* =================
* This configuration file provides sensible defaults to run Nextflow pipelines
* on the CÉCI NIC5 cluster.
*
* For more information on the CÉCI NIC5 cluster, refer to this page of the
* wiki:
* https://www.ceci-hpc.be/clusters.html#nic5
*/
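/*
 * Illustrative usage (not part of the configuration itself): if this file is
 * made available as a profile named `ceci_nic5` (e.g. through nf-core/configs),
 * a pipeline could be launched with:
 *
 *     nextflow run nf-core/<pipeline> -profile ceci_nic5
 *
 * Otherwise, the file can be passed directly with the `-c` option:
 *
 *     nextflow run <pipeline> -c ceci_nic5.config
 */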
params {
    config_profile_name        = 'CÉCI'
    config_profile_description = 'CÉCI NIC5 cluster profiles provided by the GIGA Bioinformatics Team.'
    config_profile_contact     = 'Martin Grignard (@MartinGrignard)'
    config_profile_url         = 'https://www.ceci-hpc.be/clusters.html#nic5'
}
/*
 * Resource limits
 * ---------------
 * These limits correspond to the maximum values available across all nodes of
 * all queues. At least one node among the available queues provides these
 * maximum resources.
*
* For more information on the available nodes, use the `sinfo` command.
*/
params {
    max_cpus   = 64
    max_memory = 1.TB
    max_time   = 2.days
}
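/*
 * To check these values against the current node inventory, a command along
 * these lines can be used (illustrative; standard Slurm format specifiers
 * listing partition name, CPUs per node, memory per node in MB and the
 * maximum wall time):
 *
 *     sinfo --format="%P %c %m %l"
 */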
/*
* Singularity configuration
* -------------------------
* Singularity is used to run containerised tools.
*/
singularity {
    autoMounts  = true
    cacheDir    = "${HOME}/.cache/singularity"
    enabled     = true
    pullTimeout = 3.hours
}
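/*
 * Container images pulled by Nextflow are converted to Singularity image
 * files and kept under the cache directory above, so they are reused across
 * pipeline runs rather than downloaded again. The cached images can be
 * inspected with, for example:
 *
 *     ls "${HOME}/.cache/singularity"
 */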
/*
* Slurm configuration
* -------------------
 * Slurm is used as the workload manager. This configuration ensures that the
 * available resources are shared fairly.
*
* For more information on how to use Slurm on the CÉCI clusters, refer to this
* page of the wiki:
* https://support.ceci-hpc.be/doc/_contents/QuickStart/SubmittingJobs/SlurmTutorial.html
*/
executor {
    name         = 'slurm'
    queueSize    = 200
    pollInterval = 10.s
}
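/*
 * With this executor, Nextflow submits each task as a Slurm job, keeps at
 * most 200 of them queued or running at once, and polls their status every
 * 10 seconds. The submitted jobs can be monitored with standard Slurm
 * tooling, for instance:
 *
 *     squeue -u $USER
 */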
/*
* Process configuration
* ---------------------
 * Several queues are available on the cluster, each providing a different
 * amount of memory. This configuration requests resources from the most
 * relevant queue based on the memory required by each task.
*
* For more information on the available queues, refer to this page of the
* wiki:
* https://www.ceci-hpc.be/clusters.html#nic5
*/
process {
    queue = {
        task.memory <= 256.GB ? 'batch' : 'hmem'
    }
    resourceLimits = [
        cpus  : 64,
        memory: 1.TB,
        time  : 2.days,
    ]
    stageInMode  = 'symlink'
    stageOutMode = 'rsync'
}
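/*
 * Worked example of the queue selection above: a task requesting 128.GB of
 * memory stays at or below the 256.GB threshold and is submitted to the
 * `batch` queue, while a task requesting 512.GB exceeds it and is routed to
 * the high-memory `hmem` queue.
 */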