nf-core/configs: DaiSyBio

To use the DaiSyBio profile, run a nf-core pipeline with -profile daisybio,<singularity/apptainer>.

This will automatically download and apply `daisybio.config` as a Nextflow config file.

The config file will set slurm as a scheduler for the compute cluster, define max resources, and specify cache locations for singularity, apptainer, and iGenomes. Pipeline-specific parameters still need to be configured manually.

Work directories will be kept at /nfs/scratch/nf-core_work/ in a directory named after the full path of the launch directory ("." separated). They are automatically removed after a successful pipeline run. To keep the intermediate files, e.g. for using the -resume function, add keep_work as a profile: -profile daisybio,<singularity/apptainer>,keep_work.

If you need GPU access, add gpu to the profile list to submit to the gpu queue: -profile daisybio,<singularity/apptainer>,gpu.

Config file

See config file on GitHub

params {
    // nf-core standard profile metadata, shown in the pipeline summary
    config_profile_description = 'DaiSyBio cluster profile provided by nf-core/configs.'
    config_profile_contact = 'Johannes Kersting (@JohannesKersting)'
    config_profile_url = 'https://www.mls.ls.tum.de/daisybio/startseite/'
    // upper resource bounds advertised to nf-core pipelines (legacy max_* parameters)
    max_memory = 1.TB
    max_cpus = 120
    max_time = 96.h
    // shared iGenomes reference cache on the cluster NFS mount
    igenomes_base = '/nfs/data/references/igenomes'
}

// define workDir in /nfs/scratch/nf-core_work/ named after the launch dir,
// e.g. launching from /home/user/run1 -> /nfs/scratch/nf-core_work/home.user.run1
workDir = {
    def scratch_base = "/nfs/scratch/nf-core_work/"
    def launch_dir = System.getenv("PWD")

    // fall back to the default "work" dir when the scratch base is not
    // mounted or PWD is unset (e.g. launched outside an interactive shell)
    if (!new File(scratch_base).exists() || !launch_dir) {
        return "work"
    }

    def work_dir = scratch_base + launch_dir.tokenize('/').join('.')

    // if directory does not exist, create it and set the group to the group
    // of the launch dir (plus setgid) so collaborators keep write access
    if (!new File(work_dir).exists()) {
        // .waitFor() is required: String.execute() spawns the process
        // asynchronously, so without it chgrp/chmod could run before
        // mkdir has finished creating the directory
        "mkdir -p ${work_dir}".execute().waitFor()
        def group = "stat -c %g ${launch_dir}".execute().text.trim()
        "chgrp -R ${group} ${work_dir}".execute().waitFor()
        "chmod -R g+s ${work_dir}".execute().waitFor()
    }
    return work_dir
}.call()

process {
    // hard caps applied to per-task resource requests (Nextflow resourceLimits)
    resourceLimits = [
        memory: 1.TB,
        cpus: 120,
        time: 96.h
    ]
    // submit all tasks through SLURM, defaulting to the shared CPU partition
    executor = 'slurm'
    queue = 'shared-cpu'
    // re-submit a failed task up to two times before failing the run
    maxRetries = 2
}

executor {
    // allow at most 50 tasks queued/running on SLURM at once
    queueSize = 50
    // throttle job submission ('10 sec' = 10 submissions per second
    // per the Nextflow executor docs) to avoid overloading the scheduler
    submitRateLimit = '10 sec'
}

// remove intermediate files in the work directory after a successful run
cleanup = true
profiles {
    // profile to keep work directory (e.g. to allow -resume)
    keep_work {
        cleanup = false
    }

    // profile for singularity: enable it exclusively and disable every
    // other container/conda engine so pipeline defaults cannot conflict
    singularity {
        singularity.enabled     = true
        singularity.autoMounts  = true
        conda.enabled           = false
        docker.enabled          = false
        podman.enabled          = false
        shifter.enabled         = false
        charliecloud.enabled    = false
        apptainer.enabled       = false
        process.beforeScript = 'module load singularity'
        singularity.cacheDir = '/nfs/scratch/singularity_cache'
    }

    // profile for apptainer: mirror of the singularity profile with a
    // separate shared image cache
    apptainer {
        apptainer.enabled       = true
        apptainer.autoMounts    = true
        conda.enabled           = false
        docker.enabled          = false
        singularity.enabled     = false
        podman.enabled          = false
        shifter.enabled         = false
        charliecloud.enabled    = false
        process.beforeScript = 'module load apptainer'
        apptainer.cacheDir = '/nfs/scratch/apptainer_cache'
    }

    // profile for gpu queue: expose GPUs inside the container runtime
    // ('--nv' / '--gpus all') and request one A40 via SLURM
    gpu {
        docker.runOptions       = '-u $(id -u):$(id -g) --gpus all'
        apptainer.runOptions    = '--nv'
        singularity.runOptions  = '--nv'
        process.queue = 'shared-gpu'
        // NOTE(review): compms-gpu-1 is excluded here — presumably a known-bad
        // node; confirm whether the exclusion is still needed
        process.clusterOptions = '--qos=limitgpus --gpus=a40:1 --exclude compms-gpu-1.exbio.wzw.tum.de'
        // fewer concurrent jobs on the smaller GPU partition
        executor.queueSize = 5
    }
}