diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml index 3ab7364a..b26c8df0 100644 --- a/.github/workflows/awsfulltest.yml +++ b/.github/workflows/awsfulltest.yml @@ -37,7 +37,7 @@ jobs: } profiles: test_full - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: Seqera Platform debug log file path: | diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index d747d13c..b1f77e32 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -25,7 +25,7 @@ jobs: } profiles: test - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: Seqera Platform debug log file path: | diff --git a/.github/workflows/download_pipeline.yml b/.github/workflows/download_pipeline.yml index 6d94bcbf..45884ff9 100644 --- a/.github/workflows/download_pipeline.yml +++ b/.github/workflows/download_pipeline.yml @@ -127,7 +127,7 @@ jobs: fi - name: Upload Nextflow logfile for debugging purposes - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: nextflow_logfile.txt path: .nextflow.log* diff --git a/.github/workflows/fix_linting.yml b/.github/workflows/fix_linting.yml index b67a297c..6a1fef59 100644 --- a/.github/workflows/fix_linting.yml +++ b/.github/workflows/fix_linting.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: # Use the @nf-core-bot token to check out so we can push later - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: token: ${{ secrets.nf_core_bot_auth_token }} diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 30e66026..7a527a34 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -11,7 +11,7 @@ jobs: pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - name: Set up Python 3.14 uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6 @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out pipeline code - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 - name: Install Nextflow uses: nf-core/setup-nextflow@v2 @@ -71,7 +71,7 @@ jobs: - name: Upload linting log file artifact if: ${{ always() }} - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: linting-logs path: | diff --git a/.github/workflows/nf-test.yml b/.github/workflows/nf-test.yml index cb4db1d2..121dd70a 100644 --- a/.github/workflows/nf-test.yml +++ b/.github/workflows/nf-test.yml @@ -40,7 +40,7 @@ jobs: rm -rf ./* || true rm -rf ./.??* || true ls -la ./ - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: fetch-depth: 0 @@ -86,7 +86,7 @@ jobs: TOTAL_SHARDS: ${{ needs.nf-test-changes.outputs.total_shards }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: fetch-depth: 0 diff --git a/.github/workflows/template-version-comment.yml b/.github/workflows/template-version-comment.yml index c5988af9..e8560fc7 100644 --- a/.github/workflows/template-version-comment.yml +++ b/.github/workflows/template-version-comment.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out pipeline code - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5 with: ref: ${{ github.event.pull_request.head.sha }} diff --git a/.nf-core.yml b/.nf-core.yml index a38a857f..1a7fed6b 100644 --- a/.nf-core.yml +++ b/.nf-core.yml @@ -2,7 +2,7 @@ lint: files_unchanged: - .github/CONTRIBUTING.md - .vscode/settings.json -nf_core_version: 3.4.1 +nf_core_version: 3.5.1 repository_type: pipeline template: author: Jonas Scheid, Steffen Lemke, Leon Bichmann, Marissa Dubbelaar @@ -12,7 +12,7 @@ template: name: mhcquant org: nf-core outdir: . - version: 3.1.0 skip_features: - fastqc - igenomes + version: 3.2.0dev diff --git a/.prettierignore b/.prettierignore index ceb1e1ca..b287a6a9 100644 --- a/.prettierignore +++ b/.prettierignore @@ -13,3 +13,5 @@ bin/ .nf-test/ ro-crate-metadata.json .nf-test/ +modules/nf-core/ +subworkflows/nf-core/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 363fe483..fe08b61a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 3.2.0 - [releasename] - [date] + +### `Changed` + +- Migrate to topic channels [#431](https://github.com/nf-core/mhcquant/pull/431) + +### `Dependencies` + +| Dependency | Old version | New version | +| ---------- | ----------- | ----------- | +| `MultiQC` | 1.31.0 | 1.33.0 | +| `Nf-core` | 3.4.1 | 3.5.1 | + ## 3.1.0 - BlüBa - 07/01/26 ### `Added` diff --git a/README.md b/README.md index dab675dc..5f5030cf 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,13 @@ -[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new/nf-core/mhcquant) +[![Open in GitHub Codespaces](https://img.shields.io/badge/Open_In_GitHub_Codespaces-black?labelColor=grey&logo=github)](https://github.com/codespaces/new/nf-core/mhcquant) [![GitHub Actions CI Status](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml) [![GitHub Actions Linting Status](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/mhcquant/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.8427707-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.8427707) [![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com) [![Nextflow](https://img.shields.io/badge/version-%E2%89%A525.04.0-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/) -[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.4.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.4.1) 
+[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.5.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.5.1) [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/) [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml index 6db209cc..2c55035f 100644 --- a/assets/multiqc_config.yml +++ b/assets/multiqc_config.yml @@ -3,9 +3,7 @@ custom_logo_url: https://github.com/nf-core/mhcquant custom_logo_title: "nf-core/mhcquant" report_comment: > - This report has been generated by the nf-core/mhcquant - analysis pipeline. For information about how to interpret these results, please see the - documentation. + This report has been generated by the nf-core/mhcquant analysis pipeline. For information about how to interpret these results, please see the documentation. report_section_order: "nf-core-mhcquant-methods-description": order: -1000 diff --git a/conf/igenomes.config b/conf/igenomes.config deleted file mode 100644 index 3f114377..00000000 --- a/conf/igenomes.config +++ /dev/null @@ -1,440 +0,0 @@ -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Nextflow config file for iGenomes paths -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Defines reference genomes using iGenome paths. - Can be used by any config that customises the base path using: - $params.igenomes_base / --igenomes_base ----------------------------------------------------------------------------------------- -*/ - -params { - // illumina iGenomes reference file paths - genomes { - 'GRCh37' { - fasta = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/GRCh37-blacklist.bed" - } - 'GRCh38' { - fasta = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "2.7e9" - 
blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" - } - 'CHM13' { - fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/CHM13/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/CHM13/Sequence/BWAIndex/" - bwamem2 = "${params.igenomes_base}/Homo_sapiens/UCSC/CHM13/Sequence/BWAmem2Index/" - gtf = "${params.igenomes_base}/Homo_sapiens/NCBI/CHM13/Annotation/Genes/genes.gtf" - gff = "ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/009/914/755/GCF_009914755.1_T2T-CHM13v2.0/GCF_009914755.1_T2T-CHM13v2.0_genomic.gff.gz" - mito_name = "chrM" - } - 'GRCm38' { - fasta = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "1.87e9" - blacklist = "${projectDir}/assets/blacklists/GRCm38-blacklist.bed" - } - 'TAIR10' { - fasta = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/README.txt" - mito_name = "Mt" - } - 'EB2' { - fasta = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/README.txt" - } - 'UMD3.1' { - fasta = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BismarkIndex/" - gtf = 
"${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/README.txt" - mito_name = "MT" - } - 'WBcel235' { - fasta = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.bed" - mito_name = "MtDNA" - macs_gsize = "9e7" - } - 'CanFam3.1' { - fasta = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/README.txt" - mito_name = "MT" - } - 'GRCz10' { - fasta = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'BDGP6' { - fasta = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.bed" - mito_name = "M" - macs_gsize = "1.2e8" - } - 'EquCab2' { - fasta = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = 
"${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/README.txt" - mito_name = "MT" - } - 'EB1' { - fasta = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/README.txt" - } - 'Galgal4' { - fasta = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'Gm01' { - fasta = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/README.txt" - } - 'Mmul_1' { - fasta = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/README.txt" - mito_name = 
"MT" - } - 'IRGSP-1.0' { - fasta = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.bed" - mito_name = "Mt" - } - 'CHIMP2.1.4' { - fasta = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/README.txt" - mito_name = "MT" - } - 'Rnor_5.0' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'Rnor_6.0' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.bed" - mito_name = "MT" - } - 'R64-1-1' { - fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/STARIndex/" - bismark = 
"${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.bed" - mito_name = "MT" - macs_gsize = "1.2e7" - } - 'EF2' { - fasta = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/README.txt" - mito_name = "MT" - macs_gsize = "1.21e7" - } - 'Sbi1' { - fasta = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/README.txt" - } - 'Sscrofa10.2' { - fasta = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/README.txt" - mito_name = "MT" - } - 'AGPv3' { - fasta = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.bed" - mito_name = "Mt" - } - 'hg38' { - fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa" - bwa = 
"${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" - } - 'hg19' { - fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "2.7e9" - blacklist = "${projectDir}/assets/blacklists/hg19-blacklist.bed" - } - 'mm10' { - fasta = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "1.87e9" - blacklist = "${projectDir}/assets/blacklists/mm10-blacklist.bed" - } - 'bosTau8' { - fasta = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.bed" - mito_name = "chrM" - } - 'ce10' { - fasta = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.gtf" - bed12 = 
"${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "9e7" - } - 'canFam3' { - fasta = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/README.txt" - mito_name = "chrM" - } - 'danRer10' { - fasta = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "1.37e9" - } - 'dm6' { - fasta = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.bed" - mito_name = "chrM" - macs_gsize = "1.2e8" - } - 'equCab2' { - fasta = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/README.txt" - mito_name = "chrM" - } - 'galGal4' { - fasta = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/STARIndex/" - 
bismark = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/README.txt" - mito_name = "chrM" - } - 'panTro4' { - fasta = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/README.txt" - mito_name = "chrM" - } - 'rn6' { - fasta = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.bed" - mito_name = "chrM" - } - 'sacCer3' { - fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BismarkIndex/" - readme = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Annotation/README.txt" - mito_name = "chrM" - macs_gsize = "1.2e7" - } - 'susScr3' { - fasta = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa" - bwa = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BWAIndex/version0.6.0/" - bowtie2 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/" - star = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/STARIndex/" - bismark = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BismarkIndex/" - gtf = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.gtf" - bed12 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.bed" - readme = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/README.txt" - mito_name = "chrM" - } - } -} diff --git a/conf/igenomes_ignored.config b/conf/igenomes_ignored.config deleted file mode 100644 index b4034d82..00000000 --- a/conf/igenomes_ignored.config +++ /dev/null @@ -1,9 +0,0 @@ -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Nextflow config file for iGenomes paths 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Empty genomes dictionary to use when igenomes is ignored. ----------------------------------------------------------------------------------------- -*/ - -params.genomes = [:] diff --git a/conf/modules.config b/conf/modules.config index c5b93fae..ada4c8ad 100644 --- a/conf/modules.config +++ b/conf/modules.config @@ -367,6 +367,8 @@ process { mode: params.publish_dir_mode, pattern: '*.html'] ] + errorStrategy = 'retry' + maxRetries = 3 } withName: 'OPENMS_PERCOLATORADAPTER' { diff --git a/conf/test_full.config b/conf/test_full.config index 4fc59bc5..e9215ae2 100644 --- a/conf/test_full.config +++ b/conf/test_full.config @@ -25,5 +25,4 @@ params { quantify = true generate_speclib = true annotate_ions = true - epicore = true } diff --git a/main.nf b/main.nf index 5067acde..8c309076 100644 --- a/main.nf +++ b/main.nf @@ -18,7 +18,6 @@ include { MHCQUANT } from './workflows/mhcquant' include { PIPELINE_INITIALISATION } from './subworkflows/local/utils_nfcore_mhcquant_pipeline' include { PIPELINE_COMPLETION } from './subworkflows/local/utils_nfcore_mhcquant_pipeline' - /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NAMED WORKFLOWS FOR PIPELINE diff --git a/modules.json b/modules.json index 32f6f4c5..c3897798 100644 --- a/modules.json +++ b/modules.json @@ -12,7 +12,7 @@ }, "multiqc": { "branch": "master", - "git_sha": "e10b76ca0c66213581bec2833e30d31f239dec0b", + "git_sha": "5bdb098216aaf5df9c3b6343e6204cd932503c16", "installed_by": ["modules"] }, "openms/decoydatabase": { @@ -81,7 +81,7 @@ }, "utils_nfcore_pipeline": { "branch": "master", - "git_sha": "05954dab2ff481bcb999f24455da29a5828af08d", + "git_sha": "271e7fc14eb1320364416d996fb077421f3faed2", "installed_by": ["subworkflows"] }, "utils_nfschema_plugin": { diff --git a/modules/local/easypqp/convert/main.nf b/modules/local/easypqp/convert/main.nf index 45387cdc..5e442dfc 100644 --- a/modules/local/easypqp/convert/main.nf +++ b/modules/local/easypqp/convert/main.nf @@ -14,7 +14,7 @@ process EASYPQP_CONVERT { output: tuple val(meta), path("*.psmpkl") , emit: psmpkl tuple val(meta), path("*.peakpkl"), emit: peakpkl - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('easypqp'), eval("easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -32,11 +32,6 @@ process EASYPQP_CONVERT { --spectra $spectra \\ --unimod $unimod \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - easypqp: \$(easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+') - END_VERSIONS """ stub: @@ -49,10 +44,5 @@ process EASYPQP_CONVERT { touch "${prefix}.psmpkl" touch "${prefix}.peakpkl" - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - easypqp: \$(easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+') - END_VERSIONS """ } diff --git a/modules/local/easypqp/library/main.nf b/modules/local/easypqp/library/main.nf index 6f720fb1..8db92728 100644 --- a/modules/local/easypqp/library/main.nf +++ b/modules/local/easypqp/library/main.nf @@ -12,7 +12,7 @@ process EASYPQP_LIBRARY { output: tuple val(meta), path("*.tsv") , emit: tsv - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('easypqp'), eval("easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+'"), emit: versions, topic: versions when: 
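The module diffs in this changeset all replace the `cat <<-END_VERSIONS > versions.yml` heredoc with a single output tuple routed to the shared `versions` topic, using an `eval()` output to capture the tool version at runtime. Below is a minimal sketch of that pattern, assuming a Nextflow release with `eval()` outputs and topic channels (≥ 24.04); the `EXAMPLE_TOOL` process name, tool name and version command are purely illustrative and not part of this pipeline. Processes that report several tools (for example OPENMS_PERCOLATORADAPTER further down) declare one such tuple per tool, each pointed at the same `versions` topic.

process EXAMPLE_TOOL {
    input:
    tuple val(meta), path(infile)

    output:
    tuple val(meta), path("*.out"), emit: results
    // one (process name, tool name, version string) record, sent to the shared 'versions' topic
    tuple val("${task.process}"), val('exampletool'), eval("exampletool --version | cut -d ' ' -f 2"), emit: versions, topic: versions

    script:
    """
    exampletool --in $infile --out ${meta.id}.out
    """
}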
task.ext.when == null || task.ext.when @@ -30,11 +30,6 @@ process EASYPQP_LIBRARY { --out ${prefix}_speclib.tsv \ $args \ $psmpkl $peakpkl - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - easypqp: \$(easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+') - END_VERSIONS """ stub: @@ -46,10 +41,5 @@ process EASYPQP_LIBRARY { mkdir -p \$MPLCONFIGDIR \$XDG_CACHE_HOME touch "${prefix}_speclib.tsv" - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - easypqp: \$(easypqp --version 2>&1 | grep -oP '(?<=easypqp, version )\\d+\\.\\d+\\.\\d+') - END_VERSIONS """ } diff --git a/modules/local/epicore/main.nf b/modules/local/epicore/main.nf index a04efff9..664c993e 100644 --- a/modules/local/epicore/main.nf +++ b/modules/local/epicore/main.nf @@ -16,7 +16,7 @@ process EPICORE { path "${result_tsv}", emit: final_epicore_tsv path "epicore_length_distribution.html", emit: length_dist path "epicore_intensity_histogram.html", emit: intensity_hist - path "versions.yml", emit: versions + tuple val("${task.process}"), val('epicore'), eval("echo \$(epicore --version) | grep 'epicore' | cut -d ' ' -f3 | cut -c2-"), emit: versions, topic: versions script: def args = task.ext.args ?: '' @@ -32,11 +32,6 @@ process EPICORE { # Add epicore statistics to MultiQC general stats table wc -l < epitopes.csv | awk '{print \$1 - 1}' > epicores.txt awk 'NR==1 {print \$0 ",# Epicores"; next} NR==2 {getline extra < "epicores.txt"; print \$0 "," extra}' $general_stats > _modified_$general_stats - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - epicore: \$(echo \$(epicore --version) | grep 'epicore' | cut -d ' ' -f3 | cut -c2-) - END_VERSIONS """ stub: @@ -47,10 +42,5 @@ process EPICORE { touch ${prefix}.tsv touch epicore_length_distribution.html touch epicore_intensity_hist.html - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - epicore: \$(echo \$(epicore --version) | grep 'epicore' | cut -d ' ' -f3 | cut -c2-) - END_VERSIONS """ } diff --git a/modules/local/ms2rescore/main.nf b/modules/local/ms2rescore/main.nf index 06fcb72a..8adc1fc5 100644 --- a/modules/local/ms2rescore/main.nf +++ b/modules/local/ms2rescore/main.nf @@ -17,7 +17,7 @@ process MS2RESCORE { tuple val(meta), path("*ms2rescore.idXML") , emit: idxml tuple val(meta), path("*feature_names.tsv"), emit: feature_names tuple val(meta), path("*.html" ) , optional:true, emit: html - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('MS2Rescore'), eval("echo \"\$(ms2rescore --version 2>&1)\" | grep -oP 'MS²Rescore \\(v\\K[^\\)]+'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -33,11 +33,6 @@ process MS2RESCORE { --output_path ${prefix}.idXML \\ --processes $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - MS²Rescore: \$(echo \$(ms2rescore --version 2>&1) | grep -oP 'MS²Rescore \\(v\\K[^\\)]+' )) - END_VERSIONS """ stub: @@ -47,10 +42,5 @@ process MS2RESCORE { touch ${prefix}.idXML touch ${meta.id}_feature_names.tsv touch ${meta.id}.html - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - MS²Rescore: \$(echo \$(ms2rescore --version 2>&1) | grep -oP 'MS²Rescore \\(v\\K[^\\)]+' )) - END_VERSIONS """ } diff --git a/modules/local/openms/featurefinderidentification/main.nf b/modules/local/openms/featurefinderidentification/main.nf index 794455ec..4f9e7398 100644 --- a/modules/local/openms/featurefinderidentification/main.nf +++ b/modules/local/openms/featurefinderidentification/main.nf @@ -13,27 +13,22 @@ process 
OPENMS_FEATUREFINDERIDENTIFICATION { output: tuple val(meta), path("*.featureXML"), emit: featurexml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when script: def prefix = task.ext.prefix ?: "${meta.id}_${meta.sample}_${meta.condition}" - def args = task.ext.args ?: '' def quant_fdr = params.quantification_fdr ? "-id $id_int -id_ext $id_ext -svm:min_prob ${params.quantification_min_prob}" : "-id $id_ext" - args = args + " $quant_fdr" - + def args = quant_fdr + args = args + (task.ext.args ? " ${task.ext.args}" : '') + """ FeatureFinderIdentification -in $mzml \\ -out ${prefix}.featureXML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -41,10 +36,5 @@ process OPENMS_FEATUREFINDERIDENTIFICATION { """ touch ${prefix}.featureXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/idconflictresolver/main.nf b/modules/local/openms/idconflictresolver/main.nf index 9228ee81..b116851e 100644 --- a/modules/local/openms/idconflictresolver/main.nf +++ b/modules/local/openms/idconflictresolver/main.nf @@ -12,24 +12,18 @@ process OPENMS_IDCONFLICTRESOLVER { output: tuple val(meta), path("*.consensusXML"), emit: consensusxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when script: def prefix = task.ext.prefix ?: "${meta.id}_resolved" - + """ - IDConflictResolver \\ - -in $consensus \\ + IDConflictResolver -in $consensus \\ -out ${prefix}.consensusXML \\ -threads $task.cpus - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -37,10 +31,5 @@ process OPENMS_IDCONFLICTRESOLVER { """ touch ${prefix}.consensusXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/mapaligneridentification/main.nf b/modules/local/openms/mapaligneridentification/main.nf index fd19d5c3..fd6f3ec1 100644 --- a/modules/local/openms/mapaligneridentification/main.nf +++ b/modules/local/openms/mapaligneridentification/main.nf @@ -12,7 +12,7 @@ process OPENMS_MAPALIGNERIDENTIFICATION { output: tuple val(meta), path("*.trafoXML"), emit: trafoxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -20,17 +20,11 @@ process OPENMS_MAPALIGNERIDENTIFICATION { script: def args = task.ext.args ?: '' def out_names = idxmls.collect { it.baseName.replace('_fdr_filtered','')+'.trafoXML' }.join(' ') - + """ - 
MapAlignerIdentification \\ - -in $idxmls \\ + MapAlignerIdentification -in $idxmls \\ -trafo_out ${out_names} \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -38,10 +32,5 @@ process OPENMS_MAPALIGNERIDENTIFICATION { """ touch test1.consensusXML touch test2.consensusXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/maprttransformer/main.nf b/modules/local/openms/maprttransformer/main.nf index a2889e35..fd436d6d 100644 --- a/modules/local/openms/maprttransformer/main.nf +++ b/modules/local/openms/maprttransformer/main.nf @@ -12,28 +12,22 @@ process OPENMS_MAPRTTRANSFORMER { output: tuple val(meta), path("*_aligned.*"), emit: aligned - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when script: - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}_aligned" def fileExt = alignment_file.collect { it.name.tokenize("\\.")[1] }.join(' ') """ - MapRTTransformer \\ - -in $alignment_file \\ + MapRTTransformer -in $alignment_file \\ -trafo_in $trafoxml \\ -out ${prefix}.${fileExt} \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -42,10 +36,5 @@ process OPENMS_MAPRTTRANSFORMER { """ touch ${prefix}.${fileExt} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/mztabexporter/main.nf b/modules/local/openms/mztabexporter/main.nf index ee5500b4..5fe288dd 100644 --- a/modules/local/openms/mztabexporter/main.nf +++ b/modules/local/openms/mztabexporter/main.nf @@ -12,7 +12,7 @@ process OPENMS_MZTABEXPORTER { output: tuple val(meta), path("*.mzTab"), emit: mztab - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -20,18 +20,12 @@ process OPENMS_MZTABEXPORTER { script: def args = task.ext.args ?: '' def prefix = task.ext.prefix ?: "${meta.id}" - + """ - MzTabExporter \\ - -in $in_file \\ + MzTabExporter -in $in_file \\ -out ${prefix}.mzTab \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -39,10 +33,5 @@ process OPENMS_MZTABEXPORTER { """ touch ${prefix}.mzTab - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/psmfeatureextractor/main.nf 
b/modules/local/openms/psmfeatureextractor/main.nf index 870e6519..33fd6f10 100644 --- a/modules/local/openms/psmfeatureextractor/main.nf +++ b/modules/local/openms/psmfeatureextractor/main.nf @@ -12,7 +12,7 @@ process OPENMS_PSMFEATUREEXTRACTOR { output: tuple val(meta), path("*.idXML"), emit: idxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -24,18 +24,12 @@ process OPENMS_PSMFEATUREEXTRACTOR { """ extra_features=\$(awk 'NR > 1 && \$1 !~ /psm_file/ {printf \"%s \", \$2}' ${feature_file}) - - PSMFeatureExtractor \\ - -in $idxml \\ + + PSMFeatureExtractor -in $idxml \\ -out ${prefix}.idXML \\ -threads $task.cpus \\ -extra \$extra_features \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -43,10 +37,5 @@ process OPENMS_PSMFEATUREEXTRACTOR { """ touch ${prefix}.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openms/textexporter/main.nf b/modules/local/openms/textexporter/main.nf index 46dd8f33..f8b9173a 100644 --- a/modules/local/openms/textexporter/main.nf +++ b/modules/local/openms/textexporter/main.nf @@ -12,7 +12,7 @@ process OPENMS_TEXTEXPORTER { output: tuple val(meta), path("*.tsv"), emit: tsv - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -20,20 +20,14 @@ process OPENMS_TEXTEXPORTER { script: def args = task.ext.args ?: '' def prefix = task.ext.prefix ?: "${meta.id}" - + """ - TextExporter \\ - -in $file \\ + TextExporter -in $file \\ -out ${prefix}_exported.tsv \\ -threads $task.cpus \\ -id:add_hit_metavalues 0 \\ -id:peptides_only \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -41,10 +35,5 @@ process OPENMS_TEXTEXPORTER { """ touch ${prefix}.tsv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openmsthirdparty/featurelinkerunlabeledkd/main.nf b/modules/local/openmsthirdparty/featurelinkerunlabeledkd/main.nf index 96ce7b37..d4015255 100644 --- a/modules/local/openmsthirdparty/featurelinkerunlabeledkd/main.nf +++ b/modules/local/openmsthirdparty/featurelinkerunlabeledkd/main.nf @@ -12,7 +12,7 @@ process OPENMS_FEATURELINKERUNLABELEDKD { output: tuple val(meta), path("*.consensusXML"), emit: consensusxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -24,11 +24,6 @@ process OPENMS_FEATURELINKERUNLABELEDKD { FeatureLinkerUnlabeledKD -in $features \\ -out 
${prefix}.consensusXML \\ -threads $task.cpus - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms-thirdparty: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -36,10 +31,5 @@ process OPENMS_FEATURELINKERUNLABELEDKD { """ touch ${prefix}.consensusXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms-thirdparty: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/local/openmsthirdparty/percolatoradapter/main.nf b/modules/local/openmsthirdparty/percolatoradapter/main.nf index 8199ce17..94a934ad 100644 --- a/modules/local/openmsthirdparty/percolatoradapter/main.nf +++ b/modules/local/openmsthirdparty/percolatoradapter/main.nf @@ -13,7 +13,8 @@ process OPENMS_PERCOLATORADAPTER { output: tuple val(meta), path("*.idXML") , emit: idxml tuple val(meta), path("*_percolator_feature_weights.tsv"), emit: feature_weights, optional: true - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('PercolatorAdapter'), eval("PercolatorAdapter 2>&1 | grep -E '^Version(.*)' | sed 's/Version: //g' | cut -d ' ' -f 1"), emit: versions, topic: versions + tuple val("${task.process}"), val('percolator'), eval("percolator -h 2>&1 | grep -E '^Percolator version(.*)' | sed 's/Percolator version //g'"), emit: versions_1, topic: versions when: task.ext.when == null || task.ext.when @@ -28,12 +29,6 @@ process OPENMS_PERCOLATORADAPTER { -out ${prefix}.idXML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - PercolatorAdapter: \$(PercolatorAdapter 2>&1 | grep -E '^Version(.*)' | sed 's/Version: //g' | cut -d ' ' -f 1) - percolator: \$(percolator -h 2>&1 | grep -E '^Percolator version(.*)' | sed 's/Percolator version //g') - END_VERSIONS """ stub: @@ -41,11 +36,5 @@ process OPENMS_PERCOLATORADAPTER { """ touch ${prefix}.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - PercolatorAdapter: \$(PercolatorAdapter 2>&1 | grep -E '^Version(.*)' | sed 's/Version: //g' | cut -d ' ' -f 1) - percolator: \$(percolator -h 2>&1 | grep -E '^Percolator version(.*)' | sed 's/Percolator version //g') - END_VERSIONS """ } diff --git a/modules/local/pyopenms/chromatogramextractor/main.nf b/modules/local/pyopenms/chromatogramextractor/main.nf index 33f138e0..038177ab 100644 --- a/modules/local/pyopenms/chromatogramextractor/main.nf +++ b/modules/local/pyopenms/chromatogramextractor/main.nf @@ -12,7 +12,7 @@ process PYOPENMS_CHROMATOGRAMEXTRACTOR { output: tuple val(meta), path("*.csv") , emit: csv - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('pyopenms'), eval("pip show pyopenms | grep Version | sed 's/Version: //'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -25,11 +25,6 @@ process PYOPENMS_CHROMATOGRAMEXTRACTOR { chromatogram_extractor.py \\ -in $mzml \\ -out ${prefix}_chrom.csv \\ - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ stub: @@ -37,10 +32,5 @@ process PYOPENMS_CHROMATOGRAMEXTRACTOR { """ touch ${prefix}_chrom.csv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ } diff --git a/modules/local/pyopenms/ionannotator/main.nf b/modules/local/pyopenms/ionannotator/main.nf index cb5f06a6..e58c6ce1 100644 
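On the consuming side, the (process, tool, version) tuples published to the `versions` topic are gathered once at workflow level rather than being mixed into a `ch_versions` channel module by module; in this pipeline that is handled by the updated utils_nfcore_pipeline subworkflow referenced in the modules.json bump above. The snippet below is only a rough, self-contained sketch of the idea under the same Nextflow-version assumption as before, with an illustrative output file name rather than the template's actual helper code.

workflow {
    // Every value emitted with `topic: versions` anywhere in the run is available here.
    channel.topic('versions')
        .unique()
        .map { proc, tool, version -> "${proc.tokenize(':').last()}:\n  ${tool}: ${version}" }
        .collectFile(name: 'software_versions.yml', newLine: true, sort: true)
}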
--- a/modules/local/pyopenms/ionannotator/main.nf +++ b/modules/local/pyopenms/ionannotator/main.nf @@ -12,7 +12,7 @@ process PYOPENMS_IONANNOTATOR { output: tuple val(meta), path("*.tsv") , emit: tsv - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('pyopenms'), eval("pip show pyopenms | grep Version | sed 's/Version: //'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -35,12 +35,6 @@ process PYOPENMS_IONANNOTATOR { $zions \\ $aions \\ $cions - - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ stub: @@ -49,10 +43,5 @@ process PYOPENMS_IONANNOTATOR { """ touch ${prefix}_all_peaks.tsv touch ${prefix}_matching_ions.tsv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ } diff --git a/modules/local/pyopenms/summarize_results/main.nf b/modules/local/pyopenms/summarize_results/main.nf index e92edd38..c26a416c 100644 --- a/modules/local/pyopenms/summarize_results/main.nf +++ b/modules/local/pyopenms/summarize_results/main.nf @@ -16,7 +16,7 @@ process SUMMARIZE_RESULTS { path '*_peptide_length.csv' , emit: lengths, optional: true path '*_peptide_intensity.csv' , emit: intensities, optional: true tuple val(meta), path('*.tsv'), path('*_general_stats.csv') , emit: epicore_input - path 'versions.yml' , emit: versions + tuple val("${task.process}"), val('pyopenms'), eval("pip show pyopenms | grep Version | sed 's/Version: //'"), emit: versions, topic: versions script: def args = task.ext.args ?: '' @@ -29,11 +29,6 @@ process SUMMARIZE_RESULTS { --out_prefix $prefix \\ $quantify \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ stub: @@ -48,10 +43,5 @@ process SUMMARIZE_RESULTS { touch ${prefix}_peptide_intensity.csv touch ${prefix}_general_stats.csv touch ${prefix}.tsv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - pyopenms: \$(pip show pyopenms | grep Version | sed 's/Version: //') - END_VERSIONS """ } diff --git a/modules/local/tdf2mzml/main.nf b/modules/local/tdf2mzml/main.nf index b900b81a..482545a9 100644 --- a/modules/local/tdf2mzml/main.nf +++ b/modules/local/tdf2mzml/main.nf @@ -8,19 +8,14 @@ process TDF2MZML { output: tuple val(meta), path("*.mzML"), emit: mzml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('python'), eval("python3 --version | cut -d ' ' -f2"), emit: versions, topic: versions + tuple val("${task.process}"), val('tdf2mzml'), eval("echo 0.3.0"), emit: versions_1, topic: versions script: def prefix = task.ext.prefix ?: "${tdf.simpleName}" """ tdf2mzml.py -i $tdf -o ${prefix}.mzML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - python: \$(python3 --version | cut -d ' ' -f2) - tdf2mzml: \$(echo 0.3.0) - END_VERSIONS """ stub: @@ -28,11 +23,5 @@ process TDF2MZML { """ touch ${prefix}.mzML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - python: \$(python3 --version | cut -d ' ' -f2) - tdf2mzml: \$(echo 0.3.0) - END_VERSIONS """ } diff --git a/modules/local/untar/main.nf b/modules/local/untar/main.nf index 35c364b1..649af575 100644 --- a/modules/local/untar/main.nf +++ b/modules/local/untar/main.nf @@ -12,7 +12,7 @@ process UNTAR { output: tuple val(meta), path("*.d"), emit: untar - path "versions.yml" , emit: versions + tuple val("${task.process}"), 
val('untar'), eval("echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -33,11 +33,6 @@ process UNTAR { $archive \\ --strip-components=\$depth \\ $args2 - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//') - END_VERSIONS """ stub: @@ -46,10 +41,5 @@ process UNTAR { """ mkdir $prefix touch ${prefix}/file.txt - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - untar: \$(echo \$(tar --version 2>&1) | sed 's/^.*(GNU tar) //; s/ Copyright.*\$//') - END_VERSIONS """ } diff --git a/modules/local/unzip/main.nf b/modules/local/unzip/main.nf index 5405ef0f..3c5e5110 100644 --- a/modules/local/unzip/main.nf +++ b/modules/local/unzip/main.nf @@ -12,7 +12,7 @@ process UNZIP { output: tuple val(meta), path("*.d"), emit: unzipped_archive - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('7za'), eval("echo \$(7za --help) | sed 's/.*p7zip Version //; s/(.*//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -28,11 +28,6 @@ process UNZIP { -o"." \\ $args \\ $archive - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - 7za: \$(echo \$(7za --help) | sed 's/.*p7zip Version //; s/(.*//') - END_VERSIONS """ stub: @@ -40,10 +35,5 @@ process UNZIP { """ touch ${prefix}.d - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - 7za: \$(echo \$(7za --help) | sed 's/.*p7zip Version //; s/(.*//') - END_VERSIONS """ } diff --git a/modules/nf-core/gunzip/main.nf b/modules/nf-core/gunzip/main.nf index 3ffc8e92..df91c841 100644 --- a/modules/nf-core/gunzip/main.nf +++ b/modules/nf-core/gunzip/main.nf @@ -12,7 +12,7 @@ process GUNZIP { output: tuple val(meta), path("${gunzip}"), emit: gunzip - path "versions.yml", emit: versions + tuple val("${task.process}"), val('gunzip'), eval("echo \$(gunzip --version 2>&1) | sed 's/^.*(gzip) //; s/ Copyright.*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -32,11 +32,6 @@ process GUNZIP { ${args} \\ ${archive} \\ > ${gunzip} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - gunzip: \$(echo \$(gunzip --version 2>&1) | sed 's/^.*(gzip) //; s/ Copyright.*\$//') - END_VERSIONS """ stub: @@ -47,9 +42,5 @@ process GUNZIP { gunzip = prefix + ".${extension}" """ touch ${gunzip} - cat <<-END_VERSIONS > versions.yml - "${task.process}": - gunzip: \$(echo \$(gunzip --version 2>&1) | sed 's/^.*(gzip) //; s/ Copyright.*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/multiqc/environment.yml b/modules/nf-core/multiqc/environment.yml index dd513cbd..009874d4 100644 --- a/modules/nf-core/multiqc/environment.yml +++ b/modules/nf-core/multiqc/environment.yml @@ -4,4 +4,4 @@ channels: - conda-forge - bioconda dependencies: - - bioconda::multiqc=1.31 + - bioconda::multiqc=1.33 diff --git a/modules/nf-core/multiqc/main.nf b/modules/nf-core/multiqc/main.nf index 5288f5cc..3b0e975b 100644 --- a/modules/nf-core/multiqc/main.nf +++ b/modules/nf-core/multiqc/main.nf @@ -3,11 +3,11 @@ process MULTIQC { conda "${moduleDir}/environment.yml" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
- 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/ef/eff0eafe78d5f3b65a6639265a16b89fdca88d06d18894f90fcdb50142004329/data' : - 'community.wave.seqera.io/library/multiqc:1.31--1efbafd542a23882' }" + 'https://community-cr-prod.seqera.io/docker/registry/v2/blobs/sha256/34/34e733a9ae16a27e80fe00f863ea1479c96416017f24a907996126283e7ecd4d/data' : + 'community.wave.seqera.io/library/multiqc:1.33--ee7739d47738383b' }" input: - path multiqc_files, stageAs: "?/*" + path multiqc_files, stageAs: "?/*" path(multiqc_config) path(extra_multiqc_config) path(multiqc_logo) @@ -15,10 +15,11 @@ process MULTIQC { path(sample_names) output: - path "*multiqc_report.html", emit: report - path "*_data" , emit: data - path "*_plots" , optional:true, emit: plots - path "versions.yml" , emit: versions + path "*.html" , emit: report + path "*_data" , emit: data + path "*_plots" , optional:true, emit: plots + tuple val("${task.process}"), val('multiqc'), eval('multiqc --version | sed "s/.* //g"'), emit: versions + // MultiQC should not push its versions to the `versions` topic. Its input depends on the versions topic to be resolved thus outputting to the topic will let the pipeline hang forever when: task.ext.when == null || task.ext.when @@ -26,38 +27,29 @@ process MULTIQC { script: def args = task.ext.args ?: '' def prefix = task.ext.prefix ? "--filename ${task.ext.prefix}.html" : '' - def config = multiqc_config ? "--config $multiqc_config" : '' - def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : '' + def config = multiqc_config ? "--config ${multiqc_config}" : '' + def extra_config = extra_multiqc_config ? "--config ${extra_multiqc_config}" : '' def logo = multiqc_logo ? "--cl-config 'custom_logo: \"${multiqc_logo}\"'" : '' def replace = replace_names ? "--replace-names ${replace_names}" : '' def samples = sample_names ? "--sample-names ${sample_names}" : '' """ multiqc \\ --force \\ - $args \\ - $config \\ - $prefix \\ - $extra_config \\ - $logo \\ - $replace \\ - $samples \\ + ${args} \\ + ${config} \\ + ${prefix} \\ + ${extra_config} \\ + ${logo} \\ + ${replace} \\ + ${samples} \\ . 
- - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ stub: """ mkdir multiqc_data + touch multiqc_data/.stub mkdir multiqc_plots touch multiqc_report.html - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ } diff --git a/modules/nf-core/multiqc/meta.yml b/modules/nf-core/multiqc/meta.yml index ce30eb73..f790cab0 100644 --- a/modules/nf-core/multiqc/meta.yml +++ b/modules/nf-core/multiqc/meta.yml @@ -57,10 +57,10 @@ input: - edam: http://edamontology.org/format_3475 # TSV output: report: - - "*multiqc_report.html": + - "*.html": type: file description: MultiQC report file - pattern: "multiqc_report.html" + pattern: ".html" ontologies: [] data: - "*_data": @@ -74,12 +74,15 @@ output: pattern: "*_data" ontologies: [] versions: - - versions.yml: - type: file - description: File containing software versions - pattern: "versions.yml" - ontologies: - - edam: http://edamontology.org/format_3750 # YAML + - - ${task.process}: + type: string + description: The process the versions were collected from + - multiqc: + type: string + description: The tool name + - multiqc --version | sed "s/.* //g": + type: eval + description: The expression to obtain the version of the tool authors: - "@abhi18av" - "@bunop" @@ -90,3 +93,27 @@ maintainers: - "@bunop" - "@drpatelh" - "@jfy133" +containers: + conda: + linux_amd64: + lock_file: https://wave.seqera.io/v1alpha1/builds/bd-d58f60e4deb769bf_1/condalock + linux_arm64: + lock_file: https://wave.seqera.io/v1alpha1/builds/bd-193776baee4194db_1/condalock + docker: + linux_amd64: + build_id: bd-d58f60e4deb769bf_1 + name: community.wave.seqera.io/library/multiqc:1.32--d58f60e4deb769bf + scanId: sc-d76ac07493e940b4_6 + linux_arm64: + build_id: bd-193776baee4194db_1 + name: community.wave.seqera.io/library/multiqc:1.32--193776baee4194db + scanId: sc-86caded0bff8246e_3 + singularity: + linux_amd64: + build_id: bd-e649ffa094d1ef4a_1 + name: oras://community.wave.seqera.io/library/multiqc:1.32--e649ffa094d1ef4a + https: https://community.wave.seqera.io/v2/library/multiqc/blobs/sha256:8c6c120d559d7ee04c7442b61ad7cf5a9e8970be5feefb37d68eeaa60c1034eb + linux_arm64: + build_id: bd-aee0064f5570ef22_1 + name: oras://community.wave.seqera.io/library/multiqc:1.32--aee0064f5570ef22 + https: https://community.wave.seqera.io/v2/library/multiqc/blobs/sha256:f02c59ebf6e9a00aa954ee8188a4ecc5c743e18f40b9215a242f67606a00f9cf diff --git a/modules/nf-core/multiqc/tests/main.nf.test b/modules/nf-core/multiqc/tests/main.nf.test index 33316a7d..d1ae8b06 100644 --- a/modules/nf-core/multiqc/tests/main.nf.test +++ b/modules/nf-core/multiqc/tests/main.nf.test @@ -30,7 +30,33 @@ nextflow_process { { assert process.success }, { assert process.out.report[0] ==~ ".*/multiqc_report.html" }, { assert process.out.data[0] ==~ ".*/multiqc_data" }, - { assert snapshot(process.out.versions).match("multiqc_versions_single") } + { assert snapshot(process.out.findAll { key, val -> key.startsWith("versions")}).match() } + ) + } + + } + + test("sarscov2 single-end [fastqc] - custom prefix") { + config "./custom_prefix.config" + + when { + process { + """ + input[0] = Channel.of(file(params.modules_testdata_base_path + 'genomics/sarscov2/illumina/fastqc/test_fastqc.zip', checkIfExists: true)) + input[1] = [] + input[2] = [] + input[3] = [] + input[4] = [] + input[5] = [] + """ + } + } + + then { + assertAll( + { assert 
process.success }, + { assert process.out.report[0] ==~ ".*/custom_prefix.html" }, + { assert process.out.data[0] ==~ ".*/custom_prefix_data" } ) } @@ -56,7 +82,7 @@ nextflow_process { { assert process.success }, { assert process.out.report[0] ==~ ".*/multiqc_report.html" }, { assert process.out.data[0] ==~ ".*/multiqc_data" }, - { assert snapshot(process.out.versions).match("multiqc_versions_config") } + { assert snapshot(process.out.findAll { key, val -> key.startsWith("versions")}).match() } ) } } @@ -84,7 +110,7 @@ nextflow_process { { assert snapshot(process.out.report.collect { file(it).getName() } + process.out.data.collect { file(it).getName() } + process.out.plots.collect { file(it).getName() } + - process.out.versions ).match("multiqc_stub") } + process.out.findAll { key, val -> key.startsWith("versions")} ).match() } ) } diff --git a/modules/nf-core/multiqc/tests/main.nf.test.snap b/modules/nf-core/multiqc/tests/main.nf.test.snap index e1f36138..d72d35b7 100644 --- a/modules/nf-core/multiqc/tests/main.nf.test.snap +++ b/modules/nf-core/multiqc/tests/main.nf.test.snap @@ -1,41 +1,61 @@ { - "multiqc_versions_single": { + "sarscov2 single-end [fastqc]": { "content": [ - [ - "versions.yml:md5,8968b114a3e20756d8af2b80713bcc4f" - ] + { + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } ], "meta": { - "nf-test": "0.9.2", - "nextflow": "25.04.6" + "nf-test": "0.9.3", + "nextflow": "25.10.2" }, - "timestamp": "2025-09-08T20:57:36.139055243" + "timestamp": "2025-12-09T10:10:43.020315838" }, - "multiqc_stub": { + "sarscov2 single-end [fastqc] - stub": { "content": [ [ "multiqc_report.html", "multiqc_data", "multiqc_plots", - "versions.yml:md5,8968b114a3e20756d8af2b80713bcc4f" + { + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } ] ], "meta": { - "nf-test": "0.9.2", - "nextflow": "25.04.6" + "nf-test": "0.9.3", + "nextflow": "25.10.2" }, - "timestamp": "2025-09-08T20:59:15.142230631" + "timestamp": "2025-12-09T10:11:14.131950776" }, - "multiqc_versions_config": { + "sarscov2 single-end [fastqc] [config]": { "content": [ - [ - "versions.yml:md5,8968b114a3e20756d8af2b80713bcc4f" - ] + { + "versions": [ + [ + "MULTIQC", + "multiqc", + "1.33" + ] + ] + } ], "meta": { - "nf-test": "0.9.2", - "nextflow": "25.04.6" + "nf-test": "0.9.3", + "nextflow": "25.10.2" }, - "timestamp": "2025-09-08T20:58:29.629087066" + "timestamp": "2025-12-09T10:11:07.15692209" } -} +} \ No newline at end of file diff --git a/modules/nf-core/openms/decoydatabase/main.nf b/modules/nf-core/openms/decoydatabase/main.nf index 42494818..b9ce0fbe 100644 --- a/modules/nf-core/openms/decoydatabase/main.nf +++ b/modules/nf-core/openms/decoydatabase/main.nf @@ -12,7 +12,7 @@ process OPENMS_DECOYDATABASE { output: tuple val(meta), path("*.fasta"), emit: decoy_fasta - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -27,12 +27,6 @@ process OPENMS_DECOYDATABASE { -out ${prefix}.fasta \\ -threads $task.cpus \\ $args - - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -41,10 +35,5 @@ process OPENMS_DECOYDATABASE { """ touch ${prefix}.fasta - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 
2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/filefilter/main.nf b/modules/nf-core/openms/filefilter/main.nf index 55788339..fa1668fc 100644 --- a/modules/nf-core/openms/filefilter/main.nf +++ b/modules/nf-core/openms/filefilter/main.nf @@ -14,7 +14,7 @@ process OPENMS_FILEFILTER { tuple val(meta), path("*.mzML"), emit: mzml, optional: true tuple val(meta), path("*.featureXML"), emit: featurexml, optional: true tuple val(meta), path("*.consensusXML"), emit: consensusxml, optional: true - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -31,11 +31,6 @@ process OPENMS_FILEFILTER { -out ${prefix}.${suffix} \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -46,10 +41,5 @@ process OPENMS_FILEFILTER { """ touch ${prefix}.${suffix} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/idfilter/main.nf b/modules/nf-core/openms/idfilter/main.nf index c15927bf..708307c0 100644 --- a/modules/nf-core/openms/idfilter/main.nf +++ b/modules/nf-core/openms/idfilter/main.nf @@ -12,7 +12,7 @@ process OPENMS_IDFILTER { output: tuple val(meta), path("*.{idXML,consensusXML}"), emit: filtered - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -31,12 +31,7 @@ process OPENMS_IDFILTER { -out ${prefix}.${suffix} \\ -threads $task.cpus \\ $filter \\ - $args \\ - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS + $args """ stub: @@ -50,10 +45,5 @@ process OPENMS_IDFILTER { """ touch ${prefix}.${suffix} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/idmassaccuracy/main.nf b/modules/nf-core/openms/idmassaccuracy/main.nf index 819720e2..0681931f 100644 --- a/modules/nf-core/openms/idmassaccuracy/main.nf +++ b/modules/nf-core/openms/idmassaccuracy/main.nf @@ -13,7 +13,7 @@ process OPENMS_IDMASSACCURACY { output: tuple val(meta), path("*frag_mass_err.tsv") , emit: frag_err tuple val(meta), path("*prec_mass_err.tsv") , emit: prec_err, optional: true - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -29,11 +29,6 @@ process OPENMS_IDMASSACCURACY { -out_fragment ${prefix}_frag_mass_err.tsv \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - OpenMS: \$(FileInfo 2>&1 | grep -E 
'^Version(.*)' | cut -d ' ' -f 2 | cut -d '-' -f 1) - END_VERSIONS """ stub: @@ -43,10 +38,5 @@ process OPENMS_IDMASSACCURACY { """ touch ${prefix}_frag_mass_err.tsv touch ${prefix}_prec_mass_err.tsv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - OpenMS: \$(FileInfo 2>&1 | grep -E '^Version(.*)' | cut -d ' ' -f 2 | cut -d '-' -f 1) - END_VERSIONS """ } diff --git a/modules/nf-core/openms/idmerger/main.nf b/modules/nf-core/openms/idmerger/main.nf index 421a77f8..22b98df5 100644 --- a/modules/nf-core/openms/idmerger/main.nf +++ b/modules/nf-core/openms/idmerger/main.nf @@ -12,7 +12,7 @@ process OPENMS_IDMERGER { output: tuple val(meta), path("*.idXML"), emit: idxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -27,11 +27,6 @@ process OPENMS_IDMERGER { -out ${prefix}.idXML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -40,10 +35,5 @@ process OPENMS_IDMERGER { """ touch ${prefix}.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/idripper/main.nf b/modules/nf-core/openms/idripper/main.nf index e545b906..42aa71d1 100644 --- a/modules/nf-core/openms/idripper/main.nf +++ b/modules/nf-core/openms/idripper/main.nf @@ -12,7 +12,7 @@ process OPENMS_IDRIPPER { output: tuple val(meta), path("*.idXML"), emit: idxmls - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -27,11 +27,6 @@ process OPENMS_IDRIPPER { -out . 
\\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -41,10 +36,5 @@ process OPENMS_IDRIPPER { """ touch ${prefix}_1.idXML touch ${prefix}_2.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/idscoreswitcher/main.nf b/modules/nf-core/openms/idscoreswitcher/main.nf index 6911b2c5..cded386f 100644 --- a/modules/nf-core/openms/idscoreswitcher/main.nf +++ b/modules/nf-core/openms/idscoreswitcher/main.nf @@ -12,7 +12,7 @@ process OPENMS_IDSCORESWITCHER { output: tuple val(meta), path("*.idXML"), emit: idxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -28,11 +28,6 @@ process OPENMS_IDSCORESWITCHER { -out ${prefix}.idXML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -42,10 +37,5 @@ process OPENMS_IDSCORESWITCHER { """ touch ${prefix}.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/peakpickerhires/main.nf b/modules/nf-core/openms/peakpickerhires/main.nf index 9ac6e935..dbcf4957 100644 --- a/modules/nf-core/openms/peakpickerhires/main.nf +++ b/modules/nf-core/openms/peakpickerhires/main.nf @@ -12,7 +12,7 @@ process OPENMS_PEAKPICKERHIRES { output: tuple val(meta), path("*.mzML"), emit: mzml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -27,11 +27,6 @@ process OPENMS_PEAKPICKERHIRES { -out ${prefix}.mzML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -40,10 +35,5 @@ process OPENMS_PEAKPICKERHIRES { """ touch ${prefix}.mzML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openms/peptideindexer/main.nf b/modules/nf-core/openms/peptideindexer/main.nf index 1456a908..0864b0a6 100644 --- a/modules/nf-core/openms/peptideindexer/main.nf +++ b/modules/nf-core/openms/peptideindexer/main.nf @@ -12,7 +12,7 @@ process OPENMS_PEPTIDEINDEXER { output: tuple val(meta), path("*.idXML"), emit: indexed_idxml - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('openms'), eval("FileInfo --help 2>&1 | grep -E '^Version' | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//'"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -30,11 +30,6 @@ process 
OPENMS_PEPTIDEINDEXER { -out ${prefix}.idXML \\ -threads $task.cpus \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ stub: @@ -43,10 +38,5 @@ process OPENMS_PEPTIDEINDEXER { """ touch ${prefix}.idXML - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - openms: \$(echo \$(FileInfo --help 2>&1) | sed 's/^.*Version: //; s/-.*\$//' | sed 's/ -*//; s/ .*\$//') - END_VERSIONS """ } diff --git a/modules/nf-core/openmsthirdparty/cometadapter/main.nf b/modules/nf-core/openmsthirdparty/cometadapter/main.nf index 27fc32fd..0d331e3f 100644 --- a/modules/nf-core/openmsthirdparty/cometadapter/main.nf +++ b/modules/nf-core/openmsthirdparty/cometadapter/main.nf @@ -13,7 +13,8 @@ process OPENMSTHIRDPARTY_COMETADAPTER { output: tuple val(meta), path("*.idXML"), emit: idxml tuple val(meta), path("*.tsv") , emit: pin, optional: true - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('CometAdapter'), eval("CometAdapter 2>&1 | grep -E '^Version' | sed 's/Version: //g' | cut -d ' ' -f 1 | cut -d '-' -f 1"), emit: versions, topic: versions + tuple val("${task.process}"), val('Comet'), eval("comet 2>&1 | grep -E 'Comet version' | sed 's/Comet version //g' | tr -d '\"'"), emit: versions_1, topic: versions when: task.ext.when == null || task.ext.when @@ -29,13 +30,6 @@ process OPENMSTHIRDPARTY_COMETADAPTER { -out ${prefix}.idXML \\ -threads $task.cpus \\ $args - - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - CometAdapter: \$(CometAdapter 2>&1 | grep -E '^Version(.*)' | sed 's/Version: //g' | cut -d ' ' -f 1 | cut -d '-' -f 1) - Comet: \$(comet 2>&1 | grep -E "Comet version.*" | sed 's/Comet version //g' | sed 's/"//g') - END_VERSIONS """ stub: @@ -45,11 +39,5 @@ process OPENMSTHIRDPARTY_COMETADAPTER { """ touch ${prefix}.idXML touch ${prefix}_pin.tsv - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - CometAdapter: \$(CometAdapter 2>&1 | grep -E '^Version(.*)' | sed 's/Version: //g' | cut -d ' ' -f 1 | cut -d '-' -f 1) - Comet: \$(comet 2>&1 | grep -E "Comet version.*" | sed 's/Comet version //g' | sed 's/"//g') - END_VERSIONS """ } diff --git a/modules/nf-core/thermorawfileparser/main.nf b/modules/nf-core/thermorawfileparser/main.nf index 4f407e97..03f7c42d 100644 --- a/modules/nf-core/thermorawfileparser/main.nf +++ b/modules/nf-core/thermorawfileparser/main.nf @@ -12,7 +12,7 @@ process THERMORAWFILEPARSER { output: tuple val(meta), path("*.{mzML,mzML.gz,mgf,mgf.gz,parquet,parquet.gz}"), emit: spectra - path "versions.yml" , emit: versions + tuple val("${task.process}"), val('thermorawfileparser'), eval("ThermoRawFileParser.sh --version"), emit: versions, topic: versions when: task.ext.when == null || task.ext.when @@ -32,11 +32,6 @@ process THERMORAWFILEPARSER { -i $raw \\ -b ${prefix}.${suffix} \\ $args - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - thermorawfileparser: \$(ThermoRawFileParser.sh --version) - END_VERSIONS """ stub: @@ -51,10 +46,5 @@ process THERMORAWFILEPARSER { """ touch ${prefix}.${suffix} - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - thermorawfileparser: \$(ThermoRawFileParser.sh --version) - END_VERSIONS """ } diff --git a/nextflow.config b/nextflow.config index 97ebb856..724e9dc1 100644 --- a/nextflow.config +++ b/nextflow.config @@ -256,6 +256,7 @@ podman.registry = 'quay.io' singularity.registry = 'quay.io' charliecloud.registry = 'quay.io' + // Export these 
variables to prevent local Python/R libraries from conflicting with those in the container // The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container. // See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable. @@ -354,7 +355,7 @@ manifest { mainScript = 'main.nf' defaultBranch = 'master' nextflowVersion = '!>=25.04.0' - version = '3.1.0' + version = '3.2.0dev' doi = '10.1186/s13059-025-03763-8' } diff --git a/ro-crate-metadata.json b/ro-crate-metadata.json index 9484fb86..ff701c73 100644 --- a/ro-crate-metadata.json +++ b/ro-crate-metadata.json @@ -21,9 +21,9 @@ { "@id": "./", "@type": "Dataset", - "creativeWorkStatus": "Stable", - "datePublished": "2025-10-20T07:45:13+00:00", - "description": "

\n \n \n \"nf-core/mhcquant\"\n \n

\n\n[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new/nf-core/mhcquant)\n[![GitHub Actions CI Status](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml)\n[![GitHub Actions Linting Status](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/mhcquant/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.8427707-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.8427707)\n[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\n\n[![Nextflow](https://img.shields.io/badge/version-%E2%89%A525.04.0-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/)\n[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.4.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.4.1)\n[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\n[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\n[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\n[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/mhcquant)\n\n[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23mhcquant-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/mhcquant)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\n\n## Introduction\n\n**nfcore/mhcquant** is a best-practice bioinformatics pipeline to process data-dependent acquisition (DDA) immunopeptidomics data. This involves mass spectrometry-based identification and quantification of immunopeptides presented on major histocompatibility complex (MHC) molecules which mediate T cell immunosurveillance. Immunopeptidomics has central implications for clinical research, in the context of [T cell-centric immunotherapies](https://www.sciencedirect.com/science/article/pii/S1044532323000180).\n\nThe pipeline is based on the OpenMS C++ framework for computational mass spectrometry. Spectrum files (mzML/Thermo raw/Bruker tdf) serve as inputs and a database search (Comet) is performed based on a given input protein database. Peptide properties are predicted by MS\u00b2Rescore. FDR rescoring is applied using Percolator or Mokapot based on a competitive target-decoy approach. The pipeline supports both local FDR control (per sample-condition group) and global FDR control (across all samples). 
For label-free quantification, all input files undergo identification-based retention time alignment and targeted feature extraction matching ids between runs. The pipeline can also generate spectrum libraries suitable for DIA-based searches as well as computing consensus epitopes using epicore.\n\n![overview](assets/mhcquant_subway.png)\n\nThe pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\n\nOn release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/mhcquant/results).\n\n## Usage\n\n> [!NOTE]\n> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\n> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\n> with `-profile test` before running the workflow on actual data.\n\nFirst, prepare a samplesheet with your input data that looks as follows:\n\n`samplesheet.tsv`\n\n```tsv title=\"samplesheet.tsv\nID\tSample\tCondition\tReplicateFileName\n1\ttumor\ttreated\t/path/to/msrun1.raw|mzML|d\n2\ttumor\ttreated\t/path/to/msrun2.raw|mzML|d\n3\ttumor\tuntreated\t/path/to/msrun3.raw|mzML|d\n4\ttumor\tuntreated\t/path/to/msrun4.raw|mzML|d\n```\n\nEach row represents a mass spectrometry run in one of the formats: raw, RAW, mzML, mzML.gz, d, d.tar.gz, d.zip\n\nNow, you can run the pipeline using:\n\n```bash\nnextflow run nf-core/mhcquant \\\n -profile \\\n --input 'samplesheet.tsv' \\\n --fasta 'SWISSPROT_2020.fasta' \\\n --outdir ./results\n```\n\nOptional parameters for additional functionality:\n\n```bash\n# Enable quantification, global FDR and spectrum library generation, ion annotations, and consenus epitopes\nnextflow run nf-core/mhcquant \\\n --input 'samplesheet.tsv' \\\n --fasta 'SWISSPROT_2020.fasta' \\\n --annotate_ions \\\n --epicore \\\n --generate_speclib \\\n --global_fdr \\\n --quantify \\\n --outdir ./results \\\n -profile docker\n```\n\n> [!WARNING]\n> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).\n\nFor more details and further functionality, please refer to the [usage documentation](https://nf-co.re/mhcquant/usage) and the [parameter documentation](https://nf-co.re/mhcquant/parameters).\n\n## Pipeline summary\n\n### Default Steps\n\nBy default the pipeline currently performs identification of MHC class I peptides with HCD settings:\n\n- **Spectra Preparation**: Preparing spectra dependent on the input format (`PREPARE_SPECTRA` subworkflow)\n- **Database Preparation**: Creation of reversed decoy database (`DecoyDatabase`)\n- **Peptide Identification**: Identification of peptides in the MS/MS spectra (`CometAdapter`)\n- **Database Indexing**: Refreshes protein references for all peptide hits and adds target/decoy information (`PeptideIndexer`)\n- **Identification Merging**: Merges identification files with the same `Sample` and `Condition` label (`IDMerger`)\n- **Rescoring**: Feature prediction and peptide-spectrum-match rescoring (`RESCORE` subworkflow)\n - Prediction of retention times and MS2 intensities (`MS\u00b2Rescore`)\n - Extract PSM features for rescoring engines (`PSMFeatureExtractor`)\n - Peptide-spectrum-match rescoring using Percolator or Mokapot (`PercolatorAdapter`)\n - Filters peptide identification result according to configurable FDR threshold (`IDFilter`)\n- **Export**: Converts identification result to tab-separated files (`TextExporter`)\n\n### FDR Control Modes\n\nThe pipeline supports two FDR control strategies:\n\n- **Local FDR** (default): FDR control applied per `Sample` and `Condition` group\n- **Global FDR**: FDR control applied across all samples in the dataset (enable with `--global_fdr`)\n\n### Additional Steps\n\nAdditional functionality contained by the pipeline currently includes:\n\n#### Quantification (`QUANT` subworkflow)\n\nWhen enabled with `--quantify`, the pipeline performs label-free quantification:\n\n- **Alignment**: Corrects retention time distortions between runs (`MAP_ALIGNMENT` subworkflow)\n - Corrects retention time distortions between runs (`MapAlignerIdentification`)\n - Applies retention time transformations to runs (`MapRTTransformer`)\n- **Feature Processing**: Detects and processes features (`PROCESS_FEATURE` subworkflow)\n - Detects features in MS1 data based on peptide identifications (`FeatureFinderIdentification`)\n - Group corresponding features across label-free experiments (`FeatureLinkerUnlabeledKD`)\n - Resolves ambiguous annotations of features with peptide identifications (`IDConflictResolver`)\n\n#### Spectrum Library Generation (`SPECLIB` subworkflow)\n\nWhen enabled with `--generate_speclib`, the pipeline generates spectrum libraries suitable for DIA-based searches. 
Outputs one library per sample or a single library across all samples (if global FDR mode is enabled with `--global_fdr`).\n\n#### Ion Annotation (`IONANNOTATOR` subworkflow)\n\nThe pipeline annotates the final list of peptides with their respective ions and charges:\n\n- Annotates final list of peptides with their respective ions and charges (`IonAnnotator`)\n\n#### Output\n\n## Documentation\n\nTo see the the results of a test run with a full size dataset refer to the [results](https://nf-co.re/mhcquant/results) tab on the nf-core website pipeline page.\nFor more details about the output files and reports, please refer to the\n[output documentation](https://nf-co.re/mhcquant/output).\n\n1. [Nextflow installation](https://nf-co.re/usage/installation)\n2. Pipeline configuration\n - [Pipeline installation](https://nf-co.re/docs/usage/getting_started/offline)\n - [Adding your own system config](https://nf-co.re/usage/adding_own_config)\n3. [Running the pipeline](https://nf-co.re/mhcquant/docs/usage.md)\n - This includes tutorials, FAQs, and troubleshooting instructions\n4. [Output and how to interpret the results](https://nf-co.re/mhcquant/docs/output.md)\n\n## Credits\n\nnf-core/mhcquant was originally written by [Leon Bichmann](https://github.com/Leon-Bichmann) from the [Kohlbacher Lab](https://kohlbacherlab.org/). The pipeline was re-written in Nextflow DSL2 by [Marissa Dubbelaar](https://github.com/marissaDubbelaar) and was significantly improved by [Jonas Scheid](https://github.com/jonasscheid) and [Steffen Lemke](https://github.com/steffenlem) from [Peptide-based Immunotherapy](https://www.medizin.uni-tuebingen.de/en-de/peptid-basierte-immuntherapie) and [Quantitative Biology Center](https://uni-tuebingen.de/forschung/forschungsinfrastruktur/zentrum-fuer-quantitative-biologie-qbic/) in T\u00fcbingen.\n\nHelpful contributors:\n\n- [Lukas Heumos](https://github.com/Zethson)\n- [Alexander Peltzer](https://github.com/apeltzer)\n- [Maxime Garcia](https://github.com/maxulysse)\n- [Gisela Gabernet](https://github.com/ggabernet)\n- [Susanne Jodoin](https://github.com/SusiJo)\n- [Oskar Wacker](https://github.com/WackerO)\n- [Leon Kuchenbecker](https://github.com/lkuchenb)\n- [Phil Ewels](https://github.com/ewels)\n- [Christian Fufezan](https://github.com/fu)\n- [Sven Fillinger](https://github.com/sven1103)\n- [Kevin Menden](https://github.com/KevinMenden)\n- [Julia Graf](https://github.com/JuliaGraf)\n- [Jana Hoffmann](https://github.com/janaHoffmann1)\n\n## Contributions and Support\n\nIf you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\n\nFor further information or help, don't hesitate to get in touch on the [Slack `#mhcquant` channel](https://nfcore.slack.com/channels/mhcquant) (you can join with [this invite](https://nf-co.re/join/slack)).\n\n## Citations\n\nIf you use nf-core/mhcquant for your analysis, please cite the corresponding manuscript: [10.1186/s13059-025-03763-8](https://doi.org/10.1186/s13059-025-03763-8)\n\n> **MHCquant2 refines immunopeptidomics tumor antigen discovery**\n>\n> Jonas Scheid, Steffen Lemke, Naomi Hoenisch-Gravel, Anna Dengler, Timo Sachsenberg, Arthur Declerq, Ralf Gabriels, Jens Bauer, Marcel Wacker, Leon Bichmann, Lennart Martens, Marissa L. Dubbelaar, Sven Nahnsen & Juliane S. Walz\n>\n> _Genome Biology_ 2025 26 (1), 290. 
doi: [10.1021/acs.jproteome.9b00313](https://pubs.acs.org/doi/10.1021/acs.jproteome.9b00313)\n\n> **MHCquant: Automated and Reproducible Data Analysis for Immunopeptidomics**\n>\n> Leon Bichmann, Annika Nelde, Michael Ghosh, Lukas Heumos, Christopher Mohr, Alexander Peltzer, Leon Kuchenbecker, Timo Sachsenberg, Juliane S. Walz, Stefan Stevanovi\u0107, Hans-Georg Rammensee & Oliver Kohlbacher\n>\n> _Journal of Proteome Research_ 2019 18 (11), 3876-3884. doi: [10.1021/acs.jproteome.9b00313](https://pubs.acs.org/doi/10.1021/acs.jproteome.9b00313)\n\nAn extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\n\nYou can cite the `nf-core` publication as follows:\n\n> **The nf-core framework for community-curated bioinformatics pipelines.**\n>\n> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\n>\n> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\n\nIn addition, references of tools and data used in this pipeline are as follows:\n\n> **OpenMS framework**\n>\n> Pfeuffer J. et al, _Nat Methods_ 2024 Mar;21(3):365-367. doi: [0.1038/s41592-024-02197-7](https://www.nature.com/articles/s41592-024-02197-7).\n>\n> **Comet Search Engine**\n>\n> Eng J.K. et al, _J Am Soc Mass Spectrom._ 2015 Nov;26(11):1865-74. doi: [10.1007/s13361-015-1179-x](https://pubs.acs.org/doi/10.1007/s13361-015-1179-x).\n>\n> **Retention time prediction**\n>\n> Bouwmeester R. et al, _Nature Methods_ 2021 Oct;18(11):1363-1369. doi: [10.1038/s41592-021-01301-5](https://www.nature.com/articles/s41592-021-01301-5)\n>\n> **MS\u00b2 Peak intensity prediction**\n>\n> Declercq A. et al, _Nucleic Acids Res._ 2023 Jul 5;51(W1):W338-W342. doi: [10.1093/nar/gkad335](https://academic.oup.com/nar/article/51/W1/W338/7151340?login=false)\n>\n> **CCS prediction**\n>\n> Declercq A. et al _Journal of Proteome Research_ 2025 Feb 6. doi: [10.1021/acs.jproteome.4c00609](https://pubs.acs.org/doi/10.1021/acs.jproteome.4c00609)\n>\n> **MS\u00b2Rescore framework**\n>\n> Buur L. M. et al, \\_J Proteome Res. 2024 Mar 16. doi: [10.1021/acs.jproteome.3c00785](https://pubs.acs.org/doi/10.1021/acs.jproteome.3c00785)\n>\n> **Percolator**\n>\n> K\u00e4ll L. et al, _Nat Methods_ 2007 Nov;4(11):923-5. doi: [10.1038/nmeth1113](https://www.nature.com/articles/nmeth1113).\n>\n> **Identification based RT Alignment**\n>\n> Weisser H. et al, _J Proteome Res._ 2013 Apr 5;12(4):1628-44. doi: [10.1021/pr300992u](https://pubs.acs.org/doi/10.1021/pr300992u)\n>\n> **Targeted peptide quantification**\n>\n> Weisser H. et al, _J Proteome Res._ 2017 Aug 4;16(8):2964-2974. doi: [10.1021/acs.jproteome.7b00248](https://pubs.acs.org/doi/10.1021/acs.jproteome.7b00248)\n", + "creativeWorkStatus": "InProgress", + "datePublished": "2026-01-08T12:19:18+00:00", + "description": "

\n \n \n \"nf-core/mhcquant\"\n \n

\n\n[![Open in GitHub Codespaces](https://img.shields.io/badge/Open_In_GitHub_Codespaces-black?labelColor=grey&logo=github)](https://github.com/codespaces/new/nf-core/mhcquant)\n[![GitHub Actions CI Status](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/nf-test.yml)\n[![GitHub Actions Linting Status](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/mhcquant/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/mhcquant/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.8427707-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.8427707)\n[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\n\n[![Nextflow](https://img.shields.io/badge/version-%E2%89%A525.04.0-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/)\n[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.5.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.5.1)\n[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\n[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\n[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\n[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/mhcquant)\n\n[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23mhcquant-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/mhcquant)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\n\n## Introduction\n\n**nfcore/mhcquant** is a best-practice bioinformatics pipeline to process data-dependent acquisition (DDA) immunopeptidomics data. This involves mass spectrometry-based identification and quantification of immunopeptides presented on major histocompatibility complex (MHC) molecules which mediate T cell immunosurveillance. Immunopeptidomics has central implications for clinical research, in the context of [T cell-centric immunotherapies](https://www.sciencedirect.com/science/article/pii/S1044532323000180).\n\nThe pipeline is based on the OpenMS C++ framework for computational mass spectrometry. Spectrum files (mzML/Thermo raw/Bruker tdf) serve as inputs and a database search (Comet) is performed based on a given input protein database. Peptide properties are predicted by MS\u00b2Rescore. FDR rescoring is applied using Percolator or Mokapot based on a competitive target-decoy approach. 
The pipeline supports both local FDR control (per sample-condition group) and global FDR control (across all samples). For label-free quantification, all input files undergo identification-based retention time alignment and targeted feature extraction matching ids between runs. The pipeline can also generate spectrum libraries suitable for DIA-based searches as well as computing consensus epitopes using epicore.\n\n![overview](assets/mhcquant_subway.png)\n\nThe pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\n\nOn release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/mhcquant/results).\n\n## Usage\n\n> [!NOTE]\n> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\n> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\n> with `-profile test` before running the workflow on actual data.\n\nFirst, prepare a samplesheet with your input data that looks as follows:\n\n`samplesheet.tsv`\n\n```tsv title=\"samplesheet.tsv\nID\tSample\tCondition\tReplicateFileName\n1\ttumor\ttreated\t/path/to/msrun1.raw|mzML|d\n2\ttumor\ttreated\t/path/to/msrun2.raw|mzML|d\n3\ttumor\tuntreated\t/path/to/msrun3.raw|mzML|d\n4\ttumor\tuntreated\t/path/to/msrun4.raw|mzML|d\n```\n\nEach row represents a mass spectrometry run in one of the formats: raw, RAW, mzML, mzML.gz, d, d.tar.gz, d.zip\n\nNow, you can run the pipeline using:\n\n```bash\nnextflow run nf-core/mhcquant \\\n -profile \\\n --input 'samplesheet.tsv' \\\n --fasta 'SWISSPROT_2020.fasta' \\\n --outdir ./results\n```\n\nOptional parameters for additional functionality:\n\n```bash\n# Enable quantification, global FDR and spectrum library generation, ion annotations, and consenus epitopes\nnextflow run nf-core/mhcquant \\\n --input 'samplesheet.tsv' \\\n --fasta 'SWISSPROT_2020.fasta' \\\n --annotate_ions \\\n --epicore \\\n --generate_speclib \\\n --global_fdr \\\n --quantify \\\n --outdir ./results \\\n -profile docker\n```\n\n> [!WARNING]\n> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).\n\nFor more details and further functionality, please refer to the [usage documentation](https://nf-co.re/mhcquant/usage) and the [parameter documentation](https://nf-co.re/mhcquant/parameters).\n\n## Pipeline summary\n\n### Default Steps\n\nBy default the pipeline currently performs identification of MHC class I peptides with HCD settings:\n\n- **Spectra Preparation**: Preparing spectra dependent on the input format (`PREPARE_SPECTRA` subworkflow)\n- **Database Preparation**: Creation of reversed decoy database (`DecoyDatabase`)\n- **Peptide Identification**: Identification of peptides in the MS/MS spectra (`CometAdapter`)\n- **Database Indexing**: Refreshes protein references for all peptide hits and adds target/decoy information (`PeptideIndexer`)\n- **Identification Merging**: Merges identification files with the same `Sample` and `Condition` label (`IDMerger`)\n- **Rescoring**: Feature prediction and peptide-spectrum-match rescoring (`RESCORE` subworkflow)\n - Prediction of retention times and MS2 intensities (`MS\u00b2Rescore`)\n - Extract PSM features for rescoring engines (`PSMFeatureExtractor`)\n - Peptide-spectrum-match rescoring using Percolator or Mokapot (`PercolatorAdapter`)\n - Filters peptide identification result according to configurable FDR threshold (`IDFilter`)\n- **Export**: Converts identification result to tab-separated files (`TextExporter`)\n\n### FDR Control Modes\n\nThe pipeline supports two FDR control strategies:\n\n- **Local FDR** (default): FDR control applied per `Sample` and `Condition` group\n- **Global FDR**: FDR control applied across all samples in the dataset (enable with `--global_fdr`)\n\n### Additional Steps\n\nAdditional functionality contained by the pipeline currently includes:\n\n#### Quantification (`QUANT` subworkflow)\n\nWhen enabled with `--quantify`, the pipeline performs label-free quantification:\n\n- **Alignment**: Corrects retention time distortions between runs (`MAP_ALIGNMENT` subworkflow)\n - Corrects retention time distortions between runs (`MapAlignerIdentification`)\n - Applies retention time transformations to runs (`MapRTTransformer`)\n- **Feature Processing**: Detects and processes features (`PROCESS_FEATURE` subworkflow)\n - Detects features in MS1 data based on peptide identifications (`FeatureFinderIdentification`)\n - Group corresponding features across label-free experiments (`FeatureLinkerUnlabeledKD`)\n - Resolves ambiguous annotations of features with peptide identifications (`IDConflictResolver`)\n\n#### Spectrum Library Generation (`SPECLIB` subworkflow)\n\nWhen enabled with `--generate_speclib`, the pipeline generates spectrum libraries suitable for DIA-based searches. 
Outputs one library per sample or a single library across all samples (if global FDR mode is enabled with `--global_fdr`).\n\n#### Ion Annotation (`IONANNOTATOR` subworkflow)\n\nThe pipeline annotates the final list of peptides with their respective ions and charges:\n\n- Annotates final list of peptides with their respective ions and charges (`IonAnnotator`)\n\n#### Output\n\n## Documentation\n\nTo see the the results of a test run with a full size dataset refer to the [results](https://nf-co.re/mhcquant/results) tab on the nf-core website pipeline page.\nFor more details about the output files and reports, please refer to the\n[output documentation](https://nf-co.re/mhcquant/output).\n\n1. [Nextflow installation](https://nf-co.re/usage/installation)\n2. Pipeline configuration\n - [Pipeline installation](https://nf-co.re/docs/usage/getting_started/offline)\n - [Adding your own system config](https://nf-co.re/usage/adding_own_config)\n3. [Running the pipeline](https://nf-co.re/mhcquant/docs/usage.md)\n - This includes tutorials, FAQs, and troubleshooting instructions\n4. [Output and how to interpret the results](https://nf-co.re/mhcquant/docs/output.md)\n\n## Credits\n\nnf-core/mhcquant was originally written by [Leon Bichmann](https://github.com/Leon-Bichmann) from the [Kohlbacher Lab](https://kohlbacherlab.org/). The pipeline was re-written in Nextflow DSL2 by [Marissa Dubbelaar](https://github.com/marissaDubbelaar) and was significantly improved by [Jonas Scheid](https://github.com/jonasscheid) and [Steffen Lemke](https://github.com/steffenlem) from [Peptide-based Immunotherapy](https://www.medizin.uni-tuebingen.de/en-de/peptid-basierte-immuntherapie) and [Quantitative Biology Center](https://uni-tuebingen.de/forschung/forschungsinfrastruktur/zentrum-fuer-quantitative-biologie-qbic/) in T\u00fcbingen.\n\nHelpful contributors:\n\n- [Lukas Heumos](https://github.com/Zethson)\n- [Alexander Peltzer](https://github.com/apeltzer)\n- [Maxime Garcia](https://github.com/maxulysse)\n- [Gisela Gabernet](https://github.com/ggabernet)\n- [Susanne Jodoin](https://github.com/SusiJo)\n- [Oskar Wacker](https://github.com/WackerO)\n- [Leon Kuchenbecker](https://github.com/lkuchenb)\n- [Phil Ewels](https://github.com/ewels)\n- [Christian Fufezan](https://github.com/fu)\n- [Sven Fillinger](https://github.com/sven1103)\n- [Kevin Menden](https://github.com/KevinMenden)\n- [Julia Graf](https://github.com/JuliaGraf)\n- [Jana Hoffmann](https://github.com/janaHoffmann1)\n\n## Contributions and Support\n\nIf you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\n\nFor further information or help, don't hesitate to get in touch on the [Slack `#mhcquant` channel](https://nfcore.slack.com/channels/mhcquant) (you can join with [this invite](https://nf-co.re/join/slack)).\n\n## Citations\n\nIf you use nf-core/mhcquant for your analysis, please cite the corresponding manuscript: [10.1186/s13059-025-03763-8](https://doi.org/10.1186/s13059-025-03763-8)\n\n> **MHCquant2 refines immunopeptidomics tumor antigen discovery**\n>\n> Jonas Scheid, Steffen Lemke, Naomi Hoenisch-Gravel, Anna Dengler, Timo Sachsenberg, Arthur Declerq, Ralf Gabriels, Jens Bauer, Marcel Wacker, Leon Bichmann, Lennart Martens, Marissa L. Dubbelaar, Sven Nahnsen & Juliane S. Walz\n>\n> _Genome Biology_ 2025 26 (1), 290. 
doi: [10.1186/s13059-025-03763-8](https://doi.org/10.1186/s13059-025-03763-8)\n\n> **MHCquant: Automated and Reproducible Data Analysis for Immunopeptidomics**\n>\n> Leon Bichmann, Annika Nelde, Michael Ghosh, Lukas Heumos, Christopher Mohr, Alexander Peltzer, Leon Kuchenbecker, Timo Sachsenberg, Juliane S. Walz, Stefan Stevanovi\u0107, Hans-Georg Rammensee & Oliver Kohlbacher\n>\n> _Journal of Proteome Research_ 2019 18 (11), 3876-3884. doi: [10.1021/acs.jproteome.9b00313](https://pubs.acs.org/doi/10.1021/acs.jproteome.9b00313)\n\nAn extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\n\nYou can cite the `nf-core` publication as follows:\n\n> **The nf-core framework for community-curated bioinformatics pipelines.**\n>\n> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\n>\n> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\n\nIn addition, references for the tools and data used in this pipeline are as follows:\n\n> **OpenMS framework**\n>\n> Pfeuffer J. et al, _Nat Methods_ 2024 Mar;21(3):365-367. doi: [10.1038/s41592-024-02197-7](https://www.nature.com/articles/s41592-024-02197-7).\n>\n> **Comet Search Engine**\n>\n> Eng J.K. et al, _J Am Soc Mass Spectrom._ 2015 Nov;26(11):1865-74. doi: [10.1007/s13361-015-1179-x](https://doi.org/10.1007/s13361-015-1179-x).\n>\n> **Retention time prediction**\n>\n> Bouwmeester R. et al, _Nat Methods_ 2021 Oct;18(11):1363-1369. doi: [10.1038/s41592-021-01301-5](https://www.nature.com/articles/s41592-021-01301-5)\n>\n> **MS\u00b2 Peak intensity prediction**\n>\n> Declercq A. et al, _Nucleic Acids Res._ 2023 Jul 5;51(W1):W338-W342. doi: [10.1093/nar/gkad335](https://academic.oup.com/nar/article/51/W1/W338/7151340?login=false)\n>\n> **CCS prediction**\n>\n> Declercq A. et al, _J Proteome Res._ 2025 Feb 6. doi: [10.1021/acs.jproteome.4c00609](https://pubs.acs.org/doi/10.1021/acs.jproteome.4c00609)\n>\n> **MS\u00b2Rescore framework**\n>\n> Buur L. M. et al, _J Proteome Res._ 2024 Mar 16. doi: [10.1021/acs.jproteome.3c00785](https://pubs.acs.org/doi/10.1021/acs.jproteome.3c00785)\n>\n> **Percolator**\n>\n> K\u00e4ll L. et al, _Nat Methods_ 2007 Nov;4(11):923-5. doi: [10.1038/nmeth1113](https://www.nature.com/articles/nmeth1113).\n>\n> **Identification-based RT Alignment**\n>\n> Weisser H. et al, _J Proteome Res._ 2013 Apr 5;12(4):1628-44. doi: [10.1021/pr300992u](https://pubs.acs.org/doi/10.1021/pr300992u)\n>\n> **Targeted peptide quantification**\n>\n> Weisser H. et al, _J Proteome Res._ 2017 Aug 4;16(8):2964-2974. 
doi: [10.1021/acs.jproteome.7b00248](https://pubs.acs.org/doi/10.1021/acs.jproteome.7b00248)\n", "hasPart": [ { "@id": "main.nf" @@ -105,7 +105,7 @@ }, "mentions": [ { - "@id": "#3dbe415b-3d82-41eb-a11f-065f425f8a83" + "@id": "#9e4c34cf-5530-46b2-be64-79e63d572382" } ], "name": "nf-core/mhcquant" @@ -139,18 +139,18 @@ { "@id": "https://orcid.org/0000-0001-7135-0073" }, - { - "@id": "https://orcid.org/0000-0002-6503-2180" - }, { "@id": "https://orcid.org/0000-0002-8937-3457" }, { "@id": "https://orcid.org/0000-0002-5923-1343" + }, + { + "@id": "https://orcid.org/0000-0002-6503-2180" } ], "dateCreated": "", - "dateModified": "2025-10-20T07:45:13Z", + "dateModified": "2026-01-08T12:19:18Z", "dct:conformsTo": "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/", "keywords": [ "nf-core", @@ -187,10 +187,10 @@ }, "url": [ "https://github.com/nf-core/mhcquant", - "https://nf-co.re/mhcquant/3.1.0/" + "https://nf-co.re/mhcquant/dev/" ], "version": [ - "3.1.0" + "3.2.0dev" ] }, { @@ -206,11 +206,11 @@ "version": "!>=25.04.0" }, { - "@id": "#3dbe415b-3d82-41eb-a11f-065f425f8a83", + "@id": "#9e4c34cf-5530-46b2-be64-79e63d572382", "@type": "TestSuite", "instance": [ { - "@id": "#cd4e5431-8228-4f6a-a3c6-4b722e22ba0a" + "@id": "#b55a3b6b-29d0-41f1-a5dc-9c168ffc0064" } ], "mainEntity": { @@ -219,7 +219,7 @@ "name": "Test suite for nf-core/mhcquant" }, { - "@id": "#cd4e5431-8228-4f6a-a3c6-4b722e22ba0a", + "@id": "#b55a3b6b-29d0-41f1-a5dc-9c168ffc0064", "@type": "TestInstance", "name": "GitHub Actions workflow for testing nf-core/mhcquant", "resource": "repos/nf-core/mhcquant/actions/workflows/nf-test.yml", @@ -369,12 +369,6 @@ "email": "bichmann@informatik.uni-tuebingen.de", "name": "Leon Bichmann" }, - { - "@id": "https://orcid.org/0000-0002-6503-2180", - "@type": "Person", - "email": "apeltzer@users.noreply.github.com", - "name": "Alexander Peltzer" - }, { "@id": "https://orcid.org/0000-0002-8937-3457", "@type": "Person", @@ -386,6 +380,12 @@ "@type": "Person", "email": "43858870+jonasscheid@users.noreply.github.com", "name": "Jonas Scheid" + }, + { + "@id": "https://orcid.org/0000-0002-6503-2180", + "@type": "Person", + "email": "apeltzer@users.noreply.github.com", + "name": "Alexander Peltzer" } ] } \ No newline at end of file diff --git a/subworkflows/local/map_alignment/main.nf b/subworkflows/local/map_alignment/main.nf index 76725d0d..7ca36a31 100644 --- a/subworkflows/local/map_alignment/main.nf +++ b/subworkflows/local/map_alignment/main.nf @@ -18,7 +18,6 @@ workflow MAP_ALIGNMENT { // Compute group-wise alignment rt transformation OPENMS_MAPALIGNERIDENTIFICATION( ch_runs_to_be_aligned ) - ch_versions = ch_versions.mix(OPENMS_MAPALIGNERIDENTIFICATION.out.versions) // Join run specific trafoXMLs with meta information merge_meta_map @@ -32,7 +31,6 @@ workflow MAP_ALIGNMENT { // Align mzML files using trafoXMLs ch_trafos_mzmls = ch_mzml.join(ch_trafos) OPENMS_MAPRTTRANSFORMERMZML(ch_trafos_mzmls) - ch_versions = ch_versions.mix(OPENMS_MAPRTTRANSFORMERMZML.out.versions) // Align idXMLfiles using trafoXMLs ch_runs_to_be_aligned @@ -45,7 +43,6 @@ workflow MAP_ALIGNMENT { .set { ch_trafos_idxml } OPENMS_MAPRTTRANSFORMERIDXML(ch_trafos_idxml) - ch_versions = ch_versions.mix(OPENMS_MAPRTTRANSFORMERIDXML.out.versions) emit: versions = ch_versions diff --git a/subworkflows/local/prepare_spectra/main.nf b/subworkflows/local/prepare_spectra/main.nf index 6c199bf6..8079904a 100644 --- a/subworkflows/local/prepare_spectra/main.nf +++ b/subworkflows/local/prepare_spectra/main.nf @@ -36,14 +36,12 
@@ workflow PREPARE_SPECTRA { // Raw file conversion THERMORAWFILEPARSER(branched_ms_files.raw) - ch_versions = ch_versions.mix(THERMORAWFILEPARSER.out.versions) + // ch_versions = ch_versions.mix(THERMORAWFILEPARSER.out.versions) // Decompress timsTOF archive for data conversion UNTAR(branched_ms_files.d_tar) - ch_versions = ch_versions.mix(UNTAR.out.versions) UNZIP(branched_ms_files.d_zip) - ch_versions = ch_versions.mix(UNZIP.out.versions) ch_tdf_files = branched_ms_files.d .mix(UNTAR.out.untar, @@ -51,11 +49,10 @@ workflow PREPARE_SPECTRA { // timsTOF data conversion TDF2MZML(ch_tdf_files) - ch_versions = ch_versions.mix(TDF2MZML.out.versions) // Gunzip mzML files GUNZIP(branched_ms_files.mzml_gz) - ch_versions = ch_versions.mix(GUNZIP.out.versions) + // ch_versions = ch_versions.mix(GUNZIP.out.versions) // Initialize channel for ms files that do not need to be converted ch_ms_files = branched_ms_files.mzml .mix(GUNZIP.out.gunzip, @@ -65,7 +62,7 @@ workflow PREPARE_SPECTRA { // Optional: Run Peak Picking as Preprocessing if (params.run_centroidisation) { OPENMS_PEAKPICKERHIRES(ch_ms_files) - ch_versions = ch_versions.mix(OPENMS_PEAKPICKERHIRES.out.versions) + // ch_versions = ch_versions.mix(OPENMS_PEAKPICKERHIRES.out.versions) ch_mzml_file = OPENMS_PEAKPICKERHIRES.out.mzml } else { ch_mzml_file = ch_ms_files diff --git a/subworkflows/local/process_feature/main.nf b/subworkflows/local/process_feature/main.nf index 8156a5f2..b9459a2c 100644 --- a/subworkflows/local/process_feature/main.nf +++ b/subworkflows/local/process_feature/main.nf @@ -18,15 +18,12 @@ workflow PROCESS_FEATURE { .map { meta, featurexml -> [ groupKey([id: "${meta.sample}_${meta.condition}"], meta.group_count), featurexml] } .groupTuple() .set { ch_features_grouped } - ch_versions = ch_versions.mix(OPENMS_FEATUREFINDERIDENTIFICATION.out.versions) // Link extracted features OPENMS_FEATURELINKERUNLABELEDKD(ch_features_grouped) - ch_versions = ch_versions.mix(OPENMS_FEATURELINKERUNLABELEDKD.out.versions) // Resolve conflicting ids matching to the same feature OPENMS_IDCONFLICTRESOLVER(OPENMS_FEATURELINKERUNLABELEDKD.out.consensusxml) - ch_versions = ch_versions.mix(OPENMS_IDCONFLICTRESOLVER.out.versions) emit: // Define the information that is returned by this workflow diff --git a/subworkflows/local/quant/main.nf b/subworkflows/local/quant/main.nf index d9202d8d..7c6824ed 100644 --- a/subworkflows/local/quant/main.nf +++ b/subworkflows/local/quant/main.nf @@ -38,12 +38,12 @@ workflow QUANT { .map { meta -> [[spectra:meta.spectra], meta]} ) .map { spectra, idxmls, meta -> [meta, idxmls] } .set { ch_ripped_idxml } - ch_versions = ch_versions.mix(OPENMS_IDRIPPER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDRIPPER.out.versions) // Switch to xcorr for filtering since q-values are set to 1 with peptide-level-fdr if (params.fdr_level == 'peptide_level_fdrs'){ ch_runs_score_switched = OPENMS_IDSCORESWITCHER( ch_ripped_idxml ).idxml - ch_versions = ch_versions.mix(OPENMS_IDSCORESWITCHER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDSCORESWITCHER.out.versions) } else { ch_runs_score_switched = ch_ripped_idxml } @@ -61,7 +61,7 @@ workflow QUANT { .map { meta, idxml -> [ groupKey([id:"${meta.sample}_${meta.condition}"], meta.group_count), idxml] } .groupTuple() .set { ch_runs_to_be_aligned } - ch_versions = ch_versions.mix(OPENMS_IDFILTER_QUANT.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDFILTER_QUANT.out.versions) // Align retention times of runs MAP_ALIGNMENT( @@ -75,7 +75,7 @@ workflow QUANT 
{ OPENMS_IDMERGER_QUANT( MAP_ALIGNMENT.out.aligned_idxml .map { meta, aligned_idxml -> [ groupKey([id: "${meta.sample}_${meta.condition}"], meta.group_count), aligned_idxml] } .groupTuple()) - ch_versions = ch_versions.mix(OPENMS_IDMERGER_QUANT.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDMERGER_QUANT.out.versions) // Manipulate channels such that we end up with : [meta, mzml, run_idxml, merged_runs_idxml] MAP_ALIGNMENT.out.aligned_mzml @@ -92,7 +92,6 @@ workflow QUANT { ch_versions = ch_versions.mix(PROCESS_FEATURE.out.versions) OPENMS_MZTABEXPORTER(PROCESS_FEATURE.out.consensusxml) - ch_versions = ch_versions.mix(OPENMS_MZTABEXPORTER.out.versions) emit: consensusxml = PROCESS_FEATURE.out.consensusxml diff --git a/subworkflows/local/rescore/main.nf b/subworkflows/local/rescore/main.nf index 564a6b0c..aca93aff 100644 --- a/subworkflows/local/rescore/main.nf +++ b/subworkflows/local/rescore/main.nf @@ -32,7 +32,6 @@ workflow RESCORE { // Compute features via ms2rescore MS2RESCORE(ch_merged_runs) - ch_versions = ch_versions.mix(MS2RESCORE.out.versions) if (params.rescoring_engine == 'mokapot') { log.warn "The rescoring engine is set to mokapot. This rescoring engine currently only supports psm-level-fdr via ms2rescore." @@ -41,22 +40,20 @@ workflow RESCORE { } // Switch comet e-value to mokapot q-value OPENMS_IDSCORESWITCHER(MS2RESCORE.out.idxml) - ch_versions = ch_versions.mix(OPENMS_IDSCORESWITCHER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDSCORESWITCHER.out.versions) ch_rescored_runs = OPENMS_IDSCORESWITCHER.out.idxml // Filter by mokapot q-value OPENMS_IDFILTER_Q_VALUE(ch_rescored_runs.map {group_meta, idxml -> [group_meta, idxml, []]}) - ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE.out.versions) ch_filter_q_value = OPENMS_IDFILTER_Q_VALUE.out.filtered } else { // Extract PSM features for Percolator OPENMS_PSMFEATUREEXTRACTOR(MS2RESCORE.out.idxml.join(MS2RESCORE.out.feature_names)) - ch_versions = ch_versions.mix(OPENMS_PSMFEATUREEXTRACTOR.out.versions) // Run Percolator with local FDR OPENMS_PERCOLATORADAPTER(OPENMS_PSMFEATUREEXTRACTOR.out.idxml) - ch_versions = ch_versions.mix(OPENMS_PERCOLATORADAPTER.out.versions) ch_multiqc_files = ch_multiqc_files.mix(OPENMS_PERCOLATORADAPTER.out.feature_weights.map{ meta, feature_weights -> feature_weights }) ch_pout = OPENMS_PERCOLATORADAPTER.out.idxml @@ -68,7 +65,7 @@ workflow RESCORE { ch_rescored_runs = OPENMS_PERCOLATORADAPTER_GLOBAL.out.idxml // Filter by global percolator q-value OPENMS_IDFILTER_Q_VALUE_GLOBAL(ch_rescored_runs.map {id, idxml -> [id, idxml, []]}) - ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE_GLOBAL.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE_GLOBAL.out.versions) // Backfilter sample_condition runs according to global FDR OPENMS_IDFILTER_GLOBAL(ch_pout.combine(OPENMS_IDFILTER_Q_VALUE_GLOBAL.out.filtered.map{ it[1] })) ch_filter_q_value = OPENMS_IDFILTER_GLOBAL.out.filtered @@ -79,7 +76,7 @@ workflow RESCORE { ch_rescored_runs = ch_pout // Filter by percolator q-value OPENMS_IDFILTER_Q_VALUE(ch_rescored_runs.map {group_meta, idxml -> [group_meta, idxml, []]}) - ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDFILTER_Q_VALUE.out.versions) ch_filter_q_value = OPENMS_IDFILTER_Q_VALUE.out.filtered } } diff --git a/subworkflows/local/speclib/main.nf b/subworkflows/local/speclib/main.nf index dad8aa0f..cce8741b 100644 --- 
a/subworkflows/local/speclib/main.nf +++ b/subworkflows/local/speclib/main.nf @@ -28,7 +28,6 @@ workflow SPECLIB { // Convert psms and spectra to pickle files EASYPQP_CONVERT(fdrfiltered_comet_idxml.join(mzml), unimod) - ch_versions = ch_versions.mix(EASYPQP_CONVERT.out.versions) EASYPQP_CONVERT.out.psmpkl .map { meta, psmpkl -> [groupKey([id: "${meta.sample}_${meta.condition}"], meta.group_count), psmpkl] } @@ -41,7 +40,6 @@ workflow SPECLIB { // Generate spectrum library for each sample-condition pair EASYPQP_LIBRARY(ch_psmpkl.join(ch_peakpkl)) - ch_versions = ch_versions.mix(EASYPQP_LIBRARY.out.versions) // Generate spectrum library for all MSruns in the samplesheet if (params.global_fdr) { diff --git a/subworkflows/local/utils_nfcore_mhcquant_pipeline/main.nf b/subworkflows/local/utils_nfcore_mhcquant_pipeline/main.nf index de3353fc..ada8bd4d 100644 --- a/subworkflows/local/utils_nfcore_mhcquant_pipeline/main.nf +++ b/subworkflows/local/utils_nfcore_mhcquant_pipeline/main.nf @@ -39,7 +39,7 @@ workflow PIPELINE_INITIALISATION { main: - ch_versions = Channel.empty() + ch_versions = channel.empty() // // Print version and exit if required and dump pipeline parameters to JSON file @@ -64,7 +64,7 @@ workflow PIPELINE_INITIALISATION { \033[0;35m nf-core/mhcquant ${workflow.manifest.version}\033[0m -\033[2m----------------------------------------------------\033[0m- """ - after_text = """${workflow.manifest.doi ? "\n* The pipeline\n" : ""}${workflow.manifest.doi.tokenize(",").collect { " https://doi.org/${it.trim().replace('https://doi.org/','')}"}.join("\n")}${workflow.manifest.doi ? "\n" : ""} + after_text = """${workflow.manifest.doi ? "\n* The pipeline\n" : ""}${workflow.manifest.doi.tokenize(",").collect { doi -> " https://doi.org/${doi.trim().replace('https://doi.org/','')}"}.join("\n")}${workflow.manifest.doi ? 
"\n" : ""} * The nf-core framework https://doi.org/10.1038/s41587-020-0439-x @@ -96,7 +96,7 @@ workflow PIPELINE_INITIALISATION { // Create channel from input file provided through params.input // - Channel + channel .fromList(samplesheetToList(params.input, "${projectDir}/assets/schema_input.json")) .map { meta, file, fasta -> [meta.subMap('sample','condition'), meta, file, fasta] } .tap { ch_input } diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/main.nf b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf index bfd25876..2f30e9a4 100644 --- a/subworkflows/nf-core/utils_nfcore_pipeline/main.nf +++ b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf @@ -98,7 +98,7 @@ def workflowVersionToYAML() { // Get channel of software versions used in pipeline in YAML format // def softwareVersionsToYAML(ch_versions) { - return ch_versions.unique().map { version -> processVersionsFromYAML(version) }.unique().mix(Channel.of(workflowVersionToYAML())) + return ch_versions.unique().map { version -> processVersionsFromYAML(version) }.unique().mix(channel.of(workflowVersionToYAML())) } // diff --git a/tests/.nftignore b/tests/.nftignore index 53b36346..b794e87c 100644 --- a/tests/.nftignore +++ b/tests/.nftignore @@ -1,5 +1,4 @@ .DS_Store -multiqc/multiqc_data/fastqc_top_overrepresented_sequences_table.txt multiqc/multiqc_data/multiqc.parquet multiqc/multiqc_data/multiqc.log multiqc/multiqc_data/multiqc_data.json diff --git a/tests/default.nf.test.snap b/tests/default.nf.test.snap index 85ba67fb..a6f7b857 100644 --- a/tests/default.nf.test.snap +++ b/tests/default.nf.test.snap @@ -3,11 +3,11 @@ "content": [ { "MS2RESCORE": { - "MS\u00b2Rescore": "3.1.5)" + "MS2Rescore": "3.1.5" }, "OPENMSTHIRDPARTY_COMETADAPTER": { - "CometAdapter": "3.4.1", - "Comet": "2024.01 rev. 1" + "Comet": "2024.01 rev. 1", + "CometAdapter": "3.4.1" }, "OPENMS_DECOYDATABASE": { "openms": "3.4.1" @@ -16,7 +16,7 @@ "openms": "3.4.1" }, "OPENMS_IDMASSACCURACY": { - "OpenMS": "3.4.1" + "openms": "3.4.1" }, "OPENMS_IDMERGER": { "openms": "3.4.1" @@ -41,7 +41,7 @@ "pyopenms": "3.4.1" }, "Workflow": { - "nf-core/mhcquant": "v3.1.0" + "nf-core/mhcquant": "v3.2.0dev" } }, [ @@ -384,6 +384,6 @@ "nf-test": "0.9.3", "nextflow": "25.04.8" }, - "timestamp": "2025-10-28T21:36:13.560396546" + "timestamp": "2026-01-09T09:50:11.865149581" } } \ No newline at end of file diff --git a/tests/ionannotator.nf.test.snap b/tests/ionannotator.nf.test.snap index b0fd3915..dd848f79 100644 --- a/tests/ionannotator.nf.test.snap +++ b/tests/ionannotator.nf.test.snap @@ -4,11 +4,11 @@ 22, { "MS2RESCORE": { - "MS\u00b2Rescore": "3.1.5)" + "MS2Rescore": "3.1.5" }, "OPENMSTHIRDPARTY_COMETADAPTER": { - "CometAdapter": "3.4.1", - "Comet": "2024.01 rev. 1" + "Comet": "2024.01 rev. 
1", + "CometAdapter": "3.4.1" }, "OPENMS_DECOYDATABASE": { "openms": "3.4.1" @@ -17,7 +17,7 @@ "openms": "3.4.1" }, "OPENMS_IDMASSACCURACY": { - "OpenMS": "3.4.1" + "openms": "3.4.1" }, "OPENMS_IDMERGER": { "openms": "3.4.1" @@ -45,7 +45,7 @@ "pyopenms": "3.4.1" }, "Workflow": { - "nf-core/mhcquant": "v3.1.0" + "nf-core/mhcquant": "v3.2.0dev" } }, [ @@ -405,6 +405,6 @@ "nf-test": "0.9.3", "nextflow": "25.04.8" }, - "timestamp": "2025-10-28T21:53:36.826911285" + "timestamp": "2026-01-09T10:04:59.923025945" } } \ No newline at end of file diff --git a/tests/mokapot.nf.test.snap b/tests/mokapot.nf.test.snap index 4acd6735..045eaca7 100644 --- a/tests/mokapot.nf.test.snap +++ b/tests/mokapot.nf.test.snap @@ -4,11 +4,11 @@ 20, { "MS2RESCORE": { - "MS\u00b2Rescore": "3.1.5)" + "MS2Rescore": "3.1.5" }, "OPENMSTHIRDPARTY_COMETADAPTER": { - "CometAdapter": "3.4.1", - "Comet": "2024.01 rev. 1" + "Comet": "2024.01 rev. 1", + "CometAdapter": "3.4.1" }, "OPENMS_DECOYDATABASE": { "openms": "3.4.1" @@ -17,7 +17,7 @@ "openms": "3.4.1" }, "OPENMS_IDMASSACCURACY": { - "OpenMS": "3.4.1" + "openms": "3.4.1" }, "OPENMS_IDMERGER": { "openms": "3.4.1" @@ -38,7 +38,7 @@ "pyopenms": "3.4.1" }, "Workflow": { - "nf-core/mhcquant": "v3.1.0" + "nf-core/mhcquant": "v3.2.0dev" } }, [ @@ -112,6 +112,6 @@ "nf-test": "0.9.3", "nextflow": "25.04.8" }, - "timestamp": "2025-10-28T22:04:22.998726246" + "timestamp": "2026-01-09T10:16:10.560078316" } } \ No newline at end of file diff --git a/tests/speclib.nf.test.snap b/tests/speclib.nf.test.snap index 4864a01a..9d4e26c3 100644 --- a/tests/speclib.nf.test.snap +++ b/tests/speclib.nf.test.snap @@ -10,20 +10,23 @@ "easypqp": "0.1.53" }, "MS2RESCORE": { - "MS\u00b2Rescore": "3.1.5)" + "MS2Rescore": "3.1.5" }, "OPENMSTHIRDPARTY_COMETADAPTER": { - "CometAdapter": "3.4.1", - "Comet": "2024.01 rev. 1" + "Comet": "2024.01 rev. 
1", + "CometAdapter": "3.4.1" }, "OPENMS_DECOYDATABASE": { "openms": "3.4.1" }, + "OPENMS_IDFILTER_FOR_SPECLIB": { + "openms": "3.4.1" + }, "OPENMS_IDFILTER_Q_VALUE": { "openms": "3.4.1" }, "OPENMS_IDMASSACCURACY": { - "OpenMS": "3.4.1" + "openms": "3.4.1" }, "OPENMS_IDMERGER": { "openms": "3.4.1" @@ -48,7 +51,7 @@ "pyopenms": "3.4.1" }, "Workflow": { - "nf-core/mhcquant": "v3.1.0" + "nf-core/mhcquant": "v3.2.0dev" } }, [ @@ -393,6 +396,6 @@ "nf-test": "0.9.3", "nextflow": "25.04.8" }, - "timestamp": "2025-10-28T22:13:30.098880662" + "timestamp": "2026-01-09T10:24:20.32429559" } } \ No newline at end of file diff --git a/workflows/mhcquant.nf b/workflows/mhcquant.nf index dad8d043..336a6ad1 100644 --- a/workflows/mhcquant.nf +++ b/workflows/mhcquant.nf @@ -62,13 +62,13 @@ workflow MHCQUANT { // Prepare spectra files (Decompress archives, convert to mzML, centroid if specified) PREPARE_SPECTRA(ch_samplesheet) - ch_versions = ch_versions.mix(PREPARE_SPECTRA.out.versions) + // Decoy Database creation // Decoy Database creation if (!params.skip_decoy_generation) { // Generate reversed decoy database OPENMS_DECOYDATABASE(ch_fasta) - ch_versions = ch_versions.mix(OPENMS_DECOYDATABASE.out.versions) + // ch_versions = ch_versions.mix(OPENMS_DECOYDATABASE.out.versions) ch_decoy_db = OPENMS_DECOYDATABASE.out.decoy_fasta } else { ch_decoy_db = ch_fasta @@ -77,7 +77,7 @@ workflow MHCQUANT { // Optionally clean up mzML files if (params.filter_mzml){ OPENMS_FILEFILTER(PREPARE_SPECTRA.out.mzml) - ch_versions = ch_versions.mix(OPENMS_FILEFILTER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_FILEFILTER.out.versions) ch_clean_mzml_file = OPENMS_FILEFILTER.out.mzml } else { ch_clean_mzml_file = PREPARE_SPECTRA.out.mzml @@ -85,7 +85,6 @@ workflow MHCQUANT { // Compute MS1 TICs for QC PYOPENMS_CHROMATOGRAMEXTRACTOR(ch_clean_mzml_file) - ch_versions = ch_versions.mix(PYOPENMS_CHROMATOGRAMEXTRACTOR.out.versions) ch_multiqc_files = ch_multiqc_files.mix(PYOPENMS_CHROMATOGRAMEXTRACTOR.out.csv.map{ meta, mzml -> mzml }) // Prepare the comet input channel with global fasta or per-sample_condition fasta @@ -98,7 +97,7 @@ workflow MHCQUANT { // Run comet database search and index decoy and target hits OPENMSTHIRDPARTY_COMETADAPTER(ch_comet_in) - ch_versions = ch_versions.mix(OPENMSTHIRDPARTY_COMETADAPTER.out.versions) + // ch_versions = ch_versions.mix(OPENMSTHIRDPARTY_COMETADAPTER.out.versions) // Prepare the peptideindexer channel with global fasta or per-sample_condition fasta ch_peptideindexer_in = params.fasta ? @@ -109,11 +108,11 @@ workflow MHCQUANT { .map { groupKey, meta, idxml, fasta -> [meta, idxml, fasta] } OPENMS_PEPTIDEINDEXER(ch_peptideindexer_in) - ch_versions = ch_versions.mix(OPENMS_PEPTIDEINDEXER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_PEPTIDEINDEXER.out.versions) // Compute mass errors for multiQC report OPENMS_IDMASSACCURACY(PREPARE_SPECTRA.out.mzml.join(OPENMS_PEPTIDEINDEXER.out.indexed_idxml)) - ch_versions = ch_versions.mix(OPENMS_IDMASSACCURACY.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDMASSACCURACY.out.versions) ch_multiqc_files = ch_multiqc_files.mix(OPENMS_IDMASSACCURACY.out.frag_err.map{ meta, frag_err -> frag_err }) // Save indexed runs for later use to keep meta-run information. 
Sort based on file id @@ -129,7 +128,7 @@ workflow MHCQUANT { // Merge aligned idXMLfiles OPENMS_IDMERGER(ch_runs_to_merge) - ch_versions = ch_versions.mix(OPENMS_IDMERGER.out.versions) + // ch_versions = ch_versions.mix(OPENMS_IDMERGER.out.versions) // Run MS2Rescore ch_clean_mzml_file @@ -143,7 +142,6 @@ workflow MHCQUANT { // SUBWORKFLOW: RESCORE WITH MOKKAPOT OR PERCOLATOR AND FILTER BY Q-VALUE ON LOCAL/GLOBAL FDR // RESCORE( ch_rescore_in, ch_multiqc_files ) - ch_versions = ch_versions.mix(RESCORE.out.versions) ch_multiqc_files = ch_multiqc_files.mix(RESCORE.out.multiqc_files) // GENERATE SPECTRUM LIBRARY @@ -164,7 +162,6 @@ workflow MHCQUANT { // SUBWORKFLOW: SPECLIB // SPECLIB(ch_fdrfilter_comet_idxml_filtered, ch_clean_mzml_file) - ch_versions = ch_versions.mix(SPECLIB.out.versions) } // @@ -172,7 +169,6 @@ workflow MHCQUANT { // if (params.quantify) { QUANT(merge_meta_map, RESCORE.out.rescored_runs, RESCORE.out.fdr_filtered, ch_clean_mzml_file) - ch_versions = ch_versions.mix(QUANT.out.versions) ch_output = QUANT.out.consensusxml } else { ch_output = RESCORE.out.fdr_filtered @@ -188,12 +184,10 @@ workflow MHCQUANT { // Annotate spectra with ion fragmentation information PYOPENMS_IONANNOTATOR( ch_ion_annotator_input ) - ch_versions = ch_versions.mix(PYOPENMS_IONANNOTATOR.out.versions) } // Prepare for check if file is empty OPENMS_TEXTEXPORTER(ch_output) - ch_versions = ch_versions.mix(OPENMS_TEXTEXPORTER.out.versions) // Return an error message when there is only a header present in the document OPENMS_TEXTEXPORTER.out.tsv.map { meta, tsv -> if (tsv.size() < 130) { @@ -203,14 +197,12 @@ workflow MHCQUANT { // Process the tsv file to facilitate visualization with MultiQC SUMMARIZE_RESULTS(OPENMS_TEXTEXPORTER.out.tsv) - ch_versions = ch_versions.mix(SUMMARIZE_RESULTS.out.versions) // // EPICORE // if (params.epicore) { EPICORE(ch_fasta.map{ it.last()}, SUMMARIZE_RESULTS.out.epicore_input) - ch_versions = ch_versions.mix(EPICORE.out.versions) ch_multiqc_files = ch_multiqc_files.mix( EPICORE.out.length_dist, EPICORE.out.intensity_hist @@ -233,7 +225,25 @@ workflow MHCQUANT { // // Collate and save software versions // - softwareVersionsToYAML(ch_versions) + def topic_versions = Channel.topic("versions") + .distinct() + .branch { entry -> + versions_file: entry instanceof Path + versions_tuple: true + } + + def topic_versions_string = topic_versions.versions_tuple + .map { process, tool, version -> + [ process[process.lastIndexOf(':')+1..-1], " ${tool}: ${version}" ] + } + .groupTuple(by:0) + .map { process, tool_versions -> + tool_versions.unique().sort() + "${process}:\n${tool_versions.join('\n')}" + } + + softwareVersionsToYAML(ch_versions.mix(topic_versions.versions_file)) + .mix(topic_versions_string) .collectFile( storeDir: "${params.outdir}/pipeline_info", name: 'nf_core_' + 'mhcquant_software_' + 'mqc_' + 'versions.yml', @@ -244,24 +254,24 @@ workflow MHCQUANT { // // MODULE: MultiQC // - ch_multiqc_config = Channel.fromPath( + ch_multiqc_config = channel.fromPath( "$projectDir/assets/multiqc_config.yml", checkIfExists: true) ch_multiqc_custom_config = params.multiqc_config ? - Channel.fromPath(params.multiqc_config, checkIfExists: true) : - Channel.empty() + channel.fromPath(params.multiqc_config, checkIfExists: true) : + channel.empty() ch_multiqc_logo = params.multiqc_logo ? 
- Channel.fromPath(params.multiqc_logo, checkIfExists: true) : - Channel.empty() + channel.fromPath(params.multiqc_logo, checkIfExists: true) : + channel.empty() summary_params = paramsSummaryMap( workflow, parameters_schema: "nextflow_schema.json") - ch_workflow_summary = Channel.value(paramsSummaryMultiqc(summary_params)) + ch_workflow_summary = channel.value(paramsSummaryMultiqc(summary_params)) ch_multiqc_files = ch_multiqc_files.mix( ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml')) ch_multiqc_custom_methods_description = params.multiqc_methods_description ? file(params.multiqc_methods_description, checkIfExists: true) : file("$projectDir/assets/methods_description_template.yml", checkIfExists: true) - ch_methods_description = Channel.value( + ch_methods_description = channel.value( methodsDescriptionText(ch_multiqc_custom_methods_description)) ch_multiqc_files = ch_multiqc_files.mix(ch_collated_versions)