diff --git a/build.ps1 b/build.ps1 index c5498ee..d10ea7c 100644 --- a/build.ps1 +++ b/build.ps1 @@ -1,39 +1,9 @@ .\Scripts\activate pyinstaller --paths=python python/eeg2bids.py -F ` --name eeg2bids-service-windows ` ---add-data 'python/libs/bids_validator/rules/top_level_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/rules/associated_data_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/rules/file_level_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/rules/phenotypic_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/rules/session_level_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/rules/subject_level_rules.json;./bids_validator/rules' ` ---add-data 'python/libs/bids_validator/tsv/non_custom_columns.json;./bids_validator/tsv' ` ---add-data 'python/libs/mne/channels/data/montages/EGI_256.csd;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-128.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-129.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-256.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-257.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-32.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/artinis-brite23.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/artinis-octamon.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi128.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi16.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi160.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi256.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi32.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/biosemi64.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/easycap-M1.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/easycap-M10.txt;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/mgh60.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/mgh70.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_1005.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_1020.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_alphabetic.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_postfixed.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_prefixed.elc;./mne/channels/data/montages' ` ---add-data 'python/libs/mne/channels/data/montages/standard_primed.elc;./mne/channels/data/montages' ` +--add-data 
'python/libs/mne;./mne' ` +--add-data 'python/libs/mne_bids;./mne_bids' ` +--add-data 'python/libs/bids_validator;./bids_validator' ` --hidden-import=eventlet.hubs.epolls ` --hidden-import=eventlet.hubs.kqueue ` --hidden-import=eventlet.hubs.selects ` diff --git a/build.sh b/build.sh index fed0ebf..7edfd07 100755 --- a/build.sh +++ b/build.sh @@ -1,13 +1,9 @@ source bin/activate pyinstaller --paths=python python/eeg2bids.py -w -F \ --name eeg2bids-service \ ---add-data 'python/libs/bids_validator/rules/top_level_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/rules/associated_data_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/rules/file_level_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/rules/phenotypic_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/rules/session_level_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/rules/subject_level_rules.json:bids_validator/rules' \ ---add-data 'python/libs/bids_validator/tsv/non_custom_columns.json:bids_validator/tsv' \ +--add-data 'python/libs/mne:mne' \ +--add-data 'python/libs/mne_bids:mne_bids' \ +--add-data 'python/libs/bids_validator:bids_validator' \ --hidden-import=eventlet.hubs.epolls \ --hidden-import=eventlet.hubs.kqueue \ --hidden-import=eventlet.hubs.selects \ diff --git a/package.json b/package.json index c25ecec..207b1a4 100644 --- a/package.json +++ b/package.json @@ -37,7 +37,7 @@ "wait-on": "^6.0.1" }, "scripts": { - "rebuild": "rebuild --runtime=electron --target=11.2.1", + "rebuild": "rebuild --runtime=electron --target=19.0.3", "react-start": "cross-env BROWSER=NONE react-scripts start", "electron-start": "cross-env DEV=1 electron .", "start": "concurrently \"npm run react-start\" \"wait-on http://localhost:3000/ && npm run electron-start\"", @@ -150,6 +150,7 @@ "artifactName": "${productName}-Setup-${version}.${ext}" }, "files": [ + "build/index.html/*", "build/**/*", "node_modules/**/*", "utils/**/*", diff --git a/public/electron.js b/public/electron.js index db45d85..b16ca89 100644 --- a/public/electron.js +++ b/public/electron.js @@ -52,9 +52,9 @@ let mainWindow; */ const createMainWindow = () => { const startUrl = process.env.DEV ? - 'http://localhost:3000/app' : + 'http://localhost:3000' : `${url.pathToFileURL(path.join( - __dirname, '/../build/index.html')).href}/app`; + __dirname, '/../build/index.html')).href}`; mainWindow = new BrowserWindow({ show: false, icon, diff --git a/python/libs/bids_validator/__init__.py b/python/libs/bids_validator/__init__.py new file mode 100644 index 0000000..cf39560 --- /dev/null +++ b/python/libs/bids_validator/__init__.py @@ -0,0 +1,6 @@ +"""BIDS validator common Python package.""" +from .bids_validator import BIDSValidator +__all__ = ['BIDSValidator'] + +from . import _version +__version__ = _version.get_versions()['version'] diff --git a/python/libs/bids_validator/_version.py b/python/libs/bids_validator/_version.py new file mode 100644 index 0000000..b282a46 --- /dev/null +++ b/python/libs/bids_validator/_version.py @@ -0,0 +1,21 @@ + +# This file was generated by 'versioneer.py' (0.20) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. 
+
+import json
+
+version_json = '''
+{
+ "date": "2022-03-28T15:30:05-0500",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "fd78d856d5b3785456a9eb4659c223f5a15bc512",
+ "version": "1.9.3"
+}
+'''  # END VERSION_JSON
+
+
+def get_versions():
+    return json.loads(version_json)
diff --git a/python/libs/bids_validator/bids_validator.py b/python/libs/bids_validator/bids_validator.py
new file mode 100644
index 0000000..2103838
--- /dev/null
+++ b/python/libs/bids_validator/bids_validator.py
@@ -0,0 +1,172 @@
+"""Validation class for BIDS projects."""
+import re
+import os
+import json
+
+
+class BIDSValidator():
+    """Object for BIDS (Brain Imaging Data Structure) verification.
+
+    The main method of this class is `is_bids()`. You should use it for
+    checking whether a file path is compatible with BIDS.
+
+    """
+
+    def __init__(self, index_associated=True):
+        """Initialize BIDSValidator object.
+
+        Parameters
+        ----------
+        index_associated : bool
+            Specifies whether associated data should be checked. If True,
+            then any file paths in directories `code/`, `derivatives/`,
+            `sourcedata/` and `stimuli/` will pass the validation; otherwise
+            they won't. Defaults to True.
+
+        """
+        self.dir_rules = os.path.join(os.path.dirname(__file__)) + "/rules/"
+        self.index_associated = index_associated
+
+    def is_bids(self, path):
+        """Check if file path adheres to BIDS.
+
+        Main method of the validator; it uses the other class methods to
+        check different aspects of the file path.
+
+        Parameters
+        ----------
+        path : str
+            Path of a file to be checked. Must be relative to the root of a
+            BIDS dataset.
+
+        Notes
+        -----
+        When you test a file path, make sure that the path is relative to the
+        root of the BIDS dataset the file is part of. That is, as soon as the
+        file path contains parts outside of the BIDS dataset, the validation
+        will fail. For example "home/username/my_dataset/participants.tsv"
+        will fail, although "participants.tsv" is a valid BIDS file.
+
+        Examples
+        --------
+        >>> from bids_validator import BIDSValidator
+        >>> validator = BIDSValidator()
+        >>> filepaths = ["/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz",
+        ...              "/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe",  # wrong extension
+        ...              "home/username/my_dataset/participants.tsv",  # not relative to root
+        ...              "/participants.tsv"]
+        >>> for filepath in filepaths:
+        ...     print(validator.is_bids(filepath))
+        True
+        False
+        False
+        True
+
+        """
+        conditions = []
+
+        conditions.append(self.is_top_level(path))
+        conditions.append(self.is_associated_data(path))
+        conditions.append(self.is_session_level(path))
+        conditions.append(self.is_subject_level(path))
+        conditions.append(self.is_phenotypic(path))
+        conditions.append(self.is_file(path))
+
+        return (any(conditions))
+
+    def is_top_level(self, path):
+        """Check if the file has appropriate name for a top-level file."""
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'top_level_rules.json')
+
+        conditions = [False if re.compile(x).search(path) is None else True for
+                      x in regexps]
+
+        return (any(conditions))
+
+    def is_associated_data(self, path):
+        """Check if file is appropriate associated data."""
+        if not self.index_associated:
+            return False
+
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'associated_data_rules.json')
+
+        conditions = [(re.compile(x).search(path) is not None) for
+                      x in regexps]
+
+        return any(conditions)
+
+    def is_session_level(self, path):
+        """Check if the file has appropriate name for a session level."""
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'session_level_rules.json')
+
+        conditions = [self.conditional_match(x, path) for x in regexps]
+
+        return (any(conditions))
+
+    def is_subject_level(self, path):
+        """Check if the file has appropriate name for a subject level."""
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'subject_level_rules.json')
+
+        conditions = [(re.compile(x).search(path) is not None) for
+                      x in regexps]
+
+        return (any(conditions))
+
+    def is_phenotypic(self, path):
+        """Check if file is phenotypic data."""
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'phenotypic_rules.json')
+
+        conditions = [(re.compile(x).search(path) is not None) for
+                      x in regexps]
+
+        return (any(conditions))
+
+    def is_file(self, path):
+        """Check if the file has an appropriate file-level name."""
+        regexps = self.get_regular_expressions(self.dir_rules +
+                                               'file_level_rules.json')
+
+        conditions = [(re.compile(x).search(path) is not None) for
+                      x in regexps]
+
+        return (any(conditions))
+
+    def get_regular_expressions(self, file_name):
+        """Read regular expressions from a file."""
+        regexps = []
+
+        with open(file_name, 'r') as fin:
+            rules = json.load(fin)
+
+        for key in list(rules.keys()):
+            rule = rules[key]
+
+            regexp = rule["regexp"]
+
+            if "tokens" in rule:
+                tokens = rule["tokens"]
+
+                for token in list(tokens):
+                    regexp = regexp.replace(token, "|".join(tokens[token]))
+
+            regexps.append(regexp)
+
+        return regexps
+
+    def conditional_match(self, expression, path):
+        """Find conditional match."""
+        match = re.compile(expression).findall(path)
+        match = match[0] if len(match) >= 1 else False
+        # adapted from the JS implementation; JS does not support
+        # conditional groups
+        if (match):
+            if ((match[1] == match[2][1:]) | (not match[1])):
+                return True
+            else:
+                return False
+        else:
+            return False
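For reference, a minimal usage sketch of the vendored validator (the example
paths are illustrative, not files from this repository). Paths are matched
against the rule regexps relative to the BIDS root, hence the leading slash:

    from bids_validator import BIDSValidator

    validator = BIDSValidator()
    print(validator.is_bids('/sub-01/eeg/sub-01_task-rest_eeg.edf'))  # True
    print(validator.is_bids('sub-01/eeg/sub-01_task-rest_eeg.edf'))   # False: not root-relative
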
diff --git a/python/libs/bids_validator/rules/file_level_rules.json b/python/libs/bids_validator/rules/file_level_rules.json
index 8215413..0c36e50 100644
--- a/python/libs/bids_validator/rules/file_level_rules.json
+++ b/python/libs/bids_validator/rules/file_level_rules.json
@@ -1,32 +1,48 @@
 {
-  "anat": {
-    "regexp":
"^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", + "anat_nonparametric": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", "tokens": { "@@@_anat_suffixes_@@@": [ "T1w", "T2w", - "T1map", - "T2map", - "T1rho", + "PDw", + "T2starw", "FLAIR", - "PD", - "PDT2", "inplaneT1", "inplaneT2", + "PDT2", "angio", - "SWImagandphase", "T2star", "FLASH", + "PD" + ], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_parametric": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": [ + "T1map", + "T2map", + "T2starmap", + "R1map", + "R2map", + "R2starmap", "PDmap", - "photo" + "MTRmap", + "MTsat", + "UNIT1", + "T1rho", + "MWFmap", + "MTVmap", + "PDT2map", + "Chimap", + "S0map", + "M0map" ], - "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"], - "@@@_cont_ext_@@@": [ - "_physio\\.tsv\\.gz", - "_stim\\.tsv\\.gz", - "_physio\\.json", - "_stim\\.json" - ] + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] } }, @@ -36,25 +52,96 @@ "@@@_anat_suffixes_@@@": [ "T1w", "T2w", - "T1map", - "T2map", - "T1rho", + "PDw", + "T2starw", "FLAIR", - "PD", - "PDT2", "inplaneT1", "inplaneT2", + "PDT2", "angio", - "SWImagandphase", + "T1map", + "T2map", + "T2starmap", + "R1map", + "R2map", + "R2starmap", + "PDmap", + "MTRmap", + "MTsat", + "UNIT1", + "T1rho", + "MWFmap", + "MTVmap", + "PDT2map", + "Chimap", + "TB1map", + "RB1map", + "S0map", + "M0map", + "MESE", + "MEGRE", + "VFA", + "IRT1", + "MP2RAGE", + "MPM", + "MTS", + "MTR", "T2star", "FLASH", - "PDmap", - "photo" + "PD" ], "@@@_anat_ext_@@@": ["nii.gz", "nii"] } }, + "anat_multiecho": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_echo-[0-9]+?(_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["MESE", "MEGRE"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_multiflip": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_flip-[0-9]+?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["VFA"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_multiinv": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_inv-[0-9]+?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["IRT1"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_mp2rage": { + "regexp": 
"^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:_flip-[0-9]+)?_inv-[0-9]+?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["MP2RAGE"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_vfa_mt": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?_flip-[0-9]+?_mt-(on|off)?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["MPM", "MTS"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "anat_mtr": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?anat[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_mt-(on|off)?(?:_part-(imag|mag|phase|real))?_(?:@@@_anat_suffixes_@@@)\\.(@@@_anat_ext_@@@)$", + "tokens": { + "@@@_anat_suffixes_@@@": ["MTR"], + "@@@_anat_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + "behavioral": { "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?beh[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?((?:@@@_behavioral_ext_@@@)|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", "tokens": { @@ -74,7 +161,7 @@ }, "dwi": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?dwi[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?((?:@@@_dwi_ext_@@@)|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?dwi[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_part-(imag|mag|phase|real))?((?:@@@_dwi_ext_@@@)|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", "tokens": { "@@@_dwi_ext_@@@": [ "_dwi\\.nii\\.gz", @@ -95,8 +182,8 @@ } }, - "field_map": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "fmap_gre": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", "tokens": { "@@@_field_map_type_@@@": [ "phasediff", @@ -105,30 +192,62 @@ "magnitude1", "magnitude2", "magnitude", - "fieldmap", - "epi", - "m0scan" + "fieldmap" ], "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] } }, - "field_map_main_nii": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "fmap_pepolar_asl": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?_dir-[a-zA-Z0-9]+(?:_run-[0-9]+)?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", "tokens": { - "@@@_field_map_type_@@@": [ - "phasediff", - "phase1", - "phase2", - "fieldmap", - "epi" - ], - 
"@@@_field_map_ext_@@@": ["nii\\.gz", "nii"] + "@@@_field_map_type_@@@": ["m0scan", "epi"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "fmap_TB1DAM": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_flip-[0-9]+?(?:_part-(imag|mag|phase|real))?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "tokens": { + "@@@_field_map_type_@@@": ["TB1DAM"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "fmap_TB1EPI": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_echo-[0-9]+?_flip-[0-9]+?(?:_inv-[0-9]+)?(?:_part-(imag|mag|phase|real))?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "tokens": { + "@@@_field_map_type_@@@": ["TB1EPI"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "fmap_rf": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:_flip-[0-9]+)?(?:_inv-[0-9]+)?(?:_part-(imag|mag|phase|real))?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "tokens": { + "@@@_field_map_type_@@@": ["TB1AFI", "TB1TFL", "TB1RFM", "RB1COR"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "fmap_TB1SRGE": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?_flip-[0-9]+?_inv-[0-9]+?(?:_part-(imag|mag|phase|real))?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "tokens": { + "@@@_field_map_type_@@@": ["TB1SRGE"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] + } + }, + + "fmap_parametric": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?fmap[\\/\\\\]\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(?:@@@_field_map_type_@@@)\\.(@@@_field_map_ext_@@@)$", + "tokens": { + "@@@_field_map_type_@@@": ["TB1map", "RB1map"], + "@@@_field_map_ext_@@@": ["nii\\.gz", "nii", "json"] } }, "func": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?((?:@@@_func_ext_@@@)|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:_part-(imag|mag|phase|real))?(?:@@@_func_ext_@@@)$", "tokens": { "@@@_func_ext_@@@": [ "_bold\\.nii\\.gz", @@ -137,17 +256,30 @@ "_cbv\\.nii\\.gz", "_cbv\\.nii", "_cbv\\.json", - "_phase\\.nii\\.gz", - "_phase\\.nii", - "_phase\\.json", "_sbref\\.nii\\.gz", "_sbref\\.nii", - "_sbref\\.json", - "_events\\.tsv", - "_events\\.json", - "_defacemask\\.nii\\.gz", - "_defacemask\\.nii" - ], + "_sbref\\.json" + ] + } + }, + + "func_phase_deprecated": { + "regexp": 
"^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:@@@_func_ext_@@@)$", + "tokens": { + "@@@_func_ext_@@@": ["_phase\\.nii\\.gz", "_phase\\.nii", "_phase\\.json"] + } + }, + + "func_events": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:@@@_func_ext_@@@)$", + "tokens": { + "@@@_func_ext_@@@": ["_events\\.tsv", "_events\\.json"] + } + }, + + "func_timeseries": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@)$", + "tokens": { "@@@_cont_ext_@@@": [ "_physio\\.tsv\\.gz", "_stim\\.tsv\\.gz", @@ -158,7 +290,7 @@ }, "func_bold": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:@@@_func_bold_ext_@@@)$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?func[\\/\\\\]\\1(_\\2)?_task-[a-zA-Z0-9]+(?:_acq-[a-zA-Z0-9]+)?(?:_ce-[a-zA-Z0-9]+)?(?:_dir-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_echo-[0-9]+)?(?:_part-(imag|mag|phase|real))?(?:@@@_func_bold_ext_@@@)$", "tokens": { "@@@_func_bold_ext_@@@": [ "_bold\\.nii\\.gz", @@ -186,8 +318,55 @@ }, "eeg": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?eeg[\\/\\\\]\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_split-[0-9]+)?((_eeg\\.(@@@_eeg_type_@@@)|(@@@_eeg_ext_@@@))|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?eeg[\\/\\\\]\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_split-[0-9]+)?(?:_space-(@@@_eeg_space_@@@))?((_eeg\\.(@@@_eeg_type_@@@)|(@@@_eeg_ext_@@@))|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", "tokens": { + "@@@_eeg_space_@@@": [ + "Other", + "CapTrak", + "EEGLAB", + "EEGLAB-HJ", + "CTF", + "ElektaNeuromag", + "4DBti", + "KitYokogawa", + "ChietiItab", + "ICBM452AirSpace", + "ICBM452Warp5Space", + "IXI549Space", + "fsaverage", + "fsaverageSym", + "fsLR", + "MNIColin27", + "MNI152Lin", + "MNI152NLin2009aSym", + "MNI152NLin2009bSym", + "MNI152NLin2009cSym", + "MNI152NLin2009aAsym", + "MNI152NLin2009bAsym", + "MNI152NLin2009cAsym", + "MNI152NLin6Sym", + "MNI152NLin6ASym", + "MNI305", + "NIHPD", + "OASIS30AntsOASISAnts", + "OASIS30Atropos", + "Talairach", + "UNCInfant", + "fsaverage3", + "fsaverage4", + "fsaverage5", + "fsaverage6", + "fsaveragesym", + "UNCInfant0V21", + "UNCInfant1V21", + "UNCInfant2V21", + "UNCInfant0V22", + "UNCInfant1V22", + "UNCInfant2V22", + "UNCInfant0V23", + "UNCInfant1V23", + "UNCInfant2V23" + ], "@@@_eeg_type_@@@": ["vhdr", "vmrk", "eeg", "edf", "bdf", "set", "fdt"], "@@@_eeg_ext_@@@": [ "_events\\.json", @@ -208,10 +387,51 @@ ] } }, - "ieeg": { - "regexp": 
"^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?ieeg[\\/\\\\]\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_split-[0-9]+)?(?:_space-[a-zA-Z0-9]+)?((_ieeg\\.(@@@_ieeg_type_@@@)|(@@@_ieeg_ext_@@@))|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?ieeg[\\/\\\\]\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_split-[0-9]+)?(?:_space-(@@@_ieeg_space_@@@))?((_ieeg\\.(@@@_ieeg_type_@@@)|(@@@_ieeg_ext_@@@))|(?:_recording-[a-zA-Z0-9]+)?(?:@@@_cont_ext_@@@))$", "tokens": { + "@@@_ieeg_space_@@@": [ + "Other", + "Pixels", + "ACPC", + "ScanRAS", + "ICBM452AirSpace", + "ICBM452Warp5Space", + "IXI549Space", + "fsaverage", + "fsaverageSym", + "fsLR", + "MNIColin27", + "MNI152Lin", + "MNI152NLin2009aSym", + "MNI152NLin2009bSym", + "MNI152NLin2009cSym", + "MNI152NLin2009aAsym", + "MNI152NLin2009bAsym", + "MNI152NLin2009cAsym", + "MNI152NLin6Sym", + "MNI152NLin6ASym", + "MNI305", + "NIHPD", + "OASIS30AntsOASISAnts", + "OASIS30Atropos", + "Talairach", + "UNCInfant", + "fsaverage3", + "fsaverage4", + "fsaverage5", + "fsaverage6", + "fsaveragesym", + "UNCInfant0V21", + "UNCInfant1V21", + "UNCInfant2V21", + "UNCInfant0V22", + "UNCInfant1V22", + "UNCInfant2V22", + "UNCInfant0V23", + "UNCInfant1V23", + "UNCInfant2V23" + ], "@@@_ieeg_type_@@@": [ "edf", "vhdr", @@ -283,5 +503,90 @@ "stimuli": { "regexp": "^[\\/\\\\](?:stimuli)[\\/\\\\](?:.*)$" + }, + + "pet": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?pet[\\/\\\\](sub-[a-zA-Z0-9]+)(?:(_ses-[a-zA-Z0-9]+))?(?:_task-[a-zA-Z0-9]+)?(?:_trc-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?_(@@@_pet_ext_@@@)$", + "tokens": { + "@@@_pet_ext_@@@": [ + "pet\\.nii\\.gz", + "pet\\.nii", + "pet\\.json", + "events\\.json", + "events\\.tsv" + ] + } + }, + + "pet_blood": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?pet[\\/\\\\](sub-[a-zA-Z0-9]+)(?:(_ses-[a-zA-Z0-9]+))?(?:_task-[a-zA-Z0-9]+)?(?:_trc-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_recording-[a-zA-Z0-9]+)?_(@@@_pet_ext_@@@)$", + "tokens": { + "@@@_pet_ext_@@@": ["blood\\.tsv\\.gz", "blood\\.tsv", "blood\\.json"] + } + }, + + "microscopy": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?micr[\\/\\\\](sub-[a-zA-Z0-9]+)(?:(_ses-[a-zA-Z0-9]+))?(?:_sample-[a-zA-Z0-9]+)(?:_acq-[a-zA-Z0-9]+)?(?:_stain-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_chunk-[0-9]+)?((@@@_microscopy_type_@@@)(@@@_microscopy_ext_@@@))$", + "tokens": { + "@@@_microscopy_type_@@@": [ + "_TEM", + "_SEM", + "_uCT", + "_BF", + "_DF", + "_PC", + "_DIC", + "_FLUO", + "_CONF", + "_PLI", + "_CARS", + "_2PE", + "_MPE", + "_SR", + "_NLO", + "_OCT", + "_SPIM" + ], + "@@@_microscopy_ext_@@@": [ + ".ome\\.tif", + ".ome\\.btf", + ".tif", + ".png" + ] + } + }, + "microscopy_photo": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?micr[\\/\\\\](sub-[a-zA-Z0-9]+)(?:(_ses-[a-zA-Z0-9]+))?(?:_sample-[a-zA-Z0-9]+)(?:_acq-[a-zA-Z0-9]+)?(@@@_photo_ext_@@@)$", + "tokens":{ + "@@@_photo_ext_@@@": [ + "_photo\\.jpg", + "_photo\\.png", + "_photo\\.tif" + ] + } + }, + "microscopy_json": { + "regexp": 
"^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?micr[\\/\\\\](sub-[a-zA-Z0-9]+)(?:(_ses-[a-zA-Z0-9]+))?(?:_sample-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_stain-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_chunk-[0-9]+)?(@@@_microscopy_type_@@@)\\.json$", + "tokens": { + "@@@_microscopy_type_@@@": [ + "_TEM", + "_SEM", + "_uCT", + "_BF", + "_DF", + "_PC", + "_DIC", + "_FLUO", + "_CONF", + "_PLI", + "_CARS", + "_2PE", + "_MPE", + "_SR", + "_NLO", + "_OCT", + "_SPIM" + ] + } } } diff --git a/python/libs/bids_validator/rules/session_level_rules.json b/python/libs/bids_validator/rules/session_level_rules.json index 438e6a5..241ef3c 100644 --- a/python/libs/bids_validator/rules/session_level_rules.json +++ b/python/libs/bids_validator/rules/session_level_rules.json @@ -32,6 +32,13 @@ } }, + "pet_ses": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_trc-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_task-[a-zA-Z0-9]+)?(?:_run-[0-9]+_)?(@@@_pet_ses_type_@@@)$", + "tokens": { + "@@@_pet_ses_type_@@@": ["_pet.json", "_events.json", "_events.tsv"] + } + }, + "anat_ses": { "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_acq-[a-zA-Z0-9]+)?(?:_rec-[a-zA-Z0-9]+)?(?:_run-[0-9]+_)?(@@@_anat_ses_type_@@@).json$", "tokens": { @@ -76,7 +83,7 @@ }, "eeg_ses": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(@@@_eeg_ses_type_@@@)$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_space-(@@@_eeg_space_@@@))?(@@@_eeg_ses_type_@@@)$", "tokens": { "@@@_eeg_ses_type_@@@": [ "_events.tsv", @@ -87,12 +94,59 @@ "_eeg.json", "_coordsystem.json", "_photo.jpg" + ], + "@@@_eeg_space_@@@": [ + "Other", + "CapTrak", + "EEGLAB", + "EEGLAB-HJ", + "CTF", + "ElektaNeuromag", + "4DBti", + "KitYokogawa", + "ChietiItab", + "ICBM452AirSpace", + "ICBM452Warp5Space", + "IXI549Space", + "fsaverage", + "fsaverageSym", + "fsLR", + "MNIColin27", + "MNI152Lin", + "MNI152NLin2009aSym", + "MNI152NLin2009bSym", + "MNI152NLin2009cSym", + "MNI152NLin2009aAsym", + "MNI152NLin2009bAsym", + "MNI152NLin2009cAsym", + "MNI152NLin6Sym", + "MNI152NLin6ASym", + "MNI305", + "NIHPD", + "OASIS30AntsOASISAnts", + "OASIS30Atropos", + "Talairach", + "UNCInfant", + "fsaverage3", + "fsaverage4", + "fsaverage5", + "fsaverage6", + "fsaveragesym", + "UNCInfant0V21", + "UNCInfant1V21", + "UNCInfant2V21", + "UNCInfant0V22", + "UNCInfant1V22", + "UNCInfant2V22", + "UNCInfant0V23", + "UNCInfant1V23", + "UNCInfant2V23" ] } }, "ieeg_ses": { - "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_space-[a-zA-Z0-9]+)?(@@@_ieeg_ses_type_@@@)$", + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_task-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_proc-[a-zA-Z0-9]+)?(?:_space-(@@@_ieeg_space_@@@))?(@@@_ieeg_ses_type_@@@)$", "tokens": { "@@@_ieeg_ses_type_@@@": [ "_events.tsv", @@ -103,6 +157,73 @@ "_ieeg.json", "_coordsystem.json", "_photo.jpg" + ], + "@@@_ieeg_space_@@@": [ + "Other", + "Pixels", + "ACPC", + "ScanRAS", + "ICBM452AirSpace", + "ICBM452Warp5Space", + "IXI549Space", + "fsaverage", + "fsaverageSym", + "fsLR", + "MNIColin27", + "MNI152Lin", + "MNI152NLin2009aSym", + "MNI152NLin2009bSym", + 
"MNI152NLin2009cSym", + "MNI152NLin2009aAsym", + "MNI152NLin2009bAsym", + "MNI152NLin2009cAsym", + "MNI152NLin6Sym", + "MNI152NLin6ASym", + "MNI305", + "NIHPD", + "OASIS30AntsOASISAnts", + "OASIS30Atropos", + "Talairach", + "UNCInfant", + "fsaverage3", + "fsaverage4", + "fsaverage5", + "fsaverage6", + "fsaveragesym", + "UNCInfant0V21", + "UNCInfant1V21", + "UNCInfant2V21", + "UNCInfant0V22", + "UNCInfant1V22", + "UNCInfant2V22", + "UNCInfant0V23", + "UNCInfant1V23", + "UNCInfant2V23" + ] + } + }, + + "microscopy_ses": { + "regexp": "^[\\/\\\\](sub-[a-zA-Z0-9]+)[\\/\\\\](?:(ses-[a-zA-Z0-9]+)[\\/\\\\])?\\1(_\\2)?(?:_sample-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_stain-[a-zA-Z0-9]+)?(?:_run-[0-9]+_)?(?:_chunk-[0-9]+)?(@@@_microscopy_ses_type_@@@)$", + "tokens": { + "@@@_microscopy_ses_type_@@@": [ + "_TEM.json", + "_SEM.json", + "_uCT.json", + "_BF.json", + "_DF.json", + "_PC.json", + "_DIC.json", + "_FLUO.json", + "_CONF.json", + "_PLI.json", + "_CARS.json", + "_2PE.json", + "_MPE.json", + "_SR.json", + "_NLO.json", + "_OCT.json", + "_SPIM.json" ] } } diff --git a/python/libs/bids_validator/rules/top_level_rules.json b/python/libs/bids_validator/rules/top_level_rules.json index 90fa9f9..4e80dc1 100644 --- a/python/libs/bids_validator/rules/top_level_rules.json +++ b/python/libs/bids_validator/rules/top_level_rules.json @@ -13,7 +13,11 @@ "phasediff.json", "phase1.json", "phase2.json", - "fieldmap.json" + "fieldmap.json", + "events.json", + "scans.json", + "samples.json", + "samples.tsv" ] } }, @@ -28,10 +32,7 @@ "_events\\.tsv", "_beh\\.json" ], - "@@@_cont_ext_@@@": [ - "_physio\\.json", - "_stim\\.json" - ] + "@@@_cont_ext_@@@": ["_physio\\.json", "_stim\\.json"] } }, @@ -47,6 +48,13 @@ } }, + "pet_top": { + "regexp": "^[\\/\\\\](?:ses-[a-zA-Z0-9]+_)?(?:trc-[a-zA-Z0-9]+_)?(?:rec-[a-zA-Z0-9]+_)?(?:task-[a-zA-Z0-9]+_)?(?:run-[0-9]+_)?(@@@_pet_suffixes_@@@)\\.json$", + "tokens": { + "@@@_pet_suffixes_@@@": ["pet"] + } + }, + "anat_top": { "regexp": "^[\\/\\\\](?:ses-[a-zA-Z0-9]+_)?(?:acq-[a-zA-Z0-9]+_)?(?:rec-[a-zA-Z0-9]+_)?(?:run-[0-9]+_)?(@@@_anat_suffixes_@@@)\\.json$", "tokens": { @@ -125,5 +133,30 @@ "tokens": { "@@@_other_top_files_ext_@@@": ["physio\\.json", "stim\\.json"] } + }, + + "microscopy_top": { + "regexp": "^[\\/\\\\](?:ses-[a-zA-Z0-9]+_)?(?:_sample-[a-zA-Z0-9]+)?(?:_acq-[a-zA-Z0-9]+)?(?:_stain-[a-zA-Z0-9]+)?(?:_run-[0-9]+)?(?:_chunk-[0-9]+)?(?:@@@_microscopy_top_ext_@@@)$", + "tokens": { + "@@@_microscopy_top_ext_@@@": [ + "_TEM\\.json", + "_SEM\\.json", + "_uCT\\.json", + "_BF\\.json", + "_DF\\.json", + "_PC\\.json", + "_DIC\\.json", + "_FLUO\\.json", + "_CONF\\.json", + "_PLI\\.json", + "_CARS\\.json", + "_2PE\\.json", + "_MPE\\.json", + "_SR\\.json", + "_NLO\\.json", + "_OCT\\.json", + "_SPIM\\.json" + ] + } } } diff --git a/python/libs/bids_validator/test_bids_validator.py b/python/libs/bids_validator/test_bids_validator.py new file mode 100644 index 0000000..a9cbe51 --- /dev/null +++ b/python/libs/bids_validator/test_bids_validator.py @@ -0,0 +1,54 @@ +"""Test BIDSValidator functionality. + +git-annex and datalad are used to download a test data structure without the +actual file contents. 
+ +""" +import os + +import pytest +import datalad.api + +from bids_validator import BIDSValidator + +HOME = os.path.expanduser('~') + +TEST_DATA_DICT = { + 'eeg_matchingpennies': ( + 'https://gin.g-node.org/sappelhoff/eeg_matchingpennies' + ), + } + +EXCLUDE_KEYWORDS = ['git', 'datalad', 'sourcedata', 'bidsignore'] + + +def _download_test_data(test_data_dict, dsname): + """Download test data using datalad.""" + url = test_data_dict[dsname] + dspath = os.path.join(HOME, dsname) + datalad.api.install(dspath, url) + return dspath + + +def _gather_test_files(dspath, exclude_keywords): + """Get test files from dataset path, relative to dataset.""" + files = [] + for r, _, f in os.walk(dspath): + for file in f: + fname = os.path.join(r, file) + fname = fname.replace(dspath, '') + if not any(keyword in fname for keyword in exclude_keywords): + files.append(fname) + + return files + + +dspath = _download_test_data(TEST_DATA_DICT, 'eeg_matchingpennies') +files = _gather_test_files(dspath, EXCLUDE_KEYWORDS) + + +@pytest.mark.parametrize('fname', files) +def test_is_bids(fname): + """Test that is_bids returns true for each file in a valid BIDS dataset.""" + validator = BIDSValidator() + assert validator.is_bids(fname) diff --git a/python/libs/bids_validator/tsv/non_custom_columns.json b/python/libs/bids_validator/tsv/non_custom_columns.json index 7fe3bc9..53f7808 100644 --- a/python/libs/bids_validator/tsv/non_custom_columns.json +++ b/python/libs/bids_validator/tsv/non_custom_columns.json @@ -43,5 +43,6 @@ "phenotype": ["participant_id"], "scans": ["acq_time", "filename"], "sessions": ["acq_time", "session_id"], - "aslcontext": ["volume_type"] + "aslcontext": ["volume_type"], + "blood": ["time", "plasma_radioactivity", "whole_blood_radioactivity", "metabolite_parent_fraction", "hplc_recovery_fractions"] } diff --git a/python/libs/mne/__init__.py b/python/libs/mne/__init__.py new file mode 100644 index 0000000..82a9fa3 --- /dev/null +++ b/python/libs/mne/__init__.py @@ -0,0 +1,130 @@ +"""MNE software for MEG and EEG data analysis.""" + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# X.Y # Final release +# +# Dev branch marker is: 'X.Y.devN' where N is an integer. 
+# + +from ._version import __version__ + +# have to import verbose first since it's needed by many things +from .utils import (set_log_level, set_log_file, verbose, set_config, + get_config, get_config_path, set_cache_dir, + set_memmap_min_size, grand_average, sys_info, open_docs, + use_log_level) +from .io.pick import (pick_types, pick_channels, + pick_channels_regexp, pick_channels_forward, + pick_types_forward, pick_channels_cov, + pick_channels_evoked, pick_info, + channel_type, channel_indices_by_type) +from .io.base import concatenate_raws +from .io.meas_info import create_info, Info +from .io.proj import Projection +from .io.kit import read_epochs_kit +from .io.eeglab import read_epochs_eeglab +from .io.reference import (set_eeg_reference, set_bipolar_reference, + add_reference_channels) +from .io.what import what +from .bem import (make_sphere_model, make_bem_model, make_bem_solution, + read_bem_surfaces, write_bem_surfaces, write_head_bem, + read_bem_solution, write_bem_solution) +from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance, + compute_covariance, whiten_evoked, make_ad_hoc_cov) +from .event import (read_events, write_events, find_events, merge_events, + pick_events, make_fixed_length_events, concatenate_events, + find_stim_steps, AcqParserFIF) +from ._freesurfer import (head_to_mni, head_to_mri, read_talxfm, + get_volume_labels_from_aseg, read_freesurfer_lut, + vertex_to_mni, read_lta) +from .forward import (read_forward_solution, apply_forward, apply_forward_raw, + average_forward_solutions, Forward, + write_forward_solution, make_forward_solution, + convert_forward_solution, make_field_map, + make_forward_dipole, use_coil_def) +from .source_estimate import (read_source_estimate, + SourceEstimate, VectorSourceEstimate, + VolSourceEstimate, VolVectorSourceEstimate, + MixedSourceEstimate, MixedVectorSourceEstimate, + grade_to_tris, + spatial_src_adjacency, + spatial_tris_adjacency, + spatial_dist_adjacency, + spatial_inter_hemi_adjacency, + spatio_temporal_src_adjacency, + spatio_temporal_tris_adjacency, + spatio_temporal_dist_adjacency, + extract_label_time_course, stc_near_sensors) +from .surface import (read_surface, write_surface, decimate_surface, read_tri, + get_head_surf, get_meg_helmet_surf, dig_mri_distances, + warp_montage_volume, get_montage_volume_labels) +from .morph_map import read_morph_map +from .morph import (SourceMorph, read_source_morph, grade_to_vertices, + compute_source_morph) +from .source_space import (read_source_spaces, + write_source_spaces, setup_source_space, + setup_volume_source_space, SourceSpaces, + add_source_space_distances, morph_source_spaces, + get_volume_labels_from_src) +from .annotations import (Annotations, read_annotations, annotations_from_events, + events_from_annotations) +from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs, + concatenate_epochs, make_fixed_length_epochs) +from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds, + combine_evoked) +from .label import (read_label, label_sign_flip, + write_label, stc_to_label, grow_labels, Label, split_label, + BiHemiLabel, read_labels_from_annot, write_labels_to_annot, + random_parcellation, morph_labels, labels_to_stc) +from .misc import parse_config, read_reject_parameters +from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels, + scale_source_space) +from .transforms import (read_trans, write_trans, + transform_surface_to, Transform) +from .proj import (read_proj, write_proj, compute_proj_epochs, + 
compute_proj_evoked, compute_proj_raw, sensitivity_map) +from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole +from .channels import (equalize_channels, rename_channels, find_layout, + read_vectorview_selection) +from .report import Report, open_report + +from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff +from .rank import compute_rank + +from . import beamformer +from . import channels +from . import chpi +from . import commands +from . import coreg +from . import cuda +from . import datasets +from . import dipole +from . import epochs +from . import event +from . import io +from . import filter +from . import gui +from . import inverse_sparse +from . import minimum_norm +from . import preprocessing +from . import simulation +from . import stats +from . import surface +from . import time_frequency +from . import viz +from . import decoding +from . import export + +# initialize logging +set_log_level(None, False) +set_log_file() diff --git a/python/libs/mne/__main__.py b/python/libs/mne/__main__.py new file mode 100644 index 0000000..414754c --- /dev/null +++ b/python/libs/mne/__main__.py @@ -0,0 +1,7 @@ +# Authors: Eric Larson +# License: BSD Style. + +from .commands.utils import main + +if __name__ == '__main__': + main() diff --git a/python/libs/mne/_freesurfer.py b/python/libs/mne/_freesurfer.py new file mode 100644 index 0000000..7b5e33f --- /dev/null +++ b/python/libs/mne/_freesurfer.py @@ -0,0 +1,737 @@ +# -*- coding: utf-8 -*- +"""Freesurfer handling functions.""" +# Authors: Alex Rockhill +# Eric Larson +# +# License: BSD-3-Clause + +import os.path as op +import numpy as np +from gzip import GzipFile + +from .bem import _bem_find_surface, read_bem_surfaces +from .io.constants import FIFF +from .io.meas_info import read_fiducials +from .transforms import (apply_trans, invert_transform, combine_transforms, + _ensure_trans, read_ras_mni_t, Transform) +from .surface import read_surface, _read_mri_surface +from .utils import (verbose, _validate_type, _check_fname, _check_option, + get_subjects_dir, _require_version, logger) + + +def _check_subject_dir(subject, subjects_dir): + """Check that the Freesurfer subject directory is as expected.""" + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + for img_name in ('T1', 'brain', 'aseg'): + if not op.isfile(op.join(subjects_dir, subject, 'mri', + f'{img_name}.mgz')): + raise ValueError('Freesurfer recon-all subject folder ' + 'is incorrect or improperly formatted, ' + f'got {op.join(subjects_dir, subject)}') + return op.join(subjects_dir, subject) + + +def _get_aseg(aseg, subject, subjects_dir): + """Check that the anatomical segmentation file exists and load it.""" + _require_version('nibabel', 'load aseg', '2.1.0') + import nibabel as nib + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + if not aseg.endswith('aseg'): + raise RuntimeError( + f'`aseg` file path must end with "aseg", got {aseg}') + aseg = _check_fname(op.join(subjects_dir, subject, 'mri', aseg + '.mgz'), + overwrite='read', must_exist=True) + aseg = nib.load(aseg) + aseg_data = np.array(aseg.dataobj) + return aseg, aseg_data + + +def _import_nibabel(why='use MRI files'): + try: + import nibabel as nib + except ImportError as exp: + msg = 'nibabel is required to %s, got:\n%s' % (why, exp) + else: + msg = '' + if msg: + raise ImportError(msg) + return nib + + +def _reorient_image(img, axcodes='RAS'): + """Reorient an image to a given orientation. 
+
+    Parameters
+    ----------
+    img : instance of SpatialImage
+        The MRI image.
+    axcodes : tuple | str
+        The axis codes specifying the orientation, e.g. "RAS".
+        See :func:`nibabel.orientations.aff2axcodes`.
+
+    Returns
+    -------
+    img_data : ndarray
+        The reoriented image data.
+    vox_ras_t : ndarray
+        The new transform from the new voxels to surface RAS.
+
+    Notes
+    -----
+    .. versionadded:: 0.24
+    """
+    import nibabel as nib
+    orig_data = np.array(img.dataobj).astype(np.float32)
+    # reorient data to RAS
+    ornt = nib.orientations.axcodes2ornt(
+        nib.orientations.aff2axcodes(img.affine)).astype(int)
+    ras_ornt = nib.orientations.axcodes2ornt(axcodes)
+    ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt)
+    img_data = nib.orientations.apply_orientation(orig_data, ornt_trans)
+    orig_mgh = nib.MGHImage(orig_data, img.affine)
+    aff_trans = nib.orientations.inv_ornt_aff(ornt_trans, img.shape)
+    vox_ras_t = np.dot(orig_mgh.header.get_vox2ras_tkr(), aff_trans)
+    return img_data, vox_ras_t
+
+
+def _mri_orientation(orientation):
+    """Get MRI orientation information from an image.
+
+    Parameters
+    ----------
+    orientation : str
+        Orientation that you want. Can be "axial", "sagittal", or "coronal".
+
+    Returns
+    -------
+    axis : int
+        The dimension of the axis to take slices over when plotting.
+    x : int
+        The dimension of the x axis.
+    y : int
+        The dimension of the y axis.
+
+    Notes
+    -----
+    .. versionadded:: 0.21
+    .. versionchanged:: 0.24
+    """
+    _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
+    axis = dict(coronal=1, axial=2, sagittal=0)[orientation]
+    x, y = sorted(set([0, 1, 2]).difference(set([axis])))
+    return axis, x, y
+
+
+def _get_mri_info_data(mri, data):
+    # Read the segmentation data using nibabel
+    if data:
+        _import_nibabel('load MRI atlas data')
+    out = dict()
+    _, out['vox_mri_t'], out['mri_ras_t'], dims, _, mgz = _read_mri_info(
+        mri, return_img=True)
+    out.update(
+        mri_width=dims[0], mri_height=dims[1],
+        mri_depth=dims[2], mri_volume_name=mri)
+    if data:
+        assert mgz is not None
+        out['mri_vox_t'] = invert_transform(out['vox_mri_t'])
+        out['data'] = np.asarray(mgz.dataobj)
+    return out
+
+
+def _get_mgz_header(fname):
+    """Adapted from nibabel to quickly extract header info."""
+    fname = _check_fname(fname, overwrite='read', must_exist=True,
+                         name='MRI image')
+    if not fname.endswith('.mgz'):
+        raise IOError('Filename must end with .mgz')
+    header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
+                  ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
+                  ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
+                  ('Pxyz_c', '>f4', (3,))]
+    header_dtype = np.dtype(header_dtd)
+    with GzipFile(fname, 'rb') as fid:
+        hdr_str = fid.read(header_dtype.itemsize)
+    header = np.ndarray(shape=(), dtype=header_dtype,
+                        buffer=hdr_str)
+    # dims
+    dims = header['dims'].astype(int)
+    dims = dims[:3] if len(dims) == 4 else dims
+    # vox2ras_tkr
+    delta = header['delta']
+    ds = np.array(delta, float)
+    ns = np.array(dims * ds) / 2.0
+    v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
+                       [0, 0, ds[2], -ns[2]],
+                       [0, -ds[1], 0, ns[1]],
+                       [0, 0, 0, 1]], dtype=np.float32)
+    # ras2vox
+    d = np.diag(delta)
+    pcrs_c = dims / 2.0
+    Mdc = header['Mdc'].T
+    pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
+    M = np.eye(4, 4)
+    M[0:3, 0:3] = np.dot(Mdc, d)
+    M[0:3, 3] = pxyz_0.T
+    header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M,
+                  zooms=header['delta'])
+    return header
+
+
+def _get_atlas_values(vol_info, rr):
+    # Transform MRI coordinates (where our surfaces live) to voxels
+    rr_vox = apply_trans(vol_info['mri_vox_t'], rr)
+    good = ((rr_vox >= -.5) &
+            (rr_vox < np.array(vol_info['data'].shape, int) - 0.5)).all(-1)
+    idx = np.round(rr_vox[good].T).astype(np.int64)
+    values = np.full(rr.shape[0], np.nan)
+    values[good] = vol_info['data'][tuple(idx)]
+    return values
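+# Usage sketch for the two helpers above (illustrative only; assumes `rr`
+# holds points in the FreeSurfer surface RAS frame, in meters):
+#
+#     vol_info = _get_mri_info_data('aseg.mgz', data=True)
+#     values = _get_atlas_values(vol_info, rr)  # atlas ID per point, NaN outside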
+
+
+def get_volume_labels_from_aseg(mgz_fname, return_colors=False,
+                                atlas_ids=None):
+    """Return a list of names and colors of segmented volumes.
+
+    Parameters
+    ----------
+    mgz_fname : str
+        Filename to read. Typically aseg.mgz or some variant in the freesurfer
+        pipeline.
+    return_colors : bool
+        If True, also return the label colors.
+    atlas_ids : dict | None
+        A lookup table providing a mapping from region names (str) to ID values
+        (int). Can be None to use the standard Freesurfer LUT.
+
+        .. versionadded:: 0.21.0
+
+    Returns
+    -------
+    label_names : list of str
+        The names of segmented volumes included in this mgz file.
+    label_colors : list of str
+        The RGB colors of the labels included in this mgz file.
+
+    See Also
+    --------
+    read_freesurfer_lut
+
+    Notes
+    -----
+    .. versionchanged:: 0.21.0
+       The label names are now sorted in the same order as their corresponding
+       values in the MRI file.
+
+    .. versionadded:: 0.9.0
+    """
+    import nibabel as nib
+    if not op.isfile(mgz_fname):
+        raise IOError('aseg file "%s" not found' % mgz_fname)
+    atlas = nib.load(mgz_fname)
+    data = np.asarray(atlas.dataobj)  # don't need float here
+    want = np.unique(data)
+    if atlas_ids is None:
+        atlas_ids, colors = read_freesurfer_lut()
+    elif return_colors:
+        raise ValueError('return_colors must be False if atlas_ids are '
+                         'provided')
+    # restrict to the ones in the MRI, sorted by their ID values
+    keep = np.in1d(list(atlas_ids.values()), want)
+    keys = sorted((key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]),
+                  key=lambda x: atlas_ids[x])
+    if return_colors:
+        colors = [colors[k] for k in keys]
+        out = keys, colors
+    else:
+        out = keys
+    return out
+
+
+##############################################################################
+# Head to MRI volume conversion
+
+
+@verbose
+def head_to_mri(pos, subject, mri_head_t, subjects_dir=None,
+                verbose=None):
+    """Convert pos from head coordinate system to MRI ones.
+
+    This function converts to MRI RAS coordinates and not to surface
+    RAS.
+
+    Parameters
+    ----------
+    pos : array, shape (n_pos, 3)
+        The coordinates (in m) in head coordinate system.
+    %(subject)s
+    mri_head_t : instance of Transform
+        MRI<->Head coordinate transformation.
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    coordinates : array, shape (n_pos, 3)
+        The MRI RAS coordinates (in mm) of pos.
+
+    Notes
+    -----
+    This function requires nibabel.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
+    _, _, mri_ras_t, _, _ = _read_mri_info(t1_fname)
+    head_ras_t = combine_transforms(head_mri_t, mri_ras_t, 'head', 'ras')
+    return 1e3 * apply_trans(head_ras_t, pos)  # mm
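+# A usage sketch of head_to_mri (illustrative; `trans` would be a head<->MRI
+# Transform such as one loaded with mne.read_trans, and `pos` is in meters):
+#
+#     mri_ras_mm = head_to_mri(pos, 'sample', trans, subjects_dir=subjects_dir)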
+
+
+##############################################################################
+# Surface to MNI conversion
+
+@verbose
+def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None):
+    """Convert the array of vertices for a hemisphere to MNI coordinates.
+
+    Parameters
+    ----------
+    vertices : int, or list of int
+        Vertex number(s) to convert.
+    hemis : int, or list of int
+        Hemisphere(s) the vertices belong to.
+    %(subject)s
+    subjects_dir : str, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    %(verbose)s
+
+    Returns
+    -------
+    coordinates : array, shape (n_vertices, 3)
+        The MNI coordinates (in mm) of the vertices.
+    """
+    singleton = False
+    if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
+        singleton = True
+        vertices = [vertices]
+
+    if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
+        hemis = [hemis] * len(vertices)
+
+    if not len(hemis) == len(vertices):
+        raise ValueError('hemi and vertices must match in length')
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
+             for h in ['lh', 'rh']]
+
+    # read surface locations in MRI space
+    rr = [read_surface(s)[0] for s in surfs]
+
+    # take point locations in MRI space and convert to MNI coordinates
+    xfm = read_talxfm(subject, subjects_dir)
+    xfm['trans'][:3, 3] *= 1000.  # m->mm
+    data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
+    if singleton:
+        data = data[0]
+    return apply_trans(xfm['trans'], data)
+
+
+##############################################################################
+# Volume to MNI conversion
+
+@verbose
+def head_to_mni(pos, subject, mri_head_t, subjects_dir=None,
+                verbose=None):
+    """Convert pos from head coordinate system to MNI ones.
+
+    Parameters
+    ----------
+    pos : array, shape (n_pos, 3)
+        The coordinates (in m) in head coordinate system.
+    %(subject)s
+    mri_head_t : instance of Transform
+        MRI<->Head coordinate transformation.
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    coordinates : array, shape (n_pos, 3)
+        The MNI coordinates (in mm) of pos.
+
+    Notes
+    -----
+    This function requires nibabel.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    # before we go from head to MRI (surface RAS)
+    head_mni_t = combine_transforms(
+        _ensure_trans(mri_head_t, 'head', 'mri'),
+        read_talxfm(subject, subjects_dir), 'head', 'mni_tal')
+    return apply_trans(head_mni_t, pos) * 1000.
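+# head_to_mni above composes the transforms once instead of applying them one
+# by one; an equivalent manual sketch (illustrative):
+#
+#     head_mri_t = _ensure_trans(trans, 'head', 'mri')
+#     head_mni_t = combine_transforms(
+#         head_mri_t, read_talxfm('sample', subjects_dir), 'head', 'mni_tal')
+#     mni_mm = apply_trans(head_mni_t, pos) * 1000.  # m -> mm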
+
+
+@verbose
+def get_mni_fiducials(subject, subjects_dir=None, verbose=None):
+    """Estimate fiducials for a subject.
+
+    Parameters
+    ----------
+    %(subject)s
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    fids_mri : list
+        List of estimated fiducials (each point in a dict), in the order
+        LPA, nasion, RPA.
+
+    Notes
+    -----
+    This takes the ``fsaverage-fiducials.fif`` file included with MNE (which
+    contains the LPA, nasion, and RPA for the ``fsaverage`` subject) and
+    transforms the points to the given FreeSurfer subject's MRI space.
+    The MRI of ``fsaverage`` is already in MNI Talairach space, so the
+    inverse of the given subject's MNI Talairach affine transformation
+    (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is applied
+    to estimate the subject's fiducial locations.
+
+    For more details about the coordinate systems and transformations
+    involved, see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems
+    and :ref:`tut-source-alignment`.
+    """
+    # Eventually we might want to allow using the MNI Talairach with-skull
+    # transformation rather than the standard brain-based MNI Talairach
+    # transformation, and/or project the points onto the head surface
+    # (if available).
+    fname_fids_fs = op.join(op.dirname(__file__), 'data',
+                            'fsaverage', 'fsaverage-fiducials.fif')
+
+    # Read fsaverage fiducials file and subject Talairach.
+    fids, coord_frame = read_fiducials(fname_fids_fs)
+    assert coord_frame == FIFF.FIFFV_COORD_MRI
+    if subject == 'fsaverage':
+        return fids  # special short-circuit for fsaverage
+    mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir))
+    for f in fids:
+        f['r'] = apply_trans(mni_mri_t, f['r'])
+    return fids
+
+
+@verbose
+def estimate_head_mri_t(subject, subjects_dir=None, verbose=None):
+    """Estimate the head->mri transform from fsaverage fiducials.
+
+    A subject's fiducials can be estimated given a Freesurfer ``recon-all``
+    by transforming ``fsaverage`` fiducials using the inverse Talairach
+    transform, see :func:`mne.coreg.get_mni_fiducials`.
+
+    Parameters
+    ----------
+    %(subject)s
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    %(trans_not_none)s
+    """
+    from .channels.montage import make_dig_montage, compute_native_head_t
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    lpa, nasion, rpa = get_mni_fiducials(subject, subjects_dir)
+    montage = make_dig_montage(lpa=lpa['r'], nasion=nasion['r'], rpa=rpa['r'],
+                               coord_frame='mri')
+    return invert_transform(compute_native_head_t(montage))
+
+
+def _ensure_image_in_surface_RAS(image, subject, subjects_dir):
+    """Check if the image is in Freesurfer surface RAS space."""
+    import nibabel as nib
+    if not isinstance(image, nib.spatialimages.SpatialImage):
+        image = nib.load(image)
+    image = nib.MGHImage(image.dataobj.astype(np.float32), image.affine)
+    fs_img = nib.load(op.join(subjects_dir, subject, 'mri', 'brain.mgz'))
+    if not np.allclose(image.affine, fs_img.affine, atol=1e-6):
+        raise RuntimeError('The `image` is not aligned to Freesurfer '
+                           'surface RAS space. This space is required as '
+                           'it is the space where the anatomical '
+                           'segmentation and reconstructed surfaces are')
+    return image  # returns MGH image for header
+
+
+@verbose
+def read_lta(fname, verbose=None):
+    """Read a Freesurfer linear transform array file.
+
+    Parameters
+    ----------
+    fname : str | None
+        The transform filename.
+    %(verbose)s
+
+    Returns
+    -------
+    affine : ndarray
+        The affine transformation described by the lta file.
+    """
+    _validate_type(fname, ('path-like', None), 'fname')
+    _check_fname(fname, 'read', must_exist=True)
+    with open(fname, 'r') as fid:
+        affine = np.loadtxt(fid.readlines()[5:9])
+    return affine
+
+
+@verbose
+def read_talxfm(subject, subjects_dir=None, verbose=None):
+    """Compute MRI-to-MNI transform from FreeSurfer talairach.xfm file.
+
+    Parameters
+    ----------
+    %(subject)s
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    mri_mni_t : instance of Transform
+        The affine transformation from MRI to MNI space for the subject.
+    """
+    # Adapted from freesurfer m-files. Altered to deal with Norig
+    # and Torig correctly
+    subjects_dir = get_subjects_dir(subjects_dir)
+    # Setup the RAS to MNI transform
+    ras_mni_t = read_ras_mni_t(subject, subjects_dir)
+    ras_mni_t['trans'][:3, 3] /= 1000.  # mm->m
+
+    # We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal').
+    # This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal').
+    # So we need to get the ras->mri transform from the MRI headers.
+
+    # To do this, we get Norig and Torig
+    # (i.e. vox_ras_t and vox_mri_t, respectively)
+    path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
+    if not op.isfile(path):
+        path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    if not op.isfile(path):
+        raise IOError('mri not found: %s' % path)
+    _, _, mri_ras_t, _, _ = _read_mri_info(path)
+    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
+    return mri_mni_t
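+# In FreeSurfer terms, read_talxfm composes surface RAS ('mri') -> RAS
+# ('ras') from the orig.mgz header with RAS -> MNI from talairach.xfm.
+# The ras<-mri piece can be sketched with nibabel alone (illustrative):
+#
+#     import nibabel as nib
+#     hdr = nib.load('orig.mgz').header
+#     norig = hdr.get_vox2ras()        # voxel -> RAS
+#     torig = hdr.get_vox2ras_tkr()    # voxel -> surface RAS
+#     ras_from_mri = norig @ np.linalg.inv(torig)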
vox_ras_t and vox_mri_t, respectively) + path = op.join(subjects_dir, subject, 'mri', 'orig.mgz') + if not op.isfile(path): + path = op.join(subjects_dir, subject, 'mri', 'T1.mgz') + if not op.isfile(path): + raise IOError('mri not found: %s' % path) + _, _, mri_ras_t, _, _ = _read_mri_info(path) + mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal') + return mri_mni_t + + +def _check_mri(mri, subject, subjects_dir): + """Check whether an mri exists in the Freesurfer subject directory.""" + _validate_type(mri, 'path-like', 'mri') + if op.isfile(mri) and op.basename(mri) != mri: + return mri + if not op.isfile(mri): + if subject is None: + raise FileNotFoundError( + f'MRI file {mri!r} not found and no subject provided') + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + mri = op.join(subjects_dir, subject, 'mri', mri) + if not op.isfile(mri): + raise FileNotFoundError(f'MRI file {mri!r} not found') + if op.basename(mri) == mri: + err = (f'Ambiguous filename - found {mri!r} in current folder.\n' + 'If this is correct prefix name with relative or absolute path') + raise IOError(err) + return mri + + +def _read_mri_info(path, units='m', return_img=False, use_nibabel=False): + # This is equivalent but 100x slower, so only use nibabel if we need to + # (later): + if use_nibabel: + import nibabel + hdr = nibabel.load(path).header + n_orig = hdr.get_vox2ras() + t_orig = hdr.get_vox2ras_tkr() + dims = hdr.get_data_shape() + zooms = hdr.get_zooms()[:3] + else: + hdr = _get_mgz_header(path) + n_orig = hdr['vox2ras'] + t_orig = hdr['vox2ras_tkr'] + dims = hdr['dims'] + zooms = hdr['zooms'] + + # extract the MRI_VOXEL to RAS (non-zero origin) transform + vox_ras_t = Transform('mri_voxel', 'ras', n_orig) + + # extract the MRI_VOXEL to MRI transform + vox_mri_t = Transform('mri_voxel', 'mri', t_orig) + + # construct the MRI to RAS (non-zero origin) transform + mri_ras_t = combine_transforms( + invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras') + + assert units in ('m', 'mm') + if units == 'm': + conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T + # scaling and translation terms + vox_ras_t['trans'] *= conv + vox_mri_t['trans'] *= conv + # just the translation term + mri_ras_t['trans'][:, 3:4] *= conv + + out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms) + if return_img: + nibabel = _import_nibabel() + out += (nibabel.load(path),) + return out + + +def read_freesurfer_lut(fname=None): + """Read a Freesurfer-formatted LUT. + + Parameters + ---------- + fname : str | None + The filename. Can be None to read the standard Freesurfer LUT. + + Returns + ------- + atlas_ids : dict + Mapping from label names to IDs. + colors : dict + Mapping from label names to colors. + """ + lut = _get_lut(fname) + names, ids = lut['name'], lut['id'] + colors = np.array([lut['R'], lut['G'], lut['B'], lut['A']], float).T + atlas_ids = dict(zip(names, ids)) + colors = dict(zip(names, colors)) + return atlas_ids, colors + + +def _get_lut(fname=None): + """Get a FreeSurfer LUT.""" + _validate_type(fname, ('path-like', None), 'fname') + if fname is None: + fname = op.join(op.dirname(__file__), 'data', + 'FreeSurferColorLUT.txt') + _check_fname(fname, 'read', must_exist=True) + dtype = [('id', ' 0 + return lut + + +@verbose +def _get_head_surface(surf, subject, subjects_dir, bem=None, verbose=None): + """Get a head surface from the Freesurfer subject directory. + + Parameters + ---------- + surf : str + The name of the surface 'auto', 'head', 'outer_skin', 'head-dense' + or 'seghead'. 
+    %(subject)s
+    %(subjects_dir)s
+    bem : mne.bem.ConductorModel | None
+        The conductor model that stores information about the head surface.
+    %(verbose)s
+
+    Returns
+    -------
+    head_surf : dict | None
+        A dictionary with keys 'rr', 'tris', 'ntri', 'use_tris', 'np'
+        and 'coord_frame' that stores mesh information for plotting, along
+        with other useful information about the head surface.
+
+    Notes
+    -----
+    .. versionadded:: 0.24
+    """
+    _check_option(
+        'surf', surf, ('auto', 'head', 'outer_skin', 'head-dense', 'seghead'))
+    if surf in ('auto', 'head', 'outer_skin'):
+        if bem is not None:
+            try:
+                return _bem_find_surface(bem, 'head')
+            except RuntimeError:
+                logger.info('Could not find the surface for '
+                            'head in the provided BEM model, '
+                            'looking in the subject directory.')
+        if subject is None:
+            if surf == 'auto':
+                return
+            raise ValueError('To plot the head surface, the BEM/sphere'
+                             ' model must contain a head surface '
+                             'or "subject" must be provided (got '
+                             'None)')
+    subject_dir = op.join(
+        get_subjects_dir(subjects_dir, raise_error=True), subject)
+    if surf in ('head-dense', 'seghead'):
+        try_fnames = [op.join(subject_dir, 'bem', f'{subject}-head-dense.fif'),
+                      op.join(subject_dir, 'surf', 'lh.seghead')]
+    else:
+        try_fnames = [op.join(subject_dir, 'bem', 'outer_skin.surf'),
+                      op.join(subject_dir, 'bem', 'flash', 'outer_skin.surf'),
+                      op.join(subject_dir, 'bem', f'{subject}-head.fif')]
+    for fname in try_fnames:
+        if op.exists(fname):
+            logger.info(f'Using {op.basename(fname)} for head surface.')
+            if op.splitext(fname)[-1] == '.fif':
+                return read_bem_surfaces(fname, on_defects='warn')[0]
+            else:
+                return _read_mri_surface(fname)
+    raise IOError('No head surface found for subject '
+                  f'{subject} after trying:\n' + '\n'.join(try_fnames))
+
+
+@verbose
+def _get_skull_surface(surf, subject, subjects_dir, bem=None, verbose=None):
+    """Get a skull surface from the FreeSurfer subject directory.
+
+    Parameters
+    ----------
+    surf : str
+        The name of the surface: 'outer' or 'inner'.
+    %(subject)s
+    %(subjects_dir)s
+    bem : mne.bem.ConductorModel | None
+        The conductor model that stores information about the skull surface.
+    %(verbose)s
+
+    Returns
+    -------
+    skull_surf : dict | None
+        A dictionary with keys 'rr', 'tris', 'ntri', 'use_tris', 'np'
+        and 'coord_frame' that stores mesh information for plotting, along
+        with other useful information about the skull surface.
+
+    Notes
+    -----
+    .. versionadded:: 0.24
+    """
+    if bem is not None:
+        try:
+            return _bem_find_surface(bem, surf + '_skull')
+        except RuntimeError:
+            logger.info('Could not find the surface for '
+                        'skull in the provided BEM model, '
+                        'looking in the subject directory.')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    fname = _check_fname(op.join(subjects_dir, subject, 'bem',
+                                 surf + '_skull.surf'),
+                         overwrite='read', must_exist=True,
+                         name=f'{surf} skull surface')
+    return _read_mri_surface(fname)
diff --git a/python/libs/mne/_ola.py b/python/libs/mne/_ola.py
new file mode 100644
index 0000000..860722f
--- /dev/null
+++ b/python/libs/mne/_ola.py
@@ -0,0 +1,442 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson
+
+# License: BSD-3-Clause
+
+import numpy as np
+
+from .utils import _ensure_int, verbose, logger, _VerboseDep
+
+
+###############################################################################
+# Class for interpolation between adjacent points
+
+class _Interp2(object):
+    r"""Interpolate between two points.
+
+    Parameters
+    ----------
+    control_points : array, shape (n_changes,)
+        The control points (indices) to use.
+    values : callable | array, shape (n_changes, ...)
+        Callable that takes a control point and returns the list of
+        arrays to interpolate at that point, or an array (or list of
+        arrays) whose first axis matches ``control_points``.
+    interp : str
+        Can be 'zero', 'linear', 'hann', or 'cos2' (same as hann).
+
+    Notes
+    -----
+    This will process data using overlapping windows of potentially
+    different sizes to achieve a constant output value using different
+    2-point interpolation schemes. For example, for linear interpolation
+    and window sizes of 6 and 17, this would look like::
+
+        1 _     _
+          |\   / '-.         .-'
+          | \ /     '-.   .-'
+          |  x         |-.-|
+          | / \      .-'   '-.
+          |/   \_.-'          '-.
+        0 +----|----|----|----|---
+          0    5   10   15   20   25
+
+    """
+
+    def __init__(self, control_points, values, interp='hann'):
+        # set up interpolation
+        self.control_points = np.array(control_points, int).ravel()
+        if not np.array_equal(np.unique(self.control_points),
+                              self.control_points):
+            raise ValueError('Control points must be sorted and unique')
+        if len(self.control_points) == 0:
+            raise ValueError('There must be at least one control point')
+        if not (self.control_points >= 0).all():
+            raise ValueError('All control points must be non-negative (got %s)'
+                             % (self.control_points[:3],))
+        if isinstance(values, np.ndarray):
+            values = [values]
+        if isinstance(values, (list, tuple)):
+            for v in values:
+                if not (v is None or isinstance(v, np.ndarray)):
+                    raise TypeError('All entries in "values" must be ndarray '
+                                    'or None, got %s' % (type(v),))
+                if v is not None and v.shape[0] != len(self.control_points):
+                    raise ValueError('Values, if provided, must be the same '
+                                     'length as the number of control points '
+                                     '(%s), got %s'
+                                     % (len(self.control_points), v.shape[0]))
+            use_values = values
+
+            def val(pt):
+                idx = np.where(control_points == pt)[0][0]
+                return [v[idx] if v is not None else None for v in use_values]
+            values = val
+        self.values = values
+        self.n_last = None
+        self._position = 0  # start at zero
+        self._left_idx = 0
+        self._left = self._right = self._use_interp = None
+        known_types = ('cos2', 'linear', 'zero', 'hann')
+        if interp not in known_types:
+            raise ValueError('interp must be one of %s, got "%s"'
+                             % (known_types, interp))
+        self._interp = interp
+
+    def feed_generator(self, n_pts):
+        """Feed data and get interpolators as a generator."""
+        self.n_last = 0
+        n_pts = _ensure_int(n_pts, 'n_pts')
+        original_position = self._position
+        stop = self._position + n_pts
+        logger.debug('Feed %s (%s-%s)' % (n_pts, self._position, stop))
+        used = np.zeros(n_pts, bool)
+        if self._left is None:  # first one
+            logger.debug('  Eval @ %s (%s)' % (0, self.control_points[0]))
+            self._left = self.values(self.control_points[0])
+            if len(self.control_points) == 1:
+                self._right = self._left
+        n_used = 0
+
+        # Left zero-order hold condition
+        if self._position < self.control_points[self._left_idx]:
+            n_use = min(self.control_points[self._left_idx] - self._position,
+                        n_pts)
+            logger.debug('  Left ZOH %s' % n_use)
+            this_sl = slice(None, n_use)
+            assert used[this_sl].size == n_use
+            assert not used[this_sl].any()
+            used[this_sl] = True
+            yield [this_sl, self._left, None, None]
+            self._position += n_use
+            n_used += n_use
+            self.n_last += 1
+
+        # Standard interpolation condition
+        stop_right_idx = np.where(self.control_points >= stop)[0]
+        if len(stop_right_idx) == 0:
+            stop_right_idx = [len(self.control_points) - 1]
+        stop_right_idx = stop_right_idx[0]
+        left_idxs = np.arange(self._left_idx, stop_right_idx)
+        self.n_last 
+= max(len(left_idxs) - 1, 0) + for bi, left_idx in enumerate(left_idxs): + if left_idx != self._left_idx or self._right is None: + if self._right is not None: + assert left_idx == self._left_idx + 1 + self._left = self._right + self._left_idx += 1 + self._use_interp = None # need to recreate it + eval_pt = self.control_points[self._left_idx + 1] + logger.debug(' Eval @ %s (%s)' + % (self._left_idx + 1, eval_pt)) + self._right = self.values(eval_pt) + assert self._right is not None + left_point = self.control_points[self._left_idx] + right_point = self.control_points[self._left_idx + 1] + if self._use_interp is None: + interp_span = right_point - left_point + if self._interp == 'zero': + self._use_interp = None + elif self._interp == 'linear': + self._use_interp = np.linspace(1., 0., interp_span, + endpoint=False) + else: # self._interp in ('cos2', 'hann'): + self._use_interp = np.cos( + np.linspace(0, np.pi / 2., interp_span, + endpoint=False)) + self._use_interp *= self._use_interp + n_use = min(stop, right_point) - self._position + if n_use > 0: + logger.debug(' Interp %s %s (%s-%s)' % (self._interp, n_use, + left_point, right_point)) + interp_start = self._position - left_point + assert interp_start >= 0 + if self._use_interp is None: + this_interp = None + else: + this_interp = \ + self._use_interp[interp_start:interp_start + n_use] + assert this_interp.size == n_use + this_sl = slice(n_used, n_used + n_use) + assert used[this_sl].size == n_use + assert not used[this_sl].any() + used[this_sl] = True + yield [this_sl, self._left, self._right, this_interp] + self._position += n_use + n_used += n_use + + # Right zero-order hold condition + if self.control_points[self._left_idx] <= self._position: + n_use = stop - self._position + if n_use > 0: + logger.debug(' Right ZOH %s' % n_use) + this_sl = slice(n_pts - n_use, None) + assert not used[this_sl].any() + used[this_sl] = True + assert self._right is not None + yield [this_sl, self._right, None, None] + self._position += n_use + n_used += n_use + self.n_last += 1 + assert self._position == stop + assert n_used == n_pts + assert used.all() + assert self._position == original_position + n_pts + + def feed(self, n_pts): + """Feed data and get interpolated values.""" + # Convenience function for assembly + out_arrays = None + for o in self.feed_generator(n_pts): + if out_arrays is None: + out_arrays = [np.empty(v.shape + (n_pts,)) + if v is not None else None for v in o[1]] + for ai, arr in enumerate(out_arrays): + if arr is not None: + if o[3] is None: + arr[..., o[0]] = o[1][ai][..., np.newaxis] + else: + arr[..., o[0]] = ( + o[1][ai][..., np.newaxis] * o[3] + + o[2][ai][..., np.newaxis] * (1. - o[3])) + assert out_arrays is not None + return out_arrays + + +############################################################################### +# Constant overlap-add processing class + + +def _check_store(store): + if isinstance(store, np.ndarray): + store = [store] + if isinstance(store, (list, tuple)) and all(isinstance(s, np.ndarray) + for s in store): + store = _Storer(*store) + if not callable(store): + raise TypeError('store must be callable, got type %s' + % (type(store),)) + return store + + +class _COLA(_VerboseDep): + r"""Constant overlap-add processing helper. + + Parameters + ---------- + process : callable + A function that takes a chunk of input data with shape + ``(n_channels, n_samples)`` and processes it. + store : callable | ndarray + A function that takes a completed chunk of output data. 
+        Can also be an ``ndarray``, in which case it is treated as the
+        output data in which to store the results.
+    n_total : int
+        The total number of samples.
+    n_samples : int
+        The number of samples per window.
+    n_overlap : int
+        The overlap between windows.
+    sfreq : float
+        The sample rate of the data (used only for logging).
+    window : str
+        The window to use. Default is "hann".
+    tol : float
+        The tolerance for COLA checking.
+
+    Notes
+    -----
+    This will process data using overlapping windows to achieve a constant
+    output value. For example, for ``n_total=27``, ``n_samples=10``,
+    ``n_overlap=5`` and ``window='triang'``::
+
+        1 _____               _______
+          |     \   /\   /\   /
+          |      \ /  \ /  \ /
+          |       x    x    x
+          |      / \  / \  / \
+          |     /   \/   \/   \
+        0 +----|----|----|----|----|-
+          0    5   10   15   20   25
+
+    This produces four windows: the first three are the requested length
+    (10 samples) and the last one is longer (12 samples). The first and last
+    window are asymmetric.
+    """
+
+    @verbose
+    def __init__(self, process, store, n_total, n_samples, n_overlap,
+                 sfreq, window='hann', tol=1e-10, *, verbose=None):
+        from scipy.signal import get_window
+        n_samples = _ensure_int(n_samples, 'n_samples')
+        n_overlap = _ensure_int(n_overlap, 'n_overlap')
+        n_total = _ensure_int(n_total, 'n_total')
+        if n_samples <= 0:
+            raise ValueError('n_samples must be > 0, got %s' % (n_samples,))
+        if n_overlap < 0:
+            raise ValueError('n_overlap must be >= 0, got %s' % (n_overlap,))
+        if n_total < 0:
+            raise ValueError('n_total must be >= 0, got %s' % (n_total,))
+        self._n_samples = int(n_samples)
+        self._n_overlap = int(n_overlap)
+        del n_samples, n_overlap
+        if n_total < self._n_samples:
+            raise ValueError('Number of samples per window (%d) must be at '
+                             'most the total number of samples (%s)'
+                             % (self._n_samples, n_total))
+        if not callable(process):
+            raise TypeError('process must be callable, got type %s'
+                            % (type(process),))
+        self._process = process
+        self._step = self._n_samples - self._n_overlap
+        self._store = _check_store(store)
+        self._idx = 0
+        self._in_buffers = self._out_buffers = None
+
+        # Create our window boundaries
+        window_name = window if isinstance(window, str) else 'custom'
+        self._window = get_window(window, self._n_samples,
+                                  fftbins=(self._n_samples - 1) % 2)
+        self._window /= _check_cola(self._window, self._n_samples, self._step,
+                                    window_name, tol=tol)
+        self.starts = np.arange(0, n_total - self._n_samples + 1, self._step)
+        self.stops = self.starts + self._n_samples
+        delta = n_total - self.stops[-1]
+        self.stops[-1] = n_total
+        sfreq = float(sfreq)
+        pl = 's' if len(self.starts) != 1 else ''
+        logger.info('    Processing %4d data chunk%s of (at least) %0.1f sec '
+                    'with %0.1f sec overlap and %s windowing'
+                    % (len(self.starts), pl, self._n_samples / sfreq,
+                       self._n_overlap / sfreq, window_name))
+        del window, window_name
+        if delta > 0:
+            logger.info('    The final %0.3f sec will be lumped into the '
+                        'final window' % (delta / sfreq,))
+
+    @property
+    def _in_offset(self):
+        """Compute from current processing window start and buffer len."""
+        return self.starts[self._idx] + self._in_buffers[0].shape[-1]
+
+    @verbose
+    def feed(self, *datas, verbose=None, **kwargs):
+        """Pass in a chunk of data."""
+        # Append to our input buffer
+        if self._in_buffers is None:
+            self._in_buffers = [None] * len(datas)
+        if len(datas) != len(self._in_buffers):
+            raise ValueError('Got %d array(s), needed %d'
+                             % (len(datas), len(self._in_buffers)))
+        for di, data in enumerate(datas):
+            if not isinstance(data, np.ndarray) or data.ndim < 1:
+                raise TypeError('data entry %d must be a 2D ndarray, got %s'
+                                % (di, type(data),))
+            if self._in_buffers[di] is None:
+                # In practice, users can give large chunks, so we use
+                # dynamic allocation of the in buffer. We could save some
+                # memory allocation by only ever processing max_len at once,
+                # but this would increase code complexity.
+                self._in_buffers[di] = np.empty(
+                    data.shape[:-1] + (0,), data.dtype)
+            if data.shape[:-1] != self._in_buffers[di].shape[:-1] or \
+                    self._in_buffers[di].dtype != data.dtype:
+                raise TypeError('data must have dtype %s and shape[:-1]==%s, '
+                                'got dtype %s shape[:-1]=%s'
+                                % (self._in_buffers[di].dtype,
+                                   self._in_buffers[di].shape[:-1],
+                                   data.dtype, data.shape[:-1]))
+            logger.debug('    + Appending %d->%d'
+                         % (self._in_offset, self._in_offset + data.shape[-1]))
+            self._in_buffers[di] = np.concatenate(
+                [self._in_buffers[di], data], -1)
+            if self._in_offset > self.stops[-1]:
+                raise ValueError('data (shape %s) exceeded expected total '
+                                 'buffer size (%s > %s)'
+                                 % (data.shape, self._in_offset,
+                                    self.stops[-1]))
+        # Check to see if we can process the next chunk and dump outputs
+        while self._idx < len(self.starts) and \
+                self._in_offset >= self.stops[self._idx]:
+            start, stop = self.starts[self._idx], self.stops[self._idx]
+            this_len = stop - start
+            this_window = self._window.copy()
+            if self._idx == len(self.starts) - 1:
+                this_window = np.pad(
+                    self._window, (0, this_len - len(this_window)), 'constant')
+                for offset in range(self._step, len(this_window), self._step):
+                    n_use = len(this_window) - offset
+                    this_window[offset:] += self._window[:n_use]
+            if self._idx == 0:
+                for offset in range(self._n_samples - self._step, 0,
+                                    -self._step):
+                    this_window[:offset] += self._window[-offset:]
+            logger.debug('    * Processing %d->%d' % (start, stop))
+            this_proc = [in_[..., :this_len].copy()
+                         for in_ in self._in_buffers]
+            if not all(proc.shape[-1] == this_len == this_window.size
+                       for proc in this_proc):
+                raise RuntimeError('internal indexing error')
+            outs = self._process(*this_proc, **kwargs)
+            if self._out_buffers is None:
+                max_len = np.max(self.stops - self.starts)
+                self._out_buffers = [np.zeros(o.shape[:-1] + (max_len,),
+                                              o.dtype) for o in outs]
+            for oi, out in enumerate(outs):
+                out *= this_window
+                self._out_buffers[oi][..., :stop - start] += out
+            self._idx += 1
+            if self._idx < len(self.starts):
+                next_start = self.starts[self._idx]
+            else:
+                next_start = self.stops[-1]
+            delta = next_start - self.starts[self._idx - 1]
+            for di in range(len(self._in_buffers)):
+                self._in_buffers[di] = self._in_buffers[di][..., delta:]
+            logger.debug('    - Shifting input/output buffers by %d samples'
+                         % (delta,))
+            self._store(*[o[..., :delta] for o in self._out_buffers])
+            for ob in self._out_buffers:
+                ob[..., :-delta] = ob[..., delta:]
+                ob[..., -delta:] = 0.
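Editorial note: the COLA constraint that `_check_cola` below enforces can be verified numerically in a few lines. This is a standalone sketch (not part of the vendored module), assuming only NumPy and SciPy, which the module already imports:

    import numpy as np
    from scipy.signal import get_window

    n_samples, step = 10, 5  # window length and hop size (n_overlap = 5)
    win = get_window('hann', n_samples, fftbins=True)  # periodic Hann

    # For a COLA-compliant window/hop pair, the hop-aligned shifted copies
    # of the window sum to a per-sample constant, so windowed, overlap-added
    # chunks reconstruct the input up to that constant (which _COLA
    # divides out via the normalization in __init__).
    binsums = np.sum([win[ii * step:(ii + 1) * step]
                      for ii in range(n_samples // step)], axis=0)
    print(np.allclose(binsums, binsums[0]))  # True -> COLA holds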
+ + +def _check_cola(win, nperseg, step, window_name, tol=1e-10): + """Check whether the Constant OverLap Add (COLA) constraint is met.""" + # adapted from SciPy + binsums = np.sum([win[ii * step:(ii + 1) * step] + for ii in range(nperseg // step)], axis=0) + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):] + const = np.median(binsums) + deviation = np.max(np.abs(binsums - const)) + if deviation > tol: + raise ValueError('segment length %d with step %d for %s window ' + 'type does not provide a constant output ' + '(%g%% deviation)' + % (nperseg, step, window_name, + 100 * deviation / const)) + return const + + +class _Storer(object): + """Store data in chunks.""" + + def __init__(self, *outs, picks=None): + for oi, out in enumerate(outs): + if not isinstance(out, np.ndarray) or out.ndim < 1: + raise TypeError('outs[oi] must be >= 1D ndarray, got %s' + % (out,)) + self.outs = outs + self.idx = 0 + self.picks = picks + + def __call__(self, *outs): + if (len(outs) != len(self.outs) or + not all(out.shape[-1] == outs[0].shape[-1] for out in outs)): + raise ValueError('Bad outs') + idx = (Ellipsis,) + if self.picks is not None: + idx += (self.picks,) + stop = self.idx + outs[0].shape[-1] + idx += (slice(self.idx, stop),) + for o1, o2 in zip(self.outs, outs): + o1[idx] = o2 + self.idx = stop diff --git a/python/libs/mne/_version.py b/python/libs/mne/_version.py new file mode 100644 index 0000000..5f82340 --- /dev/null +++ b/python/libs/mne/_version.py @@ -0,0 +1,6 @@ +"""The version number.""" +# Authors: Eric Larson +# +# License: BSD-3-Clause + +__version__ = '1.0.3' diff --git a/python/libs/mne/annotations.py b/python/libs/mne/annotations.py new file mode 100644 index 0000000..4ad9f29 --- /dev/null +++ b/python/libs/mne/annotations.py @@ -0,0 +1,1589 @@ +# Authors: Jaakko Leppakangas +# Robert Luke +# +# License: BSD-3-Clause + +from collections import OrderedDict +from datetime import datetime, timedelta, timezone +import os.path as op +import re +from copy import deepcopy +from itertools import takewhile +import json +from collections import Counter +from collections.abc import Iterable +import warnings +from textwrap import shorten +import numpy as np + +from .utils import (_pl, check_fname, _validate_type, verbose, warn, logger, + _check_pandas_installed, _mask_to_onsets_offsets, + _DefaultEventParser, _check_dt, _stamp_to_dt, _dt_to_stamp, + _check_fname, int_like, _check_option, fill_doc, + _on_missing, _is_numeric, _check_dict_keys) + +from .io.write import (start_block, end_block, write_float, write_name_list, + write_double, start_file, write_string) +from .io.constants import FIFF +from .io.open import fiff_open +from .io.tree import dir_tree_find +from .io.tag import read_tag + +# For testing windows_like_datetime, we monkeypatch "datetime" in this module. +# Keep the true datetime object around for _validate_type use. +_datetime = datetime + + +def _check_o_d_s_c(onset, duration, description, ch_names): + onset = np.atleast_1d(np.array(onset, dtype=float)) + if onset.ndim != 1: + raise ValueError('Onset must be a one dimensional array, got %s ' + '(shape %s).' + % (onset.ndim, onset.shape)) + duration = np.array(duration, dtype=float) + if duration.ndim == 0 or duration.shape == (1,): + duration = np.repeat(duration, len(onset)) + if duration.ndim != 1: + raise ValueError('Duration must be a one dimensional array, ' + 'got %d.' 
% (duration.ndim,))
+
+    description = np.array(description, dtype=str)
+    if description.ndim == 0 or description.shape == (1,):
+        description = np.repeat(description, len(onset))
+    if description.ndim != 1:
+        raise ValueError('Description must be a one dimensional array, '
+                         'got %d.' % (description.ndim,))
+    _prep_name_list(description, 'check', 'description')
+
+    # ch_names: convert to ndarray of tuples
+    _validate_type(ch_names, (None, tuple, list, np.ndarray), 'ch_names')
+    if ch_names is None:
+        ch_names = [()] * len(onset)
+    ch_names = list(ch_names)
+    for ai, ch in enumerate(ch_names):
+        _validate_type(ch, (list, tuple, np.ndarray), f'ch_names[{ai}]')
+        ch_names[ai] = tuple(ch)
+        for ci, name in enumerate(ch_names[ai]):
+            _validate_type(name, str, f'ch_names[{ai}][{ci}]')
+    ch_names = _ndarray_ch_names(ch_names)
+
+    if not (len(onset) == len(duration) == len(description) == len(ch_names)):
+        raise ValueError(
+            'Onset, duration, description, and ch_names must be '
+            f'of equal length, got {len(onset)}, {len(duration)}, '
+            f'{len(description)}, and {len(ch_names)}.')
+    return onset, duration, description, ch_names
+
+
+def _ndarray_ch_names(ch_names):
+    # np.array(..., dtype=object) if all entries are empty will give
+    # an empty array of shape (n_entries, 0) which is not helpful. So let's
+    # force it to give us an array of shape (n_entries,) full of empty
+    # tuples
+    out = np.empty(len(ch_names), dtype=object)
+    out[:] = ch_names
+    return out
+
+
+@fill_doc
+class Annotations(object):
+    """Annotation object for annotating segments of raw data.
+
+    .. note::
+       To convert events to `~mne.Annotations`, use
+       `~mne.annotations_from_events`. To convert existing `~mne.Annotations`
+       to events, use `~mne.events_from_annotations`.
+
+    Parameters
+    ----------
+    onset : array of float, shape (n_annotations,)
+        The starting time of annotations in seconds after ``orig_time``.
+    duration : array of float, shape (n_annotations,) | float
+        Durations of the annotations in seconds. If a float, all the
+        annotations are given the same duration.
+    description : array of str, shape (n_annotations,) | str
+        Array of strings containing description for each annotation. If a
+        string, all the annotations are given the same description. To reject
+        epochs, use a description starting with the keyword 'bad'. See the
+        example in the Notes section below.
+    orig_time : float | str | datetime | tuple of int | None
+        A POSIX timestamp, datetime or a tuple containing the timestamp as the
+        first element and microseconds as the second element. Determines the
+        starting time of annotation acquisition. If None (default), the
+        starting time is determined from the beginning of raw data
+        acquisition. In general, ``raw.info['meas_date']`` (or None) can be
+        used for syncing the annotations with raw data if their acquisition
+        is started at the same time. If it is a string, it should conform to
+        the ISO8601 format; more precisely, to the
+        '%%Y-%%m-%%d %%H:%%M:%%S.%%f' case of the ISO8601 format, where the
+        delimiter between date and time is ' '.
+    %(ch_names_annot)s
+
+        .. versionadded:: 0.23
+
+    See Also
+    --------
+    mne.annotations_from_events
+    mne.events_from_annotations
+
+    Notes
+    -----
+    Annotations are added to an instance of :class:`mne.io.Raw` as the
+    attribute :attr:`raw.annotations <mne.io.Raw.annotations>`.
+
+    To reject bad epochs using annotations, use an annotation description
+    starting with the 'bad' keyword. The epochs with
+    overlapping bad segments are then rejected automatically by default.
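A minimal construction sketch (the onset values here are made up) exercising the broadcasting described in the Parameters above; a scalar duration and a single description string are expanded to all onsets:

    >>> annot = mne.Annotations(onset=[3., 7.], duration=0.5, description='bad blink')  # doctest: +SKIP
    >>> len(annot)  # doctest: +SKIP
    2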
+
+    To remove epochs with blinks you can do:
+
+    >>> eog_events = mne.preprocessing.find_eog_events(raw)  # doctest: +SKIP
+    >>> n_blinks = len(eog_events)  # doctest: +SKIP
+    >>> onset = eog_events[:, 0] / raw.info['sfreq'] - 0.25  # doctest: +SKIP
+    >>> duration = np.repeat(0.5, n_blinks)  # doctest: +SKIP
+    >>> description = ['bad blink'] * n_blinks  # doctest: +SKIP
+    >>> annotations = mne.Annotations(onset, duration, description)  # doctest: +SKIP
+    >>> raw.set_annotations(annotations)  # doctest: +SKIP
+    >>> epochs = mne.Epochs(raw, events, event_id, tmin, tmax)  # doctest: +SKIP
+
+    **ch_names**
+
+    Specifying channel names allows the creation of channel-specific
+    annotations. Once the annotations are assigned to a raw instance with
+    :meth:`mne.io.Raw.set_annotations`, if channels are renamed by the raw
+    instance, the annotation channels also get renamed. If channels are dropped
+    from the raw instance, any channel-specific annotation that has no channels
+    left in the raw instance will also be removed.
+
+    **orig_time**
+
+    If ``orig_time`` is None, the annotations are synced to the start of the
+    data (0 seconds). Otherwise the annotations are synced to sample 0 and
+    ``raw.first_samp`` is taken into account the same way as with events.
+
+    When setting annotations, the following alignments
+    between ``raw.info['meas_date']`` and ``annotation.orig_time`` take place:
+
+    ::
+
+        ----------- meas_date=XX, orig_time=YY -----------------------------
+
+             |              +------------------+
+             |______________|        RAW       |
+             |              |                  |
+             |              +------------------+
+         meas_date      first_samp
+             .
+             .         |         +------+
+             .         |_________| ANOT |
+             .         |         |      |
+             .         |         +------+
+             .     orig_time   onset[0]
+             .
+             |                   +------+
+             |___________________|      |
+             |                   |      |
+             |                   +------+
+         orig_time           onset[0]'
+
+        ----------- meas_date=XX, orig_time=None ---------------------------
+
+             |              +------------------+
+             |______________|        RAW       |
+             |              |                  |
+             |              +------------------+
+         meas_date      first_samp
+             .
+             .        N +------+
+             .        o_________| ANOT |
+             .        n |      |
+             .        e +------+
+             .
+             |                        +------+
+             |________________________|      |
+             |                        |      |
+             |                        +------+
+         orig_time                onset[0]'
+
+        ----------- meas_date=None, orig_time=YY ---------------------------
+
+         N              +------------------+
+         o______________|        RAW       |
+         n              |                  |
+         e              +------------------+
+             |         +------+
+             |_________| ANOT |
+             |         |      |
+             |         +------+
+
+         [[[ CRASH ]]]
+
+        ----------- meas_date=None, orig_time=None -------------------------
+
+         N              +------------------+
+         o______________|        RAW       |
+         n              |                  |
+         e              +------------------+
+             .        N +------+
+             .        o_________| ANOT |
+             .        n |      |
+             .        e +------+
+             .
+         N                        +------+
+         o________________________|      |
+         n                        |      |
+         e                        +------+
+         orig_time            onset[0]'
+
+    .. warning::
+       This means that when ``raw.info['meas_date'] is None``, doing
+       ``raw.set_annotations(raw.annotations)`` will not alter ``raw`` if and
+       only if ``raw.first_samp == 0``. When it's non-zero,
+       ``raw.set_annotations`` will assume that the "new" annotations refer to
+       the original data (with ``first_samp==0``), and they will be
+       re-referenced to the new time offset!
+
+    **Specific annotation**
+
+    The ``BAD_ACQ_SKIP`` annotation leads to specific file reading/writing
+    behaviours. See :meth:`mne.io.read_raw_fif` and
+    :meth:`Raw.save() <mne.io.Raw.save>` notes for details.
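Returning to the **orig_time** alignment above, a hedged numeric sketch of the first diagram (the timestamps are hypothetical, and ``raw.first_samp`` is assumed to be 0): if ``orig_time`` is 2 s later than ``raw.info['meas_date']``, an annotation with ``onset=1.`` lands 3 s into the data:

    >>> annot = mne.Annotations([1.], [0.5], ['bad'], orig_time='2002-12-03 19:01:02.000000')  # doctest: +SKIP
    >>> raw.set_annotations(annot)  # assuming meas_date is 19:01:00  # doctest: +SKIP
    >>> raw.annotations.onset  # doctest: +SKIP
    array([3.])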
+    """  # noqa: E501
+
+    def __init__(self, onset, duration, description,
+                 orig_time=None, ch_names=None):  # noqa: D102
+        self._orig_time = _handle_meas_date(orig_time)
+        self.onset, self.duration, self.description, self.ch_names = \
+            _check_o_d_s_c(onset, duration, description, ch_names)
+        self._sort()  # ensure we're sorted
+
+    @property
+    def orig_time(self):
+        """The time base of the Annotations."""
+        return self._orig_time
+
+    def __eq__(self, other):
+        """Compare to another Annotations instance."""
+        if not isinstance(other, Annotations):
+            return False
+        return (np.array_equal(self.onset, other.onset) and
+                np.array_equal(self.duration, other.duration) and
+                np.array_equal(self.description, other.description) and
+                np.array_equal(self.ch_names, other.ch_names) and
+                self.orig_time == other.orig_time)
+
+    def __repr__(self):
+        """Show the representation."""
+        counter = Counter(self.description)
+        kinds = ', '.join(['%s (%s)' % k for k in sorted(counter.items())])
+        kinds = (': ' if len(kinds) > 0 else '') + kinds
+        ch_specific = ', channel-specific' if self._any_ch_names() else ''
+        s = ('Annotations | %s segment%s%s%s' %
+             (len(self.onset), _pl(len(self.onset)), ch_specific, kinds))
+        return '<' + shorten(s, width=77, placeholder=' ...') + '>'
+
+    def __len__(self):
+        """Return the number of annotations.
+
+        Returns
+        -------
+        n_annot : int
+            The number of annotations.
+        """
+        return len(self.duration)
+
+    def __add__(self, other):
+        """Add (concatenate) two Annotations objects."""
+        out = self.copy()
+        out += other
+        return out
+
+    def __iadd__(self, other):
+        """Add (concatenate) two Annotations objects in-place.
+
+        Both annotations must have the same orig_time.
+        """
+        if len(self) == 0:
+            self._orig_time = other.orig_time
+        if self.orig_time != other.orig_time:
+            raise ValueError("orig_time should be the same to "
+                             "add/concatenate 2 annotations "
+                             "(got %s != %s)" % (self.orig_time,
+                                                 other.orig_time))
+        return self.append(other.onset, other.duration, other.description,
+                           other.ch_names)
+
+    def __iter__(self):
+        """Iterate over the annotations."""
+        for idx in range(len(self.onset)):
+            yield self.__getitem__(idx)
+
+    def __getitem__(self, key):
+        """Propagate indexing and slicing to the underlying numpy structure."""
+        if isinstance(key, int_like):
+            out_keys = ('onset', 'duration', 'description', 'orig_time')
+            out_vals = (self.onset[key], self.duration[key],
+                        self.description[key], self.orig_time)
+            if self._any_ch_names():
+                out_keys += ('ch_names',)
+                out_vals += (self.ch_names[key],)
+            return OrderedDict(zip(out_keys, out_vals))
+        else:
+            key = list(key) if isinstance(key, tuple) else key
+            return Annotations(onset=self.onset[key],
+                               duration=self.duration[key],
+                               description=self.description[key],
+                               orig_time=self.orig_time,
+                               ch_names=self.ch_names[key])
+
+    @fill_doc
+    def append(self, onset, duration, description, ch_names=None):
+        """Add an annotated segment. Operates inplace.
+
+        Parameters
+        ----------
+        onset : float | array-like
+            Annotation time onset from the beginning of the recording in
+            seconds.
+        duration : float | array-like
+            Duration of the annotation in seconds.
+        description : str | array-like
+            Description for the annotation. To reject epochs, use a
+            description starting with the keyword 'bad'.
+        %(ch_names_annot)s
+
+            .. versionadded:: 0.23
+
+        Returns
+        -------
+        self : mne.Annotations
+            The modified Annotations object.
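A hedged usage sketch (values made up); as the Notes below describe, ``append`` also accepts array-likes, and scalars broadcast as in the constructor:

    >>> annot.append(onset=[11., 13.], duration=0.2, description='bad jump')  # doctest: +SKIP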
+
+        Notes
+        -----
+        The array-like support for arguments allows this to be used similarly
+        to not only ``list.append``, but also
+        `list.extend <https://docs.python.org/3/library/stdtypes.html#mutable-sequence-types>`__.
+        """  # noqa: E501
+        onset, duration, description, ch_names = _check_o_d_s_c(
+            onset, duration, description, ch_names)
+        self.onset = np.append(self.onset, onset)
+        self.duration = np.append(self.duration, duration)
+        self.description = np.append(self.description, description)
+        self.ch_names = np.append(self.ch_names, ch_names)
+        self._sort()
+        return self
+
+    def copy(self):
+        """Return a copy of the Annotations.
+
+        Returns
+        -------
+        inst : instance of Annotations
+            A copy of the object.
+        """
+        return deepcopy(self)
+
+    def delete(self, idx):
+        """Remove an annotation. Operates inplace.
+
+        Parameters
+        ----------
+        idx : int | array-like of int
+            Index of the annotation to remove. Can be array-like to
+            remove multiple indices.
+        """
+        self.onset = np.delete(self.onset, idx)
+        self.duration = np.delete(self.duration, idx)
+        self.description = np.delete(self.description, idx)
+        self.ch_names = np.delete(self.ch_names, idx)
+
+    def to_data_frame(self):
+        """Export annotations in tabular structure as a pandas DataFrame.
+
+        Returns
+        -------
+        result : pandas.DataFrame
+            Returns a pandas DataFrame with onset, duration, and
+            description columns. A column named ch_names is added if any
+            annotations are channel-specific.
+        """
+        pd = _check_pandas_installed(strict=True)
+        dt = _handle_meas_date(self.orig_time)
+        if dt is None:
+            dt = _handle_meas_date(0)
+        dt = dt.replace(tzinfo=None)
+        onsets_dt = [dt + timedelta(seconds=o) for o in self.onset]
+        df = dict(onset=onsets_dt, duration=self.duration,
+                  description=self.description)
+        if self._any_ch_names():
+            df.update(ch_names=self.ch_names)
+        df = pd.DataFrame(df)
+        return df
+
+    def _any_ch_names(self):
+        return any(len(ch) for ch in self.ch_names)
+
+    def _prune_ch_names(self, info, on_missing):
+        # this prunes channel names and if a given channel-specific annotation
+        # no longer has any channels left, it gets dropped
+        keep = set(info['ch_names'])
+        ch_names = self.ch_names
+        warned = False
+        drop_idx = list()
+        for ci, ch in enumerate(ch_names):
+            if len(ch):
+                names = list()
+                for name in ch:
+                    if name not in keep:
+                        if not warned:
+                            _on_missing(
+                                on_missing, 'At least one channel name in '
+                                f'annotations missing from info: {name}')
+                            warned = True
+                    else:
+                        names.append(name)
+                ch_names[ci] = tuple(names)
+                if not len(ch_names[ci]):
+                    drop_idx.append(ci)
+        if len(drop_idx):
+            self.delete(drop_idx)
+        return self
+
+    @verbose
+    def save(self, fname, *, overwrite=False, verbose=None):
+        """Save annotations to FIF, CSV or TXT.
+
+        Typically annotations get saved in the FIF file for raw data
+        (e.g., as ``raw.annotations``), but this offers the possibility
+        to also save them to disk separately in different file formats
+        which are easier to share between packages.
+
+        Parameters
+        ----------
+        fname : str
+            The filename to use.
+        %(overwrite)s
+
+            .. versionadded:: 0.23
+        %(verbose)s
+
+        Notes
+        -----
+        The format of the information stored in the saved annotation objects
+        depends on the chosen file format. :file:`.csv` files store the onset
+        as timestamps (e.g., ``2002-12-03 19:01:56.676071``),
+        whereas :file:`.txt` files store onset as seconds since start of the
+        recording (e.g., ``45.95597082905339``).
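For instance, a sketch with hypothetical filenames (``annot`` is an existing Annotations instance):

    >>> annot.save('run01-annot.fif', overwrite=True)  # doctest: +SKIP
    >>> annot.save('run01-annot.csv')  # onsets saved as timestamps  # doctest: +SKIP
    >>> annot.save('run01-annot.txt')  # onsets saved in seconds  # doctest: +SKIP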
+ """ + check_fname(fname, 'annotations', ('-annot.fif', '-annot.fif.gz', + '_annot.fif', '_annot.fif.gz', + '.txt', '.csv')) + fname = _check_fname(fname, overwrite=overwrite) + if fname.endswith(".txt"): + _write_annotations_txt(fname, self) + elif fname.endswith(".csv"): + _write_annotations_csv(fname, self) + else: + with start_file(fname) as fid: + _write_annotations(fid, self) + + def _sort(self): + """Sort in place.""" + # instead of argsort here we use sorted so that it gives us + # the onset-then-duration hierarchy + vals = sorted(zip(self.onset, self.duration, range(len(self)))) + order = list(list(zip(*vals))[-1]) if len(vals) else [] + self.onset = self.onset[order] + self.duration = self.duration[order] + self.description = self.description[order] + self.ch_names = self.ch_names[order] + + @verbose + def crop(self, tmin=None, tmax=None, emit_warning=False, + use_orig_time=True, verbose=None): + """Remove all annotation that are outside of [tmin, tmax]. + + The method operates inplace. + + Parameters + ---------- + tmin : float | datetime | None + Start time of selection in seconds. + tmax : float | datetime | None + End time of selection in seconds. + emit_warning : bool + Whether to emit warnings when limiting or omitting annotations. + Defaults to False. + use_orig_time : bool + Whether to use orig_time as an offset. + Defaults to True. + %(verbose)s + + Returns + ------- + self : instance of Annotations + The cropped Annotations object. + """ + if len(self) == 0: + return self # no annotations, nothing to do + if not use_orig_time or self.orig_time is None: + offset = _handle_meas_date(0) + else: + offset = self.orig_time + if tmin is None: + tmin = timedelta(seconds=self.onset.min()) + offset + if tmax is None: + tmax = timedelta( + seconds=(self.onset + self.duration).max()) + offset + for key, val in [('tmin', tmin), ('tmax', tmax)]: + _validate_type(val, ('numeric', _datetime), key, + 'numeric, datetime, or None') + absolute_tmin = _handle_meas_date(tmin) + absolute_tmax = _handle_meas_date(tmax) + del tmin, tmax + if absolute_tmin > absolute_tmax: + raise ValueError('tmax should be greater than or equal to tmin ' + '(%s < %s).' % (absolute_tmin, absolute_tmax)) + logger.debug('Cropping annotations %s - %s' % (absolute_tmin, + absolute_tmax)) + + onsets, durations, descriptions, ch_names = [], [], [], [] + out_of_bounds, clip_left_elem, clip_right_elem = [], [], [] + for idx, (onset, duration, description, ch) in enumerate(zip( + self.onset, self.duration, self.description, self.ch_names)): + # if duration is NaN behave like a zero + if np.isnan(duration): + duration = 0. 
+            # convert to absolute times
+            absolute_onset = timedelta(seconds=onset) + offset
+            absolute_offset = absolute_onset + timedelta(seconds=duration)
+            out_of_bounds.append(
+                absolute_onset > absolute_tmax or
+                absolute_offset < absolute_tmin)
+            if out_of_bounds[-1]:
+                clip_left_elem.append(False)
+                clip_right_elem.append(False)
+                logger.debug(
+                    f'  [{idx}] Dropping '
+                    f'({absolute_onset} - {absolute_offset}: {description})')
+            else:
+                # clip the left side
+                clip_left_elem.append(absolute_onset < absolute_tmin)
+                if clip_left_elem[-1]:
+                    absolute_onset = absolute_tmin
+                clip_right_elem.append(absolute_offset > absolute_tmax)
+                if clip_right_elem[-1]:
+                    absolute_offset = absolute_tmax
+                if clip_left_elem[-1] or clip_right_elem[-1]:
+                    durations.append(
+                        (absolute_offset - absolute_onset).total_seconds())
+                else:
+                    durations.append(duration)
+                onsets.append(
+                    (absolute_onset - offset).total_seconds())
+                logger.debug(
+                    f'  [{idx}] Keeping '
+                    f'({absolute_onset} - {absolute_offset} -> '
+                    f'{onset} - {onset + duration})')
+                descriptions.append(description)
+                ch_names.append(ch)
+        logger.debug(f'Cropping complete (kept {len(onsets)})')
+        self.onset = np.array(onsets, float)
+        self.duration = np.array(durations, float)
+        assert (self.duration >= 0).all()
+        self.description = np.array(descriptions, dtype=str)
+        self.ch_names = _ndarray_ch_names(ch_names)
+
+        if emit_warning:
+            omitted = np.array(out_of_bounds).sum()
+            if omitted > 0:
+                warn('Omitted %s annotation(s) that were outside data'
+                     ' range.' % omitted)
+            limited = (np.array(clip_left_elem) |
+                       np.array(clip_right_elem)).sum()
+            if limited > 0:
+                warn('Limited %s annotation(s) that were expanding outside the'
+                     ' data range.' % limited)
+
+        return self
+
+    @verbose
+    def set_durations(self, mapping, verbose=None):
+        """Set annotation duration(s). Operates inplace.
+
+        Parameters
+        ----------
+        mapping : dict | float
+            A dictionary mapping the annotation description to a duration in
+            seconds, e.g. ``{'ShortStimulus' : 3, 'LongStimulus' : 12}``.
+            Alternatively, if a number is provided, then all annotation
+            durations are set to the single provided value.
+        %(verbose)s
+
+        Returns
+        -------
+        self : mne.Annotations
+            The modified Annotations object.
+
+        Notes
+        -----
+        .. versionadded:: 0.24.0
+        """
+        _validate_type(mapping, (int, float, dict))
+
+        if isinstance(mapping, dict):
+            _check_dict_keys(mapping, self.description,
+                             valid_key_source="data",
+                             key_description="Annotation description(s)")
+            for stim in mapping:
+                map_idx = [desc == stim for desc in self.description]
+                self.duration[map_idx] = mapping[stim]
+
+        elif _is_numeric(mapping):
+            self.duration = np.ones(self.description.shape) * mapping
+
+        else:
+            raise ValueError("Setting durations requires the mapping of "
+                             "descriptions to times to be provided as a dict. "
+                             f"Instead {type(mapping)} was provided.")
+
+        return self
+
+    @verbose
+    def rename(self, mapping, verbose=None):
+        """Rename annotation description(s). Operates inplace.
+
+        Parameters
+        ----------
+        mapping : dict
+            A dictionary mapping the old description to a new description,
+            e.g. {'1.0' : 'Control', '2.0' : 'Stimulus'}.
+        %(verbose)s
+
+        Returns
+        -------
+        self : mne.Annotations
+            The modified Annotations object.
+
+        Notes
+        -----
+        .. versionadded:: 0.24.0
+        """
+        _validate_type(mapping, dict)
+        _check_dict_keys(mapping, self.description, valid_key_source="data",
+                         key_description="Annotation description(s)")
+
+        for old, new in mapping.items():
+            self.description = [d.replace(old, new) for d in self.description]
+
+        self.description = np.array(self.description)
+        return self
+
+
+class EpochAnnotationsMixin:
+    """Mixin class for Annotations in Epochs."""
+
+    @property
+    def annotations(self):  # noqa: D102
+        return self._annotations
+
+    @verbose
+    def set_annotations(self, annotations, on_missing='raise', *,
+                        verbose=None):
+        """Setter for Epoch annotations from Raw.
+
+        This method does not handle offsetting the times based
+        on first_samp or measurement dates, since that is expected
+        to occur in Raw.set_annotations().
+
+        Parameters
+        ----------
+        annotations : instance of mne.Annotations | None
+            Annotations to set.
+        %(on_missing_ch_names)s
+        %(verbose)s
+
+        Returns
+        -------
+        self : instance of Epochs
+            The epochs object with annotations.
+
+        Notes
+        -----
+        Annotation onsets and offsets are stored as time in seconds (not as
+        sample numbers).
+
+        If you have an ``-epo.fif`` file that was created and saved to disk
+        before 1.0, annotations can be added correctly only if no decimation
+        or resampling was performed. We thus suggest regenerating your
+        :class:`mne.Epochs` from raw and re-saving them to disk with 1.0+ if
+        you want to safely work with :class:`~mne.Annotations` in epochs.
+
+        Since this method does not handle offsetting the times based
+        on first_samp or measurement dates, the recommended way to add
+        Annotations is::
+
+            raw.set_annotations(annotations)
+            annotations = raw.annotations
+            epochs.set_annotations(annotations)
+
+        .. versionadded:: 1.0
+        """
+        _validate_type(annotations, (Annotations, None), 'annotations')
+        if annotations is None:
+            self._annotations = None
+        else:
+            if getattr(self, '_unsafe_annot_add', False):
+                warn('Adding annotations to Epochs created (and saved to '
+                     'disk) before 1.0 will yield incorrect results if '
+                     'decimation or resampling was performed on the instance; '
+                     'we recommend regenerating the Epochs and re-saving them '
+                     'to disk')
+            new_annotations = annotations.copy()
+            new_annotations._prune_ch_names(self.info, on_missing)
+            self._annotations = new_annotations
+        return self
+
+    def get_annotations_per_epoch(self):
+        """Get a list of annotations that occur during each epoch.
+
+        Returns
+        -------
+        epoch_annots : list
+            A list of lists (with length equal to number of epochs) where each
+            inner list contains any annotations that overlap the corresponding
+            epoch. Annotations are stored as a :class:`tuple` of onset,
+            duration, description (not as a :class:`~mne.Annotations` object),
+            where the onset is now relative to time=0 of the epoch, rather than
+            time=0 of the original continuous (raw) data.
+        """
+        # create a list of annotations for each epoch
+        epoch_annot_list = [[] for _ in range(len(self.events))]
+
+        # check if annotations exist
+        if self.annotations is None:
+            return epoch_annot_list
+
+        # when each epoch and annotation starts/stops
+        # no need to account for first_samp here...
+        epoch_tzeros = self.events[:, 0] / self._raw_sfreq
+        epoch_starts, epoch_stops = np.atleast_2d(
+            epoch_tzeros) + np.atleast_2d(self.times[[0, -1]]).T
+        # ... 
because first_samp isn't accounted for here either + annot_starts = self._annotations.onset + annot_stops = annot_starts + self._annotations.duration + + # the first two cases (annot_straddles_epoch_{start|end}) will both + # (redundantly) capture cases where an annotation fully encompasses + # an epoch (e.g., annot from 1-4s, epoch from 2-3s). The redundancy + # doesn't matter because results are summed and then cast to bool (all + # we care about is presence/absence of overlap). + annot_straddles_epoch_start = np.logical_and( + np.atleast_2d(epoch_starts) >= np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_starts) < np.atleast_2d(annot_stops).T) + + annot_straddles_epoch_end = np.logical_and( + np.atleast_2d(epoch_stops) > np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_stops) <= np.atleast_2d(annot_stops).T) + + # this captures the only remaining case we care about: annotations + # fully contained within an epoch (or exactly coextensive with it). + annot_fully_within_epoch = np.logical_and( + np.atleast_2d(epoch_starts) <= np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_stops) >= np.atleast_2d(annot_stops).T) + + # combine all cases to get array of shape (n_annotations, n_epochs). + # Nonzero entries indicate overlap between the corresponding + # annotation (row index) and epoch (column index). + all_cases = (annot_straddles_epoch_start + + annot_straddles_epoch_end + + annot_fully_within_epoch) + + # for each Epoch-Annotation overlap occurrence: + for annot_ix, epo_ix in zip(*np.nonzero(all_cases)): + this_annot = self._annotations[annot_ix] + this_tzero = epoch_tzeros[epo_ix] + # adjust annotation onset to be relative to epoch tzero... + annot = (this_annot['onset'] - this_tzero, + this_annot['duration'], + this_annot['description']) + # ...then add it to the correct sublist of `epoch_annot_list` + epoch_annot_list[epo_ix].append(annot) + return epoch_annot_list + + def add_annotations_to_metadata(self, overwrite=False): + """Add raw annotations into the Epochs metadata data frame. + + Adds three columns to the ``metadata`` consisting of a list + in each row: + - ``annot_onset``: the onset of each Annotation within + the Epoch relative to the start time of the Epoch (in seconds). + - ``annot_duration``: the duration of each Annotation + within the Epoch in seconds. + - ``annot_description``: the free-form text description of each + Annotation. + + Parameters + ---------- + overwrite : bool + Whether to overwrite existing columns in metadata or not. + Default is False. + + Returns + ------- + self : instance of Epochs + The modified instance (instance is also modified inplace). + + Notes + ----- + .. 
versionadded:: 1.0 + """ + pd = _check_pandas_installed() + + # check if annotations exist + if self.annotations is None: + warn(f'There were no Annotations stored in {self}, so ' + 'metadata was not modified.') + return self + + # get existing metadata DataFrame or instantiate an empty one + if self._metadata is not None: + metadata = self._metadata + else: + data = np.empty((len(self.events), 0)) + metadata = pd.DataFrame(data=data) + + if any(name in metadata.columns for name in + ['annot_onset', 'annot_duration', 'annot_description']) and \ + not overwrite: + raise RuntimeError( + 'Metadata for Epochs already contains columns ' + '"annot_onset", "annot_duration", or "annot_description".') + + # get the Epoch annotations, then convert to separate lists for + # onsets, durations, and descriptions + epoch_annot_list = self.get_annotations_per_epoch() + onset, duration, description = [], [], [] + for epoch_annot in epoch_annot_list: + for ix, annot_prop in enumerate((onset, duration, description)): + entry = [annot[ix] for annot in epoch_annot] + + # round onset and duration to avoid IO round trip mismatch + if ix < 2: + entry = np.round(entry, decimals=12).tolist() + + annot_prop.append(entry) + + # Create a new Annotations column that is instantiated as an empty + # list per Epoch. + metadata['annot_onset'] = pd.Series(onset) + metadata['annot_duration'] = pd.Series(duration) + metadata['annot_description'] = pd.Series(description) + + # reset the metadata + self.metadata = metadata + return self + + +def _combine_annotations(one, two, one_n_samples, one_first_samp, + two_first_samp, sfreq): + """Combine a tuple of annotations.""" + assert one is not None + assert two is not None + shift = one_n_samples / sfreq # to the right by the number of samples + shift += one_first_samp / sfreq # to the right by the offset + shift -= two_first_samp / sfreq # undo its offset + onset = np.concatenate([one.onset, two.onset + shift]) + duration = np.concatenate([one.duration, two.duration]) + description = np.concatenate([one.description, two.description]) + ch_names = np.concatenate([one.ch_names, two.ch_names]) + return Annotations(onset, duration, description, one.orig_time, ch_names) + + +def _handle_meas_date(meas_date): + """Convert meas_date to datetime or None. + + If `meas_date` is a string, it should conform to the ISO8601 format. + More precisely to this '%Y-%m-%d %H:%M:%S.%f' particular case of the + ISO8601 format where the delimiter between date and time is ' '. + Note that ISO8601 allows for ' ' or 'T' as delimiters between date and + time. + """ + if isinstance(meas_date, str): + ACCEPTED_ISO8601 = '%Y-%m-%d %H:%M:%S.%f' + try: + meas_date = datetime.strptime(meas_date, ACCEPTED_ISO8601) + except ValueError: + meas_date = None + else: + meas_date = meas_date.replace(tzinfo=timezone.utc) + elif isinstance(meas_date, tuple): + # old way + meas_date = _stamp_to_dt(meas_date) + if meas_date is not None: + if np.isscalar(meas_date): + # It would be nice just to do: + # + # meas_date = datetime.fromtimestamp(meas_date, timezone.utc) + # + # But Windows does not like timestamps < 0. 
So we'll use + # our specialized wrapper instead: + meas_date = np.array(np.modf(meas_date)[::-1]) + meas_date *= [1, 1e6] + meas_date = _stamp_to_dt(np.round(meas_date)) + _check_dt(meas_date) # run checks + return meas_date + + +def _sync_onset(raw, onset, inverse=False): + """Adjust onsets in relation to raw data.""" + offset = (-1 if inverse else 1) * raw._first_time + assert raw.info['meas_date'] == raw.annotations.orig_time + annot_start = onset - offset + return annot_start + + +def _annotations_starts_stops(raw, kinds, name='skip_by_annotation', + invert=False): + """Get starts and stops from given kinds. + + onsets and ends are inclusive. + """ + _validate_type(kinds, (str, list, tuple), name) + if isinstance(kinds, str): + kinds = [kinds] + else: + for kind in kinds: + _validate_type(kind, 'str', "All entries") + + if len(raw.annotations) == 0: + onsets, ends = np.array([], int), np.array([], int) + else: + idxs = [idx for idx, desc in enumerate(raw.annotations.description) + if any(desc.upper().startswith(kind.upper()) + for kind in kinds)] + # onsets are already sorted + onsets = raw.annotations.onset[idxs] + onsets = _sync_onset(raw, onsets) + ends = onsets + raw.annotations.duration[idxs] + onsets = raw.time_as_index(onsets, use_rounding=True) + ends = raw.time_as_index(ends, use_rounding=True) + assert (onsets <= ends).all() # all durations >= 0 + if invert: + # We need to eliminate overlaps here, otherwise wacky things happen, + # so we carefully invert the relationship + mask = np.zeros(len(raw.times), bool) + for onset, end in zip(onsets, ends): + mask[onset:end] = True + mask = ~mask + extras = (onsets == ends) + extra_onsets, extra_ends = onsets[extras], ends[extras] + onsets, ends = _mask_to_onsets_offsets(mask) + # Keep ones where things were exactly equal + del extras + # we could do this with a np.insert+np.searchsorted, but our + # ordered-ness should get us it for free + onsets = np.sort(np.concatenate([onsets, extra_onsets])) + ends = np.sort(np.concatenate([ends, extra_ends])) + assert (onsets <= ends).all() + return onsets, ends + + +def _prep_name_list(lst, operation, name='description'): + if operation == 'check': + if any(['{COLON}' in val for val in lst]): + raise ValueError( + f'The substring "{{COLON}}" in {name} not supported.') + elif operation == 'write': + # take a list of strings and return a sanitized string + return ':'.join(val.replace(':', '{COLON}') for val in lst) + else: + # take a sanitized string and return a list of strings + assert operation == 'read' + assert isinstance(lst, str) + if not len(lst): + return [] + return [val.replace('{COLON}', ':') for val in lst.split(':')] + + +def _write_annotations(fid, annotations): + """Write annotations.""" + start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, + annotations.duration + annotations.onset) + write_name_list(fid, FIFF.FIFF_COMMENT, _prep_name_list( + annotations.description, 'write').split(':')) + if annotations.orig_time is not None: + write_double(fid, FIFF.FIFF_MEAS_DATE, + _dt_to_stamp(annotations.orig_time)) + if annotations._any_ch_names(): + write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, + json.dumps(tuple(annotations.ch_names))) + end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) + + +def _write_annotations_csv(fname, annot): + annot = annot.to_data_frame() + if 'ch_names' in annot: + annot['ch_names'] = [ + _prep_name_list(ch, 'write') for ch in annot['ch_names']] + 
annot.to_csv(fname, index=False)
+
+
+def _write_annotations_txt(fname, annot):
+    content = "# MNE-Annotations\n"
+    if annot.orig_time is not None:
+        # for backward compat, we do not write tzinfo (assumed UTC)
+        content += f"# orig_time : {annot.orig_time.replace(tzinfo=None)}\n"
+    content += "# onset, duration, description"
+    data = [annot.onset, annot.duration, annot.description]
+    if annot._any_ch_names():
+        content += ', ch_names'
+        data.append([_prep_name_list(ch, 'write') for ch in annot.ch_names])
+    content += '\n'
+    data = np.array(data, dtype=str).T
+    assert data.ndim == 2
+    assert data.shape[0] == len(annot.onset)
+    assert data.shape[1] in (3, 4)
+    with open(fname, 'wb') as fid:
+        fid.write(content.encode())
+        np.savetxt(fid, data, delimiter=',', fmt="%s")
+
+
+def read_annotations(fname, sfreq='auto', uint16_codec=None):
+    r"""Read annotations from a file.
+
+    This function reads a .fif, .fif.gz, .vmrk, .edf, .txt, .csv, .cnt, .cef,
+    or .set file and makes an :class:`mne.Annotations` object.
+
+    Parameters
+    ----------
+    fname : str
+        The filename.
+    sfreq : float | 'auto'
+        The sampling frequency in the file. This parameter is necessary for
+        \*.vmrk and \*.cef files as Annotations are expressed in seconds and
+        \*.vmrk/\*.cef files are in samples. For any other file format,
+        ``sfreq`` is omitted. If set to 'auto' then the ``sfreq`` is taken
+        from the respective info file of the same name with the corresponding
+        file extension (\*.vhdr for brainvision; \*.dap for Curry 7;
+        \*.cdt.dpa for Curry 8). So data.vmrk looks for sfreq in data.vhdr,
+        data.cef looks in data.dap, and data.cdt.cef looks in data.cdt.dpa.
+    uint16_codec : str | None
+        This parameter is only used in EEGLAB (\*.set) and omitted otherwise.
+        If your \*.set file contains non-ascii characters, sometimes reading
+        it may fail and give rise to an error message stating that "buffer is
+        too small". ``uint16_codec`` lets you specify which codec (for
+        example: 'latin1' or 'utf-8') should be used when reading character
+        arrays and can therefore help you solve this problem.
+
+    Returns
+    -------
+    annot : instance of Annotations | None
+        The annotations.
+
+    Notes
+    -----
+    The annotations stored in a .csv require the onset column to be
+    timestamps. If you have onsets as floats (in seconds), you should use the
+    .txt extension.
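For instance, a sketch with hypothetical filenames:

    >>> annot = mne.read_annotations('sub-01-annot.fif')  # doctest: +SKIP
    >>> annot = mne.read_annotations('data.vmrk', sfreq='auto')  # sfreq from data.vhdr  # doctest: +SKIP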
+ """ + from .io.brainvision.brainvision import _read_annotations_brainvision + from .io.eeglab.eeglab import _read_annotations_eeglab + from .io.edf.edf import _read_annotations_edf + from .io.cnt.cnt import _read_annotations_cnt + from .io.curry.curry import _read_annotations_curry + from .io.ctf.markers import _read_annotations_ctf + _validate_type(fname, 'path-like', 'fname') + fname = _check_fname( + fname, overwrite='read', must_exist=True, + need_dir=str(fname).endswith('.ds'), # for CTF + name='fname') + name = op.basename(fname) + if name.endswith(('fif', 'fif.gz')): + # Read FiF files + ff, tree, _ = fiff_open(fname, preload=False) + with ff as fid: + annotations = _read_annotations_fif(fid, tree) + elif name.endswith('txt'): + orig_time = _read_annotations_txt_parse_header(fname) + onset, duration, description, ch_names = _read_annotations_txt(fname) + annotations = Annotations(onset=onset, duration=duration, + description=description, orig_time=orig_time, + ch_names=ch_names) + + elif name.endswith('vmrk'): + annotations = _read_annotations_brainvision(fname, sfreq=sfreq) + + elif name.endswith('csv'): + annotations = _read_annotations_csv(fname) + + elif name.endswith('cnt'): + annotations = _read_annotations_cnt(fname) + + elif name.endswith('ds'): + annotations = _read_annotations_ctf(fname) + + elif name.endswith('cef'): + annotations = _read_annotations_curry(fname, sfreq=sfreq) + + elif name.endswith('set'): + annotations = _read_annotations_eeglab(fname, + uint16_codec=uint16_codec) + + elif name.endswith(('edf', 'bdf', 'gdf')): + onset, duration, description = _read_annotations_edf(fname) + onset = np.array(onset, dtype=float) + duration = np.array(duration, dtype=float) + annotations = Annotations(onset=onset, duration=duration, + description=description, + orig_time=None) + + elif name.startswith('events_') and fname.endswith('mat'): + annotations = _read_brainstorm_annotations(fname) + else: + raise IOError('Unknown annotation file format "%s"' % fname) + + if annotations is None: + raise IOError('No annotation data found in file "%s"' % fname) + return annotations + + +def _read_annotations_csv(fname): + """Read annotations from csv. + + Parameters + ---------- + fname : str + The filename. + + Returns + ------- + annot : instance of Annotations + The annotations. + """ + pd = _check_pandas_installed(strict=True) + df = pd.read_csv(fname, keep_default_na=False) + orig_time = df['onset'].values[0] + try: + float(orig_time) + warn('It looks like you have provided annotation onsets as floats. ' + 'These will be interpreted as MILLISECONDS. If that is not what ' + 'you want, save your CSV as a TXT file; the TXT reader accepts ' + 'onsets in seconds.') + except ValueError: + pass + onset_dt = pd.to_datetime(df['onset']) + onset = (onset_dt - onset_dt[0]).dt.total_seconds() + duration = df['duration'].values.astype(float) + description = df['description'].values + ch_names = None + if 'ch_names' in df.columns: + ch_names = [_prep_name_list(val, 'read') + for val in df['ch_names'].values] + return Annotations(onset, duration, description, orig_time, ch_names) + + +def _read_brainstorm_annotations(fname, orig_time=None): + """Read annotations from a Brainstorm events_ file. + + Parameters + ---------- + fname : str + The filename + orig_time : float | int | instance of datetime | array of int | None + A POSIX Timestamp, datetime or an array containing the timestamp as the + first element and microseconds as the second element. 
Determines the + starting time of annotation acquisition. If None (default), + starting time is determined from beginning of raw data acquisition. + In general, ``raw.info['meas_date']`` (or None) can be used for syncing + the annotations with raw data if their acquisition is started at the + same time. + + Returns + ------- + annot : instance of Annotations | None + The annotations. + """ + from scipy import io + + def get_duration_from_times(t): + return t[1] - t[0] if t.shape[0] == 2 else np.zeros(len(t[0])) + + annot_data = io.loadmat(fname) + onsets, durations, descriptions = (list(), list(), list()) + for label, _, _, _, times, _, _ in annot_data['events'][0]: + onsets.append(times[0]) + durations.append(get_duration_from_times(times)) + n_annot = len(times[0]) + descriptions += [str(label[0])] * n_annot + + return Annotations(onset=np.concatenate(onsets), + duration=np.concatenate(durations), + description=descriptions, + orig_time=orig_time) + + +def _is_iso8601(candidate_str): + ISO8601 = r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}\.\d{6}$' + return re.compile(ISO8601).match(candidate_str) is not None + + +def _read_annotations_txt_parse_header(fname): + def is_orig_time(x): + return x.startswith('# orig_time :') + + with open(fname) as fid: + header = list(takewhile(lambda x: x.startswith('#'), fid)) + + orig_values = [h[13:].strip() for h in header if is_orig_time(h)] + orig_values = [_handle_meas_date(orig) for orig in orig_values + if _is_iso8601(orig)] + + return None if not orig_values else orig_values[0] + + +def _read_annotations_txt(fname): + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore") + out = np.loadtxt(fname, delimiter=',', + dtype=np.bytes_, unpack=True) + ch_names = None + if len(out) == 0: + onset, duration, desc = [], [], [] + else: + _check_option('text header', len(out), (3, 4)) + if len(out) == 3: + onset, duration, desc = out + else: + onset, duration, desc, ch_names = out + + onset = [float(o.decode()) for o in np.atleast_1d(onset)] + duration = [float(d.decode()) for d in np.atleast_1d(duration)] + desc = [str(d.decode()).strip() for d in np.atleast_1d(desc)] + if ch_names is not None: + ch_names = [_prep_name_list(ch.decode().strip(), 'read') + for ch in ch_names] + return onset, duration, desc, ch_names + + +def _read_annotations_fif(fid, tree): + """Read annotations.""" + annot_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ANNOTATIONS) + if len(annot_data) == 0: + annotations = None + else: + annot_data = annot_data[0] + orig_time = ch_names = None + onset, duration, description = list(), list(), list() + for ent in annot_data['directory']: + kind = ent.kind + pos = ent.pos + tag = read_tag(fid, pos) + if kind == FIFF.FIFF_MNE_BASELINE_MIN: + onset = tag.data + onset = list() if onset is None else onset + elif kind == FIFF.FIFF_MNE_BASELINE_MAX: + duration = tag.data + duration = list() if duration is None else duration - onset + elif kind == FIFF.FIFF_COMMENT: + description = _prep_name_list(tag.data, 'read') + elif kind == FIFF.FIFF_MEAS_DATE: + orig_time = tag.data + try: + orig_time = float(orig_time) # old way + except TypeError: + orig_time = tuple(orig_time) # new way + elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: + ch_names = tuple(tuple(x) for x in json.loads(tag.data)) + assert len(onset) == len(duration) == len(description) + annotations = Annotations(onset, duration, description, + orig_time, ch_names) + return annotations + + +def _select_annotations_based_on_description(descriptions, event_id, regexp): + """Get a 
collection of descriptions and return the indices of those selected.""" + regexp_comp = re.compile('.*' if regexp is None else regexp) + + event_id_ = dict() + dropped = [] + # Iterate over the sorted descriptions so that the Counter mapping + # is slightly less arbitrary + for desc in sorted(descriptions): + if desc in event_id_: + continue + + if regexp_comp.match(desc) is None: + continue + + if isinstance(event_id, dict): + if desc in event_id: + event_id_[desc] = event_id[desc] + else: + continue + else: + trigger = event_id(desc) + if trigger is not None: + event_id_[desc] = trigger + else: + dropped.append(desc) + + event_sel = [ii for ii, kk in enumerate(descriptions) + if kk in event_id_] + + if len(event_sel) == 0 and regexp is not None: + raise ValueError('Could not find any of the events you specified.') + + return event_sel, event_id_ + + +def _select_events_based_on_id(events, event_desc): + """Get a collection of events and return the indices of those selected.""" + event_desc_ = dict() + func = event_desc.get if isinstance(event_desc, dict) else event_desc + event_ids = events[np.unique(events[:, 2], return_index=True)[1], 2] + for e in event_ids: + trigger = func(e) + if trigger is not None: + event_desc_[e] = trigger + + event_sel = [ii for ii, e in enumerate(events) if e[2] in event_desc_] + + if len(event_sel) == 0: + raise ValueError('Could not find any of the events you specified.') + + return event_sel, event_desc_ + + +def _check_event_id(event_id, raw): + from .io.brainvision.brainvision import _BVEventParser + from .io.brainvision.brainvision import _check_bv_annot + from .io.brainvision.brainvision import RawBrainVision + from .io import RawFIF, RawArray + + if event_id is None: + return _DefaultEventParser() + elif event_id == 'auto': + if isinstance(raw, RawBrainVision): + return _BVEventParser() + elif (isinstance(raw, (RawFIF, RawArray)) and + _check_bv_annot(raw.annotations.description)): + logger.info('Non-RawBrainVision raw using brainvision markers') + return _BVEventParser() + else: + return _DefaultEventParser() + elif callable(event_id) or isinstance(event_id, dict): + return event_id + else: + raise ValueError('Invalid type for event_id (should be None, str, ' + 'dict or callable). Got {}'.format(type(event_id))) + + +def _check_event_description(event_desc, events): + """Check event_desc and convert to default format.""" + if event_desc is None: # convert to int to make typing-checks happy + event_desc = list(np.unique(events[:, 2])) + + if isinstance(event_desc, dict): + for val in event_desc.values(): + _validate_type(val, (str, None), 'Event names') + elif isinstance(event_desc, Iterable): + event_desc = np.asarray(event_desc) + if event_desc.ndim != 1: + raise ValueError('event_desc must be 1D, got shape {}'.format( + event_desc.shape)) + event_desc = dict(zip(event_desc, map(str, event_desc))) + elif callable(event_desc): + pass + else: + raise ValueError('Invalid type for event_desc (should be None, list, ' + '1darray, dict or callable). Got {}'.format( + type(event_desc))) + + return event_desc + + +@verbose +def events_from_annotations(raw, event_id="auto", + regexp=r'^(?![Bb][Aa][Dd]|[Ee][Dd][Gg][Ee]).*$', + use_rounding=True, chunk_duration=None, + verbose=None): + """Get :term:`events` and ``event_id`` from an Annotations object. + + Parameters + ---------- + raw : instance of Raw + The raw data for which Annotations are defined. + event_id : dict | callable | None | 'auto' + Can be: + + - **dict**: map descriptions (keys) to integer event codes (values).
+ Only the descriptions present will be mapped, others will be ignored. + - **callable**: must take a string input and return an integer event + code, or return ``None`` to ignore the event. + - **None**: Map descriptions to unique integer values based on their + ``sorted`` order. + - **'auto' (default)**: prefer a raw-format-specific parser: + + - Brainvision: map stimulus events to their integer part; response + events to integer part + 1000; optic events to integer part + 2000; + 'SyncStatus/Sync On' to 99998; 'New Segment/' to 99999; + all others like ``None`` with an offset of 10000. + - Other raw formats: Behaves like None. + + .. versionadded:: 0.18 + regexp : str | None + Regular expression used to filter the annotations: only annotations + whose descriptions match it are selected. The default ignores + descriptions beginning with ``'bad'`` or ``'edge'`` + (case-insensitive). + + .. versionchanged:: 0.18 + Default ignores bad and edge descriptions. + use_rounding : bool + If True, use rounding (instead of truncation) when converting + times to indices. This can help avoid non-unique indices. + chunk_duration : float | None + Chunk duration in seconds. If ``chunk_duration`` is set to None + (default), generated events correspond to the annotation onsets. + If not, :func:`mne.events_from_annotations` returns as many events as + fit within the annotation duration, spaced according to + ``chunk_duration``. As a consequence, annotations with a duration + shorter than ``chunk_duration`` will not contribute events. + %(verbose)s + + Returns + ------- + %(events)s + event_id : dict + The event_id variable that can be passed to :class:`~mne.Epochs`. + + See Also + -------- + mne.annotations_from_events + + Notes + ----- + For data formats that store integer events as strings (e.g., NeuroScan + ``.cnt`` files), passing the Python built-in function :class:`int` as the + ``event_id`` parameter will do what most users probably want in those + circumstances: return an ``event_id`` dictionary that maps event ``'1'`` to + integer event code ``1``, ``'2'`` to ``2``, etc.
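A minimal usage sketch (illustrative only; assumes ``raw`` is an existing Raw instance carrying annotations):

    import mne

    events, event_id = mne.events_from_annotations(raw)  # 'auto' mapping
    # for string-coded integer events (e.g., NeuroScan .cnt), map directly:
    events, event_id = mne.events_from_annotations(raw, event_id=int)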
+ """ + if len(raw.annotations) == 0: + event_id = dict() if not isinstance(event_id, dict) else event_id + return np.empty((0, 3), dtype=int), event_id + + annotations = raw.annotations + + event_id = _check_event_id(event_id, raw) + + event_sel, event_id_ = _select_annotations_based_on_description( + annotations.description, event_id=event_id, regexp=regexp) + + if chunk_duration is None: + inds = raw.time_as_index(annotations.onset, use_rounding=use_rounding, + origin=annotations.orig_time) + if annotations.orig_time is not None: + inds += raw.first_samp + values = [event_id_[kk] for kk in annotations.description[event_sel]] + inds = inds[event_sel] + else: + inds = values = np.array([]).astype(int) + for annot in annotations[event_sel]: + annot_offset = annot['onset'] + annot['duration'] + _onsets = np.arange(start=annot['onset'], stop=annot_offset, + step=chunk_duration) + good_events = annot_offset - _onsets >= chunk_duration + if good_events.any(): + _onsets = _onsets[good_events] + _inds = raw.time_as_index(_onsets, + use_rounding=use_rounding, + origin=annotations.orig_time) + _inds += raw.first_samp + inds = np.append(inds, _inds) + _values = np.full(shape=len(_inds), + fill_value=event_id_[annot['description']], + dtype=int) + values = np.append(values, _values) + + events = np.c_[inds, np.zeros(len(inds)), values].astype(int) + + logger.info('Used Annotations descriptions: %s' % + (list(event_id_.keys()),)) + + return events, event_id_ + + +@verbose +def annotations_from_events(events, sfreq, event_desc=None, first_samp=0, + orig_time=None, verbose=None): + """Convert an event array to an Annotations object. + + Parameters + ---------- + events : ndarray, shape (n_events, 3) + The events. + sfreq : float + Sampling frequency. + event_desc : dict | array-like | callable | None + Events description. Can be: + + - **dict**: map integer event codes (keys) to descriptions (values). + Only the descriptions present will be mapped, others will be ignored. + - **array-like**: list, or 1d array of integers event codes to include. + Only the event codes present will be mapped, others will be ignored. + Event codes will be passed as string descriptions. + - **callable**: must take a integer event code as input and return a + string description or None to ignore it. + - **None**: Use integer event codes as descriptions. + first_samp : int + The first data sample (default=0). See :attr:`mne.io.Raw.first_samp` + docstring. + orig_time : float | str | datetime | tuple of int | None + Determines the starting time of annotation acquisition. If None + (default), starting time is determined from beginning of raw data + acquisition. For details, see :meth:`mne.Annotations` docstring. + %(verbose)s + + Returns + ------- + annot : instance of Annotations + The annotations. + + See Also + -------- + mne.events_from_annotations + + Notes + ----- + Annotations returned by this function will all have zero (null) duration. + + Creating events from annotations via the function + `mne.events_from_annotations` takes in event mappings with + key→value pairs as description→ID, whereas `mne.annotations_from_events` + takes in event mappings with key→value pairs as ID→description. 
+ If you need to use these together, you can invert the mapping by doing:: + + event_desc = {v: k for k, v in event_id.items()} + """ + event_desc = _check_event_description(event_desc, events) + event_sel, event_desc_ = _select_events_based_on_id(events, event_desc) + events_sel = events[event_sel] + onsets = (events_sel[:, 0] - first_samp) / sfreq + descriptions = [event_desc_[e[2]] for e in events_sel] + durations = np.zeros(len(events_sel)) # dummy durations + + # Create annotations + annots = Annotations(onset=onsets, + duration=durations, + description=descriptions, + orig_time=orig_time) + + return annots + + +def _adjust_onset_meas_date(annot, raw): + """Adjust the annotation onsets based on raw meas_date.""" + # If there is a non-None meas date, then the onset should take into + # account the first_samp / first_time. + if raw.info['meas_date'] is not None: + annot.onset += raw.first_time diff --git a/python/libs/mne/baseline.py b/python/libs/mne/baseline.py new file mode 100644 index 0000000..846c89f --- /dev/null +++ b/python/libs/mne/baseline.py @@ -0,0 +1,201 @@ +"""Utility functions to baseline-correct data.""" + +# Authors: Alexandre Gramfort +# +# License: BSD-3-Clause + +import numpy as np + +from .utils import logger, verbose, _check_option + + +def _log_rescale(baseline, mode='mean'): + """Log the rescaling method.""" + if baseline is not None: + _check_option('mode', mode, ['logratio', 'ratio', 'zscore', 'mean', + 'percent', 'zlogratio']) + msg = 'Applying baseline correction (mode: %s)' % mode + else: + msg = 'No baseline correction applied' + return msg + + +@verbose +def rescale(data, times, baseline, mode='mean', copy=True, picks=None, + verbose=None): + """Rescale (baseline correct) data. + + Parameters + ---------- + data : array + It can be of any shape. The only constraint is that the last + dimension should be time. + times : 1D array + Time instants in seconds. + %(baseline_rescale)s + mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio' + Perform baseline correction by + + - subtracting the mean of baseline values ('mean') + - dividing by the mean of baseline values ('ratio') + - dividing by the mean of baseline values and taking the log + ('logratio') + - subtracting the mean of baseline values followed by dividing by + the mean of baseline values ('percent') + - subtracting the mean of baseline values and dividing by the + standard deviation of baseline values ('zscore') + - dividing by the mean of baseline values, taking the log, and + dividing by the standard deviation of log baseline values + ('zlogratio') + + copy : bool + Whether to return a new instance or modify in place. + picks : list of int | None + Data to process along the axis=-2 (None, default, processes all). + %(verbose)s + + Returns + ------- + data_scaled : array + Array of same shape as data after rescaling.
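A minimal sketch of calling ``rescale`` (illustrative only; ``data`` and ``times`` here are synthetic):

    import numpy as np
    from mne.baseline import rescale

    rng = np.random.default_rng(0)
    data = rng.standard_normal((2, 1000))   # (n_channels, n_times)
    times = np.linspace(-0.2, 0.8, 1000)    # seconds
    corrected = rescale(data, times, baseline=(None, 0), mode='mean')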
+ """ + if copy: + data = data.copy() + if verbose is not False: + msg = _log_rescale(baseline, mode) + logger.info(msg) + if baseline is None or data.shape[-1] == 0: + return data + + bmin, bmax = baseline + if bmin is None: + imin = 0 + else: + imin = np.where(times >= bmin)[0] + if len(imin) == 0: + raise ValueError('bmin is too large (%s), it exceeds the largest ' + 'time value' % (bmin,)) + imin = int(imin[0]) + if bmax is None: + imax = len(times) + else: + imax = np.where(times <= bmax)[0] + if len(imax) == 0: + raise ValueError('bmax is too small (%s), it is smaller than the ' + 'smallest time value' % (bmax,)) + imax = int(imax[-1]) + 1 + if imin >= imax: + raise ValueError('Bad rescaling slice (%s:%s) from time values %s, %s' + % (imin, imax, bmin, bmax)) + + # technically this is inefficient when `picks` is given, but assuming + # that we generally pick most channels for rescaling, it's not so bad + mean = np.mean(data[..., imin:imax], axis=-1, keepdims=True) + + if mode == 'mean': + def fun(d, m): + d -= m + elif mode == 'ratio': + def fun(d, m): + d /= m + elif mode == 'logratio': + def fun(d, m): + d /= m + np.log10(d, out=d) + elif mode == 'percent': + def fun(d, m): + d -= m + d /= m + elif mode == 'zscore': + def fun(d, m): + d -= m + d /= np.std(d[..., imin:imax], axis=-1, keepdims=True) + elif mode == 'zlogratio': + def fun(d, m): + d /= m + np.log10(d, out=d) + d /= np.std(d[..., imin:imax], axis=-1, keepdims=True) + + if picks is None: + fun(data, mean) + else: + for pi in picks: + fun(data[..., pi, :], mean[..., pi, :]) + return data + + +def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): + """Check if the baseline is valid, and adjust it if requested. + + ``None`` values inside the baseline parameter will be replaced with + ``times[0]`` and ``times[-1]``. + + Parameters + ---------- + baseline : tuple | None + Beginning and end of the baseline period, in seconds. If ``None``, + assume no baseline and return immediately. + times : array + The time points. + sfreq : float + The sampling rate. + on_baseline_outside_data : 'raise' | 'info' | 'adjust' + What do do if the baseline period exceeds the data. + If ``'raise'``, raise an exception (default). + If ``'info'``, log an info message. + If ``'adjust'``, adjust the baseline such that it's within the data + range again. + + Returns + ------- + (baseline_tmin, baseline_tmax) | None + The baseline with ``None`` values replaced with times, and with + adjusted times if ``on_baseline_outside_data='adjust'``; or ``None`` + if the ``baseline`` parameter is ``None``. + + """ + if baseline is None: + return None + + if not isinstance(baseline, tuple) or len(baseline) != 2: + raise ValueError(f'`baseline={baseline}` is an invalid argument, must ' + f'be a tuple of length 2 or None') + + tmin, tmax = times[0], times[-1] + tstep = 1. / float(sfreq) + + # check default value of baseline and `tmin=0` + if baseline == (None, 0) and tmin == 0: + raise ValueError('Baseline interval is only one sample. 
Use ' + '`baseline=(0, 0)` if this is desired.') + + baseline_tmin, baseline_tmax = baseline + + if baseline_tmin is None: + baseline_tmin = tmin + baseline_tmin = float(baseline_tmin) + + if baseline_tmax is None: + baseline_tmax = tmax + baseline_tmax = float(baseline_tmax) + + if baseline_tmin > baseline_tmax: + raise ValueError( + "Baseline min (%s) must be less than baseline max (%s)" + % (baseline_tmin, baseline_tmax)) + + if (baseline_tmin < tmin - tstep) or (baseline_tmax > tmax + tstep): + msg = (f"Baseline interval [{baseline_tmin}, {baseline_tmax}] sec " + f"is outside of epochs data [{tmin}, {tmax}] sec. Epochs were " + f"probably cropped.") + if on_baseline_outside_data == 'raise': + raise ValueError(msg) + elif on_baseline_outside_data == 'info': + logger.info(msg) + elif on_baseline_outside_data == 'adjust': + if baseline_tmin < tmin - tstep: + baseline_tmin = tmin + if baseline_tmax > tmax + tstep: + baseline_tmax = tmax + + return baseline_tmin, baseline_tmax diff --git a/python/libs/mne/beamformer/__init__.py b/python/libs/mne/beamformer/__init__.py new file mode 100644 index 0000000..a4dcca4 --- /dev/null +++ b/python/libs/mne/beamformer/__init__.py @@ -0,0 +1,8 @@ +"""Beamformers for source localization.""" + +from ._lcmv import (make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw, + apply_lcmv_cov) +from ._dics import make_dics, apply_dics, apply_dics_epochs, apply_dics_csd +from ._rap_music import rap_music +from ._compute_beamformer import Beamformer, read_beamformer +from .resolution_matrix import make_lcmv_resolution_matrix diff --git a/python/libs/mne/beamformer/_compute_beamformer.py b/python/libs/mne/beamformer/_compute_beamformer.py new file mode 100644 index 0000000..4edaebf --- /dev/null +++ b/python/libs/mne/beamformer/_compute_beamformer.py @@ -0,0 +1,521 @@ +"""Functions shared between different beamformer types.""" + +# Authors: Alexandre Gramfort +# Roman Goj +# Britta Westner +# +# License: BSD-3-Clause + +from copy import deepcopy + +import numpy as np + +from ..cov import Covariance, make_ad_hoc_cov +from ..forward.forward import is_fixed_orient, _restrict_forward_to_src_sel +from ..io.proj import make_projector, Projection +from ..minimum_norm.inverse import _get_vertno, _prepare_forward +from ..source_space import label_src_vertno_sel +from ..utils import (verbose, check_fname, _reg_pinv, _check_option, logger, + _pl, _check_src_normal, check_version, _sym_mat_pow, warn, + _import_h5io_funcs) +from ..time_frequency.csd import CrossSpectralDensity + + +def _check_proj_match(proj, filters): + """Check whether SSP projections in data and spatial filter match.""" + proj_data, _, _ = make_projector(proj, filters['ch_names']) + if not np.allclose(proj_data, filters['proj'], + atol=np.finfo(float).eps, rtol=1e-13): + raise ValueError('The SSP projections present in the data ' + 'do not match the projections used when ' + 'calculating the spatial filter.') + + +def _check_src_type(filters): + """Check whether src_type is in filters and set custom warning.""" + if 'src_type' not in filters: + filters['src_type'] = None + warn_text = ('The spatial filter does not contain src_type and a robust ' + 'guess of src_type is not possible without src. 
Consider ' + 'recomputing the filter.') + return filters, warn_text + + +def _prepare_beamformer_input(info, forward, label=None, pick_ori=None, + noise_cov=None, rank=None, pca=False, loose=None, + combine_xyz='fro', exp=None, limit=None, + allow_fixed_depth=True, limit_depth_chs=False): + """Input preparation common for LCMV, DICS, and RAP-MUSIC.""" + _check_option('pick_ori', pick_ori, + ('normal', 'max-power', 'vector', None)) + + # Restrict forward solution to selected vertices + if label is not None: + _, src_sel = label_src_vertno_sel(label, forward['src']) + forward = _restrict_forward_to_src_sel(forward, src_sel) + + if loose is None: + loose = 0. if is_fixed_orient(forward) else 1. + if noise_cov is None: + noise_cov = make_ad_hoc_cov(info, std=1.) + forward, info_picked, gain, _, orient_prior, _, trace_GRGT, noise_cov, \ + whitener = _prepare_forward( + forward, info, noise_cov, 'auto', loose, rank=rank, pca=pca, + use_cps=True, exp=exp, limit_depth_chs=limit_depth_chs, + combine_xyz=combine_xyz, limit=limit, + allow_fixed_depth=allow_fixed_depth) + is_free_ori = not is_fixed_orient(forward) # could have been changed + nn = forward['source_nn'] + if is_free_ori: # take Z coordinate + nn = nn[2::3] + nn = nn.copy() + vertno = _get_vertno(forward['src']) + if forward['surf_ori']: + nn[...] = [0, 0, 1] # align to local +Z coordinate + if pick_ori is not None and not is_free_ori: + raise ValueError( + 'Normal or max-power orientation (got %r) can only be picked when ' + 'a forward operator with free orientation is used.' % (pick_ori,)) + if pick_ori == 'normal' and not forward['surf_ori']: + raise ValueError('Normal orientation can only be picked when a ' + 'forward operator oriented in surface coordinates is ' + 'used.') + _check_src_normal(pick_ori, forward['src']) + del forward, info + + # Undo the scaling that MNE prefers + scale = np.sqrt((noise_cov['eig'] > 0).sum() / trace_GRGT) + gain /= scale + if orient_prior is not None: + orient_std = np.sqrt(orient_prior) + else: + orient_std = np.ones(gain.shape[1]) + + # Get the projector + proj, _, _ = make_projector( + info_picked['projs'], info_picked['ch_names']) + return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, + orient_std) + + +def _reduce_leadfield_rank(G): + """Reduce the rank of the leadfield.""" + # decompose lead field + u, s, v = np.linalg.svd(G, full_matrices=False) + + # backproject, omitting one direction (equivalent to setting the smallest + # singular value to zero) + G = np.matmul(u[:, :, :-1], s[:, :-1, np.newaxis] * v[:, :-1, :]) + + return G + + +def _sym_inv_sm(x, reduce_rank, inversion, sk): + """Symmetric inversion with single- or matrix-style inversion.""" + if x.shape[1:] == (1, 1): + with np.errstate(divide='ignore', invalid='ignore'): + x_inv = 1. / x + x_inv[~np.isfinite(x_inv)] = 1. + else: + assert x.shape[1:] == (3, 3) + if inversion == 'matrix': + x_inv = _sym_mat_pow(x, -1, reduce_rank=reduce_rank) + # Reapply source covariance after inversion + x_inv *= sk[:, :, np.newaxis] + x_inv *= sk[:, np.newaxis, :] + else: + # Invert for each dipole separately using plain division + diags = np.diagonal(x, axis1=1, axis2=2) + assert not reduce_rank # guaranteed earlier + with np.errstate(divide='ignore'): + diags = 1. 
/ diags + # set the diagonal of each 3x3 + x_inv = np.zeros_like(x) + for k in range(x.shape[0]): + this = diags[k] + # Reapply source covariance after inversion + this *= (sk[k] * sk[k]) + x_inv[k].flat[::4] = this + return x_inv + + +def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, + reduce_rank, rank, inversion, nn, orient_std, + whitener): + """Compute a spatial beamformer filter (LCMV or DICS). + + For more detailed information on the parameters, see the docstrings of + `make_lcmv` and `make_dics`. + + Parameters + ---------- + G : ndarray, shape (n_dipoles, n_channels) + The leadfield. + Cm : ndarray, shape (n_channels, n_channels) + The data covariance matrix. + reg : float + Regularization parameter. + n_orient : int + Number of dipole orientations defined at each source point + weight_norm : None | 'unit-noise-gain' | 'nai' + The weight normalization scheme to use. + pick_ori : None | 'normal' | 'max-power' + The source orientation to compute the beamformer in. + reduce_rank : bool + Whether to reduce the rank by one during computation of the filter. + rank : dict | None | 'full' | 'info' + See compute_rank. + inversion : 'matrix' | 'single' + The inversion scheme to compute the weights. + nn : ndarray, shape (n_dipoles, 3) + The source normals. + orient_std : ndarray, shape (n_dipoles,) + The std of the orientation prior used in weighting the lead fields. + whitener : ndarray, shape (n_channels, n_channels) + The whitener. + + Returns + ------- + W : ndarray, shape (n_dipoles, n_channels) + The beamformer filter weights. + """ + _check_option('weight_norm', weight_norm, + ['unit-noise-gain-invariant', 'unit-noise-gain', + 'nai', None]) + + # Whiten the data covariance + Cm = whitener @ Cm @ whitener.T.conj() + # Restore to properly Hermitian as large whitening coefs can have bad + # rounding error + Cm[:] = (Cm + Cm.T.conj()) / 2. + + assert Cm.shape == (G.shape[0],) * 2 + s, _ = np.linalg.eigh(Cm) + if not (s >= -s.max() * 1e-7).all(): + # This shouldn't ever happen, but just in case + warn('data covariance does not appear to be positive semidefinite, ' + 'results will likely be incorrect') + # Tikhonov regularization using reg parameter to control for + # trade-off between spatial resolution and noise sensitivity + # eq. 25 in Gross and Ioannides, 1999 Phys. Med. Biol. 
44 2081 + Cm_inv, loading_factor, rank = _reg_pinv(Cm, reg, rank) + + assert orient_std.shape == (G.shape[1],) + n_sources = G.shape[1] // n_orient + assert nn.shape == (n_sources, 3) + + logger.info('Computing beamformer filters for %d source%s' + % (n_sources, _pl(n_sources))) + n_channels = G.shape[0] + assert n_orient in (3, 1) + Gk = np.reshape(G.T, (n_sources, n_orient, n_channels)).transpose(0, 2, 1) + assert Gk.shape == (n_sources, n_channels, n_orient) + sk = np.reshape(orient_std, (n_sources, n_orient)) + del G, orient_std + pinv_kwargs = dict() + if check_version('numpy', '1.17'): + pinv_kwargs['hermitian'] = True + + _check_option('reduce_rank', reduce_rank, (True, False)) + + # inversion of the denominator + _check_option('inversion', inversion, ('matrix', 'single')) + if inversion == 'single' and n_orient > 1 and pick_ori == 'vector' and \ + weight_norm == 'unit-noise-gain-invariant': + raise ValueError( + 'Cannot use pick_ori="vector" with inversion="single" and ' + 'weight_norm="unit-noise-gain-invariant"') + if reduce_rank and inversion == 'single': + raise ValueError('reduce_rank cannot be used with inversion="single"; ' + 'consider using inversion="matrix" if you have a ' + 'rank-deficient forward model (i.e., from a sphere ' + 'model with MEG channels), otherwise consider using ' + 'reduce_rank=False') + if n_orient > 1: + _, Gk_s, _ = np.linalg.svd(Gk, full_matrices=False) + assert Gk_s.shape == (n_sources, n_orient) + if not reduce_rank and (Gk_s[:, 0] > 1e6 * Gk_s[:, 2]).any(): + raise ValueError( + 'Singular matrix detected when estimating spatial filters. ' + 'Consider reducing the rank of the forward operator by using ' + 'reduce_rank=True.') + del Gk_s + + # + # 1. Reduce rank of the lead field + # + if reduce_rank: + Gk = _reduce_leadfield_rank(Gk) + + def _compute_bf_terms(Gk, Cm_inv): + bf_numer = np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv) + bf_denom = np.matmul(bf_numer, Gk) + return bf_numer, bf_denom + + # + # 2. Reorient lead field in direction of max power or normal + # + if pick_ori == 'max-power': + assert n_orient == 3 + _, bf_denom = _compute_bf_terms(Gk, Cm_inv) + if weight_norm is None: + ori_numer = np.eye(n_orient)[np.newaxis] + ori_denom = bf_denom + else: + # compute power, cf Sekihara & Nagarajan 2008, eq. 4.47 + ori_numer = bf_denom + # Cm_inv should be Hermitian so no need for .T.conj() + ori_denom = np.matmul( + np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv @ Cm_inv), Gk) + ori_denom_inv = _sym_inv_sm(ori_denom, reduce_rank, inversion, sk) + ori_pick = np.matmul(ori_denom_inv, ori_numer) + assert ori_pick.shape == (n_sources, n_orient, n_orient) + + # pick eigenvector that corresponds to maximum eigenvalue: + eig_vals, eig_vecs = np.linalg.eig(ori_pick.real) # not Hermitian! + # sort eigenvectors by eigenvalues for picking: + order = np.argsort(np.abs(eig_vals), axis=-1) + # eig_vals = np.take_along_axis(eig_vals, order, axis=-1) + max_power_ori = eig_vecs[np.arange(len(eig_vecs)), :, order[:, -1]] + assert max_power_ori.shape == (n_sources, n_orient) + + # set the (otherwise arbitrary) sign to match the normal + signs = np.sign(np.sum(max_power_ori * nn, axis=1, keepdims=True)) + signs[signs == 0] = 1. + max_power_ori *= signs + + # Compute the lead field for the optimal orientation, + # and adjust numer/denom + Gk = np.matmul(Gk, max_power_ori[..., np.newaxis]) + n_orient = 1 + else: + max_power_ori = None + if pick_ori == 'normal': + Gk = Gk[..., 2:3] + n_orient = 1 + + # + # 3. 
Compute numerator and denominator of beamformer formula (unit-gain) + # + + bf_numer, bf_denom = _compute_bf_terms(Gk, Cm_inv) + assert bf_denom.shape == (n_sources,) + (n_orient,) * 2 + assert bf_numer.shape == (n_sources, n_orient, n_channels) + del Gk # lead field has been adjusted and should not be used anymore + + # + # 4. Invert the denominator + # + + # Here W is W_ug, i.e.: + # G.T @ Cm_inv / (G.T @ Cm_inv @ G) + bf_denom_inv = _sym_inv_sm(bf_denom, reduce_rank, inversion, sk) + assert bf_denom_inv.shape == (n_sources, n_orient, n_orient) + W = np.matmul(bf_denom_inv, bf_numer) + assert W.shape == (n_sources, n_orient, n_channels) + del bf_denom_inv, sk + + # + # 5. Re-scale filter weights according to the selected weight_norm + # + + # Weight normalization is done by computing, for each source:: + # + # W_ung = W_ug / sqrt(W_ug @ W_ug.T) + # + # with W_ung referring to the unit-noise-gain (weight normalized) filter + # and W_ug referring to the above-calculated unit-gain filter stored in W. + + if weight_norm is not None: + # Three different ways to calculate the normalization factors here. + # Only matters when in vector mode, as otherwise n_orient == 1 and + # they are all equivalent. Sekihara 2008 says to use + # + # In MNE < 0.21, we just used the Frobenius matrix norm: + # + # noise_norm = np.linalg.norm(W, axis=(1, 2), keepdims=True) + # assert noise_norm.shape == (n_sources, 1, 1) + # W /= noise_norm + # + # Sekihara 2008 says to use sqrt(diag(W_ug @ W_ug.T)), which is not + # rotation invariant: + if weight_norm in ('unit-noise-gain', 'nai'): + noise_norm = np.matmul(W, W.swapaxes(-2, -1).conj()).real + noise_norm = np.reshape( # np.diag operation over last two axes + noise_norm, (n_sources, -1, 1))[:, ::n_orient + 1] + np.sqrt(noise_norm, out=noise_norm) + noise_norm[noise_norm == 0] = np.inf + assert noise_norm.shape == (n_sources, n_orient, 1) + W /= noise_norm + else: + assert weight_norm == 'unit-noise-gain-invariant' + # Here we use sqrtm. The shortcut: + # + # use = W + # + # ... does not match the direct route (it is rotated!), so we'll + # use the direct one to match FieldTrip: + use = bf_numer + inner = np.matmul(use, use.swapaxes(-2, -1).conj()) + W = np.matmul(_sym_mat_pow(inner, -0.5), use) + noise_norm = 1. + + if weight_norm == 'nai': + # Estimate noise level based on covariance matrix, taking the + # first eigenvalue that falls outside the signal subspace or the + # loading factor used during regularization, whichever is largest. + if rank > len(Cm): + # Covariance matrix is full rank, no noise subspace! + # Use the loading factor as noise ceiling. + if loading_factor == 0: + raise RuntimeError( + 'Cannot compute noise subspace with a full-rank ' + 'covariance matrix and no regularization. Try ' + 'manually specifying the rank of the covariance ' + 'matrix or using regularization.') + noise = loading_factor + else: + noise, _ = np.linalg.eigh(Cm) + noise = noise[-rank] + noise = max(noise, loading_factor) + W /= np.sqrt(noise) + + W = W.reshape(n_sources * n_orient, n_channels) + logger.info('Filter computation complete') + return W, max_power_ori + + +def _compute_power(Cm, W, n_orient): + """Use beamformer filters to compute source power. + + Parameters + ---------- + Cm : ndarray, shape (n_channels, n_channels) + Data covariance matrix or CSD matrix. + W : ndarray, shape (nvertices*norient, nchannels) + Beamformer weights. + + Returns + ------- + power : ndarray, shape (nvertices,) + Source power. 
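The reduction implemented here is trace(Wk @ Cm @ Wk^H) per source, taken over each source's ``n_orient`` rows of ``W``; an equivalent NumPy sketch with synthetic shapes (illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    n_src, n_ori, n_ch = 4, 3, 10
    W = rng.standard_normal((n_src * n_ori, n_ch))
    Cm = np.cov(rng.standard_normal((n_ch, 100)))
    Wk = W.reshape(n_src, n_ori, n_ch)
    power = np.trace((Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real,
                     axis1=1, axis2=2)      # shape: (n_src,)
    # equivalently, sum the quadratic forms over a source's orientations
    assert np.allclose(power[0], sum(w @ Cm @ w.conj() for w in Wk[0]))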
+ """ + n_sources = W.shape[0] // n_orient + + Wk = W.reshape(n_sources, n_orient, W.shape[1]) + source_power = np.trace((Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real, + axis1=1, axis2=2) + + return source_power + + +class Beamformer(dict): + """A computed beamformer. + + Notes + ----- + .. versionadded:: 0.17 + """ + + def copy(self): + """Copy the beamformer. + + Returns + ------- + beamformer : instance of Beamformer + A deep copy of the beamformer. + """ + return deepcopy(self) + + def __repr__(self): # noqa: D105 + n_verts = sum(len(v) for v in self['vertices']) + n_channels = len(self['ch_names']) + if self['subject'] is None: + subject = 'unknown' + else: + subject = '"%s"' % (self['subject'],) + out = (' +# Britta Westner +# Susanna Aro +# Roman Goj +# +# License: BSD-3-Clause +import numpy as np + +from ..channels import equalize_channels +from ..io.pick import pick_info, pick_channels +from ..utils import (logger, verbose, _check_one_ch_type, + _check_channels_spatial_filter, _check_rank, + _check_option, _validate_type) +from ..forward import _subject_from_forward +from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth +from ..rank import compute_rank +from ..source_estimate import _make_stc, _get_src_type +from ._compute_beamformer import (_prepare_beamformer_input, + _compute_beamformer, _check_src_type, + Beamformer, _compute_power, + _proj_whiten_data) + + +@verbose +def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, + pick_ori=None, rank=None, weight_norm=None, + reduce_rank=False, depth=1., real_filter=True, + inversion='matrix', verbose=None): + """Compute a Dynamic Imaging of Coherent Sources (DICS) spatial filter. + + This is a beamformer filter that can be used to estimate the source power + at a specific frequency range :footcite:`GrossEtAl2001`. It does this by + constructing a spatial filter for each source point. + The computation of these filters is very similar to those of the LCMV + beamformer (:func:`make_lcmv`), but instead of operating on a covariance + matrix, the CSD matrix is used. When applying these filters to a CSD matrix + (see :func:`apply_dics_csd`), the source power can be estimated for each + source point. + + Parameters + ---------- + %(info_not_none)s + forward : instance of Forward + Forward operator. + csd : instance of CrossSpectralDensity + The data cross-spectral density (CSD) matrices. A source estimate is + performed for each frequency or frequency-bin defined in the CSD + object. + reg : float + The regularization to apply to the cross-spectral density before + computing the inverse. + noise_csd : instance of CrossSpectralDensity | None + Noise cross-spectral density (CSD) matrices. If provided, whitening + will be done. The noise CSDs need to have been computed for the same + frequencies as the data CSDs. Providing noise CSDs is mandatory if you + mix sensor types, e.g. gradiometers with magnetometers or EEG with + MEG. + + .. versionadded:: 0.20 + label : Label | None + Restricts the solution to a given label. + %(pick_ori_bf)s + %(rank_none)s + + .. versionadded:: 0.17 + %(weight_norm)s + + Defaults to ``None``, in which case no normalization is performed. + %(reduce_rank)s + %(depth)s + real_filter : bool + If ``True``, take only the real part of the cross-spectral-density + matrices to compute real filters. + + .. versionchanged:: 0.23 + Version 0.23 deprecated ``False`` as default for ``real_filter``. + With version 0.24, ``True`` is the new default. + %(inversion_bf)s + + .. 
versionchanged:: 0.21 + Default changed to ``'matrix'``. + %(verbose)s + + Returns + ------- + filters : instance of Beamformer + Dictionary containing filter weights from DICS beamformer. + Contains the following keys: + + 'kind' : str + The type of beamformer, in this case 'DICS'. + 'weights' : ndarray, shape (n_frequencies, n_weights) + For each frequency, the filter weights of the beamformer. + 'csd' : instance of CrossSpectralDensity + The data cross-spectral density matrices used to compute the + beamformer. + 'ch_names' : list of str + Channels used to compute the beamformer. + 'proj' : ndarray, shape (n_channels, n_channels) + Projections used to compute the beamformer. + 'vertices' : list of ndarray + Vertices for which the filter weights were computed. + 'n_sources' : int + Number of source locations for which the filter weights were + computed. + 'subject' : str + The subject ID. + 'pick-ori' : None | 'max-power' | 'normal' | 'vector' + The orientation in which the beamformer filters were computed. + 'inversion' : 'single' | 'matrix' + Whether the spatial filters were computed for each dipole + separately or jointly for all dipoles at each vertex using a + matrix inversion. + 'weight_norm' : None | 'unit-noise-gain' + The normalization of the weights. + 'src_type' : str + Type of source space. + 'is_free_ori' : bool + Whether the filter was computed in a fixed direction + (pick_ori='max-power', pick_ori='normal') or not. + 'whitener' : None | ndarray, shape (n_channels, n_channels) + Whitening matrix, provided if whitening was applied to the + covariance matrix and leadfield during computation of the + beamformer weights. + 'max-power-ori' : ndarray, shape (n_sources, 3) | None + When pick_ori='max-power', this field contains the estimated + direction of maximum power at each source location. + + See Also + -------- + apply_dics_csd + + Notes + ----- + The original reference is :footcite:`GrossEtAl2001`. See + :footcite:`vanVlietEtAl2018` for a tutorial style paper on the topic. + + The DICS beamformer is very similar to the LCMV (:func:`make_lcmv`) + beamformer and many of the parameters are shared. However, + :func:`make_dics` and :func:`make_lcmv` currently have different defaults + for these parameters, which were settled on separately through extensive + practical use case testing (but not necessarily exhaustive parameter space + searching), and it remains to be seen how functionally interchangeable they + could be. + + The default settings reproduce the DICS beamformer as described in + :footcite:`vanVlietEtAl2018`:: + + inversion='single', weight_norm=None, depth=1. + + To use the :func:`make_lcmv` defaults, use:: + + inversion='matrix', weight_norm='unit-noise-gain-invariant', depth=None + + For more information about ``real_filter``, see the + supplemental information from :footcite:`HippEtAl2011`. + + References + ---------- + ..
footbibliography:: + """ # noqa: E501 + rank = _check_rank(rank) + _check_option('pick_ori', pick_ori, [None, 'normal', 'max-power']) + _check_option('inversion', inversion, ['single', 'matrix']) + _validate_type(weight_norm, (str, None), 'weight_norm') + + frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies] + n_freqs = len(frequencies) + + _, _, allow_mismatch = _check_one_ch_type('dics', info, forward, csd, + noise_csd) + # remove bads so that equalize_channels only keeps all good + info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) + info, forward, csd = equalize_channels([info, forward, csd]) + + csd, noise_csd = _prepare_noise_csd(csd, noise_csd, real_filter) + + depth = _check_depth(depth, 'depth_sparse') + if inversion == 'single': + depth['combine_xyz'] = False + + is_free_ori, info, proj, vertices, G, whitener, nn, orient_std = \ + _prepare_beamformer_input( + info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank, + pca=False, **depth) + + # Compute ranks + csd_int_rank = [] + if not allow_mismatch: + noise_rank = compute_rank(noise_csd, info=info, rank=rank) + for i in range(len(frequencies)): + csd_rank = compute_rank(csd.get_data(index=i, as_cov=True), + info=info, rank=rank) + if not allow_mismatch: + for key in csd_rank: + if key not in noise_rank or csd_rank[key] != noise_rank[key]: + raise ValueError('%s data rank (%s) did not match the ' + 'noise rank (%s)' + % (key, csd_rank[key], + noise_rank.get(key, None))) + csd_int_rank.append(sum(csd_rank.values())) + + del noise_csd + ch_names = list(info['ch_names']) + + logger.info('Computing DICS spatial filters...') + Ws = [] + max_oris = [] + for i, freq in enumerate(frequencies): + if n_freqs > 1: + logger.info(' computing DICS spatial filter at %sHz (%d/%d)' % + (freq, i + 1, n_freqs)) + + Cm = csd.get_data(index=i) + + # XXX: Weird that real_filter happens *before* whitening, which could + # make things complex again...? 
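+ # With real_filter=True, only the real part of the CSD enters the filter + # computation below, so the resulting weights come out real-valued; the + # imaginary (phase-lag) part of the cross-spectra is discarded (see the + # ``real_filter`` note and the :footcite:`HippEtAl2011` supplement + # referenced in the docstring above).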
+ if real_filter: + Cm = Cm.real + + # compute spatial filter + n_orient = 3 if is_free_ori else 1 + W, max_power_ori = _compute_beamformer( + G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, + rank=csd_int_rank[i], inversion=inversion, nn=nn, + orient_std=orient_std, whitener=whitener) + Ws.append(W) + max_oris.append(max_power_ori) + + Ws = np.array(Ws) + if pick_ori == 'max-power': + max_oris = np.array(max_oris) + else: + max_oris = None + + src_type = _get_src_type(forward['src'], vertices) + subject = _subject_from_forward(forward) + is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False + n_sources = np.sum([len(v) for v in vertices]) + + filters = Beamformer( + kind='DICS', weights=Ws, csd=csd, ch_names=ch_names, proj=proj, + vertices=vertices, n_sources=n_sources, subject=subject, + pick_ori=pick_ori, inversion=inversion, weight_norm=weight_norm, + src_type=src_type, is_free_ori=is_free_ori, whitener=whitener, + max_power_ori=max_oris) + + return filters + + +def _prepare_noise_csd(csd, noise_csd, real_filter): + if noise_csd is not None: + csd, noise_csd = equalize_channels([csd, noise_csd]) + # Use the same noise CSD for all frequencies + if len(noise_csd.frequencies) > 1: + noise_csd = noise_csd.mean() + noise_csd = noise_csd.get_data(as_cov=True) + if real_filter: + noise_csd['data'] = noise_csd['data'].real + return csd, noise_csd + + +def _apply_dics(data, filters, info, tmin): + """Apply DICS spatial filter to data for source reconstruction.""" + if isinstance(data, np.ndarray) and data.ndim == 2: + data = [data] + one_epoch = True + else: + one_epoch = False + + Ws = filters['weights'] + one_freq = len(Ws) == 1 + + subject = filters['subject'] + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + for i, M in enumerate(data): + if not one_epoch: + logger.info("Processing epoch : %d" % (i + 1)) + + # Apply SSPs + M = _proj_whiten_data(M, info['projs'], filters) + + stcs = [] + for W in Ws: + # project to source space using beamformer weights + sol = np.dot(W, M) + + if filters['is_free_ori']: + logger.info('combining the current components...') + sol = combine_xyz(sol) + + tstep = 1.0 / info['sfreq'] + + stcs.append(_make_stc(sol, vertices=filters['vertices'], + src_type=filters['src_type'], tmin=tmin, + tstep=tstep, subject=subject, + warn_text=warn_text)) + if one_freq: + yield stcs[0] + else: + yield stcs + + logger.info('[done]') + + +@verbose +def apply_dics(evoked, filters, verbose=None): + """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. + + Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights + on evoked data. + + .. warning:: The result of this function is meant as an intermediate step + for further processing (such as computing connectivity). If + you are interested in estimating source time courses, use an + LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`) + instead. If you are interested in estimating spectral power at + the source level, use :func:`apply_dics_csd`. + .. warning:: This implementation has not been heavily tested so please + report any issues or suggestions. + + Parameters + ---------- + evoked : Evoked + Evoked data to apply the DICS beamformer weights to. + filters : instance of Beamformer + DICS spatial filter (beamformer weights) + Filter weights returned from :func:`make_dics`. + %(verbose)s + + Returns + ------- + stc : SourceEstimate | VolSourceEstimate | list + Source time courses. 
If the DICS beamformer has been computed for more + than one frequency, a list is returned containing the corresponding + time courses for each frequency. + + See Also + -------- + apply_dics_epochs + apply_dics_csd + """ # noqa: E501 + _check_reference(evoked) + + info = evoked.info + data = evoked.data + tmin = evoked.times[0] + + sel = _check_channels_spatial_filter(evoked.ch_names, filters) + data = data[sel] + + stc = _apply_dics(data=data, filters=filters, info=info, tmin=tmin) + + return next(stc) + + +@verbose +def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None): + """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. + + Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights + on single trial data. + + .. warning:: The result of this function is meant as an intermediate step + for further processing (such as computing connectivity). If + you are interested in estimating source time courses, use an + LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`) + instead. If you are interested in estimating spectral power at + the source level, use :func:`apply_dics_csd`. + .. warning:: This implementation has not been heavily tested so please + report any issues or suggestions. + + Parameters + ---------- + epochs : Epochs + Single trial epochs. + filters : instance of Beamformer + DICS spatial filter (beamformer weights) + Filter weights returned from :func:`make_dics`. The DICS filters must + have been computed for a single frequency only. + return_generator : bool + Return a generator object instead of a list. This allows iterating + over the stcs without having to keep them all in memory. + %(verbose)s + + Returns + ------- + stc : list | generator of (SourceEstimate | VolSourceEstimate) + The source estimates for all epochs. + + See Also + -------- + apply_dics + apply_dics_csd + """ + _check_reference(epochs) + + if len(filters['weights']) > 1: + raise ValueError( + 'This function only works on DICS beamformer weights that have ' + 'been computed for a single frequency. When calling make_dics(), ' + 'make sure to use a CSD object with only a single frequency (or ' + 'frequency-bin) defined.' + ) + + info = epochs.info + tmin = epochs.times[0] + + sel = _check_channels_spatial_filter(epochs.ch_names, filters) + data = epochs.get_data()[:, sel, :] + + stcs = _apply_dics(data=data, filters=filters, info=info, tmin=tmin) + + if not return_generator: + stcs = list(stcs) + + return stcs + + +@verbose +def apply_dics_csd(csd, filters, verbose=None): + """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. + + Apply a previously computed DICS beamformer to a cross-spectral density + (CSD) object to estimate source power in time and frequency windows + specified in the CSD object :footcite:`GrossEtAl2001`. + + Parameters + ---------- + csd : instance of CrossSpectralDensity + The data cross-spectral density (CSD) matrices. A source estimate is + performed for each frequency or frequency-bin defined in the CSD + object. + filters : instance of Beamformer + DICS spatial filter (beamformer weights) + Filter weights returned from `make_dics`. + %(verbose)s + + Returns + ------- + stc : SourceEstimate + Source power with frequency instead of time. + frequencies : list of float + The frequencies for which the source power has been computed. If the + data CSD object defines frequency-bins instead of exact frequencies, + the mean of each bin is returned. + + References + ---------- + ..
footbibliography:: + """ # noqa: E501 + ch_names = filters['ch_names'] + vertices = filters['vertices'] + n_orient = 3 if filters['is_free_ori'] else 1 + subject = filters['subject'] + whitener = filters['whitener'] + n_sources = filters['n_sources'] + + # If CSD is summed over multiple frequencies, take the average frequency + frequencies = [np.mean(dfreq) for dfreq in csd.frequencies] + n_freqs = len(frequencies) + + source_power = np.zeros((n_sources, len(csd.frequencies))) + + # Ensure the CSD is in the same order as the weights + csd_picks = [csd.ch_names.index(ch) for ch in ch_names] + + logger.info('Computing DICS source power...') + for i, freq in enumerate(frequencies): + if n_freqs > 1: + logger.info(' applying DICS spatial filter at %sHz (%d/%d)' % + (freq, i + 1, n_freqs)) + + Cm = csd.get_data(index=i) + Cm = Cm[csd_picks, :][:, csd_picks] + W = filters['weights'][i] + + # Whiten the CSD + Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) + + source_power[:, i] = _compute_power(Cm, W, n_orient) + + logger.info('[done]') + + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + return (_make_stc(source_power, vertices=vertices, + src_type=filters['src_type'], tmin=0., tstep=1., + subject=subject, warn_text=warn_text), + frequencies) diff --git a/python/libs/mne/beamformer/_lcmv.py b/python/libs/mne/beamformer/_lcmv.py new file mode 100644 index 0000000..fcfe711 --- /dev/null +++ b/python/libs/mne/beamformer/_lcmv.py @@ -0,0 +1,438 @@ +"""Compute Linearly constrained minimum variance (LCMV) beamformer.""" + +# Authors: Alexandre Gramfort +# Roman Goj +# Britta Westner +# +# License: BSD-3-Clause +import numpy as np + +from ..rank import compute_rank +from ..io.meas_info import _simplify_info +from ..io.pick import pick_channels_cov, pick_info +from ..forward import _subject_from_forward +from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth +from ..source_estimate import _make_stc, _get_src_type +from ..utils import (logger, verbose, _check_channels_spatial_filter, + _check_one_ch_type, _check_info_inv, warn) +from ._compute_beamformer import ( + _prepare_beamformer_input, _compute_power, + _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) + + +@verbose +def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, + pick_ori=None, rank='info', + weight_norm='unit-noise-gain-invariant', + reduce_rank=False, depth=None, inversion='matrix', verbose=None): + """Compute LCMV spatial filter. + + Parameters + ---------- + %(info_not_none)s + Specifies the channels to include. Bad channels (in ``info['bads']``) + are not used. + forward : instance of Forward + Forward operator. + data_cov : instance of Covariance + The data covariance. + reg : float + The regularization for the whitened data covariance. + noise_cov : instance of Covariance + The noise covariance. If provided, whitening will be done. Providing a + noise covariance is mandatory if you mix sensor types, e.g. + gradiometers with magnetometers or EEG with MEG. + label : instance of Label + Restricts the LCMV solution to a given label. + %(pick_ori_bf)s + + - ``'vector'`` + Keeps the currents for each direction separate + %(rank_info)s + %(weight_norm)s + + Defaults to ``'unit-noise-gain-invariant'``. + %(reduce_rank)s + %(depth)s + + .. versionadded:: 0.18 + %(inversion_bf)s + + .. 
versionadded:: 0.21 + %(verbose)s + + Returns + ------- + filters : instance of Beamformer + Dictionary containing filter weights from LCMV beamformer. + Contains the following keys: + + 'kind' : str + The type of beamformer, in this case 'LCMV'. + 'weights' : array + The filter weights of the beamformer. + 'data_cov' : instance of Covariance + The data covariance matrix used to compute the beamformer. + 'noise_cov' : instance of Covariance | None + The noise covariance matrix used to compute the beamformer. + 'whitener' : None | ndarray, shape (n_channels, n_channels) + Whitening matrix, provided if whitening was applied to the + covariance matrix and leadfield during computation of the + beamformer weights. + 'weight_norm' : str | None + Type of weight normalization used to compute the filter + weights. + 'pick-ori' : None | 'max-power' | 'normal' | 'vector' + The orientation in which the beamformer filters were computed. + 'ch_names' : list of str + Channels used to compute the beamformer. + 'proj' : array + Projections used to compute the beamformer. + 'is_ssp' : bool + If True, projections were applied prior to filter computation. + 'vertices' : list + Vertices for which the filter weights were computed. + 'is_free_ori' : bool + If True, the filter was computed with free source orientation. + 'n_sources' : int + Number of source locations for which the filter weights were + computed. + 'src_type' : str + Type of source space. + 'source_nn' : ndarray, shape (n_sources, 3) + For each source location, the surface normal. + 'proj' : ndarray, shape (n_channels, n_channels) + Projections used to compute the beamformer. + 'subject' : str + The subject ID. + 'rank' : int + The rank of the data covariance matrix used to compute the + beamformer weights. + 'max-power-ori' : ndarray, shape (n_sources, 3) | None + When pick_ori='max-power', this field contains the estimated + direction of maximum power at each source location. + 'inversion' : 'single' | 'matrix' + Whether the spatial filters were computed for each dipole + separately or jointly for all dipoles at each vertex using a + matrix inversion. + + Notes + ----- + The original reference is :footcite:`VanVeenEtAl1997`. + + To obtain the Sekihara unit-noise-gain vector beamformer, you should use + ``weight_norm='unit-noise-gain', pick_ori='vector'`` followed by + ``vec_stc.project('pca', src)``. + + .. versionchanged:: 0.21 + The computations were extensively reworked, and the default for + ``weight_norm`` was set to ``'unit-noise-gain-invariant'``. + + References + ---------- + .. footbibliography:: + """ + # check number of sensor types present in the data and ensure a noise cov + info = _simplify_info(info) + noise_cov, _, allow_mismatch = _check_one_ch_type( + 'lcmv', info, forward, data_cov, noise_cov) + # XXX we need this extra picking step (can't just rely on minimum norm's + # because there can be a mismatch. 
Should probably add an extra arg to + # _prepare_beamformer_input at some point (later) + picks = _check_info_inv(info, forward, data_cov, noise_cov) + info = pick_info(info, picks) + data_rank = compute_rank(data_cov, rank=rank, info=info) + noise_rank = compute_rank(noise_cov, rank=rank, info=info) + for key in data_rank: + if (key not in noise_rank or data_rank[key] != noise_rank[key]) and \ + not allow_mismatch: + raise ValueError('%s data rank (%s) did not match the noise ' + 'rank (%s)' + % (key, data_rank[key], + noise_rank.get(key, None))) + del noise_rank + rank = data_rank + logger.info('Making LCMV beamformer with rank %s' % (rank,)) + del data_rank + depth = _check_depth(depth, 'depth_sparse') + if inversion == 'single': + depth['combine_xyz'] = False + + is_free_ori, info, proj, vertno, G, whitener, nn, orient_std = \ + _prepare_beamformer_input( + info, forward, label, pick_ori, noise_cov=noise_cov, rank=rank, + pca=False, **depth) + ch_names = list(info['ch_names']) + + data_cov = pick_channels_cov(data_cov, include=ch_names) + Cm = data_cov._get_square() + if 'estimator' in data_cov: + del data_cov['estimator'] + rank_int = sum(rank.values()) + del rank + + # compute spatial filter + n_orient = 3 if is_free_ori else 1 + W, max_power_ori = _compute_beamformer( + G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, + inversion=inversion, nn=nn, orient_std=orient_std, + whitener=whitener) + + # get src type to store with filters for _make_stc + src_type = _get_src_type(forward['src'], vertno) + + # get subject to store with filters + subject_from = _subject_from_forward(forward) + + # Is the computed beamformer a scalar or vector beamformer? + is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False + is_ssp = bool(info['projs']) + + filters = Beamformer( + kind='LCMV', weights=W, data_cov=data_cov, noise_cov=noise_cov, + whitener=whitener, weight_norm=weight_norm, pick_ori=pick_ori, + ch_names=ch_names, proj=proj, is_ssp=is_ssp, vertices=vertno, + is_free_ori=is_free_ori, n_sources=forward['nsource'], + src_type=src_type, source_nn=forward['source_nn'].copy(), + subject=subject_from, rank=rank_int, max_power_ori=max_power_ori, + inversion=inversion) + + return filters + + +def _apply_lcmv(data, filters, info, tmin): + """Apply LCMV spatial filter to data for source reconstruction.""" + if isinstance(data, np.ndarray) and data.ndim == 2: + data = [data] + return_single = True + else: + return_single = False + + W = filters['weights'] + + for i, M in enumerate(data): + if len(M) != len(filters['ch_names']): + raise ValueError('data and picks must have the same length') + + if not return_single: + logger.info("Processing epoch : %d" % (i + 1)) + + M = _proj_whiten_data(M, info['projs'], filters) + + # project to source space using beamformer weights + vector = False + if filters['is_free_ori']: + sol = np.dot(W, M) + if filters['pick_ori'] == 'vector': + vector = True + else: + logger.info('combining the current components...') + sol = combine_xyz(sol) + else: + # Linear inverse: do computation here or delayed + if (M.shape[0] < W.shape[0] and + filters['pick_ori'] != 'max-power'): + sol = (W, M) + else: + sol = np.dot(W, M) + + tstep = 1.0 / info['sfreq'] + + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + yield _make_stc(sol, vertices=filters['vertices'], tmin=tmin, + tstep=tstep, subject=filters['subject'], + vector=vector, source_nn=filters['source_nn'], + 
src_type=filters['src_type'], warn_text=warn_text) + + logger.info('[done]') + + +def _deprecate_max_ori_out(max_ori_out): + if max_ori_out is not None: + warn('max_ori_out will be removed in 1.0, do not pass it as an ' + 'argument', DeprecationWarning) + + +@verbose +def apply_lcmv(evoked, filters, *, max_ori_out=None, verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + on evoked data. + + Parameters + ---------- + evoked : Evoked + Evoked data to invert. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights). + Filter weights returned from :func:`make_lcmv`. + %(max_ori_out_deprecated)s + %(verbose)s + + Returns + ------- + stc : SourceEstimate | VolSourceEstimate | VectorSourceEstimate + Source time courses. + + See Also + -------- + make_lcmv, apply_lcmv_raw, apply_lcmv_epochs, apply_lcmv_cov + + Notes + ----- + .. versionadded:: 0.18 + """ + _check_reference(evoked) + _deprecate_max_ori_out(max_ori_out) + + info = evoked.info + data = evoked.data + tmin = evoked.times[0] + + sel = _check_channels_spatial_filter(evoked.ch_names, filters) + data = data[sel] + + stc = _apply_lcmv(data=data, filters=filters, info=info, + tmin=tmin) + + return next(stc) + + +@verbose +def apply_lcmv_epochs(epochs, filters, *, max_ori_out=None, + return_generator=False, verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + on single trial data. + + Parameters + ---------- + epochs : Epochs + Single trial epochs. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights) + Filter weights returned from :func:`make_lcmv`. + %(max_ori_out_deprecated)s + return_generator : bool + Return a generator object instead of a list. This allows iterating + over the stcs without having to keep them all in memory. + %(verbose)s + + Returns + ------- + stc: list | generator of (SourceEstimate | VolSourceEstimate) + The source estimates for all epochs. + + See Also + -------- + make_lcmv, apply_lcmv_raw, apply_lcmv, apply_lcmv_cov + """ + _check_reference(epochs) + _deprecate_max_ori_out(max_ori_out) + + info = epochs.info + tmin = epochs.times[0] + + sel = _check_channels_spatial_filter(epochs.ch_names, filters) + data = epochs.get_data()[:, sel, :] + stcs = _apply_lcmv(data=data, filters=filters, info=info, + tmin=tmin) + + if not return_generator: + stcs = [s for s in stcs] + + return stcs + + +@verbose +def apply_lcmv_raw(raw, filters, start=None, stop=None, *, max_ori_out=None, + verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + on raw data. + + Parameters + ---------- + raw : mne.io.Raw + Raw data to invert. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights). + Filter weights returned from :func:`make_lcmv`. + start : int + Index of first time sample (index not time is seconds). + stop : int + Index of first time sample not to include (index not time is seconds). + %(max_ori_out_deprecated)s + %(verbose)s + + Returns + ------- + stc : SourceEstimate | VolSourceEstimate + Source time courses. 
+ + See Also + -------- + make_lcmv, apply_lcmv_epochs, apply_lcmv, apply_lcmv_cov + """ + _check_reference(raw) + _deprecate_max_ori_out(max_ori_out) + + info = raw.info + + sel = _check_channels_spatial_filter(raw.ch_names, filters) + data, times = raw[sel, start:stop] + tmin = times[0] + + stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) + + return next(stc) + + +@verbose +def apply_lcmv_cov(data_cov, filters, verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + to a data covariance matrix to estimate source power. + + Parameters + ---------- + data_cov : instance of Covariance + Data covariance matrix. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights). + Filter weights returned from :func:`make_lcmv`. + %(verbose)s + + Returns + ------- + stc : SourceEstimate | VolSourceEstimate + Source power. + + See Also + -------- + make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw + """ + sel = _check_channels_spatial_filter(data_cov.ch_names, filters) + sel_names = [data_cov.ch_names[ii] for ii in sel] + data_cov = pick_channels_cov(data_cov, sel_names) + + n_orient = filters['weights'].shape[0] // filters['n_sources'] + # Need to project and whiten along both dimensions + data = _proj_whiten_data(data_cov['data'].T, data_cov['projs'], filters) + data = _proj_whiten_data(data.T, data_cov['projs'], filters) + del data_cov + source_power = _compute_power(data, filters['weights'], n_orient) + + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + return(_make_stc(source_power, vertices=filters['vertices'], + src_type=filters['src_type'], tmin=0., tstep=1., + subject=filters['subject'], + source_nn=filters['source_nn'], warn_text=warn_text)) diff --git a/python/libs/mne/beamformer/_rap_music.py b/python/libs/mne/beamformer/_rap_music.py new file mode 100644 index 0000000..827b085 --- /dev/null +++ b/python/libs/mne/beamformer/_rap_music.py @@ -0,0 +1,290 @@ +"""Compute a Recursively Applied and Projected MUltiple Signal Classification (RAP-MUSIC).""" # noqa + +# Authors: Yousra Bekhti +# Alexandre Gramfort +# +# License: BSD-3-Clause + +import numpy as np + +from ..forward import is_fixed_orient, convert_forward_solution +from ..io.pick import pick_channels_evoked, pick_info, pick_channels_forward +from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse +from ..minimum_norm.inverse import _log_exp_var +from ..utils import logger, verbose, _check_info_inv, fill_doc +from ..dipole import Dipole +from ._compute_beamformer import _prepare_beamformer_input + + +@fill_doc +def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, + picks=None): + """RAP-MUSIC for evoked data. + + Parameters + ---------- + data : array, shape (n_channels, n_times) + Evoked data. + %(info_not_none)s + times : array + Times. + forward : instance of Forward + Forward operator. + noise_cov : instance of Covariance + The noise covariance. + n_dipoles : int + The number of dipoles to estimate. The default value is 2. + picks : list of int + Caller ensures this is a list of int. + + Returns + ------- + dipoles : list of instances of Dipole + The dipole fits. + explained_data : array | None + Data explained by the dipoles using a least square fitting with the + selected active dipoles and their estimated orientation. + Computed only if return_explained_data is True. 
+ """ + from scipy import linalg + info = pick_info(info, picks) + del picks + # things are much simpler if we avoid surface orientation + align = forward['source_nn'].copy() + if forward['surf_ori'] and not is_fixed_orient(forward): + forward = convert_forward_solution(forward, surf_ori=False) + is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input( + info, forward, noise_cov=noise_cov, rank=None) + forward = pick_channels_forward(forward, info['ch_names'], ordered=True) + del info + + # whiten the data (leadfield already whitened) + M = np.dot(whitener, data) + del data + + _, eig_vectors = linalg.eigh(np.dot(M, M.T)) + phi_sig = eig_vectors[:, -n_dipoles:] + + n_orient = 3 if is_free_ori else 1 + G.shape = (G.shape[0], -1, n_orient) + gain = forward['sol']['data'].copy() + gain.shape = G.shape + n_channels = G.shape[0] + A = np.empty((n_channels, n_dipoles)) + gain_dip = np.empty((n_channels, n_dipoles)) + oris = np.empty((n_dipoles, 3)) + poss = np.empty((n_dipoles, 3)) + + G_proj = G.copy() + phi_sig_proj = phi_sig.copy() + + idxs = list() + for k in range(n_dipoles): + subcorr_max = -1. + source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0] + for i_source in range(G.shape[1]): + Gk = G_proj[:, i_source] + subcorr, ori = _compute_subcorr(Gk, phi_sig_proj) + if subcorr > subcorr_max: + subcorr_max = subcorr + source_idx = i_source + source_ori = ori + source_pos = forward['source_rr'][i_source] + if n_orient == 3 and align is not None: + surf_normal = forward['source_nn'][3 * i_source + 2] + # make sure ori is aligned to the surface orientation + source_ori *= np.sign(source_ori @ surf_normal) or 1. + if n_orient == 1: + source_ori = forward['source_nn'][i_source] + + idxs.append(source_idx) + if n_orient == 3: + Ak = np.dot(G[:, source_idx], source_ori) + else: + Ak = G[:, source_idx, 0] + A[:, k] = Ak + oris[k] = source_ori + poss[k] = source_pos + + logger.info("source %s found: p = %s" % (k + 1, source_idx)) + if n_orient == 3: + logger.info("ori = %s %s %s" % tuple(oris[k])) + + projection = _compute_proj(A[:, :k + 1]) + G_proj = np.einsum('ab,bso->aso', projection, G) + phi_sig_proj = np.dot(projection, phi_sig) + del G, G_proj + + sol = linalg.lstsq(A, M)[0] + if n_orient == 3: + X = sol[:, np.newaxis] * oris[:, :, np.newaxis] + X.shape = (-1, len(times)) + else: + X = sol + + gain_active = gain[:, idxs] + if n_orient == 3: + gain_dip = (oris * gain_active).sum(-1) + idxs = np.array(idxs) + active_set = np.array( + [[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel() + else: + gain_dip = gain_active[:, :, 0] + active_set = idxs + gain_active = whitener @ gain_active.reshape(gain.shape[0], -1) + assert gain_active.shape == (n_channels, X.shape[0]) + + explained_data = gain_dip @ sol + M_estimate = whitener @ explained_data + _log_exp_var(M, M_estimate) + tstep = np.median(np.diff(times)) if len(times) > 1 else 1. + dipoles = _make_dipoles_sparse( + X, active_set, forward, times[0], tstep, M, + gain_active, active_is_idx=True) + for dipole, ori in zip(dipoles, oris): + signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True)) + dipole.ori *= signs + dipole.amplitude *= signs[:, 0] + logger.info('[done]') + return dipoles, explained_data + + +def _make_dipoles(times, poss, oris, sol, gof): + """Instantiate a list of Dipoles. + + Parameters + ---------- + times : array, shape (n_times,) + The time instants. + poss : array, shape (n_dipoles, 3) + The dipoles' positions. + oris : array, shape (n_dipoles, 3) + The dipoles' orientations. 
+ sol : array, shape (n_times,) + The dipoles' amplitudes over time. + gof : array, shape (n_times,) + The goodness of fit of the dipoles. + Shared between all dipoles. + + Returns + ------- + dipoles : list + The list of Dipole instances. + """ + oris = np.array(oris) + + dipoles = [] + for i_dip in range(poss.shape[0]): + i_pos = poss[i_dip][np.newaxis, :].repeat(len(times), axis=0) + i_ori = oris[i_dip][np.newaxis, :].repeat(len(times), axis=0) + dipoles.append(Dipole(times, i_pos, sol[i_dip], i_ori, gof)) + + return dipoles + + +def _compute_subcorr(G, phi_sig): + """Compute the subspace correlation.""" + from scipy import linalg + Ug, Sg, Vg = linalg.svd(G, full_matrices=False) + # Now we look at the actual rank of the forward fields + # in G and handle the fact that it might be rank defficient + # eg. when using MEG and a sphere model for which the + # radial component will be truly 0. + rank = np.sum(Sg > (Sg[0] * 1e-6)) + if rank == 0: + return 0, np.zeros(len(G)) + rank = max(rank, 2) # rank cannot be 1 + Ug, Sg, Vg = Ug[:, :rank], Sg[:rank], Vg[:rank] + tmp = np.dot(Ug.T.conjugate(), phi_sig) + Uc, Sc, _ = linalg.svd(tmp, full_matrices=False) + X = np.dot(Vg.T / Sg[None, :], Uc[:, 0]) # subcorr + return Sc[0], X / np.linalg.norm(X) + + +def _compute_proj(A): + """Compute the orthogonal projection operation for a manifold vector A.""" + from scipy import linalg + U, _, _ = linalg.svd(A, full_matrices=False) + return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate()) + + +@verbose +def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False, + verbose=None): + """RAP-MUSIC source localization method. + + Compute Recursively Applied and Projected MUltiple SIgnal Classification + (RAP-MUSIC) on evoked data. + + .. note:: The goodness of fit (GOF) of all the returned dipoles is the + same and corresponds to the GOF of the full set of dipoles. + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to localize. + forward : instance of Forward + Forward operator. + noise_cov : instance of Covariance + The noise covariance. + n_dipoles : int + The number of dipoles to look for. The default value is 5. + return_residual : bool + If True, the residual is returned as an Evoked instance. + %(verbose)s + + Returns + ------- + dipoles : list of instance of Dipole + The dipole fits. + residual : instance of Evoked + The residual a.k.a. data not explained by the dipoles. + Only returned if return_residual is True. + + See Also + -------- + mne.fit_dipole + + Notes + ----- + The references are: + + J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively + applied and projected (RAP) MUSIC. Signal Processing, IEEE Trans. 47, 2 + (February 1999), 332-340. + DOI=10.1109/78.740118 https://doi.org/10.1109/78.740118 + + Mosher, J.C.; Leahy, R.M., EEG and MEG source localization using + recursively applied (RAP) MUSIC, Signals, Systems and Computers, 1996. + pp.1201,1207 vol.2, 3-6 Nov. 1996 + doi: 10.1109/ACSSC.1996.599135 + + .. 
versionadded:: 0.9.0 + """ + info = evoked.info + data = evoked.data + times = evoked.times + + picks = _check_info_inv(info, forward, data_cov=None, noise_cov=noise_cov) + + data = data[picks] + + dipoles, explained_data = _apply_rap_music(data, info, times, forward, + noise_cov, n_dipoles, + picks) + + if return_residual: + residual = evoked.copy() + selection = [info['ch_names'][p] for p in picks] + + residual = pick_channels_evoked(residual, + include=selection) + residual.data -= explained_data + active_projs = [p for p in residual.info['projs'] if p['active']] + for p in active_projs: + p['active'] = False + residual.add_proj(active_projs, remove_existing=True) + residual.apply_proj() + return dipoles, residual + else: + return dipoles diff --git a/python/libs/mne/beamformer/resolution_matrix.py b/python/libs/mne/beamformer/resolution_matrix.py new file mode 100644 index 0000000..b0e7c45 --- /dev/null +++ b/python/libs/mne/beamformer/resolution_matrix.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +"""Compute resolution matrix for beamformers.""" +# Authors: olaf.hauk@mrc-cbu.cam.ac.uk +# +# License: BSD-3-Clause +import numpy as np + +from ..io.pick import pick_channels, pick_info, pick_channels_forward +from ..evoked import EvokedArray +from ..utils import logger, fill_doc +from ._lcmv import apply_lcmv + + +@fill_doc +def make_lcmv_resolution_matrix(filters, forward, info): + """Compute resolution matrix for LCMV beamformer. + + Parameters + ---------- + filters : instance of Beamformer + Dictionary containing filter weights from LCMV beamformer + (see mne.beamformer.make_lcmv). + forward : instance of Forward + Forward Solution with leadfield matrix. + %(info_not_none)s Used to compute LCMV filters. + + Returns + ------- + resmat : array, shape (n_dipoles_lcmv, n_dipoles_fwd) + Resolution matrix (filter matrix multiplied to leadfield from + forward solution). Numbers of rows (n_dipoles_lcmv) and columns + (n_dipoles_fwd) may differ by a factor depending on orientation + constraints of filter and forward solution, respectively (e.g. factor 3 + for free dipole orientation versus factor 1 for scalar beamformers). + """ + # don't include bad channels from noise covariance matrix + bads_filt = filters['noise_cov']['bads'] + ch_names = filters['noise_cov']['names'] + + # good channels + ch_names = [c for c in ch_names if (c not in bads_filt)] + + # adjust channels in forward solution + forward = pick_channels_forward(forward, ch_names, ordered=True) + + # get leadfield matrix from forward solution + leadfield = forward['sol']['data'] + + # get the filter weights for beamformer as matrix + filtmat = _get_matrix_from_lcmv(filters, forward, info) + + # compute resolution matrix + resmat = filtmat.dot(leadfield) + + shape = resmat.shape + + logger.info('Dimensions of LCMV resolution matrix: %d by %d.' % shape) + + return resmat + + +def _get_matrix_from_lcmv(filters, forward, info, verbose=None): + """Get inverse matrix for LCMV beamformer. + + Returns + ------- + invmat : array, shape (n_dipoles, n_channels) + Inverse matrix associated with LCMV beamformer filters. + """ + # number of channels for identity matrix + info = pick_info( + info, pick_channels(info['ch_names'], filters['ch_names'])) + n_chs = len(info['ch_names']) + + # create identity matrix as input for inverse operator + # set elements to zero for non-selected channels + id_mat = np.eye(n_chs) + + # convert identity matrix to evoked data type (pretending it's an epochs + evo_ident = EvokedArray(id_mat, info=info, tmin=0.) 
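+ # Each column of the identity matrix is a unit impulse on one channel, + # so beamforming it simply reads out the filter weights: invmat below + # equals the weight matrix W, and make_lcmv_resolution_matrix() then + # forms resmat = W @ L for the leadfield L.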
+ + # apply beamformer to identity matrix + stc_lcmv = apply_lcmv(evo_ident, filters, verbose=verbose) + + # turn source estimate into numpy array + invmat = stc_lcmv.data + + return invmat diff --git a/python/libs/mne/beamformer/tests/__init__.py b/python/libs/mne/beamformer/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/beamformer/tests/test_dics.py b/python/libs/mne/beamformer/tests/test_dics.py new file mode 100644 index 0000000..fa29ff7 --- /dev/null +++ b/python/libs/mne/beamformer/tests/test_dics.py @@ -0,0 +1,722 @@ +# Authors: Marijn van Vliet +# Britta Westner +# +# License: BSD-3-Clause + +import copy as cp +import os.path as op + +import pytest +from numpy.testing import (assert_array_equal, assert_allclose, + assert_array_less) +import numpy as np + +import mne +from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs, + apply_dics_csd, read_beamformer, Beamformer) +from mne.beamformer._compute_beamformer import _prepare_beamformer_input +from mne.beamformer._dics import _prepare_noise_csd +from mne.beamformer.tests.test_lcmv import _assert_weight_norm +from mne.datasets import testing +from mne.io.constants import FIFF +from mne.proj import compute_proj_evoked, make_projector +from mne.surface import _compute_nearest +from mne.time_frequency import CrossSpectralDensity, csd_morlet +from mne.time_frequency.csd import _sym_mat_to_vector +from mne.transforms import invert_transform, apply_trans +from mne.utils import object_diff, requires_version, catch_logging + +data_path = testing.data_path(download=False) +fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_vol = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-vol-7-fwd.fif') +fname_event = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc_raw-eve.fif') + +subjects_dir = op.join(data_path, 'subjects') + + +@pytest.fixture(scope='module', params=[testing._pytest_param()]) +def _load_forward(): + """Load forward models.""" + fwd_free = mne.read_forward_solution(fname_fwd) + fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False) + fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False) + fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True, + use_cps=False) + fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True, + use_cps=False) + fwd_vol = mne.read_forward_solution(fname_fwd_vol) + return fwd_free, fwd_surf, fwd_fixed, fwd_vol + + +def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default + """Simulate an oscillator on the cortex.""" + source_vertno = fwd['src'][0]['vertno'][idx] + + sfreq = 50. # Hz. + times = np.arange(10 * sfreq) / sfreq # 10 seconds of data + signal = np.sin(20 * 2 * np.pi * times) # 20 Hz oscillator + signal[:len(times) // 2] *= 2 # Make signal louder at the beginning + signal *= 1e-9 # Scale to be in the ballpark of MEG data + + # Construct a SourceEstimate object that describes the signal at the + # cortical level.
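+ # Only the single vertex source_vertno in the left hemisphere carries + # the signal; the right-hemisphere vertex list below stays empty.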
+ stc = mne.SourceEstimate( + signal[np.newaxis, :], + vertices=[[source_vertno], []], + tmin=0, + tstep=1 / sfreq, + subject='sample', + ) + + # Create an info object that holds information about the sensors + info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad') + with info._unlock(): + info.update(fwd['info']) # Merge in sensor position information + # heavily decimate sensors to make it much faster + info = mne.pick_info(info, np.arange(info['nchan'])[::5]) + fwd = mne.pick_channels_forward(fwd, info['ch_names']) + + # Run the simulated signal through the forward model, obtaining + # simulated sensor data. + raw = mne.apply_forward_raw(fwd, stc, info) + + # Add a little noise + random = np.random.RandomState(42) + noise = random.randn(*raw._data.shape) * 1e-14 + raw._data += noise + + # Define a single epoch (weird baseline but shouldn't matter) + epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0, + tmax=raw.times[-1], baseline=(0., 0.), preload=True) + evoked = epochs.average() + + # Compute the cross-spectral density matrix + csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5) + + labels = mne.read_labels_from_annot( + 'sample', hemi='lh', subjects_dir=subjects_dir) + label = [ + label for label in labels if np.in1d(source_vertno, label.vertices)[0]] + assert len(label) == 1 + label = label[0] + vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno']) + source_ind = vertices.tolist().index(source_vertno) + assert vertices[source_ind] == source_vertno + return epochs, evoked, csd, source_vertno, label, vertices, source_ind + + +idx_param = pytest.mark.parametrize('idx', [ + 0, + pytest.param(100, marks=pytest.mark.slowtest), + 200, + pytest.param(233, marks=pytest.mark.slowtest), +]) + + +def _rand_csd(rng, info): + scales = mne.make_ad_hoc_cov(info).data + n = scales.size + # Some random complex correlation structure (with channel scalings) + data = rng.randn(n, n) + 1j * rng.randn(n, n) + data = data @ data.conj().T + data *= scales + data *= scales[:, np.newaxis] + data.flat[::n + 1] = scales + return data + + +def _make_rand_csd(info, csd): + rng = np.random.RandomState(0) + data = _rand_csd(rng, info) + # now we need to have the same null space as the data csd + s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0])) + mask = np.abs(s) >= s[-1] * 1e-7 + rank = mask.sum() + assert rank == len(data) == len(info['ch_names']) + noise_csd = CrossSpectralDensity( + _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft) + return noise_csd, rank + + +@pytest.mark.slowtest +@testing.requires_testing_data +@requires_version('h5io') +@idx_param +@pytest.mark.parametrize('whiten', [ + pytest.param(False, marks=pytest.mark.slowtest), + True, +]) +def test_make_dics(tmp_path, _load_forward, idx, whiten): + """Test making DICS beamformer filters.""" + # We only test proper handling of parameters here. Testing the results is + # done in test_apply_dics_timeseries and test_apply_dics_csd. 
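+ # (DICS is essentially LCMV computed in the frequency domain: a + # cross-spectral density matrix takes the place of the data covariance.)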
+ + fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward + epochs, _, csd, _, label, vertices, source_ind = \ + _simulate_data(fwd_fixed, idx) + with pytest.raises(ValueError, match='several sensor types'): + make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) + if whiten: + noise_csd, rank = _make_rand_csd(epochs.info, csd) + assert rank == len(epochs.info['ch_names']) == 62 + else: + noise_csd = None + epochs.pick_types(meg='grad') + + with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"): + make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent", + noise_csd=noise_csd) + with pytest.raises(ValueError, match='rank, if str'): + make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd) + with pytest.raises(TypeError, match='rank must be'): + make_dics(epochs.info, fwd_fixed, csd, rank=1., noise_csd=noise_csd) + + # Test if fixed forward operator is detected when picking normal + # orientation + with pytest.raises(ValueError, match='forward operator with free ori'): + make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal", + noise_csd=noise_csd) + + # Test if non-surface oriented forward operator is detected when picking + # normal orientation + with pytest.raises(ValueError, match='oriented in surface coordinates'): + make_dics(epochs.info, fwd_free, csd, pick_ori="normal", + noise_csd=noise_csd) + + # Test if volume forward operator is detected when picking normal + # orientation + with pytest.raises(ValueError, match='oriented in surface coordinates'): + make_dics(epochs.info, fwd_vol, csd, pick_ori="normal", + noise_csd=noise_csd) + + # Test invalid combinations of parameters + with pytest.raises(ValueError, match='reduce_rank cannot be used with'): + make_dics(epochs.info, fwd_free, csd, inversion='single', + reduce_rank=True, noise_csd=noise_csd) + # TODO: Restore this? 
+ # with pytest.raises(ValueError, match='not stable with depth'): + # make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain', + # inversion='single', depth=None) + + # Sanity checks on the returned filters + n_freq = len(csd.frequencies) + vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno']) + n_verts = len(vertices) + n_orient = 3 + + n_channels = len(epochs.ch_names) + # Test return values + weight_norm = 'unit-noise-gain' + inversion = 'single' + filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, + weight_norm=weight_norm, depth=None, real_filter=False, + noise_csd=noise_csd, inversion=inversion) + assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) + assert np.iscomplexobj(filters['weights']) + assert filters['csd'].ch_names == epochs.ch_names + assert isinstance(filters['csd'], CrossSpectralDensity) + assert filters['ch_names'] == epochs.ch_names + assert_array_equal(filters['proj'], np.eye(n_channels)) + assert_array_equal(filters['vertices'][0], vertices) + assert_array_equal(filters['vertices'][1], []) # Label was on the LH + assert filters['subject'] == fwd_free['src']._subject + assert filters['pick_ori'] is None + assert filters['is_free_ori'] + assert filters['inversion'] == inversion + assert filters['weight_norm'] == weight_norm + assert 'DICS' in repr(filters) + assert 'subject "sample"' in repr(filters) + assert str(len(vertices)) in repr(filters) + assert str(n_channels) in repr(filters) + assert 'rank' not in repr(filters) + _, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False) + _, _, _, _, G, _, _, _ = _prepare_beamformer_input( + epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None, + noise_cov=noise_cov) + G.shape = (n_channels, n_verts, n_orient) + G = G.transpose(1, 2, 0).conj() # verts, orient, ch + _assert_weight_norm(filters, G) + + inversion = 'matrix' + filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, + weight_norm=weight_norm, depth=None, + noise_csd=noise_csd, inversion=inversion) + _assert_weight_norm(filters, G) + + weight_norm = 'unit-noise-gain-invariant' + inversion = 'single' + filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, + weight_norm=weight_norm, depth=None, + noise_csd=noise_csd, inversion=inversion) + _assert_weight_norm(filters, G) + + # Test picking orientations. Also test weight norming under these different + # conditions. + weight_norm = 'unit-noise-gain' + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + pick_ori='normal', weight_norm=weight_norm, + depth=None, noise_csd=noise_csd, inversion=inversion) + n_orient = 1 + assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) + assert not filters['is_free_ori'] + _assert_weight_norm(filters, G) + + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + pick_ori='max-power', weight_norm=weight_norm, + depth=None, noise_csd=noise_csd, inversion=inversion) + n_orient = 1 + assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) + assert not filters['is_free_ori'] + _assert_weight_norm(filters, G) + + # From here on, only work on a single frequency + csd = csd[0] + + # Test using a real-valued filter + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + pick_ori='normal', real_filter=True, + noise_csd=noise_csd) + assert not np.iscomplexobj(filters['weights']) + + # Test forward normalization. 
When inversion='single', the power of a + # unit-noise CSD should be 1, even without weight normalization. + if not whiten: + csd_noise = csd.copy() + inds = np.triu_indices(csd.n_channels) + # Using [:, :] syntax for in-place broadcasting + csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis] + filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label, + weight_norm=None, depth=1., noise_csd=noise_csd, + inversion='single') + w = filters['weights'][0][:3] + assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6, + atol=0) + + # Test turning off both forward and weight normalization + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + weight_norm=None, depth=None, noise_csd=noise_csd) + w = filters['weights'][0][:3] + assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0, + rtol=1e-2, atol=0) + + # Test neural-activity-index weight normalization. It should be a scaled + # version of the unit-noise-gain beamformer. + filters_nai = make_dics( + epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', + weight_norm='nai', depth=None, noise_csd=noise_csd) + w_nai = filters_nai['weights'][0] + filters_ung = make_dics( + epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', + weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd) + w_ung = filters_ung['weights'][0] + assert_allclose(np.corrcoef(np.abs(w_nai).ravel(), + np.abs(w_ung).ravel()), 1, atol=1e-7) + + # Test whether spatial filter contains src_type + assert 'src_type' in filters + + fname = op.join(str(tmp_path), 'filters-dics.h5') + filters.save(fname) + filters_read = read_beamformer(fname) + assert isinstance(filters, Beamformer) + assert isinstance(filters_read, Beamformer) + for key in ['tmin', 'tmax']: # deal with strictness of object_diff + setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) + assert object_diff(filters, filters_read) == '' + + +def _fwd_dist(power, fwd, vertices, source_ind, tidx=1): + idx = np.argmax(power.data[:, tidx]) + rr_got = fwd['src'][0]['rr'][vertices[idx]] + rr_want = fwd['src'][0]['rr'][vertices[source_ind]] + return np.linalg.norm(rr_got - rr_want) + + +@idx_param +@pytest.mark.parametrize('inversion, weight_norm', [ + ('single', None), + ('matrix', 'unit-noise-gain'), +]) +def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm): + """Test applying a DICS beamformer to a CSD matrix.""" + fwd_free, fwd_surf, fwd_fixed, _ = _load_forward + epochs, _, csd, source_vertno, label, vertices, source_ind = \ + _simulate_data(fwd_fixed, idx) + reg = 1 # Lots of regularization for our toy dataset + + with pytest.raises(ValueError, match='several sensor types'): + make_dics(epochs.info, fwd_free, csd) + epochs.pick_types(meg='grad') + + # Try different types of forward models + assert label.hemi == 'lh' + for fwd in [fwd_free, fwd_surf, fwd_fixed]: + filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg, + inversion=inversion, weight_norm=weight_norm) + power, f = apply_dics_csd(csd, filters) + assert f == [10, 20] + + # Did we find the true source at 20 Hz? + dist = _fwd_dist(power, fwd_free, vertices, source_ind) + assert dist == 0. + + # Is the signal stronger at 20 Hz than 10? 
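+ # Columns of power.data follow csd.frequencies ([10, 20] Hz), so + # index 1 is the 20 Hz bin where the simulated oscillator lives.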
+ assert power.data[source_ind, 1] > power.data[source_ind, 0] + + +@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power']) +@pytest.mark.parametrize('inversion', ['single', 'matrix']) +@idx_param +def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): + """Test picking different orientations and inversion modes.""" + fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward + epochs, _, csd, source_vertno, label, vertices, source_ind = \ + _simulate_data(fwd_fixed, idx) + epochs.pick_types(meg='grad') + + reg_ = 5 if inversion == 'matrix' else 1 + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + reg=reg_, pick_ori=pick_ori, + inversion=inversion, depth=None, + weight_norm='unit-noise-gain') + power, f = apply_dics_csd(csd, filters) + assert f == [10, 20] + dist = _fwd_dist(power, fwd_surf, vertices, source_ind) + # This is 0. for unit-noise-gain-invariant: + assert dist <= (0.02 if inversion == 'matrix' else 0.) + assert power.data[source_ind, 1] > power.data[source_ind, 0] + + # Test unit-noise-gain weighting + csd_noise = csd.copy() + inds = np.triu_indices(csd.n_channels) + csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis] + noise_power, f = apply_dics_csd(csd_noise, filters) + want_norm = 3 if pick_ori is None else 1. + assert_allclose(noise_power.data, want_norm, atol=1e-7) + + # Test filter with forward normalization instead of weight + # normalization + filters = make_dics(epochs.info, fwd_surf, csd, label=label, + reg=reg_, pick_ori=pick_ori, + inversion=inversion, weight_norm=None, + depth=1.) + power, f = apply_dics_csd(csd, filters) + assert f == [10, 20] + dist = _fwd_dist(power, fwd_surf, vertices, source_ind) + mat_tol = {0: 0.055, 100: 0.20, 200: 0.015, 233: 0.035}[idx] + max_ = (mat_tol if inversion == 'matrix' else 0.) 
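+ # dist is in meters (derived from fwd['src'][0]['rr']), so these + # per-vertex tolerances bound the localization bias of the 'matrix' + # inversion, while 'single' is expected to localize exactly.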
+ assert 0 <= dist <= max_ + assert power.data[source_ind, 1] > power.data[source_ind, 0] + + +def _nearest_vol_ind(fwd_vol, fwd, vertices, source_ind): + return _compute_nearest( + fwd_vol['source_rr'], + fwd['src'][0]['rr'][vertices][source_ind][np.newaxis])[0] + + +@idx_param +def test_real(_load_forward, idx): + """Test using a real-valued filter.""" + fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward + epochs, _, csd, source_vertno, label, vertices, source_ind = \ + _simulate_data(fwd_fixed, idx) + epochs.pick_types(meg='grad') + reg = 1 # Lots of regularization for our toy dataset + filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg, + real_filter=True, inversion='single') + # Also test here that no warings are thrown - implemented to check whether + # src should not be None warning occurs: + power, f = apply_dics_csd(csd, filters_real) + + assert f == [10, 20] + dist = _fwd_dist(power, fwd_surf, vertices, source_ind) + assert dist == 0 + assert power.data[source_ind, 1] > power.data[source_ind, 0] + + # Test rank reduction + filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5, + pick_ori='max-power', inversion='matrix', + reduce_rank=True) + power, f = apply_dics_csd(csd, filters_real) + assert f == [10, 20] + dist = _fwd_dist(power, fwd_surf, vertices, source_ind) + assert dist == 0 + assert power.data[source_ind, 1] > power.data[source_ind, 0] + + # Test computing source power on a volume source space + filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, + inversion='single') + power, f = apply_dics_csd(csd, filters_vol) + vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) + assert f == [10, 20] + dist = _fwd_dist( + power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind) + vol_tols = {100: 0.008, 200: 0.008} + assert dist <= vol_tols.get(idx, 0.) + assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0] + + # check whether a filters object without src_type throws expected warning + del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match='spatial filter does not contain ' + 'src_type'): + apply_dics_csd(csd, filters_vol) + + +@pytest.mark.filterwarnings("ignore:The use of several sensor types with the" + ":RuntimeWarning") +@idx_param +def test_apply_dics_timeseries(_load_forward, idx): + """Test DICS applied to timeseries data.""" + fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward + epochs, evoked, csd, source_vertno, label, vertices, source_ind = \ + _simulate_data(fwd_fixed, idx) + reg = 5 # Lots of regularization for our toy dataset + + with pytest.raises(ValueError, match='several sensor types'): + make_dics(evoked.info, fwd_surf, csd) + evoked.pick_types(meg='grad') + + multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, + reg=reg) + + # Sanity checks on the resulting STC after applying DICS on evoked + stcs = apply_dics(evoked, multiple_filters) + assert isinstance(stcs, list) + assert len(stcs) == len(multiple_filters['weights']) + assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0]) + assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1]) + assert_allclose(stcs[0].times, evoked.times) + + # Applying filters for multiple frequencies on epoch data should fail + with pytest.raises(ValueError, match='computed for a single frequency'): + apply_dics_epochs(epochs, multiple_filters) + + # From now on, only apply filters with a single frequency (20 Hz). 
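+ # apply_dics_epochs() rejects multi-frequency filters (checked above), + # so restrict the CSD to its 20 Hz bin first.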
+ csd20 = csd.pick_frequency(20) + filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg, + inversion='single') + + # Sanity checks on the resulting STC after applying DICS on epochs. + # Also test here that no warnings are thrown - implemented to check whether + # src should not be None warning occurs + stcs = apply_dics_epochs(epochs, filters) + + assert isinstance(stcs, list) + assert len(stcs) == 1 + assert_array_equal(stcs[0].vertices[0], filters['vertices'][0]) + assert_array_equal(stcs[0].vertices[1], filters['vertices'][1]) + assert_allclose(stcs[0].times, epochs.times) + + # Did we find the source? + stc = (stcs[0] ** 2).mean() + dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) + assert dist == 0 + + # Apply filters to evoked + stc = apply_dics(evoked, filters) + stc = (stc ** 2).mean() + dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) + assert dist == 0 + + # Test if wrong channel selection is detected in application of filter + evoked_ch = cp.deepcopy(evoked) + evoked_ch.pick_channels(evoked_ch.ch_names[:-1]) + with pytest.raises(ValueError, match='MEG 2633 which is not present'): + apply_dics(evoked_ch, filters) + + # Test whether projections are applied, by adding a custom projection + filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label) + stc_noproj = apply_dics(evoked, filters_noproj) + evoked_proj = evoked.copy() + p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0) + proj_matrix = make_projector(p, evoked_proj.ch_names)[0] + evoked_proj.add_proj(p) + filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label) + assert_array_equal(filters_proj['proj'], proj_matrix) + stc_proj = apply_dics(evoked_proj, filters_proj) + assert np.any(np.not_equal(stc_noproj.data, stc_proj.data)) + + # Test detecting incompatible projections + filters_proj['proj'] = filters_proj['proj'][:-1, :-1] + with pytest.raises(ValueError, match='operands could not be broadcast'): + apply_dics(evoked_proj, filters_proj) + + # Test returning a generator + stcs = apply_dics_epochs(epochs, filters, return_generator=False) + stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True) + assert_array_equal(stcs[0].data, next(stcs_gen).data) + + # Test computing timecourses on a volume source space + filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, + inversion='single') + stc = apply_dics(evoked, filters_vol) + stc = (stc ** 2).mean() + assert stc.data.shape[1] == 1 + vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) + dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind, + tidx=0) + vol_tols = {100: 0.008, 200: 0.015} + vol_tol = vol_tols.get(idx, 0.) 
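+ # The volume grid is coarse (the vol-7 forward presumably uses ~7 mm + # spacing), so up to 1.5 cm of localization error is tolerated here.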
+ assert dist <= vol_tol + + # check whether a filters object without src_type throws expected warning + del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'): + apply_dics_epochs(epochs, filters_vol) + + +def _cov_as_csd(cov, info): + rng = np.random.RandomState(0) + assert cov['data'].ndim == 2 + assert len(cov['data']) == len(cov['names']) + # we need to make this have at least some complex structure + data = cov['data'] + 1e-1 * _rand_csd(rng, info) + assert data.dtype == np.complex128 + return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16) + + +# Just test free ori here (assume fixed is same as LCMV if these are) +# Changes here should be synced with test_lcmv.py +@pytest.mark.slowtest +@pytest.mark.parametrize( + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [ + (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True), + (0.05, None, 'unit-noise-gain', False, None, 13, 14, False), + (0.05, None, 'unit-noise-gain', True, None, 35, 37, False), + (0.05, None, 'nai', True, None, 35, 37, False), + (0.05, None, None, True, None, 12, 14, False), + (0.05, None, None, True, 0.8, 39, 43, False), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, + False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True), + (0.05, 'max-power', 'nai', True, None, 21, 24, False), + (0.05, 'max-power', None, True, None, 7, 10, False), + (0.05, 'max-power', None, True, 0.8, 15, 18, False), + # skip most no-reg tests, assume others are equal to LCMV if these are + (0.00, None, None, True, None, 21, 32, False), + (0.00, 'max-power', None, True, None, 13, 19, False), + ]) +def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, + use_cov, depth, lower, upper, real_filter): + """Test localization bias for free-orientation DICS.""" + evoked, fwd, noise_cov, data_cov, want = bias_params_free + noise_csd = _cov_as_csd(noise_cov, evoked.info) + data_csd = _cov_as_csd(data_cov, evoked.info) + del noise_cov, data_cov + if not use_cov: + evoked.pick_types(meg='grad') + noise_csd = None + filters = make_dics( + evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori, + weight_norm=weight_norm, depth=depth, real_filter=real_filter) + loc = apply_dics(evoked, filters).data + loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + perc = (want == np.argmax(loc, axis=0)).mean() * 100 + assert lower <= perc <= upper + + +@pytest.mark.parametrize( + 'weight_norm, lower, upper, lower_ori, upper_ori, real_filter', [ + ('unit-noise-gain-invariant', 57, 58, 0.60, 0.61, False), + ('unit-noise-gain', 57, 58, 0.60, 0.61, False), + ('unit-noise-gain', 57, 58, 0.60, 0.61, True), + (None, 27, 28, 0.56, 0.57, False), + ]) +def test_orientation_max_power(bias_params_fixed, bias_params_free, + weight_norm, lower, upper, lower_ori, upper_ori, + real_filter): + """Test orientation selection for bias for max-power DICS.""" + # we simulate data for the fixed orientation forward and beamform using + # the free orientation forward, and check the orientation match at the end + evoked, _, noise_cov, data_cov, want = bias_params_fixed + 
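# _cov_as_csd() above recasts the covariances as single-bin CSDs with + # random complex structure, so the DICS code path is exercised while + # remaining comparable to the corresponding LCMV bias tests. +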
noise_csd = _cov_as_csd(noise_cov, evoked.info) + data_csd = _cov_as_csd(data_cov, evoked.info) + del data_cov, noise_cov + fwd = bias_params_free[1] + filters = make_dics(evoked.info, fwd, data_csd, 0.05, noise_csd, + pick_ori='max-power', weight_norm=weight_norm, + depth=None, real_filter=real_filter) + loc = np.abs(apply_dics(evoked, filters).data) + ori = filters['max_power_ori'][0] + assert ori.shape == (246, 3) + loc = np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + max_idx = np.argmax(loc, axis=0) + mask = want == max_idx # ones that localized properly + perc = mask.mean() * 100 + assert lower <= perc <= upper + # Compute the dot products of our forward normals and + # assert we get some hopefully reasonable agreement + assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate( + [s['nn'][v] for s, v in zip(fwd['src'], filters['vertices'])]) + nn = nn[want] + nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) + assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) + dots = np.abs((nn[mask] * ori[mask]).sum(-1)) + assert_array_less(dots, 1) + assert_array_less(0, dots) + got = np.mean(dots) + assert lower_ori < got < upper_ori + + +@testing.requires_testing_data +@idx_param +@pytest.mark.parametrize('whiten', (False, True)) +def test_make_dics_rank(_load_forward, idx, whiten): + """Test making DICS beamformer filters with rank param.""" + _, fwd_surf, fwd_fixed, _ = _load_forward + epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx) + if whiten: + noise_csd, want_rank = _make_rand_csd(epochs.info, csd) + kind = 'mag + grad' + else: + noise_csd = None + epochs.pick_types(meg='grad') + want_rank = len(epochs.ch_names) + assert want_rank == 41 + kind = 'grad' + + with catch_logging() as log: + filters = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + verbose=True) + log = log.getvalue() + assert f'Estimated rank ({kind}): {want_rank}' in log, log + stc, _ = apply_dics_csd(csd, filters) + other_rank = want_rank - 1 # shouldn't make a huge difference + use_rank = dict(meg=other_rank) + if not whiten: + # XXX it's a bug that our rank functions don't treat "meg" + # properly here... + use_rank['grad'] = use_rank.pop('meg') + with catch_logging() as log: + filters_2 = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + rank=use_rank, verbose=True) + log = log.getvalue() + assert f'Computing rank from covariance with rank={use_rank}' in log, log + stc_2, _ = apply_dics_csd(csd, filters_2) + corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1] + assert 0.8 < corr < 0.999999 + + # degenerate conditions + if whiten: + # make rank deficient + data = noise_csd.get_data(0.) 
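+ # Zeroing the first row and column makes the noise CSD rank deficient + # relative to the data CSD, which make_dics() should refuse below.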
+ data[0] = data[:, 0] = 0 + noise_csd._data[:, 0] = _sym_mat_to_vector(data) + with pytest.raises(ValueError, match='meg data rank.*the noise rank'): + filters = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + verbose=True) diff --git a/python/libs/mne/beamformer/tests/test_external.py b/python/libs/mne/beamformer/tests/test_external.py new file mode 100644 index 0000000..85c85e8 --- /dev/null +++ b/python/libs/mne/beamformer/tests/test_external.py @@ -0,0 +1,108 @@ +# Authors: Britta Westner +# +# License: BSD-3-Clause + +import os.path as op + +import pytest +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose +from scipy.io import savemat + +import mne +from mne.beamformer import make_lcmv, apply_lcmv, apply_lcmv_cov +from mne.beamformer.tests.test_lcmv import _get_data +from mne.datasets import testing +from mne.utils import requires_version + +data_path = testing.data_path(download=False) +ft_data_path = op.join(data_path, 'fieldtrip', 'beamformer') +fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_vol = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-vol-7-fwd.fif') +fname_event = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc_raw-eve.fif') +fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label') + +reject = dict(grad=4000e-13, mag=4e-12) + + +@pytest.fixture(scope='function', params=[testing._pytest_param()]) +def _get_bf_data(save_fieldtrip=False): + raw, epochs, evoked, data_cov, _, _, _, _, _, fwd = _get_data(proj=False) + + if save_fieldtrip is True: + # raw needs to be saved with all channels and picked in FieldTrip + raw.save(op.join(ft_data_path, 'raw.fif'), overwrite=True) + + # src (tris are not available in fwd['src'] once imported into MATLAB) + src = fwd['src'].copy() + mne.write_source_spaces(op.join(ft_data_path, 'src.fif'), src, + verbose='error', overwrite=True) + + # pick gradiometers only: + epochs.pick_types(meg='grad') + evoked.pick_types(meg='grad') + + # compute covariance matrix (ignore false alarm about no baseline) + data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145, + method='empirical', verbose='error') + + if save_fieldtrip is True: + # if the covariance matrix and epochs need resaving: + # data covariance: + cov_savepath = op.join(ft_data_path, 'sample_cov.mat') + sample_cov = {'sample_cov': data_cov['data']} + savemat(cov_savepath, sample_cov) + # evoked data: + ev_savepath = op.join(ft_data_path, 'sample_evoked.mat') + data_ev = {'sample_evoked': evoked.data} + savemat(ev_savepath, data_ev) + + return evoked, data_cov, fwd + + +# beamformer types to be tested: unit-gain (vector and scalar) and +# unit-noise-gain (time series and power output [apply_lcmv_cov]) +@requires_version('pymatreader') +@pytest.mark.parametrize('bf_type, weight_norm, pick_ori, pwr', [ + ['ug_scal', None, 'max-power', False], + ['ung', 'unit-noise-gain', 'max-power', False], + ['ung_pow', 'unit-noise-gain', 'max-power', True], + ['ug_vec', None, 'vector', False], + ['ung_vec', 'unit-noise-gain', 'vector', False], +]) +def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): + """Test LCMV vs fieldtrip output.""" + from pymatreader import read_mat + evoked, data_cov, fwd = _get_bf_data + + # run the MNE-Python beamformer + filters = 
make_lcmv(evoked.info, fwd, data_cov=data_cov, + noise_cov=None, pick_ori=pick_ori, reg=0.05, + weight_norm=weight_norm) + if pwr: + stc_mne = apply_lcmv_cov(data_cov, filters) + else: + stc_mne = apply_lcmv(evoked, filters) + + # load the FieldTrip output + ft_fname = op.join(ft_data_path, 'ft_source_' + bf_type + '-vol.mat') + stc_ft_data = read_mat(ft_fname)['stc'] + if stc_ft_data.ndim == 1: + stc_ft_data.shape = (stc_ft_data.size, 1) + + if stc_mne.data.ndim == 2: + signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) + if pwr: + assert_array_equal(signs, 1.) + stc_mne.data *= signs + assert stc_ft_data.shape == stc_mne.data.shape + if pick_ori == 'vector': + # compare norms first + assert_allclose(np.linalg.norm(stc_mne.data, axis=1), + np.linalg.norm(stc_ft_data, axis=1), rtol=1e-6) + assert_allclose(stc_mne.data, stc_ft_data, rtol=1e-6) diff --git a/python/libs/mne/beamformer/tests/test_lcmv.py b/python/libs/mne/beamformer/tests/test_lcmv.py new file mode 100644 index 0000000..bd413e8 --- /dev/null +++ b/python/libs/mne/beamformer/tests/test_lcmv.py @@ -0,0 +1,931 @@ +from copy import deepcopy +import os.path as op + +import pytest +import numpy as np +from scipy import linalg +from scipy.spatial.distance import cdist +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, assert_array_less) + +import mne +from mne.transforms import apply_trans, invert_transform +from mne import (convert_forward_solution, read_forward_solution, compute_rank, + VolVectorSourceEstimate, VolSourceEstimate, EvokedArray, + pick_channels_cov, read_vectorview_selection) +from mne.beamformer import (make_lcmv, apply_lcmv, apply_lcmv_epochs, + apply_lcmv_raw, Beamformer, + read_beamformer, apply_lcmv_cov, make_dics) +from mne.beamformer._compute_beamformer import _prepare_beamformer_input +from mne.datasets import testing +from mne.fixes import _get_args +from mne.io.compensator import set_current_comp +from mne.io.constants import FIFF +from mne.minimum_norm import make_inverse_operator, apply_inverse +from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match +from mne.simulation import simulate_evoked +from mne.utils import (object_diff, requires_version, catch_logging, + _record_warnings) + + +data_path = testing.data_path(download=False) +fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_vol = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-vol-7-fwd.fif') +fname_event = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc_raw-eve.fif') +fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label') +ctf_fname = op.join(data_path, 'CTF', 'somMDYO-18av.ds') + +reject = dict(grad=4000e-13, mag=4e-12) + + +def _read_forward_solution_meg(*args, **kwargs): + fwd = read_forward_solution(*args) + fwd = convert_forward_solution(fwd, **kwargs) + return mne.pick_types_forward(fwd, meg=True, eeg=False) + + +def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, + epochs_preload=True, data_cov=True, proj=True): + """Read in data used in tests.""" + label = mne.read_label(fname_label) + events = mne.read_events(fname_event) + raw = mne.io.read_raw_fif(fname_raw, preload=True) + forward = mne.read_forward_solution(fname_fwd) + if all_forward: + forward_surf_ori = _read_forward_solution_meg( + 
fname_fwd, surf_ori=True) + forward_fixed = _read_forward_solution_meg( + fname_fwd, force_fixed=True, surf_ori=True, use_cps=False) + forward_vol = _read_forward_solution_meg(fname_fwd_vol) + else: + forward_surf_ori = None + forward_fixed = None + forward_vol = None + + event_id, tmin, tmax = 1, tmin, tmax + + # Setup for reading the raw data + raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels + # Set up pick list: MEG - bad channels + left_temporal_channels = read_vectorview_selection('Left-temporal') + picks = mne.pick_types(raw.info, meg=True, + selection=left_temporal_channels) + picks = picks[::2] # decimate for speed + # add a couple channels we will consider bad + bad_picks = [100, 101] + bads = [raw.ch_names[pick] for pick in bad_picks] + assert not any(pick in picks for pick in bad_picks) + picks = np.concatenate([picks, bad_picks]) + raw.pick_channels([raw.ch_names[ii] for ii in picks]) + del picks + + raw.info['bads'] = bads # add more bads + if proj: + raw.info.normalize_proj() # avoid projection warnings + else: + raw.del_proj() + + if epochs: + # Read epochs + epochs = mne.Epochs( + raw, events, event_id, tmin, tmax, proj=True, + baseline=(None, 0), preload=epochs_preload, reject=reject) + if epochs_preload: + epochs.resample(200, npad=0) + epochs.crop(0, None) + evoked = epochs.average() + info = evoked.info + else: + epochs = None + evoked = None + info = raw.info + + noise_cov = mne.read_cov(fname_cov) + noise_cov['projs'] = [] # avoid warning + noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, + eeg=0.1, proj=True, rank=None) + if data_cov: + data_cov = mne.compute_covariance( + epochs, tmin=0.04, tmax=0.145, verbose='error') # baseline warning + else: + data_cov = None + + return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ + forward_surf_ori, forward_fixed, forward_vol + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_lcmv_vector(): + """Test vector LCMV solutions.""" + info = mne.io.read_raw_fif(fname_raw).info + + # For speed and for rank-deficiency calculation simplicity, + # just use grads + info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=())) + with info._unlock(): + info.update(bads=[], projs=[]) + + forward = mne.read_forward_solution(fname_fwd) + forward = mne.pick_channels_forward(forward, info['ch_names']) + vertices = [s['vertno'][::200] for s in forward['src']] + n_vertices = sum(len(v) for v in vertices) + assert n_vertices == 4 + + amplitude = 100e-9 + stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices, + 0, 1. / info['sfreq']) + forward_sim = mne.convert_forward_solution(forward, force_fixed=True, + use_cps=True, copy=True) + forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc) + noise_cov = mne.make_ad_hoc_cov(info) + noise_cov.update(data=np.diag(noise_cov['data']), diag=False) + evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1) + source_nn = forward_sim['source_nn'] + source_rr = forward_sim['source_rr'] + + # Figure out our indices + mask = np.concatenate([np.in1d(s['vertno'], v) + for s, v in zip(forward['src'], vertices)]) + mapping = np.where(mask)[0] + assert_array_equal(source_rr, forward['source_rr'][mapping]) + + # Don't check NN because we didn't rotate to surf ori + del forward_sim + + # Let's do minimum norm as a sanity check (dipole_fit is slower) + inv = make_inverse_operator(info, forward, noise_cov, loose=1.) 
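+ # A free-orientation (loose=1.) minimum-norm solution provides an + # independent orientation estimate to sanity-check the LCMV vector + # results against.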
+ stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector') + mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)] + mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis] + mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1))) + assert np.mean(mne_angles) < 35 + + # Now let's do LCMV + data_cov = mne.make_ad_hoc_cov(info) # just a stub for later + with pytest.raises(ValueError, match="pick_ori"): + make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad') + + lcmv_ori = list() + for ti in range(n_vertices): + this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti]) + data_cov['diag'] = False + data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) + + noise_cov['data']) + vals = linalg.svdvals(data_cov['data']) + assert vals[0] / vals[-1] < 1e5 # not rank deficient + + with catch_logging() as log: + filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov, + verbose=True) + log = log.getvalue() + assert '498 sources' in log + with catch_logging() as log: + filters_vector = make_lcmv(info, forward, data_cov, 0.05, + noise_cov, pick_ori='vector', + verbose=True) + log = log.getvalue() + assert '498 sources' in log + stc = apply_lcmv(this_evoked, filters) + stc_vector = apply_lcmv(this_evoked, filters_vector) + assert isinstance(stc, mne.SourceEstimate) + assert isinstance(stc_vector, mne.VectorSourceEstimate) + assert_allclose(stc.data, stc_vector.magnitude().data) + + # Check the orientation by pooling across some neighbors, as LCMV can + # have some "holes" at the points of interest + idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0] + lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0)) + lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1]) + + lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1))) + assert np.mean(lcmv_angles) < 55 + + +@pytest.mark.slowtest +@requires_version('h5io') +@testing.requires_testing_data +@pytest.mark.parametrize('reg, proj, kind', [ + (0.01, True, 'volume'), + (0., False, 'volume'), + (0.01, False, 'surface'), + (0., True, 'surface'), +]) +def test_make_lcmv_bem(tmp_path, reg, proj, kind): + """Test LCMV with evoked data and single trials.""" + raw, epochs, evoked, data_cov, noise_cov, label, forward,\ + forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj) + + if kind == 'surface': + fwd = forward + else: + fwd = forward_vol + assert kind == 'volume' + + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov) + stc = apply_lcmv(evoked, filters) + stc.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = stc.data[idx] + tmax = stc.times[np.argmax(max_stc)] + + assert 0.08 < tmax < 0.15, tmax + assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) + + if kind == 'surface': + # Test picking normal orientation (surface source space only). 
+ filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, + reg=reg, noise_cov=noise_cov, + pick_ori='normal', weight_norm=None) + stc_normal = apply_lcmv(evoked, filters) + stc_normal.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc_normal.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = stc_normal.data[idx] + tmax = stc_normal.times[np.argmax(max_stc)] + + lower = 0.04 if proj else 0.025 + assert lower < tmax < 0.14, tmax + lower = 3e-7 if proj else 2e-7 + assert lower < np.max(max_stc) < 3e-6, np.max(max_stc) + + # No weight normalization was applied, so the amplitude of normal + # orientation results should always be smaller than free + # orientation results. + assert (np.abs(stc_normal.data) <= stc.data).all() + + # Test picking source orientation maximizing output source power + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov, pick_ori='max-power') + stc_max_power = apply_lcmv(evoked, filters) + stc_max_power.crop(0.02, None) + stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = np.abs(stc_max_power.data[idx]) + tmax = stc.times[np.argmax(max_stc)] + + lower = 0.08 if proj else 0.04 + assert lower < tmax < 0.15, tmax + assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) + + stc_max_power.data[:, :] = np.abs(stc_max_power.data) + + if kind == 'surface': + # Maximum output source power orientation results should be + # similar to free orientation results in areas with channel + # coverage + label = mne.read_label(fname_label) + mean_stc = stc.extract_label_time_course( + label, fwd['src'], mode='mean') + mean_stc_max_pow = \ + stc_max_power.extract_label_time_course( + label, fwd['src'], mode='mean') + assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) + + # Test if spatial filter contains src_type + assert filters['src_type'] == kind + + # __repr__ + assert len(evoked.ch_names) == 22 + assert len(evoked.info['projs']) == (3 if proj else 0) + assert len(evoked.info['bads']) == 2 + rank = 17 if proj else 20 + assert 'LCMV' in repr(filters) + assert 'unknown subject' not in repr(filters) + assert f'{fwd["nsource"]} vert' in repr(filters) + assert '20 ch' in repr(filters) + assert 'rank %s' % rank in repr(filters) + + # I/O + fname = op.join(str(tmp_path), 'filters.h5') + with pytest.warns(RuntimeWarning, match='-lcmv.h5'): + filters.save(fname) + filters_read = read_beamformer(fname) + assert isinstance(filters, Beamformer) + assert isinstance(filters_read, Beamformer) + # deal with object_diff strictness + filters_read['rank'] = int(filters_read['rank']) + filters['rank'] = int(filters['rank']) + assert object_diff(filters, filters_read) == '' + + if kind != 'surface': + return + + # Test if fixed forward operator is detected when picking normal or + # max-power orientation + pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov, + reg=0.01, noise_cov=noise_cov, pick_ori='normal') + pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov, + reg=0.01, noise_cov=noise_cov, pick_ori='max-power') + + # Test if non-surface oriented forward operator is detected when picking + # normal orientation + pytest.raises(ValueError, make_lcmv, evoked.info, forward, data_cov, + reg=0.01, noise_cov=noise_cov, pick_ori='normal') + + # Test if volume forward operator is detected when picking normal + # orientation + pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, data_cov, + reg=0.01, noise_cov=noise_cov, pick_ori='normal') + + # Test if missing of noise covariance 
matrix is detected when more than + # one channel type is present in the data + pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, + data_cov=data_cov, reg=0.01, noise_cov=None, + pick_ori='max-power') + + # Test if wrong channel selection is detected in application of filter + evoked_ch = deepcopy(evoked) + evoked_ch.pick_channels(evoked_ch.ch_names[1:]) + filters = make_lcmv(evoked.info, forward_vol, data_cov, reg=0.01, + noise_cov=noise_cov) + with pytest.deprecated_call(match='max_ori_out'): + with pytest.raises(ValueError, match='was computed with'): + apply_lcmv(evoked_ch, filters, max_ori_out='deprecated') + + # Test if discrepancies in channel selection of data and fwd model are + # handled correctly in apply_lcmv + # make filter with data where first channel was removed + filters = make_lcmv(evoked_ch.info, forward_vol, data_cov, reg=0.01, + noise_cov=noise_cov) + # applying that filter to the full data set should automatically exclude + # this channel from the data + # also test here that no warnings are thrown - implemented to check whether + # src should not be None warning occurs + stc = apply_lcmv(evoked, filters) + + # the result should be equal to applying this filter to a dataset without + # this channel: + stc_ch = apply_lcmv(evoked_ch, filters) + assert_array_almost_equal(stc.data, stc_ch.data) + + # Test if non-matching SSP projection is detected in application of filter + if proj: + raw_proj = raw.copy().del_proj() + with pytest.raises(ValueError, match='do not match the projections'): + apply_lcmv_raw(raw_proj, filters) + + # Test apply_lcmv_raw + use_raw = raw.copy().crop(0, 1) + stc = apply_lcmv_raw(use_raw, filters) + assert_allclose(stc.times, use_raw.times) + assert_array_equal(stc.vertices[0], forward_vol['src'][0]['vertno']) + + # Test if spatial filter contains src_type + assert 'src_type' in filters + + # check whether a filters object without src_type throws expected warning + del filters['src_type'] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match='spatial filter does not contain ' + 'src_type'): + apply_lcmv(evoked, filters) + + # Now test single trial using fixed orientation forward solution + # so we can compare it to the evoked solution + filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01, + noise_cov=noise_cov) + stcs = apply_lcmv_epochs(epochs, filters) + stcs_ = apply_lcmv_epochs(epochs, filters, return_generator=True) + assert_array_equal(stcs[0].data, next(stcs_).data) + + epochs.drop_bad() + assert (len(epochs.events) == len(stcs)) + + # average the single trial estimates + stc_avg = np.zeros_like(stcs[0].data) + for this_stc in stcs: + stc_avg += this_stc.data + stc_avg /= len(stcs) + + # compare it to the solution using evoked with fixed orientation + filters = make_lcmv(evoked.info, forward_fixed, data_cov, reg=0.01, + noise_cov=noise_cov) + stc_fixed = apply_lcmv(evoked, filters) + assert_array_almost_equal(stc_avg, stc_fixed.data) + + # use a label so we have few source vertices and delayed computation is + # not used + filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01, + noise_cov=noise_cov, label=label) + stcs_label = apply_lcmv_epochs(epochs, filters) + + assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data) + + # Test condition where the filters weights are zero. 
There should not be + # any divide-by-zero errors + zero_cov = data_cov.copy() + zero_cov['data'][:] = 0 + filters = make_lcmv(epochs.info, forward_fixed, zero_cov, reg=0.01, + noise_cov=noise_cov) + assert_array_equal(filters['weights'], 0) + + # Test condition where one channel type is picked + # (avoid "grad data rank (13) did not match the noise rank (None)") + data_cov_grad = pick_channels_cov( + data_cov, [ch_name for ch_name in epochs.info['ch_names'] + if ch_name.endswith(('2', '3'))]) + assert len(data_cov_grad['names']) > 4 + make_lcmv(epochs.info, forward_fixed, data_cov_grad, reg=0.01, + noise_cov=noise_cov) + + +@testing.requires_testing_data +@pytest.mark.slowtest +@pytest.mark.parametrize('weight_norm, pick_ori', [ + ('unit-noise-gain', 'max-power'), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', None), + ('nai', 'vector'), + (None, 'max-power'), +]) +def test_make_lcmv_sphere(pick_ori, weight_norm): + """Test LCMV with sphere head model.""" + # unit-noise gain beamformer and orientation + # selection and rank reduction of the leadfield + _, _, evoked, data_cov, noise_cov, _, _, _, _, _ = _get_data(proj=True) + assert 'eeg' not in evoked + assert 'meg' in evoked + sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) + src = mne.setup_volume_source_space( + pos=25., sphere=sphere, mindist=5.0, exclude=2.0) + fwd_sphere = mne.make_forward_solution(evoked.info, None, src, sphere) + + # Test that we get an error if not reducing rank + with pytest.raises(ValueError, match='Singular matrix detected'): + with pytest.warns(RuntimeWarning, match='positive semidefinite'): + make_lcmv( + evoked.info, fwd_sphere, data_cov, reg=0.1, + noise_cov=noise_cov, weight_norm=weight_norm, + pick_ori=pick_ori, reduce_rank=False, rank='full') + + # Now let's reduce it + filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1, + noise_cov=noise_cov, weight_norm=weight_norm, + pick_ori=pick_ori, reduce_rank=True) + stc_sphere = apply_lcmv(evoked, filters) + if isinstance(stc_sphere, VolVectorSourceEstimate): + stc_sphere = stc_sphere.magnitude() + else: + stc_sphere = abs(stc_sphere) + assert isinstance(stc_sphere, VolSourceEstimate) + stc_sphere.crop(0.02, None) + + stc_pow = np.sum(stc_sphere.data, axis=1) + idx = np.argmax(stc_pow) + max_stc = stc_sphere.data[idx] + tmax = stc_sphere.times[np.argmax(max_stc)] + assert 0.08 < tmax < 0.15, tmax + min_, max_ = 1.0, 4.5 + if weight_norm is None: + min_ *= 2e-7 + max_ *= 2e-7 + assert min_ < np.max(max_stc) < max_, (min_, np.max(max_stc), max_) + + +@testing.requires_testing_data +@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain')) +@pytest.mark.parametrize('pick_ori', ('max-power', 'normal')) +def test_lcmv_cov(weight_norm, pick_ori): + """Test LCMV source power computation.""" + raw, epochs, evoked, data_cov, noise_cov, label, forward,\ + forward_surf_ori, forward_fixed, forward_vol = _get_data() + convert_forward_solution(forward, surf_ori=True, copy=False) + filters = make_lcmv(evoked.info, forward, data_cov, noise_cov=noise_cov, + weight_norm=weight_norm, pick_ori=pick_ori) + for cov in (data_cov, noise_cov): + this_cov = pick_channels_cov(cov, evoked.ch_names) + this_evoked = evoked.copy().pick_channels(this_cov['names']) + this_cov['projs'] = this_evoked.info['projs'] + assert this_evoked.ch_names == this_cov['names'] + stc = apply_lcmv_cov(this_cov, filters) + assert stc.data.min() > 0 + assert stc.shape == (498, 1) + ev = EvokedArray(this_cov.data, this_evoked.info) + stc_1 = apply_lcmv(ev, filters) 
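        # Sketch of the semantics being verified (not upstream code):
        # apply_lcmv_cov(C, filters) is assumed to return source power, i.e.
        # the diagonal of W @ C @ W.T for filter weights W. For arrays
        # W (n_src, n_ch) and C (n_ch, n_ch) that diagonal can be computed
        # without forming the full product:
        #     power = np.einsum('ij,jk,ik->i', W, C, W)
        # The two chained apply_lcmv calls on covariance "evokeds" below
        # rebuild exactly W @ C @ W.T, whose diagonal is then compared
        # against the apply_lcmv_cov result.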
+ assert stc_1.data.min() < 0 + ev = EvokedArray(stc_1.data.T, this_evoked.info) + stc_2 = apply_lcmv(ev, filters) + assert stc_2.data.shape == (498, 498) + data = np.diag(stc_2.data)[:, np.newaxis] + assert data.min() > 0 + assert_allclose(data, stc.data, rtol=1e-12) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_lcmv_ctf_comp(): + """Test interpolation with compensated CTF data.""" + raw = mne.io.read_raw_ctf(ctf_fname, preload=True) + raw.pick(raw.ch_names[:70]) + + events = mne.make_fixed_length_events(raw, duration=0.2)[:2] + epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.2) + evoked = epochs.average() + + data_cov = mne.compute_covariance(epochs) + fwd = mne.make_forward_solution(evoked.info, None, + mne.setup_volume_source_space(pos=30.0), + mne.make_sphere_model()) + with pytest.raises(ValueError, match='reduce_rank'): + make_lcmv(evoked.info, fwd, data_cov) + filters = make_lcmv(evoked.info, fwd, data_cov, reduce_rank=True) + assert 'weights' in filters + + # test whether different compensations throw error + info_comp = evoked.info.copy() + set_current_comp(info_comp, 1) + with pytest.raises(RuntimeError, match='Compensation grade .* not match'): + make_lcmv(info_comp, fwd, data_cov) + + +@pytest.mark.slowtest +@testing.requires_testing_data +@pytest.mark.parametrize('proj, weight_norm', [ + (True, 'unit-noise-gain'), + (False, 'unit-noise-gain'), + (True, None), + (True, 'nai'), +]) +def test_lcmv_reg_proj(proj, weight_norm): + """Test LCMV with and without proj.""" + raw = mne.io.read_raw_fif(fname_raw, preload=True) + events = mne.find_events(raw) + raw.pick_types(meg=True) + assert len(raw.ch_names) == 305 + epochs = mne.Epochs(raw, events, None, preload=True, proj=proj) + with pytest.warns(RuntimeWarning, match='Too few samples'): + noise_cov = mne.compute_covariance(epochs, tmax=0) + data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) + forward = mne.read_forward_solution(fname_fwd) + filters = make_lcmv(epochs.info, forward, data_cov, reg=0.05, + noise_cov=noise_cov, pick_ori='max-power', + weight_norm='nai', rank=None, verbose=True) + want_rank = 302 # 305 good channels - 3 MEG projs + assert filters['rank'] == want_rank + # And also with and without noise_cov + with pytest.raises(ValueError, match='several sensor types'): + make_lcmv(epochs.info, forward, data_cov, reg=0.05, + noise_cov=None) + epochs.pick_types(meg='grad') + kwargs = dict(reg=0.05, pick_ori=None, weight_norm=weight_norm) + filters_cov = make_lcmv(epochs.info, forward, data_cov, + noise_cov=noise_cov, **kwargs) + filters_nocov = make_lcmv(epochs.info, forward, data_cov, + noise_cov=None, **kwargs) + ad_hoc = mne.make_ad_hoc_cov(epochs.info) + filters_adhoc = make_lcmv(epochs.info, forward, data_cov, + noise_cov=ad_hoc, **kwargs) + evoked = epochs.average() + stc_cov = apply_lcmv(evoked, filters_cov) + stc_nocov = apply_lcmv(evoked, filters_nocov) + stc_adhoc = apply_lcmv(evoked, filters_adhoc) + + # Compare adhoc and nocov: scale difference is necessitated by using std=1. + if weight_norm == 'unit-noise-gain': + scale = np.sqrt(ad_hoc['data'][0]) + else: + scale = 1. 
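    # Hedged reasoning note (not upstream code): with
    # weight_norm='unit-noise-gain' the weights are normalized against the
    # assumed noise covariance, so replacing noise_cov=None (implicit unit
    # variance) by an ad-hoc diagonal covariance with variance
    # ad_hoc['data'][0] is expected to rescale the output by the
    # corresponding standard deviation, which is what
    # scale = np.sqrt(ad_hoc['data'][0]) encodes; for the other weight_norm
    # settings exercised here the two solutions should match directly
    # (scale = 1).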
+ assert_allclose(stc_nocov.data, stc_adhoc.data * scale) + a = np.dot(filters_nocov['weights'], filters_nocov['whitener']) + b = np.dot(filters_adhoc['weights'], filters_adhoc['whitener']) * scale + atol = np.mean(np.sqrt(a * a)) * 1e-7 + assert_allclose(a, b, atol=atol, rtol=1e-7) + + # Compare adhoc and cov: locs might not be equivalent, but the same + # general profile should persist, so look at the std and be lenient: + if weight_norm == 'unit-noise-gain': + adhoc_scale = 0.12 + else: + adhoc_scale = 1. + assert_allclose( + np.linalg.norm(stc_adhoc.data, axis=0) * adhoc_scale, + np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) + assert_allclose( + np.linalg.norm(stc_nocov.data, axis=0) / scale * adhoc_scale, + np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) + + if weight_norm == 'nai': + # NAI is always normalized by noise-level (based on eigenvalues) + for stc in (stc_nocov, stc_cov): + assert_allclose(stc.data.std(), 0.584, rtol=0.2) + elif weight_norm is None: + # None always represents something not normalized, reflecting channel + # weights + for stc in (stc_nocov, stc_cov): + assert_allclose(stc.data.std(), 2.8e-8, rtol=0.1) + else: + assert weight_norm == 'unit-noise-gain' + # Channel scalings depend on presence of noise_cov + assert_allclose(stc_nocov.data.std(), 7.8e-13, rtol=0.1) + assert_allclose(stc_cov.data.std(), 0.187, rtol=0.2) + + +@pytest.mark.parametrize('reg, weight_norm, use_cov, depth, lower, upper', [ + (0.05, 'unit-noise-gain', True, None, 97, 98), + (0.05, 'nai', True, None, 96, 98), + (0.05, 'nai', True, 0.8, 96, 98), + (0.05, None, True, None, 74, 76), + (0.05, None, True, 0.8, 90, 93), # depth improves weight_norm=None + (0.05, 'unit-noise-gain', False, None, 83, 86), + (0.05, 'unit-noise-gain', False, 0.8, 83, 86), # depth same for wn != None + # no reg + (0.00, 'unit-noise-gain', True, None, 35, 99), # TODO: Still not stable +]) +def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, + depth, lower, upper): + """Test localization bias for fixed-orientation LCMV.""" + evoked, fwd, noise_cov, data_cov, want = bias_params_fixed + if not use_cov: + evoked.pick_types(meg='grad') + noise_cov = None + assert data_cov['data'].shape[0] == len(data_cov['names']) + loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, depth=depth, + weight_norm=weight_norm)).data + loc = np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + perc = (want == np.argmax(loc, axis=0)).mean() * 100 + assert lower <= perc <= upper + + +# Changes here should be synced with test_dics.py +@pytest.mark.parametrize( + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, ' + 'lower_ori, upper_ori', [ + (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28, 0.82, 0.84), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42, 0.96, 0.98), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14, 0.79, 0.81), + (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', 'nai', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', None, True, None, 12, 14, 0.97, 0.98), + (0.05, 'vector', None, True, 0.8, 39, 43, 0.97, 0.98), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, 0, 0), # noqa: E501 + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, 0, 0), + (0.05, 'max-power', 'nai', True, None, 21, 24, 0, 0), + (0.05, 'max-power', None, True, None, 7, 10, 0, 0), + (0.05, 'max-power', None, True, 0.8, 15, 18, 0, 
0), + (0.05, None, None, True, 0.8, 40, 42, 0, 0), + # no reg + (0.00, 'vector', None, True, None, 23, 24, 0.96, 0.97), + (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 52, 54, 0.95, 0.96), # noqa: E501 + (0.00, 'vector', 'unit-noise-gain', True, None, 44, 48, 0.97, 0.99), + (0.00, 'vector', 'nai', True, None, 44, 48, 0.97, 0.99), + (0.00, 'max-power', None, True, None, 14, 15, 0, 0), + (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 35, 37, 0, 0), # noqa: E501 + (0.00, 'max-power', 'unit-noise-gain', True, None, 35, 37, 0, 0), + (0.00, 'max-power', 'nai', True, None, 35, 37, 0, 0), + ]) +def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, + use_cov, depth, lower, upper, + lower_ori, upper_ori): + """Test localization bias for free-orientation LCMV.""" + evoked, fwd, noise_cov, data_cov, want = bias_params_free + if not use_cov: + evoked.pick_types(meg='grad') + noise_cov = None + with _record_warnings(): # rank deficiency of data_cov + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, filters).data + if pick_ori == 'vector': + ori = loc.copy() / np.linalg.norm(loc, axis=1, keepdims=True) + else: + # doesn't make sense for pooled (None) or max-power (can't be all 3) + ori = None + loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 + assert lower <= perc <= upper + _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori) + + +# Changes here should be synced with the ones above, but these have meaningful +# orientation values +@pytest.mark.parametrize( + 'reg, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori', [ + (0.05, 'unit-noise-gain-invariant', False, None, 38, 40, 0.54, 0.55), + (0.05, 'unit-noise-gain', False, None, 38, 40, 0.54, 0.55), + (0.05, 'nai', True, None, 56, 57, 0.59, 0.61), + (0.05, None, True, None, 27, 28, 0.56, 0.57), + (0.05, None, True, 0.8, 42, 43, 0.56, 0.57), + # no reg + (0.00, None, True, None, 50, 51, 0.58, 0.59), + (0.00, 'unit-noise-gain-invariant', True, None, 73, 75, 0.59, 0.61), + (0.00, 'unit-noise-gain', True, None, 73, 75, 0.59, 0.61), + (0.00, 'nai', True, None, 73, 75, 0.59, 0.61), + ]) +def test_orientation_max_power(bias_params_fixed, bias_params_free, + reg, weight_norm, use_cov, depth, lower, upper, + lower_ori, upper_ori): + """Test orientation selection for bias for max-power LCMV.""" + # we simulate data for the fixed orientation forward and beamform using + # the free orientation forward, and check the orientation match at the end + evoked, _, noise_cov, data_cov, want = bias_params_fixed + fwd = bias_params_free[1] + if not use_cov: + evoked.pick_types(meg='grad') + noise_cov = None + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori='max-power', + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, filters).data + ori = filters['max_power_ori'] + assert ori.shape == (246, 3) + loc = np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + max_idx = np.argmax(loc, axis=0) + mask = want == max_idx # ones that localized properly + perc = mask.mean() * 100 + assert lower <= perc <= upper + # Compute the dot products of our forward normals and + assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate( + [s['nn'][v] for s, v in 
zip(fwd['src'], filters['vertices'])]) + nn = nn[want] + nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) + assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) + dots = np.abs((nn[mask] * ori[mask]).sum(-1)) + assert_array_less(dots, 1) + assert_array_less(0, dots) + got = np.mean(dots) + assert lower_ori < got < upper_ori + + +@pytest.mark.parametrize('weight_norm, pick_ori', [ + pytest.param('nai', 'max-power', marks=pytest.mark.slowtest), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', 'max-power'), + pytest.param('unit-noise-gain', None, marks=pytest.mark.slowtest), +]) +def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori): + """Test that depth weighting does not matter for normalized filters.""" + evoked, fwd, noise_cov, data_cov, _ = bias_params_free + data = apply_lcmv(evoked, make_lcmv( + evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, depth=0.)).data + data_depth = apply_lcmv(evoked, make_lcmv( + evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, depth=1.)).data + assert data.shape == data_depth.shape + for d1, d2 in zip(data, data_depth): + # Sign flips can change when nearly orthogonal to the normal direction + d2 *= np.sign(np.dot(d1.ravel(), d2.ravel())) + atol = np.linalg.norm(d1) * 1e-7 + assert_allclose(d1, d2, atol=atol) + + +@testing.requires_testing_data +def test_lcmv_maxfiltered(): + """Test LCMV on maxfiltered data.""" + raw = mne.io.read_raw_fif(fname_raw).fix_mag_coil_types() + raw_sss = mne.preprocessing.maxwell_filter(raw) + events = mne.find_events(raw_sss) + del raw + raw_sss.pick_types(meg='mag') + assert len(raw_sss.ch_names) == 102 + epochs = mne.Epochs(raw_sss, events) + data_cov = mne.compute_covariance(epochs, tmin=0) + fwd = mne.read_forward_solution(fname_fwd) + rank = compute_rank(data_cov, info=epochs.info) + assert rank == {'mag': 71} + for use_rank in ('info', rank, 'full', None): + make_lcmv(epochs.info, fwd, data_cov, rank=use_rank) + + +# To reduce test time, only test combinations that should matter rather than +# all of them +@testing.requires_testing_data +@pytest.mark.parametrize('pick_ori, weight_norm, reg, inversion', [ + ('vector', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('vector', 'unit-noise-gain-invariant', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.05, 'matrix'), + ('vector', 'unit-noise-gain', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.0, 'matrix'), + ('vector', 'unit-noise-gain', 0.0, 'single'), + ('vector', 'nai', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.0, 'single'), + ('max-power', 'unit-noise-gain', 0.05, 'single'), + ('max-power', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('normal', 'unit-noise-gain', 0.05, 'matrix'), + ('normal', 'nai', 0.0, 'matrix'), +]) +def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): + """Test unit-noise-gain filter against formula.""" + raw = mne.io.read_raw_fif(fname_raw, preload=True) + events = mne.find_events(raw) + raw.pick_types(meg='mag') + assert len(raw.ch_names) == 102 + epochs = mne.Epochs(raw, events, None, preload=True) + data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) + # for now, avoid whitening to make life easier + noise_cov = mne.make_ad_hoc_cov(epochs.info, std=dict(grad=1., mag=1.)) + forward = mne.read_forward_solution(fname_fwd) + 
convert_forward_solution(forward, surf_ori=True, copy=False) + rank = None + kwargs = dict(reg=reg, noise_cov=noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, rank=rank, inversion=inversion) + if inversion == 'single' and pick_ori == 'vector' and \ + weight_norm == 'unit-noise-gain-invariant': + with pytest.raises(ValueError, match='Cannot use'): + make_lcmv(epochs.info, forward, data_cov, **kwargs) + return + filters = make_lcmv(epochs.info, forward, data_cov, **kwargs) + _, _, _, _, G, _, _, _ = _prepare_beamformer_input( + epochs.info, forward, None, 'vector', noise_cov=noise_cov, rank=rank, + pca=False, exp=None) + n_channels, n_sources = G.shape + n_sources //= 3 + G.shape = (n_channels, n_sources, 3) + G = G.transpose(1, 2, 0) # verts, orient, ch + _assert_weight_norm(filters, G) + + +def _assert_weight_norm(filters, G): + """Check the result of the chosen weight normalization strategy.""" + weights, max_power_ori = filters['weights'], filters['max_power_ori'] + + # Make the dimensions of the weight matrix equal for both DICS (which + # defines weights for multiple frequencies) and LCMV (which does not). + if filters['kind'] == 'LCMV': + weights = weights[np.newaxis] + if max_power_ori is not None: + max_power_ori = max_power_ori[np.newaxis] + if max_power_ori is not None: + max_power_ori = max_power_ori[..., np.newaxis] + + weight_norm = filters['weight_norm'] + inversion = filters['inversion'] + n_channels = weights.shape[2] + + if inversion == 'matrix': + # Dipoles are grouped in groups with size n_orient + n_sources = filters['n_sources'] + n_orient = 3 if filters['is_free_ori'] else 1 + elif inversion == 'single': + # Every dipole is treated as a unique source + n_sources = weights.shape[1] + n_orient = 1 + + for wi, w in enumerate(weights): + w = w.reshape(n_sources, n_orient, n_channels) + + # Compute leadfield in the direction chosen during the computation of + # the beamformer. + if filters['pick_ori'] == 'max-power': + use_G = np.sum(G * max_power_ori[wi], axis=1, keepdims=True) + elif filters['pick_ori'] == 'normal': + use_G = G[:, -1:] + else: + use_G = G + if inversion == 'single': + # Every dipole is treated as a unique source + use_G = use_G.reshape(n_sources, 1, n_channels) + assert w.shape == use_G.shape == (n_sources, n_orient, n_channels) + + # Test weight normalization scheme + got = np.matmul(w, w.conj().swapaxes(-2, -1)) + desired = np.repeat(np.eye(n_orient)[np.newaxis], w.shape[0], axis=0) + if n_orient == 3 and weight_norm in ('unit-noise-gain', 'nai'): + # only the diagonal is correct! 
+ assert not np.allclose(got, desired, atol=1e-7) + got = got.reshape(n_sources, -1)[:, ::n_orient + 1] + desired = np.ones_like(got) + if weight_norm == 'nai': # additional scale factor, should be fixed + atol = 1e-7 * got.flat[0] + desired *= got.flat[0] + else: + atol = 1e-7 + assert_allclose(got, desired, atol=atol, err_msg='w @ w.conj().T = I') + + # Check that the result here is a diagonal matrix for Sekihara + if n_orient > 1 and weight_norm != 'unit-noise-gain-invariant': + got = w @ use_G.swapaxes(-2, -1) + diags = np.diagonal(got, 0, -2, -1) + want = np.apply_along_axis(np.diagflat, 1, diags) + atol = np.mean(diags).real * 1e-12 + assert_allclose(got, want, atol=atol, err_msg='G.T @ w = θI') + + +def test_api(): + """Test LCMV/DICS API equivalence.""" + lcmv_names = _get_args(make_lcmv) + dics_names = _get_args(make_dics) + dics_names[dics_names.index('csd')] = 'data_cov' + dics_names[dics_names.index('noise_csd')] = 'noise_cov' + dics_names.pop(dics_names.index('real_filter')) # not a thing for LCMV + assert lcmv_names == dics_names diff --git a/python/libs/mne/beamformer/tests/test_rap_music.py b/python/libs/mne/beamformer/tests/test_rap_music.py new file mode 100644 index 0000000..e8ac3d9 --- /dev/null +++ b/python/libs/mne/beamformer/tests/test_rap_music.py @@ -0,0 +1,202 @@ +# Authors: Yousra Bekhti +# Alexandre Gramfort +# +# License: BSD-3-Clause + +import os.path as op + +import pytest +import numpy as np +from scipy import linalg +from numpy.testing import assert_allclose + +import mne +from mne.beamformer import rap_music +from mne.cov import regularize +from mne.datasets import testing +from mne.minimum_norm.tests.test_inverse import assert_var_exp_log +from mne.utils import catch_logging + + +data_path = testing.data_path(download=False) +fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') + + +def _get_data(ch_decim=1): + """Read in data used in tests.""" + # Read evoked + evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0)) + evoked.info['bads'] = ['MEG 2443'] + with evoked.info._unlock(): + evoked.info['lowpass'] = 16 # fake for decim + evoked.decimate(12) + evoked.crop(0.0, 0.3) + picks = mne.pick_types(evoked.info, meg=True, eeg=False) + picks = picks[::ch_decim] + evoked.pick_channels([evoked.ch_names[pick] for pick in picks]) + evoked.info.normalize_proj() + + noise_cov = mne.read_cov(fname_cov) + noise_cov['projs'] = [] + noise_cov = regularize(noise_cov, evoked.info, rank='full', proj=False) + return evoked, noise_cov + + +def simu_data(evoked, forward, noise_cov, n_dipoles, times, nave=1): + """Simulate an evoked dataset with 2 sources. + + One source is put in each hemisphere. 
+ """ + # Generate the two dipoles data + mu, sigma = 0.1, 0.005 + s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 / + (2 * sigma ** 2)) + + mu, sigma = 0.075, 0.008 + s2 = -1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 / + (2 * sigma ** 2)) + data = np.array([s1, s2]) * 1e-9 + + src = forward['src'] + rng = np.random.RandomState(42) + + rndi = rng.randint(len(src[0]['vertno'])) + lh_vertno = src[0]['vertno'][[rndi]] + + rndi = rng.randint(len(src[1]['vertno'])) + rh_vertno = src[1]['vertno'][[rndi]] + + vertices = [lh_vertno, rh_vertno] + tmin, tstep = times.min(), 1 / evoked.info['sfreq'] + stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep) + + sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info, + noise_cov, nave=nave, + random_state=rng) + + return sim_evoked, stc + + +def _check_dipoles(dipoles, fwd, stc, evoked, residual=None): + src = fwd['src'] + pos1 = fwd['source_rr'][np.where(src[0]['vertno'] == + stc.vertices[0])] + pos2 = fwd['source_rr'][np.where(src[1]['vertno'] == + stc.vertices[1])[0] + + len(src[0]['vertno'])] + + # Check the position of the two dipoles + assert (dipoles[0].pos[0] in np.array([pos1, pos2])) + assert (dipoles[1].pos[0] in np.array([pos1, pos2])) + + ori1 = fwd['source_nn'][np.where(src[0]['vertno'] == + stc.vertices[0])[0]][0] + ori2 = fwd['source_nn'][np.where(src[1]['vertno'] == + stc.vertices[1])[0] + + len(src[0]['vertno'])][0] + + # Check the orientation of the dipoles + assert (np.max(np.abs(np.dot(dipoles[0].ori[0], + np.array([ori1, ori2]).T))) > 0.99) + + assert (np.max(np.abs(np.dot(dipoles[1].ori[0], + np.array([ori1, ori2]).T))) > 0.99) + + if residual is not None: + picks_grad = mne.pick_types(residual.info, meg='grad') + picks_mag = mne.pick_types(residual.info, meg='mag') + rel_tol = 0.02 + for picks in [picks_grad, picks_mag]: + assert (linalg.norm(residual.data[picks], ord='fro') < + rel_tol * linalg.norm(evoked.data[picks], ord='fro')) + + +@testing.requires_testing_data +def test_rap_music_simulated(): + """Test RAP-MUSIC with simulated evoked.""" + evoked, noise_cov = _get_data(ch_decim=16) + forward = mne.read_forward_solution(fname_fwd) + forward = mne.pick_channels_forward(forward, evoked.ch_names) + forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True) + forward_fixed = mne.convert_forward_solution(forward, force_fixed=True, + surf_ori=True, use_cps=True) + + n_dipoles = 2 + sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov, + n_dipoles, evoked.times, nave=evoked.nave) + # Check dipoles for fixed ori + with catch_logging() as log: + dipoles = rap_music(sim_evoked, forward_fixed, noise_cov, + n_dipoles=n_dipoles, verbose=True) + assert_var_exp_log(log.getvalue(), 89, 91) + _check_dipoles(dipoles, forward_fixed, stc, sim_evoked) + assert 97 < dipoles[0].gof.max() < 100 + assert 91 < dipoles[1].gof.max() < 93 + assert dipoles[0].gof.min() >= 0. 
+ + nave = 100000 # add a tiny amount of noise to the simulated evokeds + sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov, + n_dipoles, evoked.times, nave=nave) + dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov, + n_dipoles=n_dipoles, return_residual=True) + _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) + + # Check dipoles for free ori + dipoles, residual = rap_music(sim_evoked, forward, noise_cov, + n_dipoles=n_dipoles, return_residual=True) + _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) + + # Check dipoles for free surface ori + dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov, + n_dipoles=n_dipoles, return_residual=True) + _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_rap_music_sphere(): + """Test RAP-MUSIC with real data, sphere model, MEG only.""" + evoked, noise_cov = _get_data(ch_decim=8) + sphere = mne.make_sphere_model(r0=(0., 0., 0.04)) + src = mne.setup_volume_source_space(subject=None, pos=10., + sphere=(0.0, 0.0, 40, 65.0), + mindist=5.0, exclude=0.0, + sphere_units='mm') + forward = mne.make_forward_solution(evoked.info, trans=None, src=src, + bem=sphere) + + with catch_logging() as log: + dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2, + verbose=True) + assert_var_exp_log(log.getvalue(), 47, 49) + # Test that there is one dipole on each hemisphere + pos = np.array([dip.pos[0] for dip in dipoles]) + assert pos.shape == (2, 3) + assert (pos[:, 0] < 0).sum() == 1 + assert (pos[:, 0] > 0).sum() == 1 + # Check the amplitude scale + assert (1e-10 < dipoles[0].amplitude[0] < 1e-7) + # Check the orientation + dip_fit = mne.fit_dipole(evoked, noise_cov, sphere)[0] + assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[0].ori[0]))) > 0.99) + assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[1].ori[0]))) > 0.99) + idx = dip_fit.gof.argmax() + dist = np.linalg.norm(dipoles[0].pos[idx] - dip_fit.pos[idx]) + assert 0.004 <= dist < 0.007 + assert_allclose(dipoles[0].gof[idx], dip_fit.gof[idx], atol=3) + + +@testing.requires_testing_data +def test_rap_music_picks(): + """Test RAP-MUSIC with picking.""" + evoked = mne.read_evokeds(fname_ave, condition='Right Auditory', + baseline=(None, 0)) + evoked.crop(tmin=0.05, tmax=0.15) # select N100 + evoked.pick_types(meg=True, eeg=False) + forward = mne.read_forward_solution(fname_fwd) + noise_cov = mne.read_cov(fname_cov) + dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2) + assert len(dipoles) == 2 diff --git a/python/libs/mne/beamformer/tests/test_resolution_matrix.py b/python/libs/mne/beamformer/tests/test_resolution_matrix.py new file mode 100644 index 0000000..82aae75 --- /dev/null +++ b/python/libs/mne/beamformer/tests/test_resolution_matrix.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Authors: Olaf Hauk +# +# License: BSD-3-Clause +""" +Test computation of resolution matrix for LCMV beamformers. + +If noise and data covariance are the same, the LCMV beamformer weights should +be the transpose of the leadfield matrix. 
+""" + +from copy import deepcopy +import os.path as op +import numpy as np +from numpy.testing import assert_allclose + +import mne +from mne.datasets import testing +from mne.beamformer import make_lcmv, make_lcmv_resolution_matrix + +data_path = testing.data_path(download=False) +subjects_dir = op.join(data_path, 'subjects') +fname_inv = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') +fname_evoked = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-ave.fif') +fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') + + +@testing.requires_testing_data +def test_resolution_matrix_lcmv(): + """Test computation of resolution matrix for LCMV beamformers.""" + # read forward solution + forward = mne.read_forward_solution(fname_fwd) + + # remove bad channels + forward = mne.pick_channels_forward(forward, exclude='bads') + + # forward operator with fixed source orientations + forward_fxd = mne.convert_forward_solution(forward, surf_ori=True, + force_fixed=True) + + # evoked info + info = mne.io.read_info(fname_evoked) + mne.pick_info(info, mne.pick_types(info, meg=True), copy=False) # good MEG + + # noise covariance matrix + # ad-hoc to avoid discrepancies due to regularisation of real noise + # covariance matrix + noise_cov = mne.make_ad_hoc_cov(info) + + # Resolution matrix for Beamformer + data_cov = noise_cov.copy() # to test a property of LCMV + + # compute beamformer filters + # reg=0. to make sure noise_cov and data_cov are as similar as possible + filters = make_lcmv(info, forward_fxd, data_cov, reg=0., + noise_cov=noise_cov, + pick_ori=None, rank=None, + weight_norm=None, + reduce_rank=False, + verbose=False) + + # Compute resolution matrix for beamformer + resmat_lcmv = make_lcmv_resolution_matrix(filters, forward_fxd, info) + + # for noise_cov==data_cov and whitening, the filter weights should be the + # transpose of leadfield + + # create filters with transposed whitened leadfield as weights + forward_fxd = mne.pick_channels_forward(forward_fxd, info['ch_names']) + filters_lfd = deepcopy(filters) + filters_lfd['weights'][:] = forward_fxd['sol']['data'].T + + # compute resolution matrix for filters with transposed leadfield + resmat_fwd = make_lcmv_resolution_matrix(filters_lfd, forward_fxd, info) + + # pairwise correlation for rows (CTFs) of resolution matrices for whitened + # LCMV beamformer and transposed leadfield should be 1 + # Some rows are off by about 0.1 - not yet clear why + corr = [] + + for (f, l) in zip(resmat_fwd, resmat_lcmv): + + corr.append(np.corrcoef(f, l)[0, 1]) + + # all row correlations should at least be above ~0.8 + assert_allclose(corr, 1., atol=0.2) + + # Maximum row correlation should at least be close to 1 + assert_allclose(np.max(corr), 1., atol=0.01) diff --git a/python/libs/mne/bem.py b/python/libs/mne/bem.py new file mode 100644 index 0000000..e419302 --- /dev/null +++ b/python/libs/mne/bem.py @@ -0,0 +1,2158 @@ +# Authors: Matti Hämäläinen +# Alexandre Gramfort +# Eric Larson +# Lorenzo De Santis +# +# License: BSD-3-Clause + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. 
+ +from collections import OrderedDict +from functools import partial +import glob +import os +import os.path as op +import shutil +from copy import deepcopy + +import numpy as np + +from .io.constants import FIFF, FWD +from .io._digitization import _dig_kind_dict, _dig_kind_rev, _dig_kind_ints +from .io.write import (start_and_end_file, start_block, write_float, write_int, + write_float_matrix, write_int_matrix, end_block) +from .io.tag import find_tag +from .io.tree import dir_tree_find +from .io.open import fiff_open +from .surface import (read_surface, write_surface, complete_surface_info, + _compute_nearest, _get_ico_surface, read_tri, + _fast_cross_nd_sum, _get_solids, _complete_sphere_surf, + decimate_surface) +from .transforms import _ensure_trans, apply_trans, Transform +from .utils import (verbose, logger, run_subprocess, get_subjects_dir, warn, + _pl, _validate_type, _TempDir, _check_freesurfer_home, + _check_fname, has_nibabel, _check_option, path_like, + _on_missing, _import_h5io_funcs) + + +# ############################################################################ +# Compute BEM solution + +# The following approach is based on: +# +# de Munck JC: "A linear discretization of the volume conductor boundary +# integral equation using analytically integrated elements", +# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990 +# + + +class ConductorModel(dict): + """BEM or sphere model.""" + + def __repr__(self): # noqa: D105 + if self['is_sphere']: + center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0']) + rad = self.radius + if rad is None: # no radius / MEG only + extra = 'Sphere (no layers): r0=[%s] mm' % center + else: + extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm' + % (len(self['layers']) - 1, _pl(self['layers']), + center, rad * 1000.)) + else: + extra = ('BEM (%s layer%s)' % (len(self['surfs']), + _pl(self['surfs']))) + return '' % extra + + def copy(self): + """Return copy of ConductorModel instance.""" + return deepcopy(self) + + @property + def radius(self): + """Sphere radius if an EEG sphere model.""" + if not self['is_sphere']: + raise RuntimeError('radius undefined for BEM') + return None if len(self['layers']) == 0 else self['layers'][-1]['rad'] + + +def _calc_beta(rk, rk_norm, rk1, rk1_norm): + """Compute coefficients for calculating the magic vector omega.""" + rkk1 = rk1[0] - rk[0] + size = np.linalg.norm(rkk1) + rkk1 /= size + num = rk_norm + np.dot(rk, rkk1) + den = rk1_norm + np.dot(rk1, rkk1) + res = np.log(num / den) / size + return res + + +def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area): + """Compute the linear potential matrix element computations.""" + omega = np.zeros((len(fros), 3)) + + # we replicate a little bit of the _get_solids code here for speed + # (we need some of the intermediate values later) + v1 = tri_rr[np.newaxis, 0, :] - fros + v2 = tri_rr[np.newaxis, 1, :] - fros + v3 = tri_rr[np.newaxis, 2, :] - fros + triples = _fast_cross_nd_sum(v1, v2, v3) + l1 = np.linalg.norm(v1, axis=1) + l2 = np.linalg.norm(v2, axis=1) + l3 = np.linalg.norm(v3, axis=1) + ss = l1 * l2 * l3 + ss += np.einsum('ij,ij,i->i', v1, v2, l3) + ss += np.einsum('ij,ij,i->i', v1, v3, l2) + ss += np.einsum('ij,ij,i->i', v2, v3, l1) + solids = np.arctan2(triples, ss) + + # We *could* subselect the good points from v1, v2, v3, triples, solids, + # l1, l2, and l3, but there are *very* few bad points. So instead we do + # some unnecessary calculations, and then omit them from the final + # solution. 
These three lines ensure we don't get invalid values in + # _calc_beta. + bad_mask = np.abs(solids) < np.pi / 1e6 + l1[bad_mask] = 1. + l2[bad_mask] = 1. + l3[bad_mask] = 1. + + # Calculate the magic vector vec_omega + beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis], + _calc_beta(v2, l2, v3, l3)[:, np.newaxis], + _calc_beta(v3, l3, v1, l1)[:, np.newaxis]] + vec_omega = (beta[2] - beta[0]) * v1 + vec_omega += (beta[0] - beta[1]) * v2 + vec_omega += (beta[1] - beta[2]) * v3 + + area2 = 2.0 * tri_area + n2 = 1.0 / (area2 * area2) + # leave omega = 0 otherwise + # Put it all together... + yys = [v1, v2, v3] + idx = [0, 1, 2, 0, 2] + for k in range(3): + diff = yys[idx[k - 1]] - yys[idx[k + 1]] + zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn) + omega[:, k] = -n2 * (area2 * zdots * 2. * solids - + triples * (diff * vec_omega).sum(axis=-1)) + # omit the bad points from the solution + omega[bad_mask] = 0. + return omega + + +def _correct_auto_elements(surf, mat): + """Improve auto-element approximation.""" + pi2 = 2.0 * np.pi + tris_flat = surf['tris'].ravel() + misses = pi2 - mat.sum(axis=1) + for j, miss in enumerate(misses): + # How much is missing? + n_memb = len(surf['neighbor_tri'][j]) + assert n_memb > 0 # should be guaranteed by our surface checks + # The node itself receives one half + mat[j, j] = miss / 2.0 + # The rest is divided evenly among the member nodes... + miss /= (4.0 * n_memb) + members = np.where(j == tris_flat)[0] + mods = members % 3 + offsets = np.array([[1, 2], [-1, 1], [-1, -2]]) + tri_1 = members + offsets[mods, 0] + tri_2 = members + offsets[mods, 1] + for t1, t2 in zip(tri_1, tri_2): + mat[j, tris_flat[t1]] += miss + mat[j, tris_flat[t2]] += miss + return + + +def _fwd_bem_lin_pot_coeff(surfs): + """Calculate the coefficients for linear collocation approach.""" + # taken from fwd_bem_linear_collocation.c + nps = [surf['np'] for surf in surfs] + np_tot = sum(nps) + coeff = np.zeros((np_tot, np_tot)) + offsets = np.cumsum(np.concatenate(([0], nps))) + for si_1, surf1 in enumerate(surfs): + rr_ord = np.arange(nps[si_1]) + for si_2, surf2 in enumerate(surfs): + logger.info(" %s (%d) -> %s (%d) ..." % + (_bem_surf_name[surf1['id']], nps[si_1], + _bem_surf_name[surf2['id']], nps[si_2])) + tri_rr = surf2['rr'][surf2['tris']] + tri_nn = surf2['tri_nn'] + tri_area = surf2['tri_area'] + submat = coeff[offsets[si_1]:offsets[si_1 + 1], + offsets[si_2]:offsets[si_2 + 1]] # view + for k in range(surf2['ntri']): + tri = surf2['tris'][k] + if si_1 == si_2: + skip_idx = ((rr_ord == tri[0]) | + (rr_ord == tri[1]) | + (rr_ord == tri[2])) + else: + skip_idx = list() + # No contribution from a triangle that + # this vertex belongs to + # if sidx1 == sidx2 and (tri == j).any(): + # continue + # Otherwise do the hard job + coeffs = _lin_pot_coeff(fros=surf1['rr'], tri_rr=tri_rr[k], + tri_nn=tri_nn[k], tri_area=tri_area[k]) + coeffs[skip_idx] = 0. + submat[:, tri] -= coeffs + if si_1 == si_2: + _correct_auto_elements(surf1, submat) + return coeff + + +def _fwd_bem_multi_solution(solids, gamma, nps): + """Do multi surface solution. 
+ + * Invert I - solids/(2*M_PI) + * Take deflation into account + * The matrix is destroyed after inversion + * This is the general multilayer case + """ + pi2 = 1.0 / (2 * np.pi) + n_tot = np.sum(nps) + assert solids.shape == (n_tot, n_tot) + nsurf = len(nps) + defl = 1.0 / n_tot + # Modify the matrix + offsets = np.cumsum(np.concatenate(([0], nps))) + for si_1 in range(nsurf): + for si_2 in range(nsurf): + mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2] + slice_j = slice(offsets[si_1], offsets[si_1 + 1]) + slice_k = slice(offsets[si_2], offsets[si_2 + 1]) + solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult + solids += np.eye(n_tot) + return np.linalg.inv(solids) + + +def _fwd_bem_homog_solution(solids, nps): + """Make a homogeneous solution.""" + return _fwd_bem_multi_solution(solids, gamma=None, nps=nps) + + +def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri): + """Modify the solution according to the IP approach.""" + n_last = n_tri[-1] + mult = (1.0 + ip_mult) / ip_mult + + logger.info(' Combining...') + offsets = np.cumsum(np.concatenate(([0], n_tri))) + for si in range(len(n_tri)): + # Pick the correct submatrix (right column) and multiply + sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):] + # Multiply + sub -= 2 * np.dot(sub, ip_solution) + + # The lower right corner is a special case + sub[-n_last:, -n_last:] += mult * ip_solution + + # Final scaling + logger.info(' Scaling...') + solution *= ip_mult + return + + +def _check_complete_surface(surf, copy=False, incomplete='raise', extra=''): + surf = complete_surface_info(surf, copy=copy, verbose=False) + fewer = np.where([len(t) < 3 for t in surf['neighbor_tri']])[0] + if len(fewer) > 0: + msg = ('Surface {} has topological defects: {:.0f} / {:.0f} vertices ' + 'have fewer than three neighboring triangles [{}]{}' + .format(_bem_surf_name[surf['id']], len(fewer), surf['ntri'], + ', '.join(str(f) for f in fewer), extra)) + _on_missing(on_missing=incomplete, msg=msg, name='on_defects') + return surf + + +def _fwd_bem_linear_collocation_solution(bem): + """Compute the linear collocation potential solution.""" + # first, add surface geometries + for surf in bem['surfs']: + _check_complete_surface(surf) + + logger.info('Computing the linear collocation solution...') + logger.info(' Matrix coefficients...') + coeff = _fwd_bem_lin_pot_coeff(bem['surfs']) + bem['nsol'] = len(coeff) + logger.info(" Inverting the coefficient matrix...") + nps = [surf['np'] for surf in bem['surfs']] + bem['solution'] = _fwd_bem_multi_solution(coeff, bem['gamma'], nps) + if len(bem['surfs']) == 3: + ip_mult = bem['sigma'][1] / bem['sigma'][2] + if ip_mult <= FWD.BEM_IP_APPROACH_LIMIT: + logger.info('IP approach required...') + logger.info(' Matrix coefficients (homog)...') + coeff = _fwd_bem_lin_pot_coeff([bem['surfs'][-1]]) + logger.info(' Inverting the coefficient matrix (homog)...') + ip_solution = _fwd_bem_homog_solution(coeff, + [bem['surfs'][-1]['np']]) + logger.info(' Modify the original solution to incorporate ' + 'IP approach...') + _fwd_bem_ip_modify_solution(bem['solution'], ip_solution, ip_mult, + nps) + bem['bem_method'] = FWD.BEM_LINEAR_COLL + logger.info("Solution ready.") + + +@verbose +def make_bem_solution(surfs, verbose=None): + """Create a BEM solution using the linear collocation approach. + + Parameters + ---------- + surfs : list of dict + The BEM surfaces to use (from :func:`mne.make_bem_model`). 
+ %(verbose)s + + Returns + ------- + bem : instance of ConductorModel + The BEM solution. + + See Also + -------- + make_bem_model + read_bem_surfaces + write_bem_surfaces + read_bem_solution + write_bem_solution + + Notes + ----- + .. versionadded:: 0.10.0 + """ + logger.info('Approximation method : Linear collocation\n') + bem = _ensure_bem_surfaces(surfs) + _add_gamma_multipliers(bem) + if len(bem['surfs']) == 3: + logger.info('Three-layer model surfaces loaded.') + elif len(bem['surfs']) == 1: + logger.info('Homogeneous model surface loaded.') + else: + raise RuntimeError('Only 1- or 3-layer BEM computations supported') + _check_bem_size(bem['surfs']) + _fwd_bem_linear_collocation_solution(bem) + logger.info('BEM geometry computations complete.') + return bem + + +# ############################################################################ +# Make BEM model + +def _ico_downsample(surf, dest_grade): + """Downsample the surface if isomorphic to a subdivided icosahedron.""" + n_tri = len(surf['tris']) + bad_msg = ("Cannot decimate to requested ico grade %d. The provided " + "BEM surface has %d triangles, which cannot be isomorphic with " + "a subdivided icosahedron. Consider manually decimating the " + "surface to a suitable density and then use ico=None in " + "make_bem_model." % (dest_grade, n_tri)) + if n_tri % 20 != 0: + raise RuntimeError(bad_msg) + n_tri = n_tri // 20 + found = int(round(np.log(n_tri) / np.log(4))) + if n_tri != 4 ** found: + raise RuntimeError(bad_msg) + del n_tri + + if dest_grade > found: + raise RuntimeError('For this surface, decimation grade should be %d ' + 'or less, not %s.' % (found, dest_grade)) + + source = _get_ico_surface(found) + dest = _get_ico_surface(dest_grade, patch_stats=True) + del dest['tri_cent'] + del dest['tri_nn'] + del dest['neighbor_tri'] + del dest['tri_area'] + if not np.array_equal(source['tris'], surf['tris']): + raise RuntimeError('The source surface has a matching number of ' + 'triangles but ordering is wrong') + logger.info('Going from %dth to %dth subdivision of an icosahedron ' + '(n_tri: %d -> %d)' % (found, dest_grade, len(surf['tris']), + len(dest['tris']))) + # Find the mapping + dest['rr'] = surf['rr'][_get_ico_map(source, dest)] + return dest + + +def _get_ico_map(fro, to): + """Get a mapping between ico surfaces.""" + nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True) + n_bads = (dists > 5e-3).sum() + if n_bads > 0: + raise RuntimeError('No matching vertex for %d destination vertices' + % (n_bads)) + return nearest + + +def _order_surfaces(surfs): + """Reorder the surfaces.""" + if len(surfs) != 3: + return surfs + # we have three surfaces + surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN] + ids = np.array([surf['id'] for surf in surfs]) + if set(ids) != set(surf_order): + raise RuntimeError('bad surface ids: %s' % ids) + order = [np.where(ids == id_)[0][0] for id_ in surf_order] + surfs = [surfs[idx] for idx in order] + return surfs + + +def _assert_complete_surface(surf, incomplete='raise'): + """Check the sum of solid angles as seen from inside.""" + # from surface_checks.c + # Center of mass.... 
+ cm = surf['rr'].mean(axis=0) + logger.info('%s CM is %6.2f %6.2f %6.2f mm' % + (_bem_surf_name[surf['id']], + 1000 * cm[0], 1000 * cm[1], 1000 * cm[2])) + tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0] + prop = tot_angle / (2 * np.pi) + if np.abs(prop - 1.0) > 1e-5: + msg = (f'Surface {_bem_surf_name[surf["id"]]} is not complete (sum of ' + f'solid angles yielded {prop}, should be 1.)') + _on_missing( + incomplete, msg, name='incomplete', error_klass=RuntimeError) + + +def _assert_inside(fro, to): + """Check one set of points is inside a surface.""" + # this is "is_inside" in surface_checks.c + fro_name = _bem_surf_name[fro["id"]] + to_name = _bem_surf_name[to["id"]] + logger.info( + f'Checking that surface {fro_name} is inside surface {to_name} ...') + tot_angle = _get_solids(to['rr'][to['tris']], fro['rr']) + if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any(): + raise RuntimeError( + f'Surface {fro_name} is not completely inside surface {to_name}') + + +def _check_surfaces(surfs, incomplete='raise'): + """Check that the surfaces are complete and non-intersecting.""" + for surf in surfs: + _assert_complete_surface(surf, incomplete=incomplete) + # Then check the topology + for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): + _assert_inside(surf_2, surf_1) + + +def _check_surface_size(surf): + """Check that the coordinate limits are reasonable.""" + sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0) + if (sizes < 0.05).any(): + raise RuntimeError( + f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too ' + f'small ({1000 * sizes.min():9.5f}). Maybe the unit of measure' + ' is meters instead of mm') + + +def _check_thicknesses(surfs): + """Compute how close we are.""" + for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): + min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'], + return_dists=True)[1] + min_dist = min_dist.min() + fro = _bem_surf_name[surf_1['id']] + to = _bem_surf_name[surf_2['id']] + logger.info(f'Checking distance between {fro} and {to} surfaces...') + logger.info(f'Minimum distance between the {fro} and {to} surfaces is ' + f'approximately {1000 * min_dist:6.1f} mm') + + +def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True, + incomplete='raise', extra=''): + """Convert surfaces to a BEM.""" + # equivalent of mne_surf2bem + # surfs can be strings (filenames) or surface dicts + if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) == + len(sigmas)): + raise ValueError('surfs, ids, and sigmas must all have the same ' + 'number of elements (1 or 3)') + for si, surf in enumerate(surfs): + if isinstance(surf, str): + surfs[si] = read_surface(surf, return_dict=True)[-1] + # Downsampling if the surface is isomorphic with a subdivided icosahedron + if ico is not None: + for si, surf in enumerate(surfs): + surfs[si] = _ico_downsample(surf, ico) + for surf, id_ in zip(surfs, ids): + # Do topology checks (but don't save data) to fail early + surf['id'] = id_ + _check_complete_surface(surf, copy=True, incomplete=incomplete, + extra=extra) + surf['coord_frame'] = surf.get('coord_frame', FIFF.FIFFV_COORD_MRI) + surf.update(np=len(surf['rr']), ntri=len(surf['tris'])) + if rescale: + surf['rr'] /= 1000. # convert to meters + + # Shifting surfaces is not implemented here... 
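    # Hedged usage note (not upstream code): rescale=True encodes the
    # assumption that file-based surfaces follow FreeSurfer's millimeter
    # convention (hence the division by 1000 above). A caller passing
    # surface dicts already expressed in meters would be expected to disable
    # it, e.g. the hypothetical call:
    #     _surfaces_to_bem([inner_skull_dict],
    #                      [FIFF.FIFFV_BEM_SURF_ID_BRAIN], sigmas=[0.3],
    #                      rescale=False)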
+
+    # Order the surfaces for the benefit of the topology checks
+    for surf, sigma in zip(surfs, sigmas):
+        surf['sigma'] = sigma
+    surfs = _order_surfaces(surfs)
+
+    # Check topology as best we can
+    _check_surfaces(surfs, incomplete=incomplete)
+    for surf in surfs:
+        _check_surface_size(surf)
+    _check_thicknesses(surfs)
+    logger.info('Surfaces passed the basic topology checks.')
+    return surfs
+
+
+@verbose
+def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
+                   subjects_dir=None, verbose=None):
+    """Create a BEM model for a subject.
+
+    .. note:: To get a single layer bem corresponding to the --homog flag in
+              the command line tool set the ``conductivity`` parameter
+              to a list/tuple with a single value (e.g. [0.3]).
+
+    Parameters
+    ----------
+    subject : str
+        The subject.
+    ico : int | None
+        The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.
+        If None, no subsampling is applied.
+    conductivity : array of float, shape (3,) or (1,)
+        The conductivities to use for each shell. Should be a single element
+        for a one-layer model, or three elements for a three-layer model.
+        Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
+        single-layer model would be ``[0.3]``.
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    surfaces : list of dict
+        The BEM surfaces. Use `make_bem_solution` to turn these into a
+        `~mne.bem.ConductorModel` suitable for forward calculation.
+
+    See Also
+    --------
+    make_bem_solution
+    make_sphere_model
+    read_bem_surfaces
+    write_bem_surfaces
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    conductivity = np.array(conductivity, float)
+    if conductivity.ndim != 1 or conductivity.size not in (1, 3):
+        raise ValueError('conductivity must be 1D array-like with 1 or 3 '
+                         'elements')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = op.join(subjects_dir, subject)
+    bem_dir = op.join(subject_dir, 'bem')
+    inner_skull = op.join(bem_dir, 'inner_skull.surf')
+    outer_skull = op.join(bem_dir, 'outer_skull.surf')
+    outer_skin = op.join(bem_dir, 'outer_skin.surf')
+    surfaces = [inner_skull, outer_skull, outer_skin]
+    ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+           FIFF.FIFFV_BEM_SURF_ID_SKULL,
+           FIFF.FIFFV_BEM_SURF_ID_HEAD]
+    logger.info('Creating the BEM geometry...')
+    if len(conductivity) == 1:
+        surfaces = surfaces[:1]
+        ids = ids[:1]
+    surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
+    _check_bem_size(surfaces)
+    logger.info('Complete.\n')
+    return surfaces
+
+
+# ############################################################################
+# Compute EEG sphere model

+def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
+    """Get the model-dependent weighting factor for n."""
+    nlayer = len(m['layers'])
+    if nlayer in (0, 1):
+        return 1.
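# How make_bem_model feeds the rest of the pipeline -- a hedged sketch, not
# part of this module ('sample' and the subjects_dir path are placeholders;
# the output file name merely follows the usual MNE naming convention):
import mne

model = mne.make_bem_model('sample', ico=4, conductivity=(0.3, 0.006, 0.3),
                           subjects_dir='/path/to/subjects_dir')
bem = mne.make_bem_solution(model)
mne.write_bem_solution('sample-5120-5120-5120-bem-sol.fif', bem,
                       overwrite=True)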
+ + # Initialize the arrays + c1 = np.zeros(nlayer - 1) + c2 = np.zeros(nlayer - 1) + cr = np.zeros(nlayer - 1) + cr_mult = np.zeros(nlayer - 1) + for k in range(nlayer - 1): + c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma'] + c2[k] = c1[k] - 1.0 + cr_mult[k] = m['layers'][k]['rel_rad'] + cr[k] = cr_mult[k] + cr_mult[k] *= cr_mult[k] + + coeffs = np.zeros(n_terms - 1) + for n in range(1, n_terms): + # Increment the radius coefficients + for k in range(nlayer - 1): + cr[k] *= cr_mult[k] + + # Multiply the matrices + M = np.eye(2) + n1 = n + 1.0 + for k in range(nlayer - 2, -1, -1): + M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]], + [n * c2[k] * cr[k], n1 + n * c1[k]]], M) + num = n * (2.0 * n + 1.0) ** (nlayer - 1) + coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0]) + return coeffs + + +def _compose_linear_fitting_data(mu, u): + """Get the linear fitting data.""" + from scipy import linalg + k1 = np.arange(1, u['nterms']) + mu1ns = mu[0] ** k1 + # data to be fitted + y = u['w'][:-1] * (u['fn'][1:] - mu1ns * u['fn'][0]) + # model matrix + M = u['w'][:-1, np.newaxis] * (mu[1:] ** k1[:, np.newaxis] - + mu1ns[:, np.newaxis]) + uu, sing, vv = linalg.svd(M, full_matrices=False) + ncomp = u['nfit'] - 1 + uu, sing, vv = uu[:, :ncomp], sing[:ncomp], vv[:ncomp] + return y, uu, sing, vv + + +def _compute_linear_parameters(mu, u): + """Compute the best-fitting linear parameters.""" + y, uu, sing, vv = _compose_linear_fitting_data(mu, u) + + # Compute the residuals + vec = np.dot(y, uu) + resi = y - np.dot(uu, vec) + vec /= sing + + lambda_ = np.zeros(u['nfit']) + lambda_[1:] = np.dot(vec, vv) + lambda_[0] = u['fn'][0] - np.sum(lambda_[1:]) + rv = np.dot(resi, resi) / np.dot(y, y) + return rv, lambda_ + + +def _one_step(mu, u): + """Evaluate the residual sum of squares fit for one set of mu values.""" + if np.abs(mu).max() > 1.0: + return 1.0 + + # Compose the data for the linear fitting, compute SVD, then residuals + y, uu, sing, vv = _compose_linear_fitting_data(mu, u) + resi = y - np.dot(uu, np.dot(y, uu)) + return np.dot(resi, resi) + + +def _fwd_eeg_fit_berg_scherg(m, nterms, nfit): + """Fit the Berg-Scherg equivalent spherical model dipole parameters.""" + from scipy.optimize import fmin_cobyla + assert nfit >= 2 + u = dict(nfit=nfit, nterms=nterms) + + # (1) Calculate the coefficients of the true expansion + u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1) + + # (2) Calculate the weighting + f = (min([layer['rad'] for layer in m['layers']]) / + max([layer['rad'] for layer in m['layers']])) + + # correct weighting + k = np.arange(1, nterms + 1) + u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) / + k) * np.power(f, (k - 1.0)) + u['w'][-1] = 0 + + # Do the nonlinear minimization, constraining mu to the interval [-1, +1] + mu_0 = np.zeros(3) + fun = partial(_one_step, u=u) + max_ = 1. 
- 2e-4 # adjust for fmin_cobyla "catol" that not all scipy have + cons = list() + for ii in range(nfit): + def mycon(x, ii=ii): + return max_ - np.abs(x[ii]) + cons.append(mycon) + mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=1e-5, disp=0) + + # (6) Do the final step: calculation of the linear parameters + rv, lambda_ = _compute_linear_parameters(mu, u) + order = np.argsort(mu)[::-1] + mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first + + m['mu'] = mu + # This division takes into account the actual conductivities + m['lambda'] = lambda_ / m['layers'][-1]['sigma'] + m['nfit'] = nfit + return rv + + +@verbose +def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, + relative_radii=(0.90, 0.92, 0.97, 1.0), + sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None): + """Create a spherical model for forward solution calculation. + + Parameters + ---------- + r0 : array-like | str + Head center to use (in head coordinates). If 'auto', the head + center will be calculated from the digitization points in info. + head_radius : float | str | None + If float, compute spherical shells for EEG using the given radius. + If 'auto', estimate an appropriate radius from the dig points in Info, + If None, exclude shells (single layer sphere model). + %(info)s Only needed if ``r0`` or ``head_radius`` are ``'auto'``. + relative_radii : array-like + Relative radii for the spherical shells. + sigmas : array-like + Sigma values for the spherical shells. + %(verbose)s + + Returns + ------- + sphere : instance of ConductorModel + The resulting spherical conductor model. + + See Also + -------- + make_bem_model + make_bem_solution + + Notes + ----- + The default model has:: + + relative_radii = (0.90, 0.92, 0.97, 1.0) + sigmas = (0.33, 1.0, 0.004, 0.33) + + These correspond to compartments (with relative radii in ``m`` and + conductivities σ in ``S/m``) for the brain, CSF, skull, and scalp, + respectively. + + .. versionadded:: 0.9.0 + """ + for name in ('r0', 'head_radius'): + param = locals()[name] + if isinstance(param, str): + if param != 'auto': + raise ValueError('%s, if str, must be "auto" not "%s"' + % (name, param)) + relative_radii = np.array(relative_radii, float).ravel() + sigmas = np.array(sigmas, float).ravel() + if len(relative_radii) != len(sigmas): + raise ValueError('relative_radii length (%s) must match that of ' + 'sigmas (%s)' % (len(relative_radii), + len(sigmas))) + if len(sigmas) <= 1 and head_radius is not None: + raise ValueError('at least 2 sigmas must be supplied if ' + 'head_radius is not None, got %s' % (len(sigmas),)) + if (isinstance(r0, str) and r0 == 'auto') or \ + (isinstance(head_radius, str) and head_radius == 'auto'): + if info is None: + raise ValueError('Info must not be None for auto mode') + head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units='m')[:2] + if isinstance(r0, str): + r0 = r0_fit + if isinstance(head_radius, str): + head_radius = head_radius_fit + sphere = ConductorModel(is_sphere=True, r0=np.array(r0), + coord_frame=FIFF.FIFFV_COORD_HEAD) + sphere['layers'] = list() + if head_radius is not None: + # Eventually these could be configurable... 
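# Usage sketch for the function above (hedged; the r0/head_radius values are
# the documented defaults): the four default shells model brain, CSF, skull,
# and scalp, while head_radius=None skips the EEG shells entirely, which is
# sufficient for MEG-only forward models.
import mne

eeg_sphere = mne.make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09)
meg_sphere = mne.make_sphere_model(r0=(0., 0., 0.04), head_radius=None)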
+ relative_radii = np.array(relative_radii, float) + sigmas = np.array(sigmas, float) + order = np.argsort(relative_radii) + relative_radii = relative_radii[order] + sigmas = sigmas[order] + for rel_rad, sig in zip(relative_radii, sigmas): + # sort layers by (relative) radius, and scale radii + layer = dict(rad=rel_rad, sigma=sig) + layer['rel_rad'] = layer['rad'] = rel_rad + sphere['layers'].append(layer) + + # scale the radii + R = sphere['layers'][-1]['rad'] + rR = sphere['layers'][-1]['rel_rad'] + for layer in sphere['layers']: + layer['rad'] /= R + layer['rel_rad'] /= rR + + # + # Setup the EEG sphere model calculations + # + + # Scale the relative radii + for k in range(len(relative_radii)): + sphere['layers'][k]['rad'] = (head_radius * + sphere['layers'][k]['rel_rad']) + rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3) + logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv)) + for k in range(3): + logger.info('mu%d = %g lambda%d = %g' + % (k + 1, sphere['mu'][k], k + 1, + sphere['layers'][-1]['sigma'] * + sphere['lambda'][k])) + logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n' + % (1000 * head_radius,)) + return sphere + + +# ############################################################################# +# Sphere fitting + +@verbose +def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): + """Fit a sphere to the headshape points to determine head center. + + Parameters + ---------- + %(info_not_none)s + %(dig_kinds)s + units : str + Can be "m" (default) or "mm". + + .. versionadded:: 0.12 + %(verbose)s + + Returns + ------- + radius : float + Sphere radius. + origin_head: ndarray, shape (3,) + Head center in head coordinates. + origin_device: ndarray, shape (3,) + Head center in device coordinates. + + Notes + ----- + This function excludes any points that are low and frontal + (``z < 0 and y > 0``) to improve the fit. + """ + if not isinstance(units, str) or units not in ('m', 'mm'): + raise ValueError('units must be a "m" or "mm"') + radius, origin_head, origin_device = _fit_sphere_to_headshape( + info, dig_kinds) + if units == 'mm': + radius *= 1e3 + origin_head *= 1e3 + origin_device *= 1e3 + return radius, origin_head, origin_device + + +@verbose +def get_fitting_dig(info, dig_kinds='auto', exclude_frontal=True, + verbose=None): + """Get digitization points suitable for sphere fitting. + + Parameters + ---------- + %(info_not_none)s + %(dig_kinds)s + %(exclude_frontal)s + Default is True. + + .. versionadded:: 0.19 + %(verbose)s + + Returns + ------- + dig : array, shape (n_pts, 3) + The digitization points (in head coordinates) to use for fitting. + + Notes + ----- + This will exclude digitization locations that have ``z < 0 and y > 0``, + i.e. points on the nose and below the nose on the face. + + .. 
versionadded:: 0.14
+    """
+    _validate_type(info, "info")
+    if info['dig'] is None:
+        raise RuntimeError('Cannot fit headshape without digitization, '
+                           'info["dig"] is None')
+    if isinstance(dig_kinds, str):
+        if dig_kinds == 'auto':
+            # try "extra" first
+            try:
+                return get_fitting_dig(info, 'extra')
+            except ValueError:
+                pass
+            return get_fitting_dig(info, ('extra', 'eeg'))
+        else:
+            dig_kinds = (dig_kinds,)
+    # convert string args to ints (first make dig_kinds mutable in case tuple)
+    dig_kinds = list(dig_kinds)
+    for di, d in enumerate(dig_kinds):
+        dig_kinds[di] = _dig_kind_dict.get(d, d)
+        if dig_kinds[di] not in _dig_kind_ints:
+            raise ValueError('dig_kinds[#%d] (%s) must be one of %s'
+                             % (di, d, sorted(list(_dig_kind_dict.keys()))))
+
+    # get head digitization points of the specified kind(s)
+    hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
+    if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):
+        raise RuntimeError('Digitization points not in head coordinates, '
+                           'contact mne-python developers')
+
+    # exclude some frontal points (nose etc.)
+    if exclude_frontal:
+        hsp = [p for p in hsp if not (p[2] < -1e-6 and p[1] > 1e-6)]
+    hsp = np.array(hsp)
+
+    if len(hsp) <= 10:
+        kinds_str = ', '.join(['"%s"' % _dig_kind_rev[d]
+                               for d in sorted(dig_kinds)])
+        msg = ('Only %s head digitization points of the specified kind%s (%s,)'
+               % (len(hsp), _pl(dig_kinds), kinds_str))
+        if len(hsp) < 4:
+            raise ValueError(msg + ', at least 4 required')
+        else:
+            warn(msg + ', fitting may be inaccurate')
+    return hsp
+
+
+@verbose
+def _fit_sphere_to_headshape(info, dig_kinds, verbose=None):
+    """Fit a sphere to the given head shape."""
+    hsp = get_fitting_dig(info, dig_kinds)
+    radius, origin_head = _fit_sphere(np.array(hsp), disp=False)
+    # compute origin in device coordinates
+    dev_head_t = info['dev_head_t']
+    if dev_head_t is None:
+        dev_head_t = Transform('meg', 'head')
+    head_to_dev = _ensure_trans(dev_head_t, 'head', 'meg')
+    origin_device = apply_trans(head_to_dev, origin_head)
+    logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm'
+                % (radius * 1e3,))
+    # 99th percentile on Wikipedia for glabella to back of head is 21.7 cm,
+    # i.e. 108 mm "radius", so let's go with 110 mm
+    # en.wikipedia.org/wiki/Human_head#/media/File:HeadAnthropometry.JPG
+    if radius > 0.110:
+        warn('Estimated head size (%0.1f mm) exceeded 99th '
+             'percentile for adult head size' % (1e3 * radius,))
+    # > 2 cm away from head center in X or Y is strange
+    if np.linalg.norm(origin_head[:2]) > 0.02:
+        warn('(X, Y) fit (%0.1f, %0.1f) more than 20 mm from '
+             'head frame origin' % tuple(1e3 * origin_head[:2]))
+    logger.info('Origin head coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_head))
+    logger.info('Origin device coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_device))
+    return radius, origin_head, origin_device
+
+
+def _fit_sphere(points, disp='auto'):
+    """Fit a sphere to an arbitrary set of points."""
+    from scipy.optimize import fmin_cobyla
+    if isinstance(disp, str) and disp == 'auto':
+        disp = True if logger.level <= 20 else False
+    # initial guess for center and radius
+    radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.
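# A standalone sanity check of the cost function minimized below (numpy
# only; all names here are illustrative): for points sampled exactly on a
# sphere, the summed squared deviation of each point-to-center distance
# from the radius vanishes at the true center and radius.
import numpy as np

_rng = np.random.default_rng(0)
_pts = _rng.standard_normal((100, 3))
_pts = 0.09 * _pts / np.linalg.norm(_pts, axis=1, keepdims=True)
_pts += np.array([0., 0., 0.04])  # sphere of radius 90 mm, centered 40 mm up

def _sphere_cost(center_rad, pts):
    d = np.linalg.norm(pts - center_rad[:3], axis=1) - center_rad[3]
    return np.dot(d, d)

assert _sphere_cost(np.array([0., 0., 0.04, 0.09]), _pts) < 1e-20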
+ radius_init = radii.mean() + center_init = np.median(points, axis=0) + + # optimization + x0 = np.concatenate([center_init, [radius_init]]) + + def cost_fun(center_rad): + d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3] + d *= d + return d.sum() + + def constraint(center_rad): + return center_rad[3] # radius must be >= 0 + + x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init, + rhoend=radius_init * 1e-6, disp=disp) + + origin, radius = x_opt[:3], x_opt[3] + return radius, origin + + +def _check_origin(origin, info, coord_frame='head', disp=False): + """Check or auto-determine the origin.""" + if isinstance(origin, str): + if origin != 'auto': + raise ValueError('origin must be a numerical array, or "auto", ' + 'not %s' % (origin,)) + if coord_frame == 'head': + R, origin = fit_sphere_to_headshape(info, verbose=False, + units='m')[:2] + logger.info(' Automatic origin fit: head of radius %0.1f mm' + % (R * 1000.,)) + del R + else: + origin = (0., 0., 0.) + origin = np.array(origin, float) + if origin.shape != (3,): + raise ValueError('origin must be a 3-element array') + if disp: + origin_str = ', '.join(['%0.1f' % (o * 1000) for o in origin]) + msg = (' Using origin %s mm in the %s frame' + % (origin_str, coord_frame)) + if coord_frame == 'meg' and info['dev_head_t'] is not None: + o_dev = apply_trans(info['dev_head_t'], origin) + origin_str = ', '.join('%0.1f' % (o * 1000,) for o in o_dev) + msg += ' (%s mm in the head frame)' % (origin_str,) + logger.info(msg) + return origin + + +# ############################################################################ +# Create BEM surfaces + +@verbose +def make_watershed_bem(subject, subjects_dir=None, overwrite=False, + volume='T1', atlas=False, gcaatlas=False, preflood=None, + show=False, copy=True, T1=None, brainmask='ws.mgz', + verbose=None): + """Create BEM surfaces using the FreeSurfer watershed algorithm. + + Parameters + ---------- + subject : str + Subject name. + %(subjects_dir)s + %(overwrite)s + volume : str + Defaults to T1. + atlas : bool + Specify the --atlas option for mri_watershed. + gcaatlas : bool + Specify the --brain_atlas option for mri_watershed. + preflood : int + Change the preflood height. + show : bool + Show surfaces to visually inspect all three BEM surfaces (recommended). + + .. versionadded:: 0.12 + + copy : bool + If True (default), use copies instead of symlinks for surfaces + (if they do not already exist). + + .. versionadded:: 0.18 + .. versionchanged:: 1.1 Use copies instead of symlinks. + T1 : bool | None + If True, pass the ``-T1`` flag. + By default (None), this takes the same value as ``gcaatlas``. + + .. versionadded:: 0.19 + brainmask : str + The filename for the brainmask output file relative to the + ``$SUBJECTS_DIR/$SUBJECT/bem/watershed/`` directory. + Can be for example ``"../../mri/brainmask.mgz"`` to overwrite + the brainmask obtained via ``recon-all -autorecon1``. + + .. versionadded:: 0.19 + %(verbose)s + + See Also + -------- + mne.viz.plot_bem + + Notes + ----- + If your BEM meshes do not look correct when viewed in + :func:`mne.viz.plot_alignment` or :func:`mne.viz.plot_bem`, consider + potential solutions from the :ref:`FAQ `. + + .. 
versionadded:: 0.10
+    """
+    from .viz.misc import plot_bem
+    env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir)
+    tempdir = _TempDir()  # fsl and Freesurfer create some random junk in CWD
+    run_subprocess_env = partial(run_subprocess, env=env,
+                                 cwd=tempdir)
+
+    subjects_dir = env['SUBJECTS_DIR']  # Set by _prepare_env() above.
+    subject_dir = op.join(subjects_dir, subject)
+    ws_dir = op.join(bem_dir, 'watershed')
+    T1_dir = op.join(mri_dir, volume)
+    T1_mgz = T1_dir
+    if not T1_dir.endswith('.mgz'):
+        T1_mgz += '.mgz'
+
+    if not op.isdir(bem_dir):
+        os.makedirs(bem_dir)
+    _check_fname(T1_mgz, overwrite='read', must_exist=True, name='MRI data')
+    if op.isdir(ws_dir):
+        if not overwrite:
+            raise RuntimeError('%s already exists. Use the --overwrite option'
+                               ' to recreate it.' % ws_dir)
+        else:
+            shutil.rmtree(ws_dir)
+
+    # put together the command
+    cmd = ['mri_watershed']
+    if preflood:
+        cmd += ["-h", "%s" % int(preflood)]
+
+    if T1 is None:
+        T1 = gcaatlas
+    if T1:
+        cmd += ['-T1']
+    if gcaatlas:
+        fname = op.join(env['FREESURFER_HOME'], 'average',
+                        'RB_all_withskull_*.gca')
+        fname = sorted(glob.glob(fname))[::-1][0]
+        logger.info('Using GCA atlas: %s' % (fname,))
+        cmd += ['-atlas', '-brain_atlas', fname,
+                subject_dir + '/mri/transforms/talairach_with_skull.lta']
+    elif atlas:
+        cmd += ['-atlas']
+    if op.exists(T1_mgz):
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,
+                op.join(ws_dir, brainmask)]
+    else:
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,
+                op.join(ws_dir, brainmask)]
+    # report and run
+    logger.info('\nRunning mri_watershed for BEM segmentation with the '
+                'following parameters:\n\nResults dir = %s\nCommand = %s\n'
+                % (ws_dir, ' '.join(cmd)))
+    os.makedirs(op.join(ws_dir))
+    run_subprocess_env(cmd)
+    del tempdir  # clean up directory
+    if op.isfile(T1_mgz):
+        new_info = _extract_volume_info(T1_mgz) if has_nibabel() else dict()
+        if not new_info:
+            warn('nibabel is not available or the volume info is invalid. '
+                 'Volume info not updated in the written surface.')
+        surfs = ['brain', 'inner_skull', 'outer_skull', 'outer_skin']
+        for s in surfs:
+            surf_ws_out = op.join(ws_dir, '%s_%s_surface' % (subject, s))
+
+            rr, tris, volume_info = read_surface(surf_ws_out,
+                                                 read_metadata=True)
+            # replace volume info, 'head' stays
+            volume_info.update(new_info)
+            write_surface(surf_ws_out, rr, tris, volume_info=volume_info,
+                          overwrite=True)
+
+            # Create symbolic links
+            surf_out = op.join(bem_dir, '%s.surf' % s)
+            if not overwrite and op.exists(surf_out):
+                skip_symlink = True
+            else:
+                if op.exists(surf_out):
+                    os.remove(surf_out)
+                _symlink(surf_ws_out, surf_out, copy)
+                skip_symlink = False
+
+        if skip_symlink:
+            logger.info("Unable to create all symbolic links to .surf files "
+                        "in bem folder. Use --overwrite option to recreate "
+                        "them.")
+            dest = op.join(bem_dir, 'watershed')
+        else:
+            logger.info("Symbolic links to .surf files created in bem folder")
+            dest = bem_dir
+
+    logger.info("\nThank you for waiting.\nThe BEM triangulations for this "
+                "subject are now available at:\n%s."
% dest) + + # Write a head file for coregistration + fname_head = op.join(bem_dir, subject + '-head.fif') + if op.isfile(fname_head): + os.remove(fname_head) + + surf = _surfaces_to_bem([op.join(ws_dir, subject + '_outer_skin_surface')], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], sigmas=[1]) + write_bem_surfaces(fname_head, surf) + + # Show computed BEM surfaces + if show: + plot_bem(subject=subject, subjects_dir=subjects_dir, + orientation='coronal', slices=None, show=True) + + logger.info('Created %s\n\nComplete.' % (fname_head,)) + + +def _extract_volume_info(mgz): + """Extract volume info from a mgz file.""" + import nibabel + header = nibabel.load(mgz).header + version = header['version'] + vol_info = dict() + if version == 1: + version = '%s # volume info valid' % version + vol_info['valid'] = version + vol_info['filename'] = mgz + vol_info['volume'] = header['dims'][:3] + vol_info['voxelsize'] = header['delta'] + vol_info['xras'], vol_info['yras'], vol_info['zras'] = header['Mdc'] + vol_info['cras'] = header['Pxyz_c'] + + return vol_info + + +# ############################################################################ +# Read + +@verbose +def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', + verbose=None): + """Read the BEM surfaces from a FIF file. + + Parameters + ---------- + fname : str + The name of the file containing the surfaces. + patch_stats : bool, optional (default False) + Calculate and add cortical patch statistics to the surfaces. + s_id : int | None + If int, only read and return the surface with the given s_id. + An error will be raised if it doesn't exist. If None, all + surfaces are read and returned. + %(on_defects)s + + .. versionadded:: 0.23 + %(verbose)s + + Returns + ------- + surf: list | dict + A list of dictionaries that each contain a surface. If s_id + is not None, only the requested surface will be returned. 
+ + See Also + -------- + write_bem_surfaces, write_bem_solution, make_bem_model + """ + # Open the file, create directory + _validate_type(s_id, ('int-like', None), 's_id') + fname = _check_fname(fname, 'read', True, 'fname') + if fname.endswith('.h5'): + surf = _read_bem_surfaces_h5(fname, s_id) + else: + surf = _read_bem_surfaces_fif(fname, s_id) + if s_id is not None and len(surf) != 1: + raise ValueError('surface with id %d not found' % s_id) + for this in surf: + if patch_stats or this['nn'] is None: + _check_complete_surface(this, incomplete=on_defects) + return surf[0] if s_id is not None else surf + + +def _read_bem_surfaces_h5(fname, s_id): + read_hdf5, _ = _import_h5io_funcs() + bem = read_hdf5(fname) + try: + [s['id'] for s in bem['surfs']] + except Exception: # not our format + raise ValueError('BEM data not found') + surf = bem['surfs'] + if s_id is not None: + surf = [s for s in surf if s['id'] == s_id] + return surf + + +def _read_bem_surfaces_fif(fname, s_id): + # Default coordinate frame + coord_frame = FIFF.FIFFV_COORD_MRI + f, tree, _ = fiff_open(fname) + with f as fid: + # Find BEM + bem = dir_tree_find(tree, FIFF.FIFFB_BEM) + if bem is None or len(bem) == 0: + raise ValueError('BEM data not found') + + bem = bem[0] + # Locate all surfaces + bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF) + if bemsurf is None: + raise ValueError('BEM surface data not found') + + logger.info(' %d BEM surfaces found' % len(bemsurf)) + # Coordinate frame possibly at the top level + tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME) + if tag is not None: + coord_frame = tag.data + # Read all surfaces + if s_id is not None: + surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id) + for bsurf in bemsurf] + surf = [s for s in surf if s is not None] + else: + surf = list() + for bsurf in bemsurf: + logger.info(' Reading a surface...') + this = _read_bem_surface(fid, bsurf, coord_frame) + surf.append(this) + logger.info('[done]') + logger.info(' %d BEM surfaces read' % len(surf)) + return surf + + +def _read_bem_surface(fid, this, def_coord_frame, s_id=None): + """Read one bem surface.""" + # fid should be open as a context manager here + res = dict() + # Read all the interesting stuff + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID) + + if tag is None: + res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN + else: + res['id'] = int(tag.data) + + if s_id is not None and res['id'] != s_id: + return None + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA) + res['sigma'] = 1.0 if tag is None else float(tag.data) + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE) + if tag is None: + raise ValueError('Number of vertices not found') + + res['np'] = int(tag.data) + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI) + if tag is None: + raise ValueError('Number of triangles not found') + res['ntri'] = int(tag.data) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME) + if tag is None: + tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME) + if tag is None: + res['coord_frame'] = def_coord_frame + else: + res['coord_frame'] = tag.data + else: + res['coord_frame'] = tag.data + + # Vertices, normals, and triangles + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES) + if tag is None: + raise ValueError('Vertex data not found') + + res['rr'] = tag.data.astype(np.float64) + if res['rr'].shape[0] != res['np']: + raise ValueError('Vertex information is incorrect') + + tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS) + if tag is None: + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS) 
+ if tag is None: + res['nn'] = None + else: + res['nn'] = tag.data.astype(np.float64) + if res['nn'].shape[0] != res['np']: + raise ValueError('Vertex normal information is incorrect') + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES) + if tag is None: + raise ValueError('Triangulation not found') + + res['tris'] = tag.data - 1 # index start at 0 in Python + if res['tris'].shape[0] != res['ntri']: + raise ValueError('Triangulation information is incorrect') + + return res + + +@verbose +def read_bem_solution(fname, verbose=None): + """Read the BEM solution from a file. + + Parameters + ---------- + fname : str + The file containing the BEM solution. + %(verbose)s + + Returns + ------- + bem : instance of ConductorModel + The BEM solution. + + See Also + -------- + read_bem_surfaces + write_bem_surfaces + make_bem_solution + write_bem_solution + """ + fname = _check_fname(fname, 'read', True, 'fname') + # mirrors fwd_bem_load_surfaces from fwd_bem_model.c + if fname.endswith('.h5'): + read_hdf5, _ = _import_h5io_funcs() + logger.info('Loading surfaces and solution...') + bem = read_hdf5(fname) + else: + bem = _read_bem_solution_fif(fname) + + if len(bem['surfs']) == 3: + logger.info('Three-layer model surfaces loaded.') + needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN]) + if not all(x['id'] in needed for x in bem['surfs']): + raise RuntimeError('Could not find necessary BEM surfaces') + # reorder surfaces as necessary (shouldn't need to?) + reorder = [None] * 3 + for x in bem['surfs']: + reorder[np.where(x['id'] == needed)[0][0]] = x + bem['surfs'] = reorder + elif len(bem['surfs']) == 1: + if not bem['surfs'][0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN: + raise RuntimeError('BEM Surfaces not found') + logger.info('Homogeneous model surface loaded.') + + assert set(bem.keys()) == set(('surfs', 'solution', 'bem_method')) + bem = ConductorModel(bem) + bem['is_sphere'] = False + # sanity checks and conversions + _check_option('BEM approximation method', bem['bem_method'], + (FIFF.FIFFV_BEM_APPROX_LINEAR, FIFF.FIFFV_BEM_APPROX_CONST)) + dim = 0 + for surf in bem['surfs']: + if bem['bem_method'] == FIFF.FIFFV_BEM_APPROX_LINEAR: + dim += surf['np'] + else: # method == FIFF.FIFFV_BEM_APPROX_CONST + dim += surf['ntri'] + dims = bem['solution'].shape + if len(dims) != 2: + raise RuntimeError('Expected a two-dimensional solution matrix ' + 'instead of a %d dimensional one' % dims[0]) + if dims[0] != dim or dims[1] != dim: + raise RuntimeError('Expected a %d x %d solution matrix instead of ' + 'a %d x %d one' % (dim, dim, dims[1], dims[0])) + bem['nsol'] = bem['solution'].shape[0] + # Gamma factors and multipliers + _add_gamma_multipliers(bem) + kind = { + FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation', + FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear_collocation', + }[bem['bem_method']] + logger.info('Loaded %s BEM solution from %s', kind, fname) + return bem + + +def _read_bem_solution_fif(fname): + logger.info('Loading surfaces...') + surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False) + + # convert from surfaces to solution + logger.info('\nLoading the solution matrix...\n') + f, tree, _ = fiff_open(fname) + with f as fid: + # Find the BEM data + nodes = dir_tree_find(tree, FIFF.FIFFB_BEM) + if len(nodes) == 0: + raise RuntimeError('No BEM data in %s' % fname) + bem_node = nodes[0] + + # Approximation method + tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX) + if tag is None: + raise RuntimeError('No BEM solution 
found in %s' % fname) + method = tag.data[0] + tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION) + sol = tag.data + + return dict(solution=sol, bem_method=method, surfs=surfs) + + +def _add_gamma_multipliers(bem): + """Add gamma and multipliers in-place.""" + bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']]) + # Dirty trick for the zero conductivity outside + sigma = np.r_[0.0, bem['sigma']] + bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1]) + bem['field_mult'] = sigma[1:] - sigma[:-1] + # make sure subsequent "zip"s work correctly + assert len(bem['surfs']) == len(bem['field_mult']) + bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] / + (sigma[1:] + sigma[:-1])[:, np.newaxis]) + + +# In our BEM code we do not model the CSF so we assign the innermost surface +# the id BRAIN. Our 4-layer sphere we model CSF (at least by default), so when +# searching for and referring to surfaces we need to keep track of this. +_sm_surf_dict = OrderedDict([ + ('brain', FIFF.FIFFV_BEM_SURF_ID_BRAIN), + ('inner_skull', FIFF.FIFFV_BEM_SURF_ID_CSF), + ('outer_skull', FIFF.FIFFV_BEM_SURF_ID_SKULL), + ('head', FIFF.FIFFV_BEM_SURF_ID_HEAD), +]) +_bem_surf_dict = { + 'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN, + 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL, + 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD, +} +_bem_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull', + FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', + FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', +} +_sm_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'brain', + FIFF.FIFFV_BEM_SURF_ID_CSF: 'csf', + FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', + FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', +} + + +def _bem_find_surface(bem, id_): + """Find surface from already-loaded conductor model.""" + if bem['is_sphere']: + _surf_dict = _sm_surf_dict + _name_dict = _sm_surf_name + kind = 'Sphere model' + tri = 'boundary' + else: + _surf_dict = _bem_surf_dict + _name_dict = _bem_surf_name + kind = 'BEM' + tri = 'triangulation' + if isinstance(id_, str): + name = id_ + id_ = _surf_dict[id_] + else: + name = _name_dict[id_] + kind = 'Sphere model' if bem['is_sphere'] else 'BEM' + idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0] + if len(idx) != 1: + raise RuntimeError(f'{kind} does not have the {name} {tri}') + return bem['surfs'][idx[0]] + + +# ############################################################################ +# Write + + +@verbose +def write_bem_surfaces(fname, surfs, overwrite=False, verbose=None): + """Write BEM surfaces to a fiff file. + + Parameters + ---------- + fname : str + Filename to write. Can end with ``.h5`` to write using HDF5. + surfs : dict | list of dict + The surfaces, or a single surface. + %(overwrite)s + %(verbose)s + """ + if isinstance(surfs, dict): + surfs = [surfs] + fname = _check_fname(fname, overwrite=overwrite, name='fname') + + if fname.endswith('.h5'): + _, write_hdf5 = _import_h5io_funcs() + write_hdf5(fname, dict(surfs=surfs), overwrite=True) + else: + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_BEM) + write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame']) + _write_bem_surfaces_block(fid, surfs) + end_block(fid, FIFF.FIFFB_BEM) + + +@verbose +def write_head_bem(fname, rr, tris, on_defects='raise', overwrite=False, + verbose=None): + """Write a head surface to a fiff file. + + Parameters + ---------- + fname : str + Filename to write. 
+ rr : array, shape (n_vertices, 3) + Coordinate points in the MRI coordinate system. + tris : ndarray of int, shape (n_tris, 3) + Triangulation (each line contains indices for three points which + together form a face). + %(on_defects)s + %(overwrite)s + %(verbose)s + """ + surf = _surfaces_to_bem([dict(rr=rr, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, + incomplete=on_defects) + write_bem_surfaces(fname, surf, overwrite=overwrite) + + +def _write_bem_surfaces_block(fid, surfs): + """Write bem surfaces to open file handle.""" + for surf in surfs: + start_block(fid, FIFF.FIFFB_BEM_SURF) + write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma']) + write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id']) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame']) + write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np']) + write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri']) + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr']) + # index start at 0 in Python + write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, + surf['tris'] + 1) + if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0: + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn']) + end_block(fid, FIFF.FIFFB_BEM_SURF) + + +@verbose +def write_bem_solution(fname, bem, overwrite=False, verbose=None): + """Write a BEM model with solution. + + Parameters + ---------- + fname : str + The filename to use. Can end with ``.h5`` to write using HDF5. + bem : instance of ConductorModel + The BEM model with solution to save. + %(overwrite)s + %(verbose)s + + See Also + -------- + read_bem_solution + """ + fname = _check_fname(fname, overwrite=overwrite, name='fname') + if fname.endswith('.h5'): + _, write_hdf5 = _import_h5io_funcs() + bem = {k: bem[k] for k in ('surfs', 'solution', 'bem_method')} + write_hdf5(fname, bem, overwrite=True) + else: + _write_bem_solution_fif(fname, bem) + + +def _write_bem_solution_fif(fname, bem): + _check_bem_size(bem['surfs']) + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_BEM) + # Coordinate frame (mainly for backward compatibility) + write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, + bem['surfs'][0]['coord_frame']) + # Surfaces + _write_bem_surfaces_block(fid, bem['surfs']) + # The potential solution + if 'solution' in bem: + if bem['bem_method'] != FWD.BEM_LINEAR_COLL: + raise RuntimeError('Only linear collocation supported') + write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR) + write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION, + bem['solution']) + end_block(fid, FIFF.FIFFB_BEM) + + +# ############################################################################# +# Create 3-Layers BEM model from Flash MRI images + +def _prepare_env(subject, subjects_dir): + """Prepare an env object for subprocess calls.""" + env = os.environ.copy() + + fs_home = _check_freesurfer_home() + + _validate_type(subject, "str") + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subjects_dir = op.abspath(subjects_dir) # force use of an absolute path + subjects_dir = op.expanduser(subjects_dir) + if not op.isdir(subjects_dir): + raise RuntimeError('Could not find the MRI data directory "%s"' + % subjects_dir) + subject_dir = op.join(subjects_dir, subject) + if not op.isdir(subject_dir): + raise RuntimeError('Could not find the subject data directory "%s"' + % (subject_dir,)) + env.update(SUBJECT=subject, SUBJECTS_DIR=subjects_dir, + FREESURFER_HOME=fs_home) + mri_dir = op.join(subject_dir, 'mri') + bem_dir = op.join(subject_dir, 
'bem')
+    return env, mri_dir, bem_dir
+
+
+@verbose
+def convert_flash_mris(subject, flash30=True, convert=True, unwarp=False,
+                       subjects_dir=None, verbose=None):
+    """Convert DICOM files for use with make_flash_bem.
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    flash30 : bool
+        Use 30-degree flip angle data.
+    convert : bool
+        If True (default), convert the Flash MRI images to mgz format.
+        If False, assume the images have already been converted.
+    unwarp : bool
+        Run grad_unwarp with -unwarp <type> option on each of the converted
+        data sets. It requires FreeSurfer's MATLAB toolbox to be properly
+        installed.
+    %(subjects_dir)s
+    %(verbose)s
+
+    Notes
+    -----
+    Before running this script do the following:
+    (unless convert=False is specified)
+
+    1. Copy all of your FLASH images in a single directory <source> and
+       create a directory <dest> to hold the output of mne_organize_dicom
+    2. cd to <dest> and run
+       $ mne_organize_dicom <source>
+       to create an appropriate directory structure
+    3. Create symbolic links to make flash05 and flash30 point to the
+       appropriate series:
+       $ ln -s <FLASH 5 series dir> flash05
+       $ ln -s <FLASH 30 series dir> flash30
+       Some partition formats (e.g. FAT32) do not support symbolic links.
+       In this case, copy the file to the appropriate series:
+       $ cp <FLASH 5 series dir> flash05
+       $ cp <FLASH 30 series dir> flash30
+    4. cd to the directory where flash05 and flash30 links are
+    5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately
+    6. Run this script
+
+    This function assumes that the Freesurfer segmentation of the subject
+    has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+    should be, as usual, in the subject's mri directory.
+    """
+    env, mri_dir = _prepare_env(subject, subjects_dir)[:2]
+    tempdir = _TempDir()  # fsl and Freesurfer create some random junk in CWD
+    run_subprocess_env = partial(run_subprocess, env=env,
+                                 cwd=tempdir)
+    # Step 1a : Data conversion to mgz format
+    if not op.exists(op.join(mri_dir, 'flash', 'parameter_maps')):
+        os.makedirs(op.join(mri_dir, 'flash', 'parameter_maps'))
+    echos_done = 0
+    if convert:
+        logger.info("\n---- Converting Flash images ----")
+        echos = ['001', '002', '003', '004', '005', '006', '007', '008']
+        if flash30:
+            flashes = ['05', '30']
+        else:
+            flashes = ['05']
+        #
+        missing = False
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    missing = True
+        if missing:
+            echos = ['002', '003', '004', '005', '006', '007', '008', '009']
+            for flash in flashes:
+                for echo in echos:
+                    if not op.isdir(op.join('flash' + flash, echo)):
+                        raise RuntimeError("Directory %s is missing."
+                                           % op.join('flash' + flash, echo))
+        #
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    raise RuntimeError("Directory %s is missing."
+                                       % op.join('flash' + flash, echo))
+                sample_file = glob.glob(op.join('flash' + flash, echo, '*'))[0]
+                dest_file = op.join(mri_dir, 'flash',
+                                    'mef' + flash + '_' + echo + '.mgz')
+                # do not redo if already present
+                if op.isfile(dest_file):
+                    logger.info("The file %s is already there" % dest_file)
+                else:
+                    cmd = ['mri_convert', sample_file, dest_file]
+                    run_subprocess_env(cmd)
+                    echos_done += 1
+    # Step 1b : Run grad_unwarp on converted files
+    flash_dir = op.join(mri_dir, "flash")
+    template = op.join(flash_dir, "mef*.mgz")
+    files = glob.glob(template)
+    if len(files) == 0:
+        raise ValueError('No suitable source files found (%s)' % template)
+    if unwarp:
+        logger.info("\n---- Unwarp mgz data sets ----")
+        for infile in files:
+            outfile = infile.replace(".mgz", "u.mgz")
+            cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp',
+                   'true']
+            run_subprocess_env(cmd)
+    # Clear parameter maps if some of the data were reconverted
+    pm_dir = op.join(flash_dir, 'parameter_maps')
+    if echos_done > 0 and op.exists(pm_dir):
+        shutil.rmtree(pm_dir)
+        logger.info("\nParameter maps directory cleared")
+    if not op.exists(pm_dir):
+        os.makedirs(pm_dir)
+    # Step 2 : Create the parameter maps
+    if flash30:
+        logger.info("\n---- Creating the parameter maps ----")
+        if unwarp:
+            files = glob.glob(op.join(flash_dir, "mef05*u.mgz"))
+        if len(os.listdir(pm_dir)) == 0:
+            cmd = (['mri_ms_fitparms'] +
+                   files +
+                   [op.join(flash_dir, 'parameter_maps')])
+            run_subprocess_env(cmd)
+        else:
+            logger.info("Parameter maps were already computed")
+        # Step 3 : Synthesize the flash 5 images
+        logger.info("\n---- Synthesizing flash 5 images ----")
+        if not op.exists(op.join(pm_dir, 'flash5.mgz')):
+            cmd = ['mri_synthesize', '20', '5', '5',
+                   op.join(pm_dir, 'T1.mgz'),
+                   op.join(pm_dir, 'PD.mgz'),
+                   op.join(pm_dir, 'flash5.mgz')
+                   ]
+            run_subprocess_env(cmd)
+            os.remove(op.join(pm_dir, 'flash5_reg.mgz'))
+        else:
+            logger.info("Synthesized flash 5 volume is already there")
+    else:
+        logger.info("\n---- Averaging flash5 echoes ----")
+        template = op.join(flash_dir,
+                           "mef05*u.mgz" if unwarp else "mef05*.mgz")
+        files = glob.glob(template)
+        if len(files) == 0:
+            raise ValueError('No suitable source files found (%s)' % template)
+        cmd = (['mri_average', '-noconform'] +
+               files +
+               [op.join(pm_dir, 'flash5.mgz')])
+        run_subprocess_env(cmd)
+        if op.exists(op.join(pm_dir, 'flash5_reg.mgz')):
+            os.remove(op.join(pm_dir, 'flash5_reg.mgz'))
+    del tempdir  # finally done running subprocesses
+    assert op.isfile(op.join(pm_dir, 'flash5.mgz'))
+
+
+@verbose
+def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None,
+                   flash_path=None, copy=True, verbose=None):
+    """Create 3-Layer BEM model from prepared flash MRI images.
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    overwrite : bool
+        Write over existing .surf files in bem folder.
+    show : bool
+        Show surfaces to visually inspect all three BEM surfaces
+        (recommended).
+    %(subjects_dir)s
+    flash_path : str | None
+        Path to the flash images. If None (default), mri/flash/parameter_maps
+        within the subject reconstruction is used.
+
+        .. versionadded:: 0.13.0
+    copy : bool
+        If True (default), use copies instead of symlinks for surfaces
+        (if they do not already exist).
+
+        .. versionadded:: 0.18
+        .. versionchanged:: 1.1 Use copies instead of symlinks.
+    %(verbose)s
+
+    See Also
+    --------
+    convert_flash_mris
+
+    Notes
+    -----
+    This program assumes that FreeSurfer is installed and sourced properly.
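+
+    A hedged sketch of the typical call sequence (the subject name and
+    subjects_dir are placeholders; both functions live in ``mne.bem``)::
+
+        convert_flash_mris('sample', flash30=True, convert=True,
+                           subjects_dir='/path/to/subjects_dir')
+        make_flash_bem('sample', subjects_dir='/path/to/subjects_dir')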
+ + This function extracts the BEM surfaces (outer skull, inner skull, and + outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30 + degrees, in mgz format. + """ + from .viz.misc import plot_bem + + env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir) + tempdir = _TempDir() # fsl and Freesurfer create some random junk in CWD + run_subprocess_env = partial(run_subprocess, env=env, + cwd=tempdir) + + if flash_path is None: + flash_path = op.join(mri_dir, 'flash', 'parameter_maps') + else: + flash_path = op.abspath(flash_path) + subjects_dir = env['SUBJECTS_DIR'] + + logger.info('\nProcessing the flash MRI data to produce BEM meshes with ' + 'the following parameters:\n' + 'SUBJECTS_DIR = %s\n' + 'SUBJECT = %s\n' + 'Result dir = %s\n' % (subjects_dir, subject, + op.join(bem_dir, 'flash'))) + # Step 4 : Register with MPRAGE + logger.info("\n---- Registering flash 5 with MPRAGE ----") + flash5 = op.join(flash_path, 'flash5.mgz') + flash5_reg = op.join(flash_path, 'flash5_reg.mgz') + if not op.exists(flash5_reg): + if op.exists(op.join(mri_dir, 'T1.mgz')): + ref_volume = op.join(mri_dir, 'T1.mgz') + else: + ref_volume = op.join(mri_dir, 'T1') + cmd = ['fsl_rigid_register', '-r', ref_volume, '-i', flash5, + '-o', flash5_reg] + run_subprocess_env(cmd) + else: + logger.info("Registered flash 5 image is already there") + # Step 5a : Convert flash5 into COR + logger.info("\n---- Converting flash5 volume into COR format ----") + flash5_dir = op.join(mri_dir, 'flash5') + shutil.rmtree(flash5_dir, ignore_errors=True) + os.makedirs(flash5_dir) + cmd = ['mri_convert', flash5_reg, op.join(mri_dir, 'flash5')] + run_subprocess_env(cmd) + # Step 5b and c : Convert the mgz volumes into COR + convert_T1 = False + T1_dir = op.join(mri_dir, 'T1') + if not op.isdir(T1_dir) or len(glob.glob(op.join(T1_dir, 'COR*'))) == 0: + convert_T1 = True + convert_brain = False + brain_dir = op.join(mri_dir, 'brain') + if not op.isdir(brain_dir) or \ + len(glob.glob(op.join(brain_dir, 'COR*'))) == 0: + convert_brain = True + logger.info("\n---- Converting T1 volume into COR format ----") + if convert_T1: + T1_fname = op.join(mri_dir, 'T1.mgz') + if not op.isfile(T1_fname): + raise RuntimeError("Both T1 mgz and T1 COR volumes missing.") + os.makedirs(T1_dir) + cmd = ['mri_convert', T1_fname, T1_dir] + run_subprocess_env(cmd) + else: + logger.info("T1 volume is already in COR format") + logger.info("\n---- Converting brain volume into COR format ----") + if convert_brain: + brain_fname = op.join(mri_dir, 'brain.mgz') + if not op.isfile(brain_fname): + raise RuntimeError("Both brain mgz and brain COR volumes missing.") + os.makedirs(brain_dir) + cmd = ['mri_convert', brain_fname, brain_dir] + run_subprocess_env(cmd) + else: + logger.info("Brain volume is already in COR format") + # Finally ready to go + logger.info("\n---- Creating the BEM surfaces ----") + cmd = ['mri_make_bem_surfaces', subject] + run_subprocess_env(cmd) + del tempdir # ran our last subprocess; clean up directory + + logger.info("\n---- Converting the tri files into surf files ----") + flash_bem_dir = op.join(bem_dir, 'flash') + if not op.exists(flash_bem_dir): + os.makedirs(flash_bem_dir) + surfs = ['inner_skull', 'outer_skull', 'outer_skin'] + for surf in surfs: + out_fname = op.join(flash_bem_dir, surf + '.tri') + shutil.move(op.join(bem_dir, surf + '.tri'), out_fname) + nodes, tris = read_tri(out_fname, swap=True) + # Do not write volume info here because the tris are already in + # standard Freesurfer coords + 
write_surface(op.splitext(out_fname)[0] + '.surf', nodes, tris, + overwrite=True) + + # Cleanup section + logger.info("\n---- Cleaning up ----") + os.remove(op.join(bem_dir, 'inner_skull_tmp.tri')) + # os.chdir(mri_dir) + if convert_T1: + shutil.rmtree(T1_dir) + logger.info("Deleted the T1 COR volume") + if convert_brain: + shutil.rmtree(brain_dir) + logger.info("Deleted the brain COR volume") + shutil.rmtree(flash5_dir) + logger.info("Deleted the flash5 COR volume") + # Create symbolic links to the .surf files in the bem folder + logger.info("\n---- Creating symbolic links ----") + # os.chdir(bem_dir) + for surf in surfs: + surf = op.join(bem_dir, surf + '.surf') + if not overwrite and op.exists(surf): + skip_symlink = True + else: + if op.exists(surf): + os.remove(surf) + _symlink(op.join(flash_bem_dir, op.basename(surf)), surf, copy) + skip_symlink = False + if skip_symlink: + logger.info("Unable to create all symbolic links to .surf files " + "in bem folder. Use --overwrite option to recreate them.") + dest = op.join(bem_dir, 'flash') + else: + logger.info("Symbolic links to .surf files created in bem folder") + dest = bem_dir + logger.info("\nThank you for waiting.\nThe BEM triangulations for this " + "subject are now available at:\n%s.\nWe hope the BEM meshes " + "created will facilitate your MEG and EEG data analyses." + % dest) + # Show computed BEM surfaces + if show: + plot_bem(subject=subject, subjects_dir=subjects_dir, + orientation='coronal', slices=None, show=True) + + +def _check_bem_size(surfs): + """Check bem surface sizes.""" + if len(surfs) > 1 and surfs[0]['np'] > 10000: + warn('The bem surfaces have %s data points. 5120 (ico grade=4) ' + 'should be enough. Dense 3-layer bems may not save properly.' % + surfs[0]['np']) + + +def _symlink(src, dest, copy=False): + """Create a relative symlink (or just copy).""" + if not copy: + src_link = op.relpath(src, op.dirname(dest)) + try: + os.symlink(src_link, dest) + except OSError: + warn('Could not create symbolic link %s. Check that your ' + 'partition handles symbolic links. The file will be copied ' + 'instead.' % dest) + copy = True + if copy: + shutil.copy(src, dest) + + +def _ensure_bem_surfaces(bem, extra_allow=(), name='bem'): + # by default only allow path-like and list, but handle None and + # ConductorModel properly if need be. Always return a ConductorModel + # even though it's incomplete (and might have is_sphere=True). 
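# The three call forms the branches below accept -- a hedged summary, not
# additional API (the file name and `surf_dict` are placeholders):
#
#     _ensure_bem_surfaces('sample-5120-bem.fif')       # path: surfaces read
#     _ensure_bem_surfaces([surf_dict])                 # list of surf dicts
#     _ensure_bem_surfaces(bem, extra_allow=(ConductorModel,))  # passthrough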
+ assert all(extra in (None, ConductorModel) for extra in extra_allow) + allowed = ('path-like', list) + extra_allow + _validate_type(bem, allowed, name) + if isinstance(bem, path_like): + # Load the surfaces + logger.info(f'Loading BEM surfaces from {str(bem)}...') + bem = read_bem_surfaces(bem) + bem = ConductorModel(is_sphere=False, surfs=bem) + elif isinstance(bem, list): + for ii, this_surf in enumerate(bem): + _validate_type(this_surf, dict, f'{name}[{ii}]') + if isinstance(bem, list): + bem = ConductorModel(is_sphere=False, surfs=bem) + # add surfaces in the spherical case + if isinstance(bem, ConductorModel) and bem['is_sphere']: + bem = bem.copy() + bem['surfs'] = [] + if len(bem['layers']) == 4: + for idx, id_ in enumerate(_sm_surf_dict.values()): + bem['surfs'].append(_complete_sphere_surf( + bem, idx, 4, complete=False)) + bem['surfs'][-1]['id'] = id_ + + return bem + + +def _check_file(fname, overwrite): + """Prevent overwrites.""" + if op.isfile(fname) and not overwrite: + raise IOError(f'File {fname} exists, use --overwrite to overwrite it') + + +@verbose +def make_scalp_surfaces(subject, subjects_dir=None, force=True, + overwrite=False, no_decimate=False, verbose=None): + """Create surfaces of the scalp and neck. + + The scalp surfaces are required for using the MNE coregistration GUI, and + allow for a visualization of the alignment between anatomy and channel + locations. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + force : bool + Force creation of the surface even if it has some topological defects. + Defaults to ``True``. + %(overwrite)s + no_decimate : bool + Disable the "medium" and "sparse" decimations. In this case, only + a "dense" surface will be generated. Defaults to ``False``, i.e., + create surfaces for all three types of decimations. + %(verbose)s + """ + this_env = deepcopy(os.environ) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + this_env['SUBJECTS_DIR'] = subjects_dir + this_env['SUBJECT'] = subject + this_env['subjdir'] = subjects_dir + '/' + subject + if 'FREESURFER_HOME' not in this_env: + raise RuntimeError('The FreeSurfer environment needs to be set up ' + 'for this script') + incomplete = 'warn' if force else 'raise' + subj_path = op.join(subjects_dir, subject) + if not op.exists(subj_path): + raise RuntimeError('%s does not exist. Please check your subject ' + 'directory path.' % subj_path) + + mri = 'T1.mgz' if op.exists(op.join(subj_path, 'mri', 'T1.mgz')) else 'T1' + + logger.info('1. Creating a dense scalp tessellation with mkheadsurf...') + + def check_seghead(surf_path=op.join(subj_path, 'surf')): + surf = None + for k in ['lh.seghead', 'lh.smseghead']: + this_surf = op.join(surf_path, k) + if op.exists(this_surf): + surf = this_surf + break + return surf + + my_seghead = check_seghead() + if my_seghead is None: + run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri], + env=this_env) + + surf = check_seghead() + if surf is None: + raise RuntimeError('mkheadsurf did not produce the standard output ' + 'file.') + + bem_dir = op.join(subjects_dir, subject, 'bem') + if not op.isdir(bem_dir): + os.mkdir(bem_dir) + fname_template = op.join(bem_dir, '%s-head-{}.fif' % subject) + dense_fname = fname_template.format('dense') + logger.info('2. Creating %s ...' % dense_fname) + _check_file(dense_fname, overwrite) + # Helpful message if we get a topology error + msg = '\n\nConsider using --force as an additional input parameter.' 
+ surf = _surfaces_to_bem( + [surf], [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], + incomplete=incomplete, extra=msg)[0] + write_bem_surfaces(dense_fname, surf, overwrite=overwrite) + levels = 'medium', 'sparse' + tris = [] if no_decimate else [30000, 2500] + if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true': + tris = [len(surf['tris'])] # don't actually decimate + for ii, (n_tri, level) in enumerate(zip(tris, levels), 3): + logger.info('%i. Creating %s tessellation...' % (ii, level)) + logger.info('%i.1 Decimating the dense tessellation...' % ii) + points, tris = decimate_surface(points=surf['rr'], + triangles=surf['tris'], + n_triangles=n_tri) + dec_fname = fname_template.format(level) + logger.info('%i.2 Creating %s' % (ii, dec_fname)) + _check_file(dec_fname, overwrite) + dec_surf = _surfaces_to_bem( + [dict(rr=points, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, + incomplete=incomplete, extra=msg) + write_bem_surfaces(dec_fname, dec_surf, overwrite=overwrite) diff --git a/python/libs/mne/channels/__init__.py b/python/libs/mne/channels/__init__.py new file mode 100644 index 0000000..e122edd --- /dev/null +++ b/python/libs/mne/channels/__init__.py @@ -0,0 +1,44 @@ +"""Module dedicated to manipulation of channels. + +Can be used for setting of sensor locations used for processing and plotting. +""" + +from ..defaults import HEAD_SIZE_DEFAULT +from .layout import (Layout, make_eeg_layout, make_grid_layout, read_layout, + find_layout, generate_2d_layout) +from .montage import (DigMontage, + get_builtin_montages, make_dig_montage, read_dig_dat, + read_dig_egi, read_dig_captrak, read_dig_fif, + read_dig_polhemus_isotrak, read_polhemus_fastscan, + compute_dev_head_t, make_standard_montage, + read_custom_montage, read_dig_hpts, read_dig_localite, + compute_native_head_t) +from .channels import (equalize_channels, rename_channels, fix_mag_coil_types, + read_ch_adjacency, _get_ch_type, find_ch_adjacency, + make_1020_channel_selections, combine_channels, + read_vectorview_selection, _SELECTIONS, _EEG_SELECTIONS, + _divide_to_regions) + +__all__ = [ + # Data Structures + 'DigMontage', 'Layout', + + # Factory Methods + 'make_dig_montage', 'make_eeg_layout', 'make_grid_layout', + 'make_standard_montage', + + # Readers + 'read_ch_adjacency', 'read_dig_captrak', 'read_dig_dat', + 'read_dig_egi', 'read_dig_fif', 'read_dig_localite', + 'read_dig_polhemus_isotrak', 'read_layout', + 'read_polhemus_fastscan', 'read_custom_montage', 'read_dig_hpts', + + # Helpers + 'rename_channels', 'make_1020_channel_selections', + '_get_ch_type', 'equalize_channels', 'find_ch_adjacency', 'find_layout', + 'fix_mag_coil_types', 'generate_2d_layout', 'get_builtin_montages', + 'combine_channels', 'read_vectorview_selection', + + # Other + 'compute_dev_head_t', 'compute_native_head_t', +] diff --git a/python/libs/mne/channels/_dig_montage_utils.py b/python/libs/mne/channels/_dig_montage_utils.py new file mode 100644 index 0000000..a60418e --- /dev/null +++ b/python/libs/mne/channels/_dig_montage_utils.py @@ -0,0 +1,100 @@ +# Authors: Alexandre Gramfort +# Denis Engemann +# Martin Luessi +# Eric Larson +# Marijn van Vliet +# Jona Sassenhagen +# Teon Brooks +# Christian Brodbeck +# Stefan Appelhoff +# Joan Massich +# +# License: Simplified BSD + +import xml.etree.ElementTree as ElementTree + +import numpy as np + + +from ..utils import _check_fname, Bunch, warn + + +def _read_dig_montage_egi( + fname, + _scaling, + _all_data_kwargs_are_none, +): + + if not _all_data_kwargs_are_none: + raise ValueError('hsp, hpi, 
elp, point_names, fif must all be ' + 'None if egi is not None') + _check_fname(fname, overwrite='read', must_exist=True) + + root = ElementTree.parse(fname).getroot() + ns = root.tag[root.tag.index('{'):root.tag.index('}') + 1] + sensors = root.find('%ssensorLayout/%ssensors' % (ns, ns)) + fids = dict() + dig_ch_pos = dict() + + fid_name_map = {'Nasion': 'nasion', + 'Right periauricular point': 'rpa', + 'Left periauricular point': 'lpa'} + + for s in sensors: + name, number, kind = s[0].text, int(s[1].text), int(s[2].text) + coordinates = np.array([float(s[3].text), float(s[4].text), + float(s[5].text)]) + + coordinates *= _scaling + + # EEG Channels + if kind == 0: + dig_ch_pos['EEG %03d' % number] = coordinates + # Reference + elif kind == 1: + dig_ch_pos['EEG %03d' % + (len(dig_ch_pos.keys()) + 1)] = coordinates + # Fiducials + elif kind == 2: + fid_name = fid_name_map[name] + fids[fid_name] = coordinates + # Unknown + else: + warn('Unknown sensor type %s detected. Skipping sensor...' + 'Proceed with caution!' % kind) + + return Bunch( + # EGI stuff + nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'], + ch_pos=dig_ch_pos, coord_frame='unknown', + ) + + +def _parse_brainvision_dig_montage(fname, scale): + FID_NAME_MAP = {'Nasion': 'nasion', 'RPA': 'rpa', 'LPA': 'lpa'} + + root = ElementTree.parse(fname).getroot() + sensors = root.find('CapTrakElectrodeList') + + fids, dig_ch_pos = dict(), dict() + + for s in sensors: + name = s.find('Name').text + + is_fid = name in FID_NAME_MAP + coordinates = scale * np.array([float(s.find('X').text), + float(s.find('Y').text), + float(s.find('Z').text)]) + + # Fiducials + if is_fid: + fids[FID_NAME_MAP[name]] = coordinates + # EEG Channels + else: + dig_ch_pos[name] = coordinates + + return dict( + # BVCT stuff + nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'], + ch_pos=dig_ch_pos, coord_frame='unknown' + ) diff --git a/python/libs/mne/channels/_standard_montage_utils.py b/python/libs/mne/channels/_standard_montage_utils.py new file mode 100644 index 0000000..6683f39 --- /dev/null +++ b/python/libs/mne/channels/_standard_montage_utils.py @@ -0,0 +1,393 @@ +# Authors: Joan Massich +# Alexandre Gramfort +# +# License: BSD-3-Clause +from collections import OrderedDict +import csv + +import os.path as op +import numpy as np + +from functools import partial +import xml.etree.ElementTree as ElementTree + +from .montage import make_dig_montage +from .._freesurfer import get_mni_fiducials +from ..transforms import _sph_to_cart +from ..utils import warn, _pl +from . 
import __file__ as _CHANNELS_INIT_FILE + +MONTAGE_PATH = op.join(op.dirname(_CHANNELS_INIT_FILE), 'data', 'montages') + +_str = 'U100' + + +# In standard_1020, T9=LPA, T10=RPA, Nasion is the same as Iz with a +# sign-flipped Y value + +def _egi_256(head_size): + fname = op.join(MONTAGE_PATH, 'EGI_256.csd') + montage = _read_csd(fname, head_size) + ch_pos = montage._get_ch_pos() + + # For this cap, the Nasion is the frontmost electrode, + # LPA/RPA we approximate by putting 75% of the way (toward the front) + # between the two electrodes that are halfway down the ear holes + nasion = ch_pos['E31'] + lpa = 0.75 * ch_pos['E67'] + 0.25 * ch_pos['E94'] + rpa = 0.75 * ch_pos['E219'] + 0.25 * ch_pos['E190'] + + fids_montage = make_dig_montage( + coord_frame='unknown', nasion=nasion, lpa=lpa, rpa=rpa, + ) + + montage += fids_montage # add fiducials to montage + + return montage + + +def _easycap(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + montage = _read_theta_phi_in_degrees(fname, head_size, add_fiducials=True) + return montage + + +def _hydrocel(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + return _read_sfp(fname, head_size) + + +def _str_names(ch_names): + return [str(ch_name) for ch_name in ch_names] + + +def _safe_np_loadtxt(fname, **kwargs): + out = np.genfromtxt(fname, **kwargs) + ch_names = _str_names(out['f0']) + others = tuple(out['f%d' % ii] for ii in range(1, len(out.dtype.fields))) + return (ch_names,) + others + + +def _biosemi(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + fid_names = ('Nz', 'LPA', 'RPA') + return _read_theta_phi_in_degrees(fname, head_size, fid_names) + + +def _mgh_or_standard(basename, head_size, coord_frame='unknown'): + fid_names = ('Nz', 'LPA', 'RPA') + fname = op.join(MONTAGE_PATH, basename) + + ch_names_, pos = [], [] + with open(fname) as fid: + # Ignore units as we will scale later using the norms anyway + for line in fid: + if 'Positions\n' in line: + break + pos = [] + for line in fid: + if 'Labels\n' in line: + break + pos.append(list(map(float, line.split()))) + for line in fid: + if not line or not set(line) - {' '}: + break + ch_names_.append(line.strip(' ').strip('\n')) + + pos = np.array(pos) / 1000. + ch_pos = _check_dupes_odict(ch_names_, pos) + nasion, lpa, rpa = [ch_pos.pop(n) for n in fid_names] + if head_size is None: + scale = 1. 
+ else: + scale = head_size / np.median(np.linalg.norm(pos, axis=1)) + for value in ch_pos.values(): + value *= scale + # if we are in MRI/MNI coordinates, we need to replace nasion, LPA, and RPA + # with those of fsaverage for ``trans='fsaverage'`` to work + if coord_frame == 'mri': + lpa, nasion, rpa = [ + x['r'].copy() for x in get_mni_fiducials('fsaverage')] + nasion *= scale + lpa *= scale + rpa *= scale + + return make_dig_montage(ch_pos=ch_pos, coord_frame=coord_frame, + nasion=nasion, lpa=lpa, rpa=rpa) + + +standard_montage_look_up_table = { + 'EGI_256': _egi_256, + + 'easycap-M1': partial(_easycap, basename='easycap-M1.txt'), + 'easycap-M10': partial(_easycap, basename='easycap-M10.txt'), + + 'GSN-HydroCel-128': partial(_hydrocel, basename='GSN-HydroCel-128.sfp'), + 'GSN-HydroCel-129': partial(_hydrocel, basename='GSN-HydroCel-129.sfp'), + 'GSN-HydroCel-256': partial(_hydrocel, basename='GSN-HydroCel-256.sfp'), + 'GSN-HydroCel-257': partial(_hydrocel, basename='GSN-HydroCel-257.sfp'), + 'GSN-HydroCel-32': partial(_hydrocel, basename='GSN-HydroCel-32.sfp'), + 'GSN-HydroCel-64_1.0': partial(_hydrocel, + basename='GSN-HydroCel-64_1.0.sfp'), + 'GSN-HydroCel-65_1.0': partial(_hydrocel, + basename='GSN-HydroCel-65_1.0.sfp'), + + 'biosemi128': partial(_biosemi, basename='biosemi128.txt'), + 'biosemi16': partial(_biosemi, basename='biosemi16.txt'), + 'biosemi160': partial(_biosemi, basename='biosemi160.txt'), + 'biosemi256': partial(_biosemi, basename='biosemi256.txt'), + 'biosemi32': partial(_biosemi, basename='biosemi32.txt'), + 'biosemi64': partial(_biosemi, basename='biosemi64.txt'), + + 'mgh60': partial(_mgh_or_standard, basename='mgh60.elc', + coord_frame='mri'), + 'mgh70': partial(_mgh_or_standard, basename='mgh70.elc', + coord_frame='mri'), + 'standard_1005': partial(_mgh_or_standard, + basename='standard_1005.elc', coord_frame='mri'), + 'standard_1020': partial(_mgh_or_standard, + basename='standard_1020.elc', coord_frame='mri'), + 'standard_alphabetic': partial(_mgh_or_standard, + basename='standard_alphabetic.elc', + coord_frame='mri'), + 'standard_postfixed': partial(_mgh_or_standard, + basename='standard_postfixed.elc', + coord_frame='mri'), + 'standard_prefixed': partial(_mgh_or_standard, + basename='standard_prefixed.elc', + coord_frame='mri'), + 'standard_primed': partial(_mgh_or_standard, + basename='standard_primed.elc', + coord_frame='mri'), + 'artinis-octamon': partial(_mgh_or_standard, coord_frame='mri', + basename='artinis-octamon.elc'), + 'artinis-brite23': partial(_mgh_or_standard, coord_frame='mri', + basename='artinis-brite23.elc'), +} + + +def _read_sfp(fname, head_size): + """Read .sfp BESA/EGI files.""" + # fname has been already checked + fid_names = ('FidNz', 'FidT9', 'FidT10') + options = dict(dtype=(_str, 'f4', 'f4', 'f4')) + ch_names, xs, ys, zs = _safe_np_loadtxt(fname, **options) + # deal with "headshape" + mask = np.array([ch_name == 'headshape' for ch_name in ch_names], bool) + hsp = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) + mask = ~mask + pos = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) + ch_names = [ch_name for ch_name, m in zip(ch_names, mask) if m] + ch_pos = _check_dupes_odict(ch_names, pos) + del xs, ys, zs, ch_names + # no one grants that fid names are there. 
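+    # Pop with a default of None so that missing fiducials are tolerated;
+    # make_dig_montage() accepts nasion/lpa/rpa being None.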
+ nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + + if head_size is not None: + scale = head_size / np.median(np.linalg.norm(pos, axis=-1)) + for value in ch_pos.values(): + value *= scale + nasion = nasion * scale if nasion is not None else None + lpa = lpa * scale if lpa is not None else None + rpa = rpa * scale if rpa is not None else None + + return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', + nasion=nasion, rpa=rpa, lpa=lpa, hsp=hsp) + + +def _read_csd(fname, head_size): + # Label, Theta, Phi, Radius, X, Y, Z, off sphere surface + options = dict(comments='//', + dtype=(_str, 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')) + ch_names, _, _, _, xs, ys, zs, _ = _safe_np_loadtxt(fname, **options) + pos = np.stack([xs, ys, zs], axis=-1) + + if head_size is not None: + pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) + + return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos)) + + +def _check_dupes_odict(ch_names, pos): + """Warn if there are duplicates, then turn to ordered dict.""" + ch_names = list(ch_names) + dups = OrderedDict((ch_name, ch_names.count(ch_name)) + for ch_name in ch_names) + dups = OrderedDict((ch_name, count) for ch_name, count in dups.items() + if count > 1) + n = len(dups) + if n: + dups = ', '.join( + f'{ch_name} ({count})' for ch_name, count in dups.items()) + warn(f'Duplicate channel position{_pl(n)} found, the last will be ' + f'used for {dups}') + return OrderedDict(zip(ch_names, pos)) + + +def _read_elc(fname, head_size): + """Read .elc files. + + Parameters + ---------- + fname : str + File extension is expected to be '.elc'. + head_size : float | None + The size of the head in [m]. If none, returns the values read from the + file with no modification. + + Returns + ------- + montage : instance of DigMontage + The montage in [m]. + """ + fid_names = ('Nz', 'LPA', 'RPA') + + ch_names_, pos = [], [] + with open(fname) as fid: + # _read_elc does require to detect the units. 
(see _mgh_or_standard) + for line in fid: + if 'UnitPosition' in line: + units = line.split()[1] + scale = dict(m=1., mm=1e-3)[units] + break + else: + raise RuntimeError('Could not detect units in file %s' % fname) + for line in fid: + if 'Positions\n' in line: + break + pos = [] + for line in fid: + if 'Labels\n' in line: + break + pos.append(list(map(float, line.split()))) + for line in fid: + if not line or not set(line) - {' '}: + break + ch_names_.append(line.strip(' ').strip('\n')) + + pos = np.array(pos) * scale + if head_size is not None: + pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) + + ch_pos = _check_dupes_odict(ch_names_, pos) + nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + + return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', + nasion=nasion, lpa=lpa, rpa=rpa) + + +def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, + add_fiducials=False): + ch_names, theta, phi = _safe_np_loadtxt(fname, skip_header=1, + dtype=(_str, 'i4', 'i4')) + if add_fiducials: + # Add fiducials based on 10/20 spherical coordinate definitions + # http://chgd.umich.edu/wp-content/uploads/2014/06/ + # 10-20_system_positioning.pdf + # extrapolated from other sensor coordinates in the Easycap layouts + # https://www.easycap.de/wp-content/uploads/2018/02/ + # Easycap-Equidistant-Layouts.pdf + assert fid_names is None + fid_names = ['Nasion', 'LPA', 'RPA'] + ch_names.extend(fid_names) + theta = np.append(theta, [115, -115, 115]) + phi = np.append(phi, [90, 0, 0]) + + radii = np.full(len(phi), head_size) + pos = _sph_to_cart(np.array([radii, np.deg2rad(phi), np.deg2rad(theta)]).T) + ch_pos = _check_dupes_odict(ch_names, pos) + + nasion, lpa, rpa = None, None, None + if fid_names is not None: + nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + + return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', + nasion=nasion, lpa=lpa, rpa=rpa) + + +def _read_elp_besa(fname, head_size): + # This .elp is not the same as polhemus elp. see _read_isotrak_elp_points + dtype = np.dtype('S8, S8, f8, f8, f8') + data = np.loadtxt(fname, dtype=dtype, skiprows=1) + + ch_names = data['f1'].astype(str).tolist() + az = data['f2'] + horiz = data['f3'] + radius = np.abs(az / 180.) + az = np.deg2rad(np.array([h if a >= 0. else 180 + h + for h, a in zip(horiz, az)])) + pol = radius * np.pi + rad = data['f4'] / 100 + pos = _sph_to_cart(np.array([rad, az, pol]).T) + + if head_size is not None: + pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) + + ch_pos = _check_dupes_odict(ch_names, pos) + + fid_names = ('Nz', 'LPA', 'RPA') + # No one grants that the fid names actually exist. 
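+    # As in _read_sfp() above, fall back to None for any fiducial name that
+    # is absent instead of raising a KeyError.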
+    nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names]
+
+    return make_dig_montage(ch_pos=ch_pos, nasion=nasion, lpa=lpa, rpa=rpa)
+
+
+def _read_brainvision(fname, head_size):
+    # 'BrainVision Electrodes File' format
+    # Based on BrainVision Analyzer coordinate system: Defined between
+    # standard electrode positions: X-axis from T7 to T8, Y-axis from Oz to
+    # Fpz, Z-axis orthogonal from XY-plane through Cz, fit to a sphere if
+    # idealized (when radius=1), specified in millimeters
+    root = ElementTree.parse(fname).getroot()
+    ch_names = [s.text for s in root.findall("./Electrode/Name")]
+    theta = [float(s.text) for s in root.findall("./Electrode/Theta")]
+    pol = np.deg2rad(np.array(theta))
+    phi = [float(s.text) for s in root.findall("./Electrode/Phi")]
+    az = np.deg2rad(np.array(phi))
+    rad = [float(s.text) for s in root.findall("./Electrode/Radius")]
+    rad = np.array(rad)  # specified in mm
+    pos = _sph_to_cart(np.array([rad, az, pol]).T)
+
+    if head_size is not None:
+        pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
+
+    return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos))
+
+
+def _read_xyz(fname):
+    """Import EEG channel locations from CSV, TSV, or XYZ files.
+
+    CSV and TSV files should have 4 columns containing
+    ch_name, x, y, and z. Each row represents one channel.
+    XYZ files should have 5 columns containing
+    count, x, y, z, and ch_name. Each row represents one channel.
+    CSV files should be separated by commas, TSV and XYZ files should be
+    separated by tabs.
+
+    Parameters
+    ----------
+    fname : str
+        Name of the file to read channel locations from.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The montage.
+    """
+    ch_names = []
+    pos = []
+    file_format = op.splitext(fname)[1].lower()
+    with open(fname, "r") as f:
+        if file_format != ".xyz":
+            f.readline()  # skip header
+        delimiter = "," if file_format == ".csv" else "\t"
+        for row in csv.reader(f, delimiter=delimiter):
+            if file_format == ".xyz":
+                _, x, y, z, ch_name, *_ = row
+                ch_name = ch_name.strip()  # deals with variable tab size
+            else:
+                ch_name, x, y, z, *_ = row
+            ch_names.append(ch_name)
+            pos.append((x, y, z))
+    d = _check_dupes_odict(ch_names, np.array(pos, dtype=float))
+    return make_dig_montage(ch_pos=d)
diff --git a/python/libs/mne/channels/channels.py b/python/libs/mne/channels/channels.py
new file mode 100644
index 0000000..c411184
--- /dev/null
+++ b/python/libs/mne/channels/channels.py
@@ -0,0 +1,1877 @@
+# Authors: Alexandre Gramfort
+#          Matti Hämäläinen
+#          Martin Luessi
+#          Denis Engemann
+#          Andrew Dykstra
+#          Teon Brooks
+#          Daniel McCloy
+#
+# License: BSD-3-Clause
+
+
+import os
+import os.path as op
+import sys
+from collections import OrderedDict
+from copy import deepcopy
+from functools import partial
+
+import numpy as np
+
+from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
+from ..utils import (verbose, logger, warn,
+                     _check_preload, _validate_type, fill_doc, _check_option,
+                     _get_stim_channel, _check_fname, _check_dict_keys)
+from ..io.constants import FIFF
+from ..io.meas_info import (anonymize_info, Info, MontageMixin, create_info,
+                            _rename_comps)
+from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
+                       _check_excludes_includes, _contains_ch_type,
+                       channel_indices_by_type, pick_channels, _picks_to_idx,
+                       get_channel_type_constants,
+                       _pick_data_channels)
+from ..io.tag import _rename_list
+from ..io.write import DATE_NONE
+from ..io.proj import setup_proj
+
+
+def _get_meg_system(info):
+    """Educated guess for the 
helmet type based on channels."""
+    have_helmet = True
+    for ch in info['chs']:
+        if ch['kind'] == FIFF.FIFFV_MEG_CH:
+            # Only take first 16 bits, as higher bits store CTF grad comp order
+            coil_type = ch['coil_type'] & 0xFFFF
+            nmag = np.sum(
+                [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
+            if coil_type == FIFF.FIFFV_COIL_NM_122:
+                system = '122m'
+                break
+            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
+                system = '306m'
+                break
+            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
+                  coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
+                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
+                system = 'CTF_275'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
+                system = 'KIT'
+                # Our helmet does not match very well, so let's just create it
+                have_helmet = False
+                break
+            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
+                system = 'BabySQUID'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
+                system = 'ARTEMIS123'
+                have_helmet = False
+                break
+    else:
+        system = 'unknown'
+        have_helmet = False
+    return system, have_helmet
+
+
+def _get_ch_type(inst, ch_type, allow_ref_meg=False):
+    """Choose a single channel type (usually for plotting).
+
+    Usually used in plotting to plot a single datatype, e.g. look for mags,
+    then grads, then ... to plot.
+    """
+    if ch_type is None:
+        allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
+                         'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude',
+                         'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr',
+                         'ecog', 'seeg', 'dbs']
+        allowed_types += ['ref_meg'] if allow_ref_meg else []
+        for type_ in allowed_types:
+            if isinstance(inst, Info):
+                if _contains_ch_type(inst, type_):
+                    ch_type = type_
+                    break
+            elif type_ in inst:
+                ch_type = type_
+                break
+        else:
+            raise RuntimeError('No plottable channel types found')
+    return ch_type
+
+
+@verbose
+def equalize_channels(instances, copy=True, verbose=None):
+    """Equalize channel picks and ordering across multiple MNE-Python objects.
+
+    First, all channels that are not common to each object are dropped. Then,
+    using the first object in the list as a template, the channels of each
+    object are re-ordered to match the template. The end result is that all
+    given objects define the same channels, in the same order.
+
+    Parameters
+    ----------
+    instances : list
+        A list of MNE-Python objects to equalize the channels for. Objects can
+        be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
+        CrossSpectralDensity or Info.
+    copy : bool
+        When dropping and/or re-ordering channels, an object will be copied
+        when this parameter is set to ``True`` (the default). When set to
+        ``False``, the dropping and re-ordering of channels happens in-place.
+
+        .. versionadded:: 0.20.0
+    %(verbose)s
+
+    Returns
+    -------
+    equalized_instances : list
+        A list of MNE-Python objects that have the same channels defined in the
+        same order.
+
+    Notes
+    -----
+    This function operates in-place only when ``copy=False``.
+    """
+    from ..cov import Covariance
+    from ..io.base import BaseRaw
+    from ..io.meas_info import Info
+    from ..epochs import BaseEpochs
+    from ..evoked import Evoked
+    from ..forward import Forward
+    from ..time_frequency import _BaseTFR, CrossSpectralDensity
+
+    # Instances need to have a `ch_names` attribute and a `pick_channels`
+    # method that supports `ordered=True`. 
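+    # Illustration (hypothetical channel names): if instances[0] has
+    # ['EEG 001', 'EEG 002', 'EEG 003'] and instances[1] has
+    # ['EEG 003', 'EEG 001'], both outputs contain ['EEG 001', 'EEG 003']:
+    # the intersection, ordered as in the first (template) instance.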
+ allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward, + Covariance, CrossSpectralDensity, Info) + allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, " + "CrossSpectralDensity or Info") + for inst in instances: + _validate_type(inst, allowed_types, "Instances to be modified", + allowed_types_str) + + chan_template = instances[0].ch_names + logger.info('Identifying common channels ...') + channels = [set(inst.ch_names) for inst in instances] + common_channels = set(chan_template).intersection(*channels) + all_channels = set(chan_template).union(*channels) + dropped = list(set(all_channels - common_channels)) + + # Preserve the order of chan_template + order = np.argsort([chan_template.index(ch) for ch in common_channels]) + common_channels = np.array(list(common_channels))[order].tolist() + + # Update all instances to match the common_channels list + reordered = False + equalized_instances = [] + for inst in instances: + # Only perform picking when needed + if inst.ch_names != common_channels: + if isinstance(inst, Info): + sel = pick_channels(inst.ch_names, common_channels, exclude=[], + ordered=True) + inst = pick_info(inst, sel, copy=copy, verbose=False) + else: + if copy: + inst = inst.copy() + inst.pick_channels(common_channels, ordered=True) + if len(inst.ch_names) == len(common_channels): + reordered = True + equalized_instances.append(inst) + + if dropped: + logger.info('Dropped the following channels:\n%s' % dropped) + elif reordered: + logger.info('Channels have been re-ordered.') + + return equalized_instances + + +channel_type_constants = get_channel_type_constants() +_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in + channel_type_constants.items()} +_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in + channel_type_constants.items()} +_unit2human = {FIFF.FIFF_UNIT_V: 'V', + FIFF.FIFF_UNIT_T: 'T', + FIFF.FIFF_UNIT_T_M: 'T/m', + FIFF.FIFF_UNIT_MOL: 'M', + FIFF.FIFF_UNIT_NONE: 'NA', + FIFF.FIFF_UNIT_CEL: 'C'} + + +def _check_set(ch, projs, ch_type): + """Ensure type change is compatible with projectors.""" + new_kind = _human2fiff[ch_type] + if ch['kind'] != new_kind: + for proj in projs: + if ch['ch_name'] in proj['data']['col_names']: + raise RuntimeError('Cannot change channel type for channel %s ' + 'in projector "%s"' + % (ch['ch_name'], proj['desc'])) + ch['kind'] = new_kind + + +class SetChannelsMixin(MontageMixin): + """Mixin class for Raw, Evoked, Epochs.""" + + @verbose + def set_eeg_reference(self, ref_channels='average', projection=False, + ch_type='auto', forward=None, verbose=None): + """Specify which reference to use for EEG data. + + Use this function to explicitly specify the desired reference for EEG. + This can be either an existing electrode or a new virtual channel. + This function will re-reference the data according to the desired + reference. + + Parameters + ---------- + %(ref_channels_set_eeg_reference)s + %(projection_set_eeg_reference)s + %(ch_type_set_eeg_reference)s + %(forward_set_eeg_reference)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with EEG channels re-referenced. If ``ref_channels='average'`` + and ``projection=True`` a projection will be added instead of + directly re-referencing the data. 
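+
+            For example (illustrative), ``inst.set_eeg_reference(['TP9',
+            'TP10'])`` (hypothetical mastoid channel names) re-references to
+            the mean of those two channels, while
+            ``inst.set_eeg_reference('average', projection=True)`` only adds
+            an average-reference projector that takes effect once applied.
+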
+ %(set_eeg_reference_see_also_notes)s + """ + from ..io.reference import set_eeg_reference + return set_eeg_reference(self, ref_channels=ref_channels, copy=False, + projection=projection, ch_type=ch_type, + forward=forward)[0] + + def _get_channel_positions(self, picks=None): + """Get channel locations from info. + + Parameters + ---------- + picks : str | list | slice | None + None gets good data indices. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + picks = _picks_to_idx(self.info, picks) + chs = self.info['chs'] + pos = np.array([chs[k]['loc'][:3] for k in picks]) + n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0) + if n_zero > 1: # XXX some systems have origin (0, 0, 0) + raise ValueError('Could not extract channel positions for ' + '{} channels'.format(n_zero)) + return pos + + def _set_channel_positions(self, pos, names): + """Update channel locations in info. + + Parameters + ---------- + pos : array-like | np.ndarray, shape (n_points, 3) + The channel positions to be set. + names : list of str + The names of the channels to be set. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + if len(pos) != len(names): + raise ValueError('Number of channel positions not equal to ' + 'the number of names given.') + pos = np.asarray(pos, dtype=np.float64) + if pos.shape[-1] != 3 or pos.ndim != 2: + msg = ('Channel positions must have the shape (n_points, 3) ' + 'not %s.' % (pos.shape,)) + raise ValueError(msg) + for name, p in zip(names, pos): + if name in self.ch_names: + idx = self.ch_names.index(name) + self.info['chs'][idx]['loc'][:3] = p + else: + msg = ('%s was not found in the info. Cannot be updated.' + % name) + raise ValueError(msg) + + @verbose + def set_channel_types(self, mapping, verbose=None): + """Define the sensor type of channels. + + Parameters + ---------- + mapping : dict + A dictionary mapping a channel to a sensor type (str), e.g., + ``{'EEG061': 'eog'}``. + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance (modified in place). + + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + The following sensor types are accepted: + + ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, + ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, + fnirs_fd_phase, fnirs_od + + .. versionadded:: 0.9.0 + """ + ch_names = self.info['ch_names'] + + # first check and assemble clean mappings of index and name + unit_changes = dict() + for ch_name, ch_type in mapping.items(): + if ch_name not in ch_names: + raise ValueError("This channel name (%s) doesn't exist in " + "info." % ch_name) + + c_ind = ch_names.index(ch_name) + if ch_type not in _human2fiff: + raise ValueError('This function cannot change to this ' + 'channel type: %s. Accepted channel types ' + 'are %s.' + % (ch_type, + ", ".join(sorted(_human2unit.keys())))) + # Set sensor type + _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type) + unit_old = self.info['chs'][c_ind]['unit'] + unit_new = _human2unit[ch_type] + if unit_old not in _unit2human: + raise ValueError("Channel '%s' has unknown unit (%s). Please " + "fix the measurement info of your data." 
+ % (ch_name, unit_old)) + if unit_old != _human2unit[ch_type]: + this_change = (_unit2human[unit_old], _unit2human[unit_new]) + if this_change not in unit_changes: + unit_changes[this_change] = list() + unit_changes[this_change].append(ch_name) + self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] + if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']: + coil_type = FIFF.FIFFV_COIL_EEG + elif ch_type == 'hbo': + coil_type = FIFF.FIFFV_COIL_FNIRS_HBO + elif ch_type == 'hbr': + coil_type = FIFF.FIFFV_COIL_FNIRS_HBR + elif ch_type == 'fnirs_cw_amplitude': + coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + elif ch_type == 'fnirs_fd_ac_amplitude': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + elif ch_type == 'fnirs_fd_phase': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE + elif ch_type == 'fnirs_od': + coil_type = FIFF.FIFFV_COIL_FNIRS_OD + else: + coil_type = FIFF.FIFFV_COIL_NONE + self.info['chs'][c_ind]['coil_type'] = coil_type + msg = "The unit for channel(s) {0} has changed from {1} to {2}." + for this_change, names in unit_changes.items(): + warn(msg.format(", ".join(sorted(names)), *this_change)) + return self + + @verbose + def rename_channels(self, mapping, allow_duplicates=False, verbose=None): + """Rename channels. + + Parameters + ---------- + %(mapping_rename_channels_duplicates)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance (modified in place). + + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + from ..io import BaseRaw + + ch_names_orig = list(self.info['ch_names']) + rename_channels(self.info, mapping, allow_duplicates) + + # Update self._orig_units for Raw + if isinstance(self, BaseRaw): + # whatever mapping was provided, now we can just use a dict + mapping = dict(zip(ch_names_orig, self.info['ch_names'])) + if self._orig_units is not None: + for old_name, new_name in mapping.items(): + if old_name != new_name: + self._orig_units[new_name] = self._orig_units[old_name] + del self._orig_units[old_name] + ch_names = self.annotations.ch_names + for ci, ch in enumerate(ch_names): + ch_names[ci] = tuple(mapping.get(name, name) for name in ch) + + return self + + @verbose + def plot_sensors(self, kind='topomap', ch_type=None, title=None, + show_names=False, ch_groups=None, to_sphere=True, + axes=None, block=False, show=True, sphere=None, + verbose=None): + """Plot sensor positions. + + Parameters + ---------- + kind : str + Whether to plot the sensors as 3d, topomap or as an interactive + sensor selection dialog. Available options 'topomap', '3d', + 'select'. If 'select', a set of channels can be selected + interactively by using lasso selector or clicking while holding + control key. The selected channels are returned along with the + figure instance. Defaults to 'topomap'. + ch_type : None | str + The channel type to plot. Available options 'mag', 'grad', 'eeg', + 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag, + grad, eeg, seeg, dbs, and ecog channels are plotted. If + None (default), then channels are chosen in the order given above. + title : str | None + Title for the figure. If None (default), equals to ``'Sensor + positions (%%s)' %% ch_type``. + show_names : bool | array of str + Whether to display all channel names. If an array, only the channel + names in the array are shown. Defaults to False. + ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None + Channel groups for coloring the sensors. 
If None (default), default + coloring scheme is used. If 'position', the sensors are divided + into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If + array, the channels are divided by picks given in the array. + + .. versionadded:: 0.13.0 + to_sphere : bool + Whether to project the 3d locations to a sphere. When False, the + sensor array appears similar as to looking downwards straight above + the subject's head. Has no effect when kind='3d'. Defaults to True. + + .. versionadded:: 0.14.0 + axes : instance of Axes | instance of Axes3D | None + Axes to draw the sensors to. If ``kind='3d'``, axes must be an + instance of Axes3D. If None (default), a new axes will be created. + + .. versionadded:: 0.13.0 + block : bool + Whether to halt program execution until the figure is closed. + Defaults to False. + + .. versionadded:: 0.13.0 + show : bool + Show figure if True. Defaults to True. + %(sphere_topomap_auto)s + %(verbose)s + + Returns + ------- + fig : instance of Figure + Figure containing the sensor topography. + selection : list + A list of selected channels. Only returned if ``kind=='select'``. + + See Also + -------- + mne.viz.plot_layout + + Notes + ----- + This function plots the sensor locations from the info structure using + matplotlib. For drawing the sensors using PyVista see + :func:`mne.viz.plot_alignment`. + + .. versionadded:: 0.12.0 + """ + from ..viz.utils import plot_sensors + return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title, + show_names=show_names, ch_groups=ch_groups, + to_sphere=to_sphere, axes=axes, block=block, + show=show, sphere=sphere, verbose=verbose) + + @verbose + def anonymize(self, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + Parameters + ---------- + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. + + Notes + ----- + %(anonymize_info_notes)s + + .. versionadded:: 0.13.0 + """ + anonymize_info(self.info, daysback=daysback, keep_his=keep_his, + verbose=verbose) + self.set_meas_date(self.info['meas_date']) # unify annot update + return self + + def set_meas_date(self, meas_date): + """Set the measurement start date. + + Parameters + ---------- + meas_date : datetime | float | tuple | None + The new measurement date. + If datetime object, it must be timezone-aware and in UTC. + A tuple of (seconds, microseconds) or float (alias for + ``(meas_date, 0)``) can also be passed and a datetime + object will be automatically created. If None, will remove + the time reference. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified raw instance. Operates in place. + + See Also + -------- + mne.io.Raw.anonymize + + Notes + ----- + If you want to remove all time references in the file, call + :func:`mne.io.anonymize_info(inst.info) ` + after calling ``inst.set_meas_date(None)``. + + .. 
versionadded:: 0.20 + """ + from ..annotations import _handle_meas_date + meas_date = _handle_meas_date(meas_date) + with self.info._unlock(): + self.info['meas_date'] = meas_date + + # clear file_id and meas_id if needed + if meas_date is None: + for key in ('file_id', 'meas_id'): + value = self.info.get(key) + if value is not None: + assert 'msecs' not in value + value['secs'] = DATE_NONE[0] + value['usecs'] = DATE_NONE[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value['machid'].copy() + _tmp[:] = 0 + value['machid'] = _tmp + + if hasattr(self, 'annotations'): + self.annotations._orig_time = meas_date + return self + + +class UpdateChannelsMixin(object): + """Mixin class for Raw, Evoked, Epochs, AverageTFR.""" + + @verbose + def pick_types(self, meg=False, eeg=False, stim=False, eog=False, + ecg=False, emg=False, ref_meg='auto', misc=False, + resp=False, chpi=False, exci=False, ias=False, syst=False, + seeg=False, dipole=False, gof=False, bio=False, + ecog=False, fnirs=False, csd=False, dbs=False, include=(), + exclude='bads', selection=None, verbose=None): + """Pick some channels by type and names. + + Parameters + ---------- + meg : bool | str + If True include MEG channels. If string it can be 'mag', 'grad', + 'planar1' or 'planar2' to select only magnetometers, all + gradiometers, or a specific type of gradiometer. + eeg : bool + If True include EEG channels. + stim : bool + If True include stimulus channels. + eog : bool + If True include EOG channels. + ecg : bool + If True include ECG channels. + emg : bool + If True include EMG channels. + ref_meg : bool | str + If True include CTF / 4D reference channels. If 'auto', reference + channels are included if compensations are present and ``meg`` is + not False. Can also be the string options for the ``meg`` + parameter. + misc : bool + If True include miscellaneous analog channels. + resp : bool + If ``True`` include respiratory channels. + chpi : bool + If True include continuous HPI coil channels. + exci : bool + Flux excitation channel used to be a stimulus channel. + ias : bool + Internal Active Shielding data (maybe on Triux only). + syst : bool + System status channel information (on Triux systems only). + seeg : bool + Stereotactic EEG channels. + dipole : bool + Dipole time course channels. + gof : bool + Dipole goodness of fit channels. + bio : bool + Bio channels. + ecog : bool + Electrocorticography channels. + fnirs : bool | str + Functional near-infrared spectroscopy channels. If True include all + fNIRS channels. If False (default) include none. If string it can + be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to + include channels measuring deoxyhemoglobin). + csd : bool + EEG-CSD channels. + dbs : bool + Deep brain stimulation channels. + include : list of str + List of additional channels to include. If empty do not include + any. + exclude : list of str | str + List of channels to exclude. If 'bads' (default), exclude channels + in ``info['bads']``. + selection : list of str + Restrict sensor channels (MEG, EEG) to this list of channel names. + %(verbose)s + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + pick_channels + + Notes + ----- + .. 
versionadded:: 0.9.0
+        """
+        idx = pick_types(
+            self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
+            ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
+            ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
+            ecog=ecog, fnirs=fnirs, csd=csd, dbs=dbs, include=include,
+            exclude=exclude, selection=selection)
+
+        self._pick_drop_channels(idx)
+
+        # remove dropped channel types from reject and flat
+        if getattr(self, 'reject', None) is not None:
+            # use list(self.reject) to avoid RuntimeError for changing
+            # dictionary size during iteration
+            for ch_type in list(self.reject):
+                if ch_type not in self:
+                    del self.reject[ch_type]
+
+        if getattr(self, 'flat', None) is not None:
+            for ch_type in list(self.flat):
+                if ch_type not in self:
+                    del self.flat[ch_type]
+
+        return self
+
+    def pick_channels(self, ch_names, ordered=False):
+        """Pick some channels.
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to select.
+        ordered : bool
+            If True (default False), ensure that the order of the channels in
+            the modified instance matches the order of ``ch_names``.
+
+            .. versionadded:: 0.20.0
+
+        Returns
+        -------
+        inst : instance of Raw, Epochs, or Evoked
+            The modified instance.
+
+        See Also
+        --------
+        drop_channels
+        pick_types
+        reorder_channels
+
+        Notes
+        -----
+        The channel names given are assumed to be a set, i.e. the order
+        does not matter. The original order of the channels is preserved.
+        You can use ``reorder_channels`` to set channel order if necessary.
+
+        .. versionadded:: 0.9.0
+        """
+        picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
+        return self._pick_drop_channels(picks)
+
+    @verbose
+    def pick(self, picks, exclude=(), *, verbose=None):
+        """Pick a subset of channels.
+
+        Parameters
+        ----------
+        %(picks_all)s
+        exclude : list | str
+            Set of channels to exclude, only used when picking based on
+            types (e.g., exclude="bads" when picks="meg").
+        %(verbose)s
+
+            .. versionadded:: 0.24.0
+
+        Returns
+        -------
+        inst : instance of Raw, Epochs, or Evoked
+            The modified instance.
+        """
+        picks = _picks_to_idx(self.info, picks, 'all', exclude,
+                              allow_empty=False)
+        return self._pick_drop_channels(picks)
+
+    def reorder_channels(self, ch_names):
+        """Reorder channels.
+
+        Parameters
+        ----------
+        ch_names : list
+            The desired channel order.
+
+        Returns
+        -------
+        inst : instance of Raw, Epochs, or Evoked
+            The modified instance.
+
+        See Also
+        --------
+        drop_channels
+        pick_types
+        pick_channels
+
+        Notes
+        -----
+        Channel names must be unique. Channels that are not in ``ch_names``
+        are dropped.
+
+        .. versionadded:: 0.16.0
+        """
+        _check_excludes_includes(ch_names)
+        idx = list()
+        for ch_name in ch_names:
+            ii = self.ch_names.index(ch_name)
+            if ii in idx:
+                raise ValueError('Channel name repeated: %s' % (ch_name,))
+            idx.append(ii)
+        return self._pick_drop_channels(idx)
+
+    def drop_channels(self, ch_names):
+        """Drop channel(s).
+
+        Parameters
+        ----------
+        ch_names : iterable or str
+            Iterable (e.g. list) of channel name(s) or channel name to remove.
+
+        Returns
+        -------
+        inst : instance of Raw, Epochs, or Evoked
+            The modified instance.
+
+        See Also
+        --------
+        reorder_channels
+        pick_channels
+        pick_types
+
+        Notes
+        -----
+        .. 
versionadded:: 0.9.0 + """ + if isinstance(ch_names, str): + ch_names = [ch_names] + + try: + all_str = all([isinstance(ch, str) for ch in ch_names]) + except TypeError: + raise ValueError("'ch_names' must be iterable, got " + "type {} ({}).".format(type(ch_names), ch_names)) + + if not all_str: + raise ValueError("Each element in 'ch_names' must be str, got " + "{}.".format([type(ch) for ch in ch_names])) + + missing = [ch for ch in ch_names if ch not in self.ch_names] + if len(missing) > 0: + msg = "Channel(s) {0} not found, nothing dropped." + raise ValueError(msg.format(", ".join(missing))) + + bad_idx = [self.ch_names.index(ch) for ch in ch_names + if ch in self.ch_names] + idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx) + return self._pick_drop_channels(idx) + + @verbose + def _pick_drop_channels(self, idx, *, verbose=None): + # avoid circular imports + from ..io import BaseRaw + from ..time_frequency import AverageTFR, EpochsTFR + + msg = 'adding, dropping, or reordering channels' + if isinstance(self, BaseRaw): + if self._projector is not None: + _check_preload(self, f'{msg} after calling .apply_proj()') + else: + _check_preload(self, msg) + + if getattr(self, 'picks', None) is not None: + self.picks = self.picks[idx] + + if getattr(self, '_read_picks', None) is not None: + self._read_picks = [r[idx] for r in self._read_picks] + + if hasattr(self, '_cals'): + self._cals = self._cals[idx] + + pick_info(self.info, idx, copy=False) + + for key in ('_comp', '_projector'): + mat = getattr(self, key, None) + if mat is not None: + setattr(self, key, mat[idx][:, idx]) + + # All others (Evoked, Epochs, Raw) have chs axis=-2 + axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2 + if hasattr(self, '_data'): # skip non-preloaded Raw + self._data = self._data.take(idx, axis=axis) + else: + assert isinstance(self, BaseRaw) and not self.preload + + if isinstance(self, BaseRaw): + self.annotations._prune_ch_names(self.info, on_missing='ignore') + + self._pick_projs() + return self + + def _pick_projs(self): + """Keep only projectors which apply to at least 1 data channel.""" + drop_idx = [] + for idx, proj in enumerate(self.info['projs']): + if not set(self.info['ch_names']) & set(proj['data']['col_names']): + drop_idx.append(idx) + + for idx in drop_idx: + logger.info(f"Removing projector {self.info['projs'][idx]}") + + if drop_idx and hasattr(self, 'del_proj'): + self.del_proj(drop_idx) + + return self + + def add_channels(self, add_list, force_update_info=False): + """Append new channels to the instance. + + Parameters + ---------- + add_list : list + A list of objects to append to self. Must contain all the same + type as the current object. + force_update_info : bool + If True, force the info for objects to be appended to match the + values in ``self``. This should generally only be used when adding + stim channels for which important metadata won't be overwritten. + + .. versionadded:: 0.12 + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + drop_channels + + Notes + ----- + If ``self`` is a Raw instance that has been preloaded into a + :obj:`numpy.memmap` instance, the memmap will be resized. 
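+
+        For example (illustrative), a stimulus-channel-only Raw built with
+        :class:`mne.io.RawArray` can be appended to a preloaded recording via
+        ``raw.add_channels([stim_raw], force_update_info=True)``.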
+ """ + # avoid circular imports + from ..io import BaseRaw, _merge_info + from ..epochs import BaseEpochs + + _validate_type(add_list, (list, tuple), 'Input') + + # Object-specific checks + for inst in add_list + [self]: + _check_preload(inst, "adding channels") + if isinstance(self, BaseRaw): + con_axis = 0 + comp_class = BaseRaw + elif isinstance(self, BaseEpochs): + con_axis = 1 + comp_class = BaseEpochs + else: + con_axis = 0 + comp_class = type(self) + for inst in add_list: + _validate_type(inst, comp_class, 'All input') + data = [inst._data for inst in [self] + add_list] + + # Make sure that all dimensions other than channel axis are the same + compare_axes = [i for i in range(data[0].ndim) if i != con_axis] + shapes = np.array([dat.shape for dat in data])[:, compare_axes] + for shape in shapes: + if not ((shapes[0] - shape) == 0).all(): + raise AssertionError('All data dimensions except channels ' + 'must match, got %s != %s' + % (shapes[0], shape)) + del shapes + + # Create final data / info objects + infos = [self.info] + [inst.info for inst in add_list] + new_info = _merge_info(infos, force_update_to_first=force_update_info) + + # Now update the attributes + if isinstance(self._data, np.memmap) and con_axis == 0 and \ + sys.platform != 'darwin': # resizing not available--no mremap + # Use a resize and fill in other ones + out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:] + n_bytes = np.prod(out_shape) * self._data.dtype.itemsize + self._data.flush() + self._data.base.resize(n_bytes) + self._data = np.memmap(self._data.filename, mode='r+', + dtype=self._data.dtype, shape=out_shape) + assert self._data.shape == out_shape + assert self._data.nbytes == n_bytes + offset = len(data[0]) + for d in data[1:]: + this_len = len(d) + self._data[offset:offset + this_len] = d + offset += this_len + else: + self._data = np.concatenate(data, axis=con_axis) + self.info = new_info + if isinstance(self, BaseRaw): + self._cals = np.concatenate([getattr(inst, '_cals') + for inst in [self] + add_list]) + # We should never use these since data are preloaded, let's just + # set it to something large and likely to break (2 ** 31 - 1) + extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:]) + assert all(len(r) == infos[0]['nchan'] for r in self._read_picks) + self._read_picks = [ + np.concatenate([r, extra_idx]) for r in self._read_picks] + assert all(len(r) == self.info['nchan'] for r in self._read_picks) + elif isinstance(self, BaseEpochs): + self.picks = np.arange(self._data.shape[1]) + if hasattr(self, '_projector'): + activate = False if self._do_delayed_proj else self.proj + self._projector, self.info = setup_proj(self.info, False, + activate=activate) + + return self + + @fill_doc + def add_reference_channels(self, ref_channels): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during + recording. This is useful when you need to re-reference your data + to different channels. These added channels will consist of all zeros. + + Parameters + ---------- + %(ref_channels)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. 
+ """ + from ..io.reference import add_reference_channels + + return add_reference_channels(self, ref_channels, copy=False) + + +class InterpolationMixin(object): + """Mixin class for Raw, Evoked, Epochs.""" + + @verbose + def interpolate_bads(self, reset_bads=True, mode='accurate', + origin='auto', method=None, exclude=(), + verbose=None): + """Interpolate bad MEG and EEG channels. + + Operates in place. + + Parameters + ---------- + reset_bads : bool + If True, remove the bads from info. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used for interpolation of channels + using the minimum-norm method. + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'`` (default), which means a head-digitization-based + origin fit. + + .. versionadded:: 0.17 + method : dict + Method to use for each channel type. + Currently only the key "eeg" has multiple options: + + - ``"spline"`` (default) + Use spherical spline interpolation. + - ``"MNE"`` + Use minimum-norm projection to a sphere and back. + This is the method used for MEG channels. + + The value for "meg" is "MNE", and the value for + "fnirs" is "nearest". The default (None) is thus an alias for:: + + method=dict(meg="MNE", eeg="spline", fnirs="nearest") + + .. versionadded:: 0.21 + exclude : list | tuple + The channels to exclude from interpolation. If excluded a bad + channel will stay in bads. + %(verbose)s + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + from ..bem import _check_origin + from .interpolation import _interpolate_bads_eeg,\ + _interpolate_bads_meeg, _interpolate_bads_nirs + + _check_preload(self, "interpolation") + method = _handle_default('interpolation_method', method) + for key in method: + _check_option('method[key]', key, ('meg', 'eeg', 'fnirs')) + _check_option("method['eeg']", method['eeg'], ('spline', 'MNE')) + _check_option("method['meg']", method['meg'], ('MNE',)) + _check_option("method['fnirs']", method['fnirs'], ('nearest',)) + + if len(self.info['bads']) == 0: + warn('No bad channels to interpolate. Doing nothing...') + return self + logger.info('Interpolating bad channels') + origin = _check_origin(origin, self.info) + if method['eeg'] == 'spline': + _interpolate_bads_eeg(self, origin=origin, exclude=exclude) + eeg_mne = False + else: + eeg_mne = True + _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne, + exclude=exclude) + _interpolate_bads_nirs(self, exclude=exclude) + + if reset_bads is True: + self.info['bads'] = \ + [ch for ch in self.info['bads'] if ch in exclude] + + return self + + +@verbose +def rename_channels(info, mapping, allow_duplicates=False, verbose=None): + """Rename channels. + + Parameters + ---------- + %(info_not_none)s Note: modified in place. 
+ %(mapping_rename_channels_duplicates)s + %(verbose)s + """ + _validate_type(info, Info, 'info') + info._check_consistency() + bads = list(info['bads']) # make our own local copies + ch_names = list(info['ch_names']) + + # first check and assemble clean mappings of index and name + if isinstance(mapping, dict): + _check_dict_keys(mapping, ch_names, key_description="channel name(s)", + valid_key_source="info") + new_names = [(ch_names.index(ch_name), new_name) + for ch_name, new_name in mapping.items()] + elif callable(mapping): + new_names = [(ci, mapping(ch_name)) + for ci, ch_name in enumerate(ch_names)] + else: + raise ValueError('mapping must be callable or dict, not %s' + % (type(mapping),)) + + # check we got all strings out of the mapping + for new_name in new_names: + _validate_type(new_name[1], 'str', 'New channel mappings') + + # do the remapping locally + for c_ind, new_name in new_names: + for bi, bad in enumerate(bads): + if bad == ch_names[c_ind]: + bads[bi] = new_name + ch_names[c_ind] = new_name + + # check that all the channel names are unique + if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates: + raise ValueError('New channel names are not unique, renaming failed') + + # do the remapping in info + info['bads'] = bads + ch_names_mapping = dict() + for ch, ch_name in zip(info['chs'], ch_names): + ch_names_mapping[ch['ch_name']] = ch_name + ch['ch_name'] = ch_name + # .get b/c fwd info omits it + _rename_comps(info.get('comps', []), ch_names_mapping) + if 'projs' in info: # fwd might omit it + for proj in info['projs']: + proj['data']['col_names'][:] = \ + _rename_list(proj['data']['col_names'], ch_names_mapping) + info._update_redundant() + info._check_consistency() + + +def _recursive_flatten(cell, dtype): + """Unpack mat files in Python.""" + if len(cell) > 0: + while not isinstance(cell[0], dtype): + cell = [c for d in cell for c in d] + return cell + + +@fill_doc +def read_ch_adjacency(fname, picks=None): + """Parse FieldTrip neighbors .mat file. + + More information on these neighbor definitions can be found on the related + `FieldTrip documentation pages + `__. + + Parameters + ---------- + fname : str + The file name. Example: 'neuromag306mag', 'neuromag306planar', + 'ctf275', 'biosemi64', etc. + %(picks_all)s + Picks Must match the template. + + Returns + ------- + ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) + The adjacency matrix. + ch_names : list + The list of channel names present in adjacency matrix. + + See Also + -------- + find_ch_adjacency + + Notes + ----- + This function is closely related to :func:`find_ch_adjacency`. If you + don't know the correct file for the neighbor definitions, + :func:`find_ch_adjacency` can compute the adjacency matrix from 2d + sensor locations. 
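+
+    For example (illustrative)::
+
+        adjacency, ch_names = read_ch_adjacency('biosemi64')
+        # adjacency: scipy.sparse.csr_matrix, shape (n_channels, n_channels)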
+ """ + from scipy.io import loadmat + if not op.isabs(fname): + templates_dir = op.realpath(op.join(op.dirname(__file__), + 'data', 'neighbors')) + templates = os.listdir(templates_dir) + for f in templates: + if f == fname: + break + if f == fname + '_neighb.mat': + fname += '_neighb.mat' + break + else: + raise ValueError('I do not know about this neighbor ' + 'template: "{}"'.format(fname)) + + fname = op.join(templates_dir, fname) + + nb = loadmat(fname)['neighbours'] + ch_names = _recursive_flatten(nb['label'], str) + picks = _picks_to_idx(len(ch_names), picks) + neighbors = [_recursive_flatten(c, str) for c in + nb['neighblabel'].flatten()] + assert len(ch_names) == len(neighbors) + adjacency = _ch_neighbor_adjacency(ch_names, neighbors) + # picking before constructing matrix is buggy + adjacency = adjacency[picks][:, picks] + ch_names = [ch_names[p] for p in picks] + return adjacency, ch_names + + +def _ch_neighbor_adjacency(ch_names, neighbors): + """Compute sensor adjacency matrix. + + Parameters + ---------- + ch_names : list of str + The channel names. + neighbors : list of list + A list of list of channel names. The neighbors to + which the channels in ch_names are connected with. + Must be of the same length as ch_names. + + Returns + ------- + ch_adjacency : scipy.sparse matrix + The adjacency matrix. + """ + from scipy import sparse + if len(ch_names) != len(neighbors): + raise ValueError('`ch_names` and `neighbors` must ' + 'have the same length') + set_neighbors = {c for d in neighbors for c in d} + rest = set_neighbors - set(ch_names) + if len(rest) > 0: + raise ValueError('Some of your neighbors are not present in the ' + 'list of channel names') + + for neigh in neighbors: + if (not isinstance(neigh, list) and + not all(isinstance(c, str) for c in neigh)): + raise ValueError('`neighbors` must be a list of lists of str') + + ch_adjacency = np.eye(len(ch_names), dtype=bool) + for ii, neigbs in enumerate(neighbors): + ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True + ch_adjacency = sparse.csr_matrix(ch_adjacency) + return ch_adjacency + + +@fill_doc +def find_ch_adjacency(info, ch_type): + """Find the adjacency matrix for the given channels. + + This function tries to infer the appropriate adjacency matrix template + for the given channels. If a template is not found, the adjacency matrix + is computed using Delaunay triangulation based on 2d sensor locations. + + Parameters + ---------- + %(info_not_none)s + ch_type : str | None + The channel type for computing the adjacency matrix. Currently + supports 'mag', 'grad', 'eeg' and None. If None, the info must contain + only one channel type. + + Returns + ------- + ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) + The adjacency matrix. + ch_names : list + The list of channel names present in adjacency matrix. + + See Also + -------- + read_ch_adjacency + + Notes + ----- + .. versionadded:: 0.15 + + Automatic detection of an appropriate adjacency matrix template only + works for MEG data at the moment. This means that the adjacency matrix + is always computed for EEG data and never loaded from a template file. If + you want to load a template for a given montage use + :func:`read_ch_adjacency` directly. 
+ """ + if ch_type is None: + picks = channel_indices_by_type(info) + if sum([len(p) != 0 for p in picks.values()]) != 1: + raise ValueError('info must contain only one channel type if ' + 'ch_type is None.') + ch_type = channel_type(info, 0) + else: + _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg']) + (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, + has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, + has_eeg_coils_and_meg, has_eeg_coils_only, + has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) + conn_name = None + if has_vv_mag and ch_type == 'mag': + conn_name = 'neuromag306mag' + elif has_vv_grad and ch_type == 'grad': + conn_name = 'neuromag306planar' + elif has_4D_mag: + if 'MEG 248' in info['ch_names']: + idx = info['ch_names'].index('MEG 248') + grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD + mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG + if ch_type == 'grad' and grad: + conn_name = 'bti248grad' + elif ch_type == 'mag' and mag: + conn_name = 'bti248' + elif 'MEG 148' in info['ch_names'] and ch_type == 'mag': + idx = info['ch_names'].index('MEG 148') + if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG: + conn_name = 'bti148' + elif has_CTF_grad and ch_type == 'mag': + if info['nchan'] < 100: + conn_name = 'ctf64' + elif info['nchan'] > 200: + conn_name = 'ctf275' + else: + conn_name = 'ctf151' + elif n_kit_grads > 0: + from ..io.kit.constants import KIT_NEIGHBORS + conn_name = KIT_NEIGHBORS.get(info['kit_system_id']) + + if conn_name is not None: + logger.info('Reading adjacency matrix for %s.' % conn_name) + return read_ch_adjacency(conn_name) + logger.info('Could not find a adjacency matrix for the data. ' + 'Computing adjacency based on Delaunay triangulations.') + return _compute_ch_adjacency(info, ch_type) + + +@fill_doc +def _compute_ch_adjacency(info, ch_type): + """Compute channel adjacency matrix using Delaunay triangulations. + + Parameters + ---------- + %(info_not_none)s + ch_type : str + The channel type for computing the adjacency matrix. Currently + supports 'mag', 'grad' and 'eeg'. + + Returns + ------- + ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels) + The adjacency matrix. + ch_names : list + The list of channel names present in adjacency matrix. + """ + from scipy import sparse + from scipy.spatial import Delaunay + from .. import spatial_tris_adjacency + from ..channels.layout import _find_topomap_coords, _pair_grad_sensors + combine_grads = (ch_type == 'grad' + and any([coil_type in [ch['coil_type'] + for ch in info['chs']] + for coil_type in + [FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_NM_122]])) + + picks = dict(_picks_by_type(info, exclude=[]))[ch_type] + ch_names = [info['ch_names'][pick] for pick in picks] + if combine_grads: + pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[]) + if len(pairs) != len(picks): + raise RuntimeError('Cannot find a pair for some of the ' + 'gradiometers. 
Cannot compute adjacency ' + 'matrix.') + # only for one of the pair + xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT) + else: + xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT) + tri = Delaunay(xy) + neighbors = spatial_tris_adjacency(tri.simplices) + + if combine_grads: + ch_adjacency = np.eye(len(picks), dtype=bool) + for idx, neigbs in zip(neighbors.row, neighbors.col): + for ii in range(2): # make sure each pair is included + for jj in range(2): + ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True + ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair + ch_adjacency = sparse.csr_matrix(ch_adjacency) + else: + ch_adjacency = sparse.lil_matrix(neighbors) + ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0])) + ch_adjacency = ch_adjacency.tocsr() + + return ch_adjacency, ch_names + + +@fill_doc +def fix_mag_coil_types(info, use_cal=False): + """Fix magnetometer coil types. + + Parameters + ---------- + %(info_not_none)s Corrections are done in-place. + use_cal : bool + If True, further refine the check for old coil types by checking + ``info['chs'][ii]['cal']``. + + Notes + ----- + This function changes magnetometer coil types 3022 (T1: SQ20483N) and + 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition + records in the info structure. + + Neuromag Vectorview systems can contain magnetometers with two + different coil sizes (3022 and 3023 vs. 3024). The systems + incorporating coils of type 3024 were introduced last and are used at + the majority of MEG sites. At some sites with 3024 magnetometers, + the data files have still defined the magnetometers to be of type + 3022 to ensure compatibility with older versions of Neuromag software. + In the MNE software as well as in the present version of Neuromag + software coil type 3024 is fully supported. Therefore, it is now safe + to upgrade the data files to use the true coil type. + + .. note:: The effect of the difference between the coil sizes on the + current estimates computed by the MNE software is very small. + Therefore the use of ``fix_mag_coil_types`` is not mandatory. + """ + old_mag_inds = _get_T1T2_mag_inds(info, use_cal) + + for ii in old_mag_inds: + info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3 + logger.info('%d of %d magnetometer types replaced with T3.' % + (len(old_mag_inds), len(pick_types(info, meg='mag')))) + info._check_consistency() + + +def _get_T1T2_mag_inds(info, use_cal=False): + """Find T1/T2 magnetometer coil types.""" + picks = pick_types(info, meg='mag') + old_mag_inds = [] + # From email exchanges, systems with the larger T2 coil only use the cal + # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10 + # (Triux). So we can use a simple check for > 3e-11. 
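+    # Hence, with use_cal=True only T1/T2-labeled coils whose cal exceeds
+    # 3e-11 (i.e. physically newer magnetometers) are flagged for relabeling.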
+
+
+@fill_doc
+def fix_mag_coil_types(info, use_cal=False):
+    """Fix magnetometer coil types.
+
+    Parameters
+    ----------
+    %(info_not_none)s Corrections are done in-place.
+    use_cal : bool
+        If True, further refine the check for old coil types by checking
+        ``info['chs'][ii]['cal']``.
+
+    Notes
+    -----
+    This function changes magnetometer coil types 3022 (T1: SQ20483N) and
+    3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
+    records in the info structure.
+
+    Neuromag Vectorview systems can contain magnetometers with two
+    different coil sizes (3022 and 3023 vs. 3024). The systems
+    incorporating coils of type 3024 were introduced last and are used at
+    the majority of MEG sites. At some sites with 3024 magnetometers,
+    the data files still define the magnetometers to be of type
+    3022 to ensure compatibility with older versions of Neuromag software.
+    In the MNE software, as well as in the present version of the Neuromag
+    software, coil type 3024 is fully supported. Therefore, it is now safe
+    to upgrade the data files to use the true coil type.
+
+    .. note:: The effect of the difference between the coil sizes on the
+              current estimates computed by the MNE software is very small.
+              Therefore the use of ``fix_mag_coil_types`` is not mandatory.
+    """
+    old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
+
+    for ii in old_mag_inds:
+        info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
+    logger.info('%d of %d magnetometer types replaced with T3.' %
+                (len(old_mag_inds), len(pick_types(info, meg='mag'))))
+    info._check_consistency()
+
+
+def _get_T1T2_mag_inds(info, use_cal=False):
+    """Find T1/T2 magnetometer coil types."""
+    picks = pick_types(info, meg='mag')
+    old_mag_inds = []
+    # From email exchanges, systems with the larger T2 coil only use the cal
+    # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
+    # (Triux). So we can use a simple check for > 3e-11.
+    for ii in picks:
+        ch = info['chs'][ii]
+        if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
+                               FIFF.FIFFV_COIL_VV_MAG_T2):
+            if use_cal:
+                if ch['cal'] > 3e-11:
+                    old_mag_inds.append(ii)
+            else:
+                old_mag_inds.append(ii)
+    return old_mag_inds
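Since the corrections happen in-place on ``info``, applying the fix is a one-liner; a usage sketch, where the file name is purely illustrative:

    import mne

    raw = mne.io.read_raw_fif('old_vectorview_raw.fif')  # hypothetical file
    mne.channels.fix_mag_coil_types(raw.info)  # T1/T2 mags upgraded in-place
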
+ """ + _validate_type(info, "info") + + try: + from .layout import find_layout + layout = find_layout(info) + pos = layout.pos + ch_names = layout.names + except RuntimeError: # no channel positions found + ch_names = info["ch_names"] + pos = None + + selections = dict(Left=[], Midline=[], Right=[]) + for pick, channel in enumerate(ch_names): + last_char = channel[-1].lower() # in 10/20, last char codes hemisphere + if last_char in midline: + selection = "Midline" + elif last_char.isdigit(): + selection = "Left" if int(last_char) % 2 else "Right" + else: # ignore the channel + continue + selections[selection].append(pick) + + if pos is not None: + # sort channels from front to center + # (y-coordinate of the position info in the layout) + selections = {selection: np.array(picks)[pos[picks, 1].argsort()] + for selection, picks in selections.items()} + + return selections + + +def combine_channels(inst, groups, method='mean', keep_stim=False, + drop_bad=False): + """Combine channels based on specified channel grouping. + + Parameters + ---------- + inst : instance of Raw, Epochs, or Evoked + An MNE-Python object to combine the channels for. The object can be of + type Raw, Epochs, or Evoked. + groups : dict + Specifies which channels are aggregated into a single channel, with + aggregation method determined by the ``method`` parameter. One new + pseudo-channel is made per dict entry; the dict values must be lists of + picks (integer indices of ``ch_names``). For example:: + + groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8]) + + Note that within a dict entry all channels must have the same type. + method : str | callable + Which method to use to combine channels. If a :class:`str`, must be one + of 'mean', 'median', or 'std' (standard deviation). If callable, the + callable must accept one positional input (data of shape ``(n_channels, + n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an + :class:`array ` of shape ``(n_times,)``, or ``(n_epochs, + n_times)``. For example with an instance of Raw or Evoked:: + + method = lambda data: np.mean(data, axis=0) + + Another example with an instance of Epochs:: + + method = lambda data: np.median(data, axis=1) + + Defaults to ``'mean'``. + keep_stim : bool + If ``True``, include stimulus channels in the resulting object. + Defaults to ``False``. + drop_bad : bool + If ``True``, drop channels marked as bad before combining. Defaults to + ``False``. + + Returns + ------- + combined_inst : instance of Raw, Epochs, or Evoked + An MNE-Python object of the same type as the input ``inst``, containing + one virtual channel for each group in ``groups`` (and, if ``keep_stim`` + is ``True``, also containing stimulus channels). + """ + from ..io import BaseRaw, RawArray + from .. 
+
+
+@fill_doc
+def make_1020_channel_selections(info, midline="z"):
+    """Return dict mapping from ROI names to lists of picks for 10/20 setups.
+
+    This passes through all channel names, and uses a simple heuristic to
+    separate channel names into three Region of Interest-based selections:
+    Left, Midline and Right. The heuristic is that channels ending in any of
+    the characters in ``midline`` are filed under that heading, otherwise
+    those ending in odd numbers are filed under "Left" and those ending in
+    even numbers under "Right". Other channels are ignored. This is
+    appropriate for 10/20 files, but not for other channel naming
+    conventions.
+    If channel positions are available, the lists are sorted from posterior
+    to anterior.
+
+    Parameters
+    ----------
+    %(info_not_none)s If possible, the channel lists will be sorted
+        posterior-to-anterior; otherwise they default to the order specified
+        in ``info["ch_names"]``.
+    midline : str
+        Names ending in any of these characters are stored under the
+        ``Midline`` key. Defaults to 'z'. Note that capitalization is
+        ignored.
+
+    Returns
+    -------
+    selections : dict
+        A dictionary mapping from ROI names to lists of picks (integers).
+    """
+    _validate_type(info, "info")
+
+    try:
+        from .layout import find_layout
+        layout = find_layout(info)
+        pos = layout.pos
+        ch_names = layout.names
+    except RuntimeError:  # no channel positions found
+        ch_names = info["ch_names"]
+        pos = None
+
+    selections = dict(Left=[], Midline=[], Right=[])
+    for pick, channel in enumerate(ch_names):
+        last_char = channel[-1].lower()  # in 10/20, last char codes hemisphere
+        if last_char in midline:
+            selection = "Midline"
+        elif last_char.isdigit():
+            selection = "Left" if int(last_char) % 2 else "Right"
+        else:  # ignore the channel
+            continue
+        selections[selection].append(pick)
+
+    if pos is not None:
+        # sort channels from posterior to anterior
+        # (y-coordinate of the position info in the layout)
+        selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
+                      for selection, picks in selections.items()}
+
+    return selections
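The Left/Midline/Right heuristic can be checked on plain channel names without any Info object; a self-contained sketch, with the channel list invented for the demo:

    names = ['Fp1', 'Fpz', 'Fp2', 'Cz', 'O1', 'O2', 'EOG']

    selections = dict(Left=[], Midline=[], Right=[])
    for pick, name in enumerate(names):
        last = name[-1].lower()
        if last in 'z':
            selections['Midline'].append(pick)
        elif last.isdigit():
            selections['Left' if int(last) % 2 else 'Right'].append(pick)
        # anything else (here 'EOG') is ignored

    print(selections)  # {'Left': [0, 4], 'Midline': [1, 3], 'Right': [2, 5]}
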
+
+
+def combine_channels(inst, groups, method='mean', keep_stim=False,
+                     drop_bad=False):
+    """Combine channels based on specified channel grouping.
+
+    Parameters
+    ----------
+    inst : instance of Raw, Epochs, or Evoked
+        An MNE-Python object to combine the channels for. The object can be
+        of type Raw, Epochs, or Evoked.
+    groups : dict
+        Specifies which channels are aggregated into a single channel, with
+        aggregation method determined by the ``method`` parameter. One new
+        pseudo-channel is made per dict entry; the dict values must be lists
+        of picks (integer indices of ``ch_names``). For example::
+
+            groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
+
+        Note that within a dict entry all channels must have the same type.
+    method : str | callable
+        Which method to use to combine channels. If a :class:`str`, must be
+        one of 'mean', 'median', or 'std' (standard deviation). If callable,
+        the callable must accept one positional input (data of shape
+        ``(n_channels, n_times)``, or ``(n_epochs, n_channels, n_times)``)
+        and return an :class:`array <numpy.ndarray>` of shape ``(n_times,)``,
+        or ``(n_epochs, n_times)``. For example with an instance of Raw or
+        Evoked::
+
+            method = lambda data: np.mean(data, axis=0)
+
+        Another example with an instance of Epochs::
+
+            method = lambda data: np.median(data, axis=1)
+
+        Defaults to ``'mean'``.
+    keep_stim : bool
+        If ``True``, include stimulus channels in the resulting object.
+        Defaults to ``False``.
+    drop_bad : bool
+        If ``True``, drop channels marked as bad before combining. Defaults
+        to ``False``.
+
+    Returns
+    -------
+    combined_inst : instance of Raw, Epochs, or Evoked
+        An MNE-Python object of the same type as the input ``inst``,
+        containing one virtual channel for each group in ``groups`` (and, if
+        ``keep_stim`` is ``True``, also containing stimulus channels).
+    """
+    from ..io import BaseRaw, RawArray
+    from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
+
+    ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
+    ch_idx = list(range(inst.info['nchan']))
+    ch_names = inst.info['ch_names']
+    ch_types = inst.get_channel_types()
+    inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
+    groups = OrderedDict(deepcopy(groups))
+
+    # Convert string values of ``method`` into callables
+    # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
+    if isinstance(method, str):
+        method_dict = {key: partial(getattr(np, key), axis=ch_axis)
+                       for key in ('mean', 'median', 'std')}
+        try:
+            method = method_dict[method]
+        except KeyError:
+            raise ValueError('"method" must be a callable, or one of "mean", '
+                             f'"median", or "std"; got "{method}".')
+
+    # Instantiate channel info and data
+    new_ch_names, new_ch_types, new_data = [], [], []
+    if not isinstance(keep_stim, bool):
+        raise TypeError('"keep_stim" must be of type bool, not '
+                        f'{type(keep_stim)}.')
+    if keep_stim:
+        stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
+        if stim_ch_idx:
+            new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
+            new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
+            new_data = [np.take(inst_data, idx, axis=ch_axis)
+                        for idx in stim_ch_idx]
+        else:
+            warn('Could not find stimulus channels.')
+
+    # Get indices of bad channels
+    ch_idx_bad = []
+    if not isinstance(drop_bad, bool):
+        raise TypeError('"drop_bad" must be of type bool, not '
+                        f'{type(drop_bad)}.')
+    if drop_bad and inst.info['bads']:
+        ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
+
+    # Check correctness of combinations
+    for this_group, this_picks in groups.items():
+        # Check if channel indices are out of bounds
+        if not all(idx in ch_idx for idx in this_picks):
+            raise ValueError('Some channel indices are out of bounds.')
+        # Check for heterogeneous sensor type combinations
+        this_ch_type = np.array(ch_types)[this_picks]
+        if len(set(this_ch_type)) > 1:
+            types = ', '.join(set(this_ch_type))
+            raise ValueError('Cannot combine sensors of different types; '
+                             f'"{this_group}" contains types {types}.')
+        # Remove bad channels
+        these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
+        this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
+        if these_bads:
+            logger.info('Dropped the following channels in group '
+                        f'{this_group}: {these_bads}')
+        # Warn if combining fewer than 2 channels
+        if len(set(this_picks)) < 2:
+            warn(f'Less than 2 channels in group "{this_group}" when '
+                 f'combining by method "{method}".')
+        # If all checks pass, create a more detailed dict without bad channels
+        groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
+
+    # Combine channels and add them to the new instance
+    for this_group, this_group_dict in groups.items():
+        new_ch_names.append(this_group)
+        new_ch_types.append(this_group_dict['ch_type'])
+        this_picks = this_group_dict['picks']
+        this_data = np.take(inst_data, this_picks, axis=ch_axis)
+        new_data.append(method(this_data))
+    new_data = np.swapaxes(new_data, 0, ch_axis)
+    info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
+                       ch_types=new_ch_types)
+    if isinstance(inst, BaseRaw):
+        combined_inst = RawArray(new_data, info, first_samp=inst.first_samp)
+    elif isinstance(inst, BaseEpochs):
+        combined_inst = EpochsArray(new_data, info, events=inst.events,
+                                    tmin=inst.times[0])
+        if inst.metadata is not None:
+            combined_inst.metadata = inst.metadata.copy()
+    elif isinstance(inst, Evoked):
+        combined_inst = EvokedArray(new_data, info, tmin=inst.times[0])
+
+    return combined_inst
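The function above is exposed as ``mne.channels.combine_channels``; a runnable sketch on synthetic data, with channel names, groupings and amplitudes invented for the demo:

    import numpy as np
    import mne

    info = mne.create_info(['F3', 'F4', 'C3', 'C4'], sfreq=100.,
                           ch_types='eeg')
    raw = mne.io.RawArray(np.random.randn(4, 500) * 1e-6, info)

    # One virtual channel per dict entry; picks are integer channel indices.
    roi = mne.channels.combine_channels(
        raw, dict(left=[0, 2], right=[1, 3]), method='mean')
    print(roi.ch_names)  # ['left', 'right']
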
+
+
+# NeuroMag channel groupings
+_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
+               'Right-parietal', 'Left-occipital', 'Right-occipital',
+               'Left-frontal', 'Right-frontal']
+_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128']
+
+
+def _divide_to_regions(info, add_stim=True):
+    """Divide channels into regions by position."""
+    from scipy.stats import zscore
+    picks = _pick_data_channels(info, exclude=[])
+    chs_in_lobe = len(picks) // 4
+    pos = np.array([ch['loc'][:3] for ch in info['chs']])
+    x, y, z = pos.T
+
+    frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]]
+    picks = np.setdiff1d(picks, frontal)
+
+    occipital = picks[np.argsort(y[picks])[:chs_in_lobe]]
+    picks = np.setdiff1d(picks, occipital)
+
+    temporal = picks[np.argsort(z[picks])[:chs_in_lobe]]
+    picks = np.setdiff1d(picks, temporal)
+
+    lt, rt = _divide_side(temporal, x)
+    lf, rf = _divide_side(frontal, x)
+    lo, ro = _divide_side(occipital, x)
+    lp, rp = _divide_side(picks, x)  # Parietal lobe from the remaining picks.
+
+    # Because of the way the sides are divided, there may be outliers in the
+    # temporal lobes. Here we switch the sides for these outliers. For other
+    # lobes it is not a big problem because of the vicinity of the lobes.
+    with np.errstate(invalid='ignore'):  # invalid division, greater compare
+        zs = np.abs(zscore(x[rt]))
+        outliers = np.array(rt)[np.where(zs > 2.)[0]]
+    rt = list(np.setdiff1d(rt, outliers))
+
+    with np.errstate(invalid='ignore'):  # invalid division, greater compare
+        zs = np.abs(zscore(x[lt]))
+        outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]]))
+    lt = list(np.setdiff1d(lt, outliers))
+
+    l_mean = np.mean(x[lt])
+    r_mean = np.mean(x[rt])
+    for outlier in outliers:
+        if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]):
+            lt.append(outlier)
+        else:
+            rt.append(outlier)
+
+    if add_stim:
+        stim_ch = _get_stim_channel(None, info, raise_error=False)
+        if len(stim_ch) > 0:
+            for region in [lf, rf, lo, ro, lp, rp, lt, rt]:
+                region.append(info['ch_names'].index(stim_ch[0]))
+    return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf),
+                        ('Left-parietal', lp), ('Right-parietal', rp),
+                        ('Left-occipital', lo), ('Right-occipital', ro),
+                        ('Left-temporal', lt), ('Right-temporal', rt)])
+
+
+def _divide_side(lobe, x):
+    """Make an even separation between the left and right lobes."""
+    lobe = np.asarray(lobe)
+    median = np.median(x[lobe])
+
+    left = lobe[np.where(x[lobe] < median)[0]]
+    right = lobe[np.where(x[lobe] > median)[0]]
+    medians = np.where(x[lobe] == median)[0]
+
+    left = np.sort(np.concatenate([left, lobe[medians[1::2]]]))
+    right = np.sort(np.concatenate([right, lobe[medians[::2]]]))
+    return list(left), list(right)
+
+
+@verbose
+def read_vectorview_selection(name, fname=None, info=None, verbose=None):
+    """Read Neuromag Vector View channel selection from a file.
+
+    Parameters
+    ----------
+    name : str | list of str
+        Name of the selection. If a list, the selections are combined.
+        Supported selections are: ``'Vertex'``, ``'Left-temporal'``,
+        ``'Right-temporal'``, ``'Left-parietal'``, ``'Right-parietal'``,
+        ``'Left-occipital'``, ``'Right-occipital'``, ``'Left-frontal'`` and
+        ``'Right-frontal'``. Selections can also be matched and combined by
+        specifying common substrings. For example, ``name='temporal'`` will
+        produce a combination of ``'Left-temporal'`` and
+        ``'Right-temporal'``.
+    fname : str
+        Filename of the selection file (if ``None``, built-in selections are
+        used).
+    %(info)s Used to determine which channel naming convention to use, e.g.
+        ``'MEG 0111'`` (with space) for old Neuromag systems and ``'MEG0111'``
+        (without space) for new ones.
+    %(verbose)s
+
+    Returns
+    -------
+    sel : list of str
+        List with channel names in the selection.
+    """
+    # convert name to a list of strings
+    if not isinstance(name, (list, tuple)):
+        name = [name]
+    if isinstance(info, Info):
+        picks = pick_types(info, meg=True, exclude=())
+        if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
+            spacing = 'new'
+        else:
+            spacing = 'old'
+    elif info is not None:
+        raise TypeError('info must be an instance of Info or None, not %s'
+                        % (type(info),))
+    else:  # info is None
+        spacing = 'old'
+
+    # use built-in selections by default
+    if fname is None:
+        fname = op.join(op.dirname(__file__), '..', 'data', 'mne_analyze.sel')
+
+    fname = _check_fname(fname, must_exist=True, overwrite='read')
+
+    # use this to make sure we find at least one match for each name
+    name_found = {n: False for n in name}
+    with open(fname, 'r') as fid:
+        sel = []
+        for line in fid:
+            line = line.strip()
+            # skip blank lines and comments
+            if len(line) == 0 or line[0] == '#':
+                continue
+            # get the name of the selection in the file
+            pos = line.find(':')
+            if pos < 0:
+                logger.info('":" delimiter not found in selections file, '
+                            'skipping line')
+                continue
+            sel_name_file = line[:pos]
+            # search for substring match with name provided
+            for n in name:
+                if sel_name_file.find(n) >= 0:
+                    sel.extend(line[pos + 1:].split('|'))
+                    name_found[n] = True
+                    break
+
+    # make sure we found at least one match for each name
+    for n, found in name_found.items():
+        if not found:
+            raise ValueError('No match for selection name "%s" found' % n)
+
+    # make the selection a sorted list with unique elements
+    sel = list(set(sel))
+    sel.sort()
+    if spacing == 'new':  # "new" or "old" by now, "old" is default
+        sel = [s.replace('MEG ', 'MEG') for s in sel]
+    return sel
diff --git a/python/libs/mne/channels/data/layouts/CTF-275.lout b/python/libs/mne/channels/data/layouts/CTF-275.lout
new file mode 100644
index 0000000..53d924c
--- /dev/null
+++ b/python/libs/mne/channels/data/layouts/CTF-275.lout
@@ -0,0 +1,276 @@
+ -42.27 42.33 -39.99 31.80
+001 -4.09 10.91 4.00 3.00 MLC11-2622
+002 -7.25 8.87 4.00 3.00 MLC12-2622
+003 -10.79 7.43 4.00 3.00 MLC13-2622
+004 -14.40 5.31 4.00 3.00 MLC14-2622
+005 -17.45 2.88 4.00 3.00 MLC15-2622
+006 -19.94 -0.21 4.00 3.00 MLC16-2622
+007 -22.30 -3.88 4.00 3.00 MLC17-2622
+008 -7.70 5.16 4.00 3.00 MLC21-2622
+009 -11.18 3.69 4.00 3.00 MLC22-2622
+010 -14.17 1.40 4.00 3.00 MLC23-2622
+011 -16.42 -1.52 4.00 3.00 MLC24-2622
+012 -18.64 -4.88 4.00 3.00 MLC25-2622
+013 -12.55 -2.00 4.00 3.00 MLC31-2622
+014 -15.13 -5.41 4.00 3.00 MLC32-2622
+015 -9.57 0.28 4.00 3.00 MLC41-2622
+016 -11.51 -5.56 4.00 3.00 MLC42-2622
+017 -4.04 4.58 4.00 3.00 MLC51-2622
+018 -6.04 1.35 4.00 3.00 MLC52-2622
+019 -8.79 -3.34 4.00 3.00 MLC53-2622
+020 -8.32 -7.10 4.00 3.00 MLC54-2622
+021 -6.60 -10.22 4.00 3.00 MLC55-2622
+022 -4.01 -1.76 4.00 3.00 MLC61-2622
+023 -5.55 -4.97 4.00 3.00 MLC62-2622
+024 -3.74 -8.12 4.00 3.00 MLC63-2622
+025 -7.63 28.14 4.00 3.00 MLF11-2622
+026 -12.92 27.01 4.00 3.00 MLF12-2622
+027 -18.14 25.41 4.00 3.00 MLF13-2622
+028 -23.34 23.65 4.00 3.00 MLF14-2622
+029 -4.64 25.47 4.00 3.00 MLF21-2622
+030 -9.22 24.68 4.00 3.00 MLF22-2622
+031 -13.60 23.41 4.00 3.00 MLF23-2622
+032 -18.31 21.53 4.00 3.00 MLF24-2622
+033 -22.68 19.69 4.00 3.00 MLF25-2622
+034 -6.57 22.14 4.00 3.00 MLF31-2622
+035
-10.75 21.22 4.00 3.00 MLF32-2622 +036 -15.16 19.49 4.00 3.00 MLF33-2622 +037 -19.01 17.57 4.00 3.00 MLF34-2622 +038 -22.93 15.25 4.00 3.00 MLF35-2622 +039 -4.25 19.38 4.00 3.00 MLF41-2622 +040 -8.17 18.80 4.00 3.00 MLF42-2622 +041 -12.29 17.37 4.00 3.00 MLF43-2622 +042 -15.93 15.49 4.00 3.00 MLF44-2622 +043 -19.89 13.39 4.00 3.00 MLF45-2622 +044 -24.12 10.50 4.00 3.00 MLF46-2622 +045 -5.48 16.15 4.00 3.00 MLF51-2622 +046 -9.58 15.10 4.00 3.00 MLF52-2622 +047 -13.17 13.43 4.00 3.00 MLF53-2622 +048 -16.66 11.39 4.00 3.00 MLF54-2622 +049 -20.76 9.06 4.00 3.00 MLF55-2622 +050 -24.71 5.73 4.00 3.00 MLF56-2622 +051 -7.17 12.78 4.00 3.00 MLF61-2622 +052 -10.58 11.08 4.00 3.00 MLF62-2622 +053 -13.93 9.16 4.00 3.00 MLF63-2622 +054 -17.37 7.29 4.00 3.00 MLF64-2622 +055 -20.83 4.87 4.00 3.00 MLF65-2622 +056 -23.40 1.59 4.00 3.00 MLF66-2622 +057 -25.90 -2.51 4.00 3.00 MLF67-2622 +058 -6.96 -27.32 4.00 3.00 MLO11-2622 +059 -11.88 -25.97 4.00 3.00 MLO12-2622 +060 -16.48 -23.69 4.00 3.00 MLO13-2622 +061 -20.64 -20.44 4.00 3.00 MLO14-2622 +062 -4.82 -30.75 4.00 3.00 MLO21-2622 +063 -10.11 -29.77 4.00 3.00 MLO22-2622 +064 -15.52 -27.87 4.00 3.00 MLO23-2622 +065 -20.40 -24.85 4.00 3.00 MLO24-2622 +066 -7.92 -33.45 4.00 3.00 MLO31-2622 +067 -13.84 -31.94 4.00 3.00 MLO32-2622 +068 -19.61 -29.16 4.00 3.00 MLO33-2622 +069 -24.70 -25.44 4.00 3.00 MLO34-2622 +070 -5.16 -36.86 4.00 3.00 MLO41-2622 +071 -11.67 -35.84 4.00 3.00 MLO42-2622 +072 -17.98 -33.55 4.00 3.00 MLO43-2622 +073 -23.91 -30.00 4.00 3.00 MLO44-2622 +074 -8.79 -39.34 4.00 3.00 MLO51-2622 +075 -15.83 -37.54 4.00 3.00 MLO52-2622 +076 -22.47 -34.34 4.00 3.00 MLO53-2622 +077 -4.98 -13.36 4.00 3.00 MLP11-2622 +078 -10.20 -10.01 4.00 3.00 MLP12-2622 +079 -3.80 -16.69 4.00 3.00 MLP21-2622 +080 -8.73 -13.30 4.00 3.00 MLP22-2622 +081 -13.58 -8.80 4.00 3.00 MLP23-2622 +082 -5.66 -19.72 4.00 3.00 MLP31-2622 +083 -8.41 -16.83 4.00 3.00 MLP32-2622 +084 -12.08 -14.80 4.00 3.00 MLP33-2622 +085 -15.13 -11.95 4.00 3.00 MLP34-2622 +086 -17.18 -8.63 4.00 3.00 MLP35-2622 +087 -9.92 -20.16 4.00 3.00 MLP41-2622 +088 -13.37 -18.09 4.00 3.00 MLP42-2622 +089 -16.59 -15.58 4.00 3.00 MLP43-2622 +090 -19.06 -11.87 4.00 3.00 MLP44-2622 +091 -20.87 -8.06 4.00 3.00 MLP45-2622 +092 -4.02 -24.07 4.00 3.00 MLP51-2622 +093 -8.77 -23.79 4.00 3.00 MLP52-2622 +094 -12.92 -22.08 4.00 3.00 MLP53-2622 +095 -16.83 -19.50 4.00 3.00 MLP54-2622 +096 -20.23 -16.32 4.00 3.00 MLP55-2622 +097 -22.76 -11.97 4.00 3.00 MLP56-2622 +098 -24.58 -7.58 4.00 3.00 MLP57-2622 +099 -27.14 12.98 4.00 3.00 MLT11-2622 +100 -28.19 7.51 4.00 3.00 MLT12-2622 +101 -28.08 2.09 4.00 3.00 MLT13-2622 +102 -28.56 -5.98 4.00 3.00 MLT14-2622 +103 -26.96 -11.17 4.00 3.00 MLT15-2622 +104 -24.11 -16.46 4.00 3.00 MLT16-2622 +105 -27.30 17.85 4.00 3.00 MLT21-2622 +106 -31.47 10.04 4.00 3.00 MLT22-2622 +107 -31.85 3.70 4.00 3.00 MLT23-2622 +108 -32.08 -2.62 4.00 3.00 MLT24-2622 +109 -31.09 -9.80 4.00 3.00 MLT25-2622 +110 -28.71 -15.38 4.00 3.00 MLT26-2622 +111 -24.78 -20.78 4.00 3.00 MLT27-2622 +112 -28.61 21.64 4.00 3.00 MLT31-2622 +113 -32.09 15.32 4.00 3.00 MLT32-2622 +114 -35.40 5.79 4.00 3.00 MLT33-2622 +115 -35.85 -1.29 4.00 3.00 MLT34-2622 +116 -34.97 -7.76 4.00 3.00 MLT35-2622 +117 -32.89 -13.91 4.00 3.00 MLT36-2622 +118 -29.32 -20.20 4.00 3.00 MLT37-2622 +119 -33.87 18.93 4.00 3.00 MLT41-2622 +120 -36.68 11.37 4.00 3.00 MLT42-2622 +121 -38.92 2.11 4.00 3.00 MLT43-2622 +122 -38.70 -5.16 4.00 3.00 MLT44-2622 +123 -36.95 -12.13 4.00 3.00 MLT45-2622 +124 -33.72 -18.79 4.00 3.00 MLT46-2622 +125 -29.28 -25.28 4.00 3.00 
MLT47-2622 +126 -38.78 14.74 4.00 3.00 MLT51-2622 +127 -41.29 6.62 4.00 3.00 MLT52-2622 +128 -41.87 -1.80 4.00 3.00 MLT53-2622 +129 -40.62 -9.63 4.00 3.00 MLT54-2622 +130 -37.78 -16.89 4.00 3.00 MLT55-2622 +131 -33.73 -24.02 4.00 3.00 MLT56-2622 +132 -28.51 -29.92 4.00 3.00 MLT57-2622 +133 -0.24 10.97 4.00 3.00 MRC11-2622 +134 2.99 8.95 4.00 3.00 MRC12-2622 +135 6.57 7.62 4.00 3.00 MRC13-2622 +136 10.22 5.56 4.00 3.00 MRC14-2622 +137 13.27 3.22 4.00 3.00 MRC15-2622 +138 15.86 0.21 4.00 3.00 MRC16-2622 +139 18.32 -3.45 4.00 3.00 MRC17-2622 +140 3.53 5.28 4.00 3.00 MRC21-2622 +141 7.00 3.85 4.00 3.00 MRC22-2622 +142 10.06 1.68 4.00 3.00 MRC23-2622 +143 12.33 -1.20 4.00 3.00 MRC24-2622 +144 14.73 -4.52 4.00 3.00 MRC25-2622 +145 8.51 -1.76 4.00 3.00 MRC31-2622 +146 11.17 -5.14 4.00 3.00 MRC32-2622 +147 5.51 0.46 4.00 3.00 MRC41-2622 +148 7.56 -5.33 4.00 3.00 MRC42-2622 +149 -0.17 4.62 4.00 3.00 MRC51-2622 +150 1.93 1.46 4.00 3.00 MRC52-2622 +151 4.78 -3.16 4.00 3.00 MRC53-2622 +152 4.39 -6.98 4.00 3.00 MRC54-2622 +153 2.73 -10.10 4.00 3.00 MRC55-2622 +154 -0.07 -1.75 4.00 3.00 MRC61-2622 +155 1.58 -4.86 4.00 3.00 MRC62-2622 +156 -0.15 -8.08 4.00 3.00 MRC63-2622 +157 2.97 28.24 4.00 3.00 MRF11-2622 +158 8.25 27.25 4.00 3.00 MRF12-2622 +159 13.54 25.74 4.00 3.00 MRF13-2622 +160 18.74 24.12 4.00 3.00 MRF14-2622 +161 0.03 25.52 4.00 3.00 MRF21-2622 +162 4.63 24.85 4.00 3.00 MRF22-2622 +163 9.03 23.67 4.00 3.00 MRF23-2622 +164 13.78 21.87 4.00 3.00 MRF24-2622 +165 18.19 20.13 4.00 3.00 MRF25-2622 +166 2.05 22.22 4.00 3.00 MRF31-2622 +167 6.27 21.38 4.00 3.00 MRF32-2622 +168 10.63 19.79 4.00 3.00 MRF33-2622 +169 14.57 17.90 4.00 3.00 MRF34-2622 +170 18.54 15.70 4.00 3.00 MRF35-2622 +171 -0.22 19.42 4.00 3.00 MRF41-2622 +172 3.75 18.84 4.00 3.00 MRF42-2622 +173 7.86 17.57 4.00 3.00 MRF43-2622 +174 11.53 15.78 4.00 3.00 MRF44-2622 +175 15.55 13.76 4.00 3.00 MRF45-2622 +176 19.83 10.96 4.00 3.00 MRF46-2622 +177 1.08 16.23 4.00 3.00 MRF51-2622 +178 5.20 15.33 4.00 3.00 MRF52-2622 +179 8.81 13.68 4.00 3.00 MRF53-2622 +180 12.37 11.71 4.00 3.00 MRF54-2622 +181 16.53 9.44 4.00 3.00 MRF55-2622 +182 20.54 6.21 4.00 3.00 MRF56-2622 +183 2.82 12.87 4.00 3.00 MRF61-2622 +184 6.27 11.29 4.00 3.00 MRF62-2622 +185 9.66 9.43 4.00 3.00 MRF63-2622 +186 13.14 7.59 4.00 3.00 MRF64-2622 +187 16.52 5.22 4.00 3.00 MRF65-2622 +188 19.31 2.05 4.00 3.00 MRF66-2622 +189 21.91 -1.92 4.00 3.00 MRF67-2622 +190 3.46 -27.20 4.00 3.00 MRO11-2622 +191 8.35 -25.76 4.00 3.00 MRO12-2622 +192 12.92 -23.40 4.00 3.00 MRO13-2622 +193 17.02 -20.06 4.00 3.00 MRO14-2622 +194 1.43 -30.69 4.00 3.00 MRO21-2622 +195 6.66 -29.60 4.00 3.00 MRO22-2622 +196 12.02 -27.57 4.00 3.00 MRO23-2622 +197 16.88 -24.46 4.00 3.00 MRO24-2622 +198 4.55 -33.35 4.00 3.00 MRO31-2622 +199 10.46 -31.70 4.00 3.00 MRO32-2622 +200 16.07 -28.88 4.00 3.00 MRO33-2622 +201 21.16 -24.93 4.00 3.00 MRO34-2622 +202 1.88 -36.78 4.00 3.00 MRO41-2622 +203 8.37 -35.64 4.00 3.00 MRO42-2622 +204 14.63 -33.19 4.00 3.00 MRO43-2622 +205 20.45 -29.57 4.00 3.00 MRO44-2622 +206 5.57 -39.20 4.00 3.00 MRO51-2622 +207 12.57 -37.26 4.00 3.00 MRO52-2622 +208 19.11 -33.96 4.00 3.00 MRO53-2622 +209 1.20 -13.27 4.00 3.00 MRP11-2622 +210 6.34 -9.81 4.00 3.00 MRP12-2622 +211 0.06 -16.65 4.00 3.00 MRP21-2622 +212 4.94 -13.15 4.00 3.00 MRP22-2622 +213 9.72 -8.56 4.00 3.00 MRP23-2622 +214 2.03 -19.64 4.00 3.00 MRP31-2622 +215 4.72 -16.72 4.00 3.00 MRP32-2622 +216 8.28 -14.64 4.00 3.00 MRP33-2622 +217 11.32 -11.68 4.00 3.00 MRP34-2622 +218 13.30 -8.29 4.00 3.00 MRP35-2622 +219 6.32 -19.99 4.00 3.00 
MRP41-2622 +220 9.66 -17.86 4.00 3.00 MRP42-2622 +221 12.83 -15.29 4.00 3.00 MRP43-2622 +222 15.21 -11.53 4.00 3.00 MRP44-2622 +223 16.99 -7.64 4.00 3.00 MRP45-2622 +224 0.42 -24.03 4.00 3.00 MRP51-2622 +225 5.29 -23.71 4.00 3.00 MRP52-2622 +226 9.32 -21.86 4.00 3.00 MRP53-2622 +227 13.19 -19.21 4.00 3.00 MRP54-2622 +228 16.49 -15.99 4.00 3.00 MRP55-2622 +229 18.98 -11.54 4.00 3.00 MRP56-2622 +230 20.69 -7.11 4.00 3.00 MRP57-2622 +231 22.81 13.51 4.00 3.00 MRT11-2622 +232 23.97 8.09 4.00 3.00 MRT12-2622 +233 23.97 2.65 4.00 3.00 MRT13-2622 +234 24.63 -5.42 4.00 3.00 MRT14-2622 +235 23.16 -10.65 4.00 3.00 MRT15-2622 +236 20.37 -16.02 4.00 3.00 MRT16-2622 +237 22.88 18.38 4.00 3.00 MRT21-2622 +238 27.23 10.62 4.00 3.00 MRT22-2622 +239 27.73 4.35 4.00 3.00 MRT23-2622 +240 28.08 -1.95 4.00 3.00 MRT24-2622 +241 27.24 -9.21 4.00 3.00 MRT25-2622 +242 24.97 -14.84 4.00 3.00 MRT26-2622 +243 21.15 -20.30 4.00 3.00 MRT27-2622 +244 24.07 22.26 4.00 3.00 MRT31-2622 +245 27.72 15.94 4.00 3.00 MRT32-2622 +246 31.24 6.55 4.00 3.00 MRT33-2622 +247 31.84 -0.55 4.00 3.00 MRT34-2622 +248 31.09 -7.10 4.00 3.00 MRT35-2622 +249 29.13 -13.33 4.00 3.00 MRT36-2622 +250 25.63 -19.73 4.00 3.00 MRT37-2622 +251 29.40 19.66 4.00 3.00 MRT41-2622 +252 32.38 12.17 4.00 3.00 MRT42-2622 +253 34.86 2.97 4.00 3.00 MRT43-2622 +254 34.80 -4.39 4.00 3.00 MRT44-2622 +255 33.11 -11.36 4.00 3.00 MRT45-2622 +256 30.03 -18.16 4.00 3.00 MRT46-2622 +257 25.54 -24.88 4.00 3.00 MRT47-2622 +258 34.47 15.52 4.00 3.00 MRT51-2622 +259 37.12 7.54 4.00 3.00 MRT52-2622 +260 37.93 -0.94 4.00 3.00 MRT53-2622 +261 36.82 -8.89 4.00 3.00 MRT54-2622 +262 34.10 -16.25 4.00 3.00 MRT55-2622 +263 30.13 -23.45 4.00 3.00 MRT56-2622 +264 25.07 -29.43 4.00 3.00 MRT57-2622 +265 -2.13 7.84 4.00 3.00 MZC01-2622 +266 -2.05 1.38 4.00 3.00 MZC02-2622 +267 -1.99 -5.04 4.00 3.00 MZC03-2622 +268 -1.93 -11.44 4.00 3.00 MZC04-2622 +269 -2.33 28.50 4.00 3.00 MZF01-2622 +270 -2.28 22.54 4.00 3.00 MZF02-2622 +271 -2.20 14.52 4.00 3.00 MZF03-2622 +272 -1.77 -27.22 4.00 3.00 MZO01-2622 +273 -1.71 -34.04 4.00 3.00 MZO02-2622 +274 -1.66 -39.69 4.00 3.00 MZO03-2622 +275 -1.81 -21.05 4.00 3.00 MZP01-2622 diff --git a/python/libs/mne/channels/data/layouts/CTF151.lay b/python/libs/mne/channels/data/layouts/CTF151.lay new file mode 100644 index 0000000..c9d68f3 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/CTF151.lay @@ -0,0 +1,153 @@ +1 -0.440000 -4.000000 0.551100 0.351100 MLC11 +2 -1.200000 -4.130000 0.551100 0.351100 MLC12 +3 -2.220000 -4.270000 0.551100 0.351100 MLC13 +4 -2.820000 -4.710000 0.551100 0.351100 MLC14 +5 -3.340000 -5.230000 0.551100 0.351100 MLC15 +6 -0.820000 -4.550000 0.551100 0.351100 MLC21 +7 -1.620000 -4.570000 0.551100 0.351100 MLC22 +8 -2.160000 -4.970000 0.551100 0.351100 MLC23 +9 -2.640000 -5.370000 0.551100 0.351100 MLC24 +10 -1.270000 -5.050000 0.551100 0.351100 MLC31 +11 -1.780000 -5.450000 0.551100 0.351100 MLC32 +12 -1.300000 -5.930000 0.551100 0.351100 MLC33 +13 -0.440000 -5.050000 0.551100 0.351100 MLC41 +14 -0.820000 -5.530000 0.551100 0.351100 MLC42 +15 -0.400000 -6.010000 0.551100 0.351100 MLC43 +16 -1.170000 -2.010000 0.551100 0.351100 MLF11 +17 -2.260000 -2.230000 0.551100 0.351100 MLF12 +18 -0.490000 -2.300000 0.551100 0.351100 MLF21 +19 -1.540000 -2.470000 0.551100 0.351100 MLF22 +20 -2.540000 -2.750000 0.551100 0.351100 MLF23 +21 -1.000000 -2.750000 0.551100 0.351100 MLF31 +22 -1.950000 -2.980000 0.551100 0.351100 MLF32 +23 -2.780000 -3.300000 0.551100 0.351100 MLF33 +24 -3.440000 -3.770000 0.551100 0.351100 MLF34 +25 -0.450000 
-3.100000 0.551100 0.351100 MLF41 +26 -1.380000 -3.260000 0.551100 0.351100 MLF42 +27 -2.280000 -3.570000 0.551100 0.351100 MLF43 +28 -2.870000 -4.060000 0.551100 0.351100 MLF44 +29 -3.500000 -4.510000 0.551100 0.351100 MLF45 +30 -0.850000 -3.580000 0.551100 0.351100 MLF51 +31 -1.700000 -3.790000 0.551100 0.351100 MLF52 +32 -0.470000 -7.690000 0.551100 0.351100 MLO11 +33 -1.650000 -7.420000 0.551100 0.351100 MLO12 +34 -1.210000 -7.930000 0.551100 0.351100 MLO21 +35 -2.350000 -7.580000 0.551100 0.351100 MLO22 +36 -0.600000 -8.400000 0.551100 0.351100 MLO31 +37 -1.920000 -8.120000 0.551100 0.351100 MLO32 +38 -3.110000 -7.670000 0.551100 0.351100 MLO33 +39 -1.400000 -8.560000 0.551100 0.351100 MLO41 +40 -2.750000 -8.210000 0.551100 0.351100 MLO42 +41 -3.910000 -7.620000 0.551100 0.351100 MLO43 +42 -0.840000 -6.390000 0.551100 0.351100 MLP11 +43 -1.710000 -6.320000 0.551100 0.351100 MLP12 +44 -2.240000 -5.870000 0.551100 0.351100 MLP13 +45 -0.440000 -6.900000 0.551100 0.351100 MLP21 +46 -1.220000 -6.760000 0.551100 0.351100 MLP22 +47 -0.970000 -7.220000 0.551100 0.351100 MLP31 +48 -1.900000 -6.880000 0.551100 0.351100 MLP32 +49 -2.470000 -6.390000 0.551100 0.351100 MLP33 +50 -2.990000 -5.850000 0.551100 0.351100 MLP34 +51 -3.420000 -3.120000 0.551100 0.351100 MLT11 +52 -4.100000 -4.200000 0.551100 0.351100 MLT12 +53 -4.040000 -5.030000 0.551100 0.351100 MLT13 +54 -3.780000 -5.770000 0.551100 0.351100 MLT14 +55 -3.210000 -6.440000 0.551100 0.351100 MLT15 +56 -2.570000 -7.010000 0.551100 0.351100 MLT16 +57 -3.320000 -2.550000 0.551100 0.351100 MLT21 +58 -4.260000 -3.520000 0.551100 0.351100 MLT22 +59 -4.720000 -4.710000 0.551100 0.351100 MLT23 +60 -4.520000 -5.590000 0.551100 0.351100 MLT24 +61 -4.040000 -6.350000 0.551100 0.351100 MLT25 +62 -3.280000 -7.060000 0.551100 0.351100 MLT26 +63 -4.340000 -2.900000 0.551100 0.351100 MLT31 +64 -5.040000 -4.050000 0.551100 0.351100 MLT32 +65 -5.200000 -5.210000 0.551100 0.351100 MLT33 +66 -4.820000 -6.140000 0.551100 0.351100 MLT34 +67 -4.090000 -7.000000 0.551100 0.351100 MLT35 +68 -5.210000 -3.450000 0.551100 0.351100 MLT41 +69 -5.640000 -4.620000 0.551100 0.351100 MLT42 +70 -5.500000 -5.730000 0.551100 0.351100 MLT43 +71 -4.910000 -6.720000 0.551100 0.351100 MLT44 +72 0.410000 -4.000000 0.551100 0.351100 MRC11 +73 1.170000 -4.130000 0.551100 0.351100 MRC12 +74 2.200000 -4.270000 0.551100 0.351100 MRC13 +75 2.800000 -4.710000 0.551100 0.351100 MRC14 +76 3.320000 -5.230000 0.551100 0.351100 MRC15 +77 0.800000 -4.560000 0.551100 0.351100 MRC21 +78 1.600000 -4.570000 0.551100 0.351100 MRC22 +79 2.140000 -4.970000 0.551100 0.351100 MRC23 +80 2.620000 -5.370000 0.551100 0.351100 MRC24 +81 1.260000 -5.050000 0.551100 0.351100 MRC31 +82 1.760000 -5.450000 0.551100 0.351100 MRC32 +83 1.280000 -5.930000 0.551100 0.351100 MRC33 +84 0.420000 -5.050000 0.551100 0.351100 MRC41 +85 0.810000 -5.540000 0.551100 0.351100 MRC42 +86 0.380000 -6.010000 0.551100 0.351100 MRC43 +87 1.130000 -2.010000 0.551100 0.351100 MRF11 +88 2.240000 -2.230000 0.551100 0.351100 MRF12 +89 0.460000 -2.290000 0.551100 0.351100 MRF21 +90 1.510000 -2.470000 0.551100 0.351100 MRF22 +91 2.520000 -2.740000 0.551100 0.351100 MRF23 +92 0.970000 -2.740000 0.551100 0.351100 MRF31 +93 1.920000 -2.980000 0.551100 0.351100 MRF32 +94 2.760000 -3.300000 0.551100 0.351100 MRF33 +95 3.420000 -3.770000 0.551100 0.351100 MRF34 +96 0.420000 -3.100000 0.551100 0.351100 MRF41 +97 1.360000 -3.260000 0.551100 0.351100 MRF42 +98 2.260000 -3.570000 0.551100 0.351100 MRF43 +99 2.840000 -4.050000 0.551100 0.351100 
MRF44 +100 3.480000 -4.510000 0.551100 0.351100 MRF45 +101 0.820000 -3.580000 0.551100 0.351100 MRF51 +102 1.670000 -3.790000 0.551100 0.351100 MRF52 +103 0.470000 -7.690000 0.551100 0.351100 MRO11 +104 1.640000 -7.420000 0.551100 0.351100 MRO12 +105 1.200000 -7.930000 0.551100 0.351100 MRO21 +106 2.350000 -7.580000 0.551100 0.351100 MRO22 +107 0.580000 -8.390000 0.551100 0.351100 MRO31 +108 1.910000 -8.110000 0.551100 0.351100 MRO32 +109 3.110000 -7.670000 0.551100 0.351100 MRO33 +110 1.380000 -8.570000 0.551100 0.351100 MRO41 +111 2.750000 -8.220000 0.551100 0.351100 MRO42 +112 3.900000 -7.610000 0.551100 0.351100 MRO43 +113 0.820000 -6.380000 0.551100 0.351100 MRP11 +114 1.700000 -6.320000 0.551100 0.351100 MRP12 +115 2.220000 -5.870000 0.551100 0.351100 MRP13 +116 0.420000 -6.900000 0.551100 0.351100 MRP21 +117 1.200000 -6.750000 0.551100 0.351100 MRP22 +118 0.960000 -7.220000 0.551100 0.351100 MRP31 +119 1.880000 -6.870000 0.551100 0.351100 MRP32 +120 2.470000 -6.390000 0.551100 0.351100 MRP33 +121 2.990000 -5.850000 0.551100 0.351100 MRP34 +122 3.390000 -3.120000 0.551100 0.351100 MRT11 +123 4.070000 -4.190000 0.551100 0.351100 MRT12 +124 4.020000 -5.030000 0.551100 0.351100 MRT13 +125 3.760000 -5.770000 0.551100 0.351100 MRT14 +126 3.200000 -6.430000 0.551100 0.351100 MRT15 +127 2.570000 -7.010000 0.551100 0.351100 MRT16 +128 3.300000 -2.540000 0.551100 0.351100 MRT21 +129 4.230000 -3.510000 0.551100 0.351100 MRT22 +130 4.700000 -4.710000 0.551100 0.351100 MRT23 +131 4.500000 -5.590000 0.551100 0.351100 MRT24 +132 4.020000 -6.360000 0.551100 0.351100 MRT25 +133 3.260000 -7.060000 0.551100 0.351100 MRT26 +134 4.310000 -2.900000 0.551100 0.351100 MRT31 +135 5.020000 -4.050000 0.551100 0.351100 MRT32 +136 5.180000 -5.210000 0.551100 0.351100 MRT33 +137 4.800000 -6.140000 0.551100 0.351100 MRT34 +138 4.080000 -7.000000 0.551100 0.351100 MRT35 +139 5.200000 -3.450000 0.551100 0.351100 MRT41 +140 5.620000 -4.610000 0.551100 0.351100 MRT42 +141 5.480000 -5.730000 0.551100 0.351100 MRT43 +142 4.900000 -6.710000 0.551100 0.351100 MRT44 +143 0.000000 -4.510000 0.551100 0.351100 MZC01 +144 0.000000 -5.550000 0.551100 0.351100 MZC02 +145 0.000000 -1.930000 0.551100 0.351100 MZF01 +146 0.000000 -2.660000 0.551100 0.351100 MZF02 +147 0.000000 -3.510000 0.551100 0.351100 MZF03 +148 0.000000 -8.050000 0.551100 0.351100 MZO01 +149 0.000000 -8.660000 0.551100 0.351100 MZO02 +150 0.000000 -6.470000 0.551100 0.351100 MZP01 +151 0.000000 -7.290000 0.551100 0.351100 MZP02 +152 5.000000 -2.000000 0.551100 0.351100 SCALE +153 -5.50000 -1.500000 0.551100 0.351100 COMNT diff --git a/python/libs/mne/channels/data/layouts/CTF275.lay b/python/libs/mne/channels/data/layouts/CTF275.lay new file mode 100644 index 0000000..2af28d3 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/CTF275.lay @@ -0,0 +1,275 @@ +1 -0.029414 0.428191 0.100000 0.040000 MLC11 +2 -0.105398 0.378716 0.100000 0.040000 MLC12 +3 -0.187924 0.341472 0.100000 0.040000 MLC13 +4 -0.268071 0.285079 0.100000 0.040000 MLC14 +5 -0.330692 0.221374 0.100000 0.040000 MLC15 +6 -0.378697 0.144627 0.100000 0.040000 MLC16 +7 -0.411309 0.049716 0.100000 0.040000 MLC17 +8 -0.112105 0.295427 0.100000 0.040000 MLC21 +9 -0.189457 0.259287 0.100000 0.040000 MLC22 +10 -0.254180 0.203140 0.100000 0.040000 MLC23 +11 -0.298355 0.137997 0.100000 0.040000 MLC24 +12 -0.337649 0.050767 0.100000 0.040000 MLC25 +13 -0.213750 0.138862 0.100000 0.040000 MLC31 +14 -0.266243 0.056433 0.100000 0.040000 MLC32 +15 -0.150010 0.191395 0.100000 0.040000 MLC41 +16 -0.188739 
0.067511 0.100000 0.040000 MLC42 +17 -0.027405 0.285532 0.100000 0.040000 MLC51 +18 -0.072194 0.217381 0.100000 0.040000 MLC52 +19 -0.130467 0.119358 0.100000 0.040000 MLC53 +20 -0.119656 0.041473 0.100000 0.040000 MLC54 +21 -0.083927 -0.021961 0.100000 0.040000 MLC55 +22 -0.027810 0.155198 0.100000 0.040000 MLC61 +23 -0.062042 0.088583 0.100000 0.040000 MLC62 +24 -0.025587 0.023975 0.100000 0.040000 MLC63 +25 -0.154623 0.879985 0.100000 0.040000 MLF11 +26 -0.322264 0.823233 0.100000 0.040000 MLF12 +27 -0.478342 0.740223 0.100000 0.040000 MLF13 +28 -0.622338 0.633371 0.100000 0.040000 MLF14 +29 -0.052995 0.810917 0.100000 0.040000 MLF21 +30 -0.193258 0.778479 0.100000 0.040000 MLF22 +31 -0.319702 0.726613 0.100000 0.040000 MLF23 +32 -0.447065 0.639878 0.100000 0.040000 MLF24 +33 -0.551024 0.545805 0.100000 0.040000 MLF25 +34 -0.106993 0.717661 0.100000 0.040000 MLF31 +35 -0.227303 0.683510 0.100000 0.040000 MLF32 +36 -0.344973 0.613898 0.100000 0.040000 MLF33 +37 -0.437794 0.535071 0.100000 0.040000 MLF34 +38 -0.516944 0.440135 0.100000 0.040000 MLF35 +39 -0.037498 0.646457 0.100000 0.040000 MLF41 +40 -0.145663 0.629747 0.100000 0.040000 MLF42 +41 -0.257022 0.575998 0.100000 0.040000 MLF43 +42 -0.344741 0.511350 0.100000 0.040000 MLF44 +43 -0.434608 0.430669 0.100000 0.040000 MLF45 +44 -0.512928 0.325699 0.100000 0.040000 MLF46 +45 -0.065241 0.564676 0.100000 0.040000 MLF51 +46 -0.176866 0.530203 0.100000 0.040000 MLF52 +47 -0.264799 0.476609 0.100000 0.040000 MLF53 +48 -0.344149 0.409817 0.100000 0.040000 MLF54 +49 -0.432009 0.328939 0.100000 0.040000 MLF55 +50 -0.502082 0.225317 0.100000 0.040000 MLF56 +51 -0.108196 0.473300 0.100000 0.040000 MLF61 +52 -0.191454 0.428184 0.100000 0.040000 MLF62 +53 -0.268505 0.371569 0.100000 0.040000 MLF63 +54 -0.343162 0.314227 0.100000 0.040000 MLF64 +55 -0.415355 0.241209 0.100000 0.040000 MLF65 +56 -0.459435 0.157639 0.100000 0.040000 MLF66 +57 -0.484998 0.050963 0.100000 0.040000 MLF67 +58 -0.086701 -0.382545 0.100000 0.040000 MLO11 +59 -0.173621 -0.361571 0.100000 0.040000 MLO12 +60 -0.257557 -0.329066 0.100000 0.040000 MLO13 +61 -0.337129 -0.278810 0.100000 0.040000 MLO14 +62 -0.050176 -0.456757 0.100000 0.040000 MLO21 +63 -0.138937 -0.440153 0.100000 0.040000 MLO22 +64 -0.234625 -0.414329 0.100000 0.040000 MLO23 +65 -0.323700 -0.370345 0.100000 0.040000 MLO24 +66 -0.099528 -0.519048 0.100000 0.040000 MLO31 +67 -0.201576 -0.499713 0.100000 0.040000 MLO32 +68 -0.300736 -0.464088 0.100000 0.040000 MLO33 +69 -0.395767 -0.412426 0.100000 0.040000 MLO34 +70 -0.054171 -0.598130 0.100000 0.040000 MLO41 +71 -0.162924 -0.587463 0.100000 0.040000 MLO42 +72 -0.270457 -0.559057 0.100000 0.040000 MLO43 +73 -0.375045 -0.514503 0.100000 0.040000 MLO44 +74 -0.114841 -0.674066 0.100000 0.040000 MLO51 +75 -0.232779 -0.654920 0.100000 0.040000 MLO52 +76 -0.347032 -0.617457 0.100000 0.040000 MLO53 +77 -0.050706 -0.086860 0.100000 0.040000 MLP11 +78 -0.157880 -0.022819 0.100000 0.040000 MLP12 +79 -0.027384 -0.156541 0.100000 0.040000 MLP21 +80 -0.125969 -0.090281 0.100000 0.040000 MLP22 +81 -0.229468 -0.007021 0.100000 0.040000 MLP23 +82 -0.063851 -0.221282 0.100000 0.040000 MLP31 +83 -0.117483 -0.164444 0.100000 0.040000 MLP32 +84 -0.191075 -0.130343 0.100000 0.040000 MLP33 +85 -0.256310 -0.076997 0.100000 0.040000 MLP34 +86 -0.301408 -0.017428 0.100000 0.040000 MLP35 +87 -0.145628 -0.236552 0.100000 0.040000 MLP41 +88 -0.211609 -0.201084 0.100000 0.040000 MLP42 +89 -0.277557 -0.161143 0.100000 0.040000 MLP43 +90 -0.330491 -0.093163 0.100000 0.040000 MLP44 +91 
-0.372987 -0.024823 0.100000 0.040000 MLP45 +92 -0.032003 -0.311166 0.100000 0.040000 MLP51 +93 -0.120201 -0.309697 0.100000 0.040000 MLP52 +94 -0.197411 -0.282930 0.100000 0.040000 MLP53 +95 -0.273221 -0.242434 0.100000 0.040000 MLP54 +96 -0.341326 -0.192353 0.100000 0.040000 MLP55 +97 -0.397869 -0.117824 0.100000 0.040000 MLP56 +98 -0.439023 -0.040798 0.100000 0.040000 MLP57 +99 -0.600517 0.341742 0.100000 0.040000 MLT11 +100 -0.583854 0.221014 0.100000 0.040000 MLT12 +101 -0.546672 0.118228 0.100000 0.040000 MLT13 +102 -0.525679 -0.043954 0.100000 0.040000 MLT14 +103 -0.482366 -0.132402 0.100000 0.040000 MLT15 +104 -0.408785 -0.217740 0.100000 0.040000 MLT16 +105 -0.657080 0.441193 0.100000 0.040000 MLT21 +106 -0.681569 0.225254 0.100000 0.040000 MLT22 +107 -0.647357 0.101107 0.100000 0.040000 MLT23 +108 -0.618158 -0.017119 0.100000 0.040000 MLT24 +109 -0.570925 -0.147553 0.100000 0.040000 MLT25 +110 -0.505869 -0.237678 0.100000 0.040000 MLT26 +111 -0.406336 -0.310886 0.100000 0.040000 MLT27 +112 -0.758025 0.508412 0.100000 0.040000 MLT31 +113 -0.761740 0.316423 0.100000 0.040000 MLT32 +114 -0.751268 0.088675 0.100000 0.040000 MLT33 +115 -0.712573 -0.047448 0.100000 0.040000 MLT34 +116 -0.658112 -0.159355 0.100000 0.040000 MLT35 +117 -0.592395 -0.256839 0.100000 0.040000 MLT36 +118 -0.495312 -0.345113 0.100000 0.040000 MLT37 +119 -0.885393 0.353401 0.100000 0.040000 MLT41 +120 -0.847844 0.160648 0.100000 0.040000 MLT42 +121 -0.823787 -0.043736 0.100000 0.040000 MLT43 +122 -0.758805 -0.175411 0.100000 0.040000 MLT44 +123 -0.684634 -0.280647 0.100000 0.040000 MLT45 +124 -0.591783 -0.373867 0.100000 0.040000 MLT46 +125 -0.476572 -0.454666 0.100000 0.040000 MLT47 +126 -0.983285 0.161080 0.100000 0.040000 MLT51 +127 -0.944753 -0.028756 0.100000 0.040000 MLT52 +128 -0.872989 -0.188195 0.100000 0.040000 MLT53 +129 -0.785517 -0.310620 0.100000 0.040000 MLT54 +130 -0.688014 -0.407791 0.100000 0.040000 MLT55 +131 -0.571347 -0.497554 0.100000 0.040000 MLT56 +132 -0.457303 -0.565438 0.100000 0.040000 MLT57 +133 0.063389 0.426606 0.100000 0.040000 MRC11 +134 0.137902 0.375428 0.100000 0.040000 MRC12 +135 0.219516 0.336386 0.100000 0.040000 MRC13 +136 0.297688 0.277771 0.100000 0.040000 MRC14 +137 0.355955 0.213304 0.100000 0.040000 MRC15 +138 0.404150 0.135598 0.100000 0.040000 MRC16 +139 0.434870 0.040656 0.100000 0.040000 MRC17 +140 0.142678 0.292126 0.100000 0.040000 MRC21 +141 0.219470 0.254066 0.100000 0.040000 MRC22 +142 0.281922 0.196472 0.100000 0.040000 MRC23 +143 0.325059 0.128269 0.100000 0.040000 MRC24 +144 0.361805 0.044213 0.100000 0.040000 MRC25 +145 0.240157 0.132538 0.100000 0.040000 MRC31 +146 0.290750 0.048681 0.100000 0.040000 MRC32 +147 0.178346 0.187415 0.100000 0.040000 MRC41 +148 0.213493 0.062545 0.100000 0.040000 MRC42 +149 0.058440 0.284194 0.100000 0.040000 MRC51 +150 0.101359 0.215083 0.100000 0.040000 MRC52 +151 0.156968 0.115486 0.100000 0.040000 MRC53 +152 0.144211 0.038238 0.100000 0.040000 MRC54 +153 0.106635 -0.024115 0.100000 0.040000 MRC55 +154 0.055338 0.153928 0.100000 0.040000 MRC61 +155 0.088138 0.086634 0.100000 0.040000 MRC62 +156 0.049557 0.022680 0.100000 0.040000 MRC63 +157 0.197726 0.874477 0.100000 0.040000 MRF11 +158 0.364689 0.811426 0.100000 0.040000 MRF12 +159 0.518245 0.722181 0.100000 0.040000 MRF13 +160 0.658136 0.611411 0.100000 0.040000 MRF14 +161 0.095713 0.807816 0.100000 0.040000 MRF21 +162 0.233999 0.772267 0.100000 0.040000 MRF22 +163 0.358821 0.715911 0.100000 0.040000 MRF23 +164 0.484765 0.623142 0.100000 0.040000 MRF24 +165 0.585405 
0.526324 0.100000 0.040000 MRF25 +166 0.147633 0.713396 0.100000 0.040000 MRF31 +167 0.265823 0.676341 0.100000 0.040000 MRF32 +168 0.382256 0.601823 0.100000 0.040000 MRF33 +169 0.473850 0.521768 0.100000 0.040000 MRF34 +170 0.548726 0.424836 0.100000 0.040000 MRF35 +171 0.075451 0.644959 0.100000 0.040000 MRF41 +172 0.182924 0.624842 0.100000 0.040000 MRF42 +173 0.292900 0.568899 0.100000 0.040000 MRF43 +174 0.379529 0.501620 0.100000 0.040000 MRF44 +175 0.465778 0.418231 0.100000 0.040000 MRF45 +176 0.541913 0.311405 0.100000 0.040000 MRF46 +177 0.102375 0.561860 0.100000 0.040000 MRF51 +178 0.212879 0.524802 0.100000 0.040000 MRF52 +179 0.299077 0.468924 0.100000 0.040000 MRF53 +180 0.376186 0.400507 0.100000 0.040000 MRF54 +181 0.461150 0.316311 0.100000 0.040000 MRF55 +182 0.527532 0.213125 0.100000 0.040000 MRF56 +183 0.143360 0.469857 0.100000 0.040000 MRF61 +184 0.224730 0.422291 0.100000 0.040000 MRF62 +185 0.301012 0.364856 0.100000 0.040000 MRF63 +186 0.373056 0.305526 0.100000 0.040000 MRF64 +187 0.443172 0.230008 0.100000 0.040000 MRF65 +188 0.482916 0.144546 0.100000 0.040000 MRF66 +189 0.509363 0.039864 0.100000 0.040000 MRF67 +190 0.101312 -0.384464 0.100000 0.040000 MRO11 +191 0.188777 -0.365285 0.100000 0.040000 MRO12 +192 0.274286 -0.333994 0.100000 0.040000 MRO13 +193 0.354824 -0.285987 0.100000 0.040000 MRO14 +194 0.062633 -0.457476 0.100000 0.040000 MRO21 +195 0.152570 -0.440791 0.100000 0.040000 MRO22 +196 0.248565 -0.418432 0.100000 0.040000 MRO23 +197 0.338845 -0.376241 0.100000 0.040000 MRO24 +198 0.111160 -0.521375 0.100000 0.040000 MRO31 +199 0.212466 -0.502957 0.100000 0.040000 MRO32 +200 0.313063 -0.468465 0.100000 0.040000 MRO33 +201 0.409385 -0.418933 0.100000 0.040000 MRO34 +202 0.063270 -0.599845 0.100000 0.040000 MRO41 +203 0.172480 -0.589865 0.100000 0.040000 MRO42 +204 0.279919 -0.563495 0.100000 0.040000 MRO43 +205 0.386742 -0.520993 0.100000 0.040000 MRO44 +206 0.121969 -0.676100 0.100000 0.040000 MRO51 +207 0.240331 -0.658743 0.100000 0.040000 MRO52 +208 0.356156 -0.623026 0.100000 0.040000 MRO53 +209 0.071855 -0.088269 0.100000 0.040000 MRP11 +210 0.180874 -0.026656 0.100000 0.040000 MRP12 +211 0.047839 -0.157479 0.100000 0.040000 MRP21 +212 0.147221 -0.093053 0.100000 0.040000 MRP22 +213 0.252807 -0.012686 0.100000 0.040000 MRP23 +214 0.082012 -0.222790 0.100000 0.040000 MRP31 +215 0.136825 -0.166819 0.100000 0.040000 MRP32 +216 0.210796 -0.134697 0.100000 0.040000 MRP33 +217 0.277587 -0.083946 0.100000 0.040000 MRP34 +218 0.322867 -0.024718 0.100000 0.040000 MRP35 +219 0.162954 -0.240118 0.100000 0.040000 MRP41 +220 0.230510 -0.205793 0.100000 0.040000 MRP42 +221 0.296283 -0.169213 0.100000 0.040000 MRP43 +222 0.351532 -0.101316 0.100000 0.040000 MRP44 +223 0.395383 -0.032706 0.100000 0.040000 MRP45 +224 0.048690 -0.312307 0.100000 0.040000 MRP51 +225 0.137008 -0.312230 0.100000 0.040000 MRP52 +226 0.214275 -0.287336 0.100000 0.040000 MRP53 +227 0.290637 -0.248388 0.100000 0.040000 MRP54 +228 0.360555 -0.199475 0.100000 0.040000 MRP55 +229 0.419086 -0.126737 0.100000 0.040000 MRP56 +230 0.463976 -0.050387 0.100000 0.040000 MRP57 +231 0.628409 0.323946 0.100000 0.040000 MRT11 +232 0.609835 0.205866 0.100000 0.040000 MRT12 +233 0.571838 0.105198 0.100000 0.040000 MRT13 +234 0.544252 -0.054539 0.100000 0.040000 MRT14 +235 0.500732 -0.143104 0.100000 0.040000 MRT15 +236 0.427582 -0.225716 0.100000 0.040000 MRT16 +237 0.685440 0.421411 0.100000 0.040000 MRT21 +238 0.705800 0.208084 0.100000 0.040000 MRT22 +239 0.667392 0.088109 0.100000 0.040000 MRT23 
+240 0.637062 -0.030086 0.100000 0.040000 MRT24 +241 0.588417 -0.159092 0.100000 0.040000 MRT25 +242 0.522350 -0.247039 0.100000 0.040000 MRT26 +243 0.422093 -0.318167 0.100000 0.040000 MRT27 +244 0.789789 0.482334 0.100000 0.040000 MRT31 +245 0.786599 0.293212 0.100000 0.040000 MRT32 +246 0.770320 0.070984 0.100000 0.040000 MRT33 +247 0.731214 -0.061690 0.100000 0.040000 MRT34 +248 0.674802 -0.172109 0.100000 0.040000 MRT35 +249 0.607500 -0.268226 0.100000 0.040000 MRT36 +250 0.510484 -0.353209 0.100000 0.040000 MRT37 +251 0.910695 0.324672 0.100000 0.040000 MRT41 +252 0.867982 0.137317 0.100000 0.040000 MRT42 +253 0.839920 -0.060661 0.100000 0.040000 MRT43 +254 0.773256 -0.189639 0.100000 0.040000 MRT44 +255 0.698444 -0.293384 0.100000 0.040000 MRT45 +256 0.604482 -0.385347 0.100000 0.040000 MRT46 +257 0.489291 -0.462983 0.100000 0.040000 MRT47 +258 1.000000 0.135648 0.100000 0.040000 MRT51 +259 0.959092 -0.049055 0.100000 0.040000 MRT52 +260 0.886964 -0.204289 0.100000 0.040000 MRT53 +261 0.796842 -0.324881 0.100000 0.040000 MRT54 +262 0.698769 -0.420596 0.100000 0.040000 MRT55 +263 0.582500 -0.506810 0.100000 0.040000 MRT56 +264 0.467934 -0.572706 0.100000 0.040000 MRT57 +265 0.016063 0.355556 0.100000 0.040000 MZC01 +266 0.014747 0.217488 0.100000 0.040000 MZC02 +267 0.013199 0.087763 0.100000 0.040000 MZC03 +268 0.011197 -0.046263 0.100000 0.040000 MZC04 +269 0.022267 0.897778 0.100000 0.040000 MZF01 +270 0.019840 0.730557 0.100000 0.040000 MZF02 +271 0.017559 0.517279 0.100000 0.040000 MZF03 +272 0.007392 -0.378522 0.100000 0.040000 MZO01 +273 0.005634 -0.528155 0.100000 0.040000 MZO02 +274 0.003722 -0.675585 0.100000 0.040000 MZO03 +275 0.008864 -0.248776 0.100000 0.040000 MZP01 diff --git a/python/libs/mne/channels/data/layouts/EEG1005.lay b/python/libs/mne/channels/data/layouts/EEG1005.lay new file mode 100644 index 0000000..a600468 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/EEG1005.lay @@ -0,0 +1,337 @@ +1 -0.485328 1.493835 0.069221 0.051916 Fp1 +2 0.000000 1.570696 0.069221 0.051916 Fpz +3 0.485501 1.493884 0.069221 0.051916 Fp2 +4 -1.154207 1.588656 0.069221 0.051916 AF9 +5 -0.923319 1.270781 0.069221 0.051916 AF7 +6 -0.706117 1.226029 0.069221 0.051916 AF5 +7 -0.477022 1.197254 0.069221 0.051916 AF3 +8 -0.240008 1.182594 0.069221 0.051916 AF1 +9 0.000000 1.178022 0.069221 0.051916 AFz +10 0.240008 1.182594 0.069221 0.051916 AF2 +11 0.476904 1.197159 0.069221 0.051916 AF4 +12 0.706117 1.226029 0.069221 0.051916 AF6 +13 0.923319 1.270781 0.069221 0.051916 AF8 +14 1.154207 1.588656 0.069221 0.051916 AF10 +15 -1.588376 1.154294 0.069221 0.051916 F9 +16 -1.270781 0.923319 0.069221 0.051916 F7 +17 -0.968950 0.852434 0.069221 0.051916 F5 +18 -0.652084 0.812357 0.069221 0.051916 F3 +19 -0.327689 0.791876 0.069221 0.051916 F1 +20 0.000000 0.785398 0.069221 0.051916 Fz +21 0.327689 0.791876 0.069221 0.051916 F2 +22 0.652084 0.812357 0.069221 0.051916 F4 +23 0.968950 0.852434 0.069221 0.051916 F6 +24 1.270781 0.923319 0.069221 0.051916 F8 +25 1.588496 1.154168 0.069221 0.051916 F10 +26 -1.867677 0.606883 0.069221 0.051916 FT9 +27 -1.493930 0.485359 0.069221 0.051916 FT7 +28 -1.126134 0.436152 0.069221 0.051916 FC5 +29 -0.752811 0.409634 0.069221 0.051916 FC3 +30 -0.376942 0.396836 0.069221 0.051916 FC1 +31 0.000000 0.392844 0.069221 0.051916 FCz +32 0.376942 0.396836 0.069221 0.051916 FC2 +33 0.752811 0.409634 0.069221 0.051916 FC4 +34 1.126134 0.436152 0.069221 0.051916 FC6 +35 1.493930 0.485359 0.069221 0.051916 FT8 +36 1.867677 0.606883 0.069221 0.051916 FT10 +37 
-1.963487 -0.000213 0.069221 0.051916 T9 +38 -1.570796 0.000000 0.069221 0.051916 T7 +39 -1.178106 0.000128 0.069221 0.051916 C5 +40 -0.785398 0.000111 0.069221 0.051916 C3 +41 -0.392736 0.000205 0.069221 0.051916 C1 +42 0.000000 0.000200 0.069221 0.051916 Cz +43 0.392736 0.000103 0.069221 0.051916 C2 +44 0.785398 0.000111 0.069221 0.051916 C4 +45 1.178106 0.000128 0.069221 0.051916 C6 +46 1.570796 -0.000000 0.069221 0.051916 T8 +47 1.963487 -0.000000 0.069221 0.051916 T10 +48 -1.867677 -0.606883 0.069221 0.051916 TP9 +49 -1.494026 -0.485389 0.069221 0.051916 TP7 +50 -1.126048 -0.435839 0.069221 0.051916 CP5 +51 -0.752775 -0.409460 0.069221 0.051916 CP3 +52 -0.376804 -0.396486 0.069221 0.051916 CP1 +53 -0.000000 -0.392551 0.069221 0.051916 CPz +54 0.376804 -0.396486 0.069221 0.051916 CP2 +55 0.752795 -0.409357 0.069221 0.051916 CP4 +56 1.126048 -0.435839 0.069221 0.051916 CP6 +57 1.494026 -0.485389 0.069221 0.051916 TP8 +58 1.867603 -0.607072 0.069221 0.051916 TP10 +59 -1.588496 -1.154168 0.069221 0.051916 P9 +60 -1.270862 -0.923378 0.069221 0.051916 P7 +61 -0.969077 -0.852293 0.069221 0.051916 P5 +62 -0.652231 -0.811998 0.069221 0.051916 P3 +63 -0.327776 -0.791360 0.069221 0.051916 P1 +64 -0.000000 -0.785257 0.069221 0.051916 Pz +65 0.327776 -0.791360 0.069221 0.051916 P2 +66 0.652231 -0.811998 0.069221 0.051916 P4 +67 0.969077 -0.852293 0.069221 0.051916 P6 +68 1.270862 -0.923378 0.069221 0.051916 P8 +69 1.588496 -1.154168 0.069221 0.051916 P10 +70 -1.154207 -1.588656 0.069221 0.051916 PO9 +71 -0.923319 -1.270781 0.069221 0.051916 PO7 +72 -0.706303 -1.225606 0.069221 0.051916 PO5 +73 -0.476710 -1.197888 0.069221 0.051916 PO3 +74 -0.240097 -1.182523 0.069221 0.051916 PO1 +75 -0.000000 -1.178022 0.069221 0.051916 POz +76 0.240223 -1.182505 0.069221 0.051916 PO2 +77 0.476710 -1.197888 0.069221 0.051916 PO4 +78 0.706303 -1.225606 0.069221 0.051916 PO6 +79 0.923319 -1.270781 0.069221 0.051916 PO8 +80 1.154207 -1.588656 0.069221 0.051916 PO10 +81 -0.485359 -1.493930 0.069221 0.051916 O1 +82 -0.000000 -1.570796 0.069221 0.051916 Oz +83 0.485359 -1.493930 0.069221 0.051916 O2 +84 -0.606613 -1.867239 0.069221 0.051916 I1 +85 -0.000000 -1.963478 0.069221 0.051916 Iz +86 0.606613 -1.867239 0.069221 0.051916 I2 +87 -0.802226 1.574520 0.069221 0.051916 AFp9h +88 -0.626475 1.393612 0.069221 0.051916 AFp7h +89 -0.451133 1.382849 0.069221 0.051916 AFp5h +90 -0.271959 1.376738 0.069221 0.051916 AFp3h +91 -0.090887 1.374548 0.069221 0.051916 AFp1h +92 0.090887 1.374548 0.069221 0.051916 AFp2h +93 0.271959 1.376738 0.069221 0.051916 AFp4h +94 0.451133 1.382849 0.069221 0.051916 AFp6h +95 0.626475 1.393612 0.069221 0.051916 AFp8h +96 0.802226 1.574520 0.069221 0.051916 AFp10h +97 -1.249550 1.249550 0.069221 0.051916 AFF9h +98 -0.982948 1.075122 0.069221 0.051916 AFF7h +99 -0.713694 1.024626 0.069221 0.051916 AFF5h +100 -0.432315 0.996167 0.069221 0.051916 AFF3h +101 -0.144727 0.983315 0.069221 0.051916 AFF1h +102 0.144727 0.983315 0.069221 0.051916 AFF2h +103 0.432315 0.996167 0.069221 0.051916 AFF4h +104 0.713694 1.024626 0.069221 0.051916 AFF6h +105 0.982881 1.075049 0.069221 0.051916 AFF8h +106 1.249550 1.249550 0.069221 0.051916 AFF10h +107 -1.574645 0.802293 0.069221 0.051916 FFT9h +108 -1.232019 0.675885 0.069221 0.051916 FFT7h +109 -0.886990 0.627578 0.069221 0.051916 FFC5h +110 -0.534535 0.601827 0.069221 0.051916 FFC3h +111 -0.178478 0.590622 0.069221 0.051916 FFC1h +112 0.178478 0.590622 0.069221 0.051916 FFC2h +113 0.534535 0.601827 0.069221 0.051916 FFC4h +114 0.886990 0.627578 0.069221 0.051916 
FFC6h
[... remainder of the preceding 10-5 EEG layout file elided: rows +115 through +335 (intermediate electrode positions FFC6h ... OI2, all with box size 0.069221 x 0.051916), ending with the +336 COMNT and +337 SCALE marker rows ...]
diff --git a/python/libs/mne/channels/data/layouts/EGI256.lout b/python/libs/mne/channels/data/layouts/EGI256.lout new
file mode 100644 index 0000000..bc9076a --- /dev/null +++ b/python/libs/mne/channels/data/layouts/EGI256.lout @@ -0,0 +1,259 @@ +-42.19 43.52 -41.70 28.71
[... 258 sensor rows elided, one per channel of the form "+nnn x y 0.023840595 0.024283894 EEG nnn" for EEG 001 through EEG 258 ...]
diff --git a/python/libs/mne/channels/data/layouts/KIT-125.lout b/python/libs/mne/channels/data/layouts/KIT-125.lout new file mode 100644 index 0000000..5b1e987 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-125.lout @@ -0,0 +1,126 @@ + -0.50 0.50 -0.50 0.50
[... 125 sensor rows elided, of the form "+nnn x y 0.04 0.03 MEG nnn" for MEG 001 through MEG 125 ...]
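All of the .lout layouts vendored in this patch share one plain-text shape: a first line giving the plot bounding box (xmin xmax ymin ymax), then one whitespace-delimited row per sensor of the form "index x y width height name", where the name itself contains a space ("EEG 001", "MEG 0113", or the COMNT/SCALE markers some layouts append). As a sketch of that format only -- not MNE's own loader -- a reader fits in a few lines of Python:

    from pathlib import Path

    def read_lout(path):
        """Parse an MNE-style .lout file into (bbox, boxes, names).

        Sketch based on the rows visible in this diff: the first line is
        the bounding box, every other line is "index x y width height name".
        """
        lines = [ln.strip() for ln in Path(path).read_text().splitlines() if ln.strip()]
        bbox = tuple(map(float, lines[0].split()))  # (xmin, xmax, ymin, ymax)
        boxes, names = [], []
        for row in lines[1:]:
            parts = row.split()
            x, y, w, h = map(float, parts[1:5])     # parts[0] is the row index
            name = ' '.join(parts[5:])              # e.g. 'EEG 001'
            if name in ('COMNT', 'SCALE'):          # layout markers, not sensors
                continue
            boxes.append((x, y, w, h))
            names.append(name)
        return bbox, boxes, names

    bbox, boxes, names = read_lout('python/libs/mne/channels/data/layouts/EGI256.lout')
    print(len(names))  # 258 sensor boxes inside the -42.19..43.52 x -41.70..28.71 box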
diff --git a/python/libs/mne/channels/data/layouts/KIT-157.lout b/python/libs/mne/channels/data/layouts/KIT-157.lout new file mode 100644 index 0000000..2cf5637 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-157.lout @@ -0,0 +1,158 @@ +-42.19 43.52 -41.7 28.71
[... 157 sensor rows elided, of the form "+nnn x y 4.00 3.00 MEG nnn" for MEG 001 through MEG 157 ...]
diff --git a/python/libs/mne/channels/data/layouts/KIT-160.lay b/python/libs/mne/channels/data/layouts/KIT-160.lay new file mode 100644 index 0000000..1f5780f --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-160.lay @@ -0,0 +1,162 @@
[... 160 sensor rows elided for MEG 001 through MEG 160, plus the trailing +161 COMNT and +162 SCALE rows; note this .lay variant carries no bounding-box header line ...]
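KIT-160 uses the .lay variant of the same format: the bounding-box line is absent and the list ends with dummy COMNT and SCALE rows that plotting code can use for a comment box and a scale bar. Assuming the MNE snapshot vendored in this PR keeps the public layout API (an assumption -- mne.channels.read_layout has historically resolved bare layout names against the bundled mne/channels/data/layouts directory, trying .lout then .lay), these files become loadable by name:

    # Hedged sketch: relies on the vendored MNE keeping mne.channels.read_layout
    # and its name-resolution behavior described above.
    from mne.channels import read_layout

    layout = read_layout('KIT-160')   # picks up the KIT-160.lay added above
    print(layout.names[:3])           # e.g. ['MEG 001', 'MEG 002', 'MEG 003']
    layout.plot()                     # quick visual sanity check of the boxes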
diff --git a/python/libs/mne/channels/data/layouts/KIT-AD.lout b/python/libs/mne/channels/data/layouts/KIT-AD.lout new file mode 100644 index 0000000..e06356a --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-AD.lout @@ -0,0 +1,209 @@ + 0.00 1.00 0.00 1.00
[... 208 sensor rows elided, of the form "+nnn x y 0.02 0.04 MEG nnn" for MEG 001 through MEG 208 ...]
diff --git a/python/libs/mne/channels/data/layouts/KIT-AS-2008.lout b/python/libs/mne/channels/data/layouts/KIT-AS-2008.lout new file mode 100644 index 0000000..84b135e --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-AS-2008.lout @@ -0,0 +1,158 @@ + 0.00 0.00 0.04 0.02
[... 157 sensor rows elided (indices 000-156, names MEG 001 through MEG 157, box size 0.10 x 0.05) ...]
diff --git a/python/libs/mne/channels/data/layouts/KIT-UMD-3.lout b/python/libs/mne/channels/data/layouts/KIT-UMD-3.lout new file mode 100644 index 0000000..72cd69f --- /dev/null +++ b/python/libs/mne/channels/data/layouts/KIT-UMD-3.lout @@ -0,0 +1,158 @@ + -25.00 28.00 -21.35 23.75
[... 157 sensor rows elided (indices 000-156, names MEG 001 through MEG 157, box size 3.20 x 2.40) ...]
diff --git a/python/libs/mne/channels/data/layouts/Neuromag_122.lout b/python/libs/mne/channels/data/layouts/Neuromag_122.lout new file mode 100644 index 0000000..c97746a --- /dev/null +++ b/python/libs/mne/channels/data/layouts/Neuromag_122.lout @@ -0,0 +1,123 @@ +-3 28 -17 15
[... 122 sensor rows elided, of the form "+nnn x y 2 1.5 MEG nnn" for MEG 001 through MEG 122 ...]
diff --git a/python/libs/mne/channels/data/layouts/Vectorview-all.lout b/python/libs/mne/channels/data/layouts/Vectorview-all.lout new file mode 100644 index 0000000..b6395fb --- /dev/null +++ b/python/libs/mne/channels/data/layouts/Vectorview-all.lout @@ -0,0 +1,307 @@ +-85.000000 90.000000 -83.000000 75.000000
[... sensor rows elided, of the form "+nnn x y 6.000000 5.000000 MEG 0nnn"; the hunk adds 306 such rows grouped in per-site triplets, and the excerpt shown here runs from MEG 0113 through MEG 0533 ...]
+531 -8.616095 51.808380 6.000000 5.000000 MEG 0531 +542 -27.240477 43.863430 6.000000 5.000000 MEG 0542 +543 -27.240477 48.863430 6.000000 5.000000 MEG 0543 +541 -21.240477 46.363430 6.000000 5.000000 MEG 0541 +613 -14.782405 38.147827 6.000000 5.000000 MEG 0613 +612 -14.782405 43.147827 6.000000 5.000000 MEG 0612 +611 -8.782405 40.647827 6.000000 5.000000 MEG 0611 +622 -2.967276 27.260933 6.000000 5.000000 MEG 0622 +623 -2.967276 32.260933 6.000000 5.000000 MEG 0623 +621 3.032724 29.760933 6.000000 5.000000 MEG 0621 +633 -9.094766 14.700909 6.000000 5.000000 MEG 0633 +632 -9.094766 19.700909 6.000000 5.000000 MEG 0632 +631 -3.094766 17.200909 6.000000 5.000000 MEG 0631 +642 -15.199021 26.631405 6.000000 5.000000 MEG 0642 +643 -15.199021 31.631405 6.000000 5.000000 MEG 0643 +641 -9.199021 29.131405 6.000000 5.000000 MEG 0641 +713 -9.246834 1.693846 6.000000 5.000000 MEG 0713 +712 -9.246834 6.693846 6.000000 5.000000 MEG 0712 +711 -3.246834 4.193846 6.000000 5.000000 MEG 0711 +723 3.314525 1.573887 6.000000 5.000000 MEG 0723 +722 3.314525 6.573887 6.000000 5.000000 MEG 0722 +721 9.314525 4.073887 6.000000 5.000000 MEG 0721 +733 3.387173 -10.588106 6.000000 5.000000 MEG 0733 +732 3.387173 -5.588106 6.000000 5.000000 MEG 0732 +731 9.387173 -8.088106 6.000000 5.000000 MEG 0731 +743 -9.422897 -10.519942 6.000000 5.000000 MEG 0743 +742 -9.422897 -5.519942 6.000000 5.000000 MEG 0742 +741 -3.422897 -8.019942 6.000000 5.000000 MEG 0741 +813 -2.962408 61.007698 6.000000 5.000000 MEG 0813 +812 -2.962408 66.007698 6.000000 5.000000 MEG 0812 +811 3.037592 63.507698 6.000000 5.000000 MEG 0811 +822 -2.965545 50.641838 6.000000 5.000000 MEG 0822 +823 -2.965545 55.641838 6.000000 5.000000 MEG 0823 +821 3.034455 53.141838 6.000000 5.000000 MEG 0821 +913 9.504830 59.655254 6.000000 5.000000 MEG 0913 +912 9.504830 64.655254 6.000000 5.000000 MEG 0912 +911 15.504830 62.155254 6.000000 5.000000 MEG 0911 +923 21.967310 55.408710 6.000000 5.000000 MEG 0923 +922 21.967310 60.408710 6.000000 5.000000 MEG 0922 +921 27.967310 57.908710 6.000000 5.000000 MEG 0921 +932 21.254196 43.889683 6.000000 5.000000 MEG 0932 +933 21.254196 48.889683 6.000000 5.000000 MEG 0933 +931 27.254196 46.389683 6.000000 5.000000 MEG 0931 +942 8.661931 49.358044 6.000000 5.000000 MEG 0942 +943 8.661931 54.358044 6.000000 5.000000 MEG 0943 +941 14.661931 51.858044 6.000000 5.000000 MEG 0941 +1013 -2.967087 39.669956 6.000000 5.000000 MEG 1013 +1012 -2.967087 44.669956 6.000000 5.000000 MEG 1012 +1011 3.032913 42.169956 6.000000 5.000000 MEG 1011 +1023 8.751018 38.154079 6.000000 5.000000 MEG 1023 +1022 8.751018 43.154079 6.000000 5.000000 MEG 1022 +1021 14.751018 40.654079 6.000000 5.000000 MEG 1021 +1032 9.123913 26.648697 6.000000 5.000000 MEG 1032 +1033 9.123913 31.648697 6.000000 5.000000 MEG 1033 +1031 15.123913 29.148697 6.000000 5.000000 MEG 1031 +1043 3.200539 14.795620 6.000000 5.000000 MEG 1043 +1042 3.200539 19.795620 6.000000 5.000000 MEG 1042 +1041 9.200539 17.295620 6.000000 5.000000 MEG 1041 +1112 15.014965 13.912239 6.000000 5.000000 MEG 1112 +1113 15.014965 18.912239 6.000000 5.000000 MEG 1113 +1111 21.014965 16.412239 6.000000 5.000000 MEG 1111 +1123 26.958527 15.562130 6.000000 5.000000 MEG 1123 +1122 26.958527 20.562130 6.000000 5.000000 MEG 1122 +1121 32.958527 18.062130 6.000000 5.000000 MEG 1121 +1133 28.757563 0.227141 6.000000 5.000000 MEG 1133 +1132 28.757563 5.227141 6.000000 5.000000 MEG 1132 +1131 34.757563 2.727141 6.000000 5.000000 MEG 1131 +1142 15.882982 0.037700 6.000000 5.000000 MEG 1142 +1143 15.882982 
5.037700 6.000000 5.000000 MEG 1143 +1141 21.882982 2.537700 6.000000 5.000000 MEG 1141 +1213 33.958897 47.388790 6.000000 5.000000 MEG 1213 +1212 33.958897 52.388790 6.000000 5.000000 MEG 1212 +1211 39.958897 49.888790 6.000000 5.000000 MEG 1211 +1223 43.923473 33.914738 6.000000 5.000000 MEG 1223 +1222 43.923473 38.914738 6.000000 5.000000 MEG 1222 +1221 49.923473 36.414738 6.000000 5.000000 MEG 1221 +1232 32.014336 32.768585 6.000000 5.000000 MEG 1232 +1233 32.014336 37.768585 6.000000 5.000000 MEG 1233 +1231 38.014336 35.268585 6.000000 5.000000 MEG 1231 +1243 21.600079 28.898149 6.000000 5.000000 MEG 1243 +1242 21.600079 33.898149 6.000000 5.000000 MEG 1242 +1241 27.600079 31.398149 6.000000 5.000000 MEG 1241 +1312 38.599728 17.543867 6.000000 5.000000 MEG 1312 +1313 38.599728 22.543867 6.000000 5.000000 MEG 1313 +1311 44.599728 20.043867 6.000000 5.000000 MEG 1311 +1323 50.558392 16.887651 6.000000 5.000000 MEG 1323 +1322 50.558392 21.887651 6.000000 5.000000 MEG 1322 +1321 56.558392 19.387651 6.000000 5.000000 MEG 1321 +1333 53.420483 -2.919475 6.000000 5.000000 MEG 1333 +1332 53.420483 2.080525 6.000000 5.000000 MEG 1332 +1331 59.420483 -0.419475 6.000000 5.000000 MEG 1331 +1342 41.371586 -0.216817 6.000000 5.000000 MEG 1342 +1343 41.371586 4.783183 6.000000 5.000000 MEG 1343 +1341 47.371586 2.283183 6.000000 5.000000 MEG 1341 +1412 53.704369 38.563030 6.000000 5.000000 MEG 1412 +1413 53.704369 43.563030 6.000000 5.000000 MEG 1413 +1411 59.704369 41.063030 6.000000 5.000000 MEG 1411 +1423 67.119286 33.843739 6.000000 5.000000 MEG 1423 +1422 67.119286 38.843739 6.000000 5.000000 MEG 1422 +1421 73.119286 36.343739 6.000000 5.000000 MEG 1421 +1433 74.438919 8.335863 6.000000 5.000000 MEG 1433 +1432 74.438919 13.335863 6.000000 5.000000 MEG 1432 +1431 80.438919 10.835863 6.000000 5.000000 MEG 1431 +1442 61.883209 18.562304 6.000000 5.000000 MEG 1442 +1443 61.883209 23.562304 6.000000 5.000000 MEG 1443 +1441 67.883209 21.062304 6.000000 5.000000 MEG 1441 +1512 -71.298943 -4.707253 6.000000 5.000000 MEG 1512 +1513 -71.298943 0.292747 6.000000 5.000000 MEG 1513 +1511 -65.298943 -2.207253 6.000000 5.000000 MEG 1511 +1522 -67.281609 -25.407852 6.000000 5.000000 MEG 1522 +1523 -67.281609 -20.407852 6.000000 5.000000 MEG 1523 +1521 -61.281609 -22.907852 6.000000 5.000000 MEG 1521 +1533 -71.702820 -40.152336 6.000000 5.000000 MEG 1533 +1532 -71.702820 -35.152336 6.000000 5.000000 MEG 1532 +1531 -65.702820 -37.652336 6.000000 5.000000 MEG 1531 +1543 -79.907913 -17.418098 6.000000 5.000000 MEG 1543 +1542 -79.907913 -12.418098 6.000000 5.000000 MEG 1542 +1541 -73.907913 -14.918098 6.000000 5.000000 MEG 1541 +1613 -56.916454 -20.312164 6.000000 5.000000 MEG 1613 +1612 -56.916454 -15.312164 6.000000 5.000000 MEG 1612 +1611 -50.916454 -17.812164 6.000000 5.000000 MEG 1611 +1622 -45.631779 -16.320436 6.000000 5.000000 MEG 1622 +1623 -45.631779 -11.320436 6.000000 5.000000 MEG 1623 +1621 -39.631779 -13.820436 6.000000 5.000000 MEG 1621 +1632 -37.896103 -30.578358 6.000000 5.000000 MEG 1632 +1633 -37.896103 -25.578358 6.000000 5.000000 MEG 1633 +1631 -31.896103 -28.078358 6.000000 5.000000 MEG 1631 +1643 -48.859089 -36.176094 6.000000 5.000000 MEG 1643 +1642 -48.859089 -31.176094 6.000000 5.000000 MEG 1642 +1641 -42.859089 -33.676094 6.000000 5.000000 MEG 1641 +1713 -56.796040 -59.082275 6.000000 5.000000 MEG 1713 +1712 -56.796040 -54.082275 6.000000 5.000000 MEG 1712 +1711 -50.796040 -56.582275 6.000000 5.000000 MEG 1711 +1722 -57.188797 -44.057373 6.000000 5.000000 MEG 1722 +1723 -57.188797 
-39.057373 6.000000 5.000000 MEG 1723 +1721 -51.188797 -41.557373 6.000000 5.000000 MEG 1721 +1732 -41.902962 -58.279526 6.000000 5.000000 MEG 1732 +1733 -41.902962 -53.279526 6.000000 5.000000 MEG 1733 +1731 -35.902962 -55.779526 6.000000 5.000000 MEG 1731 +1743 -37.408134 -72.449036 6.000000 5.000000 MEG 1743 +1742 -37.408134 -67.449036 6.000000 5.000000 MEG 1742 +1741 -31.408134 -69.949036 6.000000 5.000000 MEG 1741 +1813 -33.801163 -13.768716 6.000000 5.000000 MEG 1813 +1812 -33.801163 -8.768716 6.000000 5.000000 MEG 1812 +1811 -27.801163 -11.268716 6.000000 5.000000 MEG 1811 +1822 -21.685101 -12.619589 6.000000 5.000000 MEG 1822 +1823 -21.685101 -7.619589 6.000000 5.000000 MEG 1823 +1821 -15.685101 -10.119589 6.000000 5.000000 MEG 1821 +1832 -9.600111 -22.190945 6.000000 5.000000 MEG 1832 +1833 -9.600111 -17.190945 6.000000 5.000000 MEG 1833 +1831 -3.600111 -19.690945 6.000000 5.000000 MEG 1831 +1843 -24.483526 -26.850609 6.000000 5.000000 MEG 1843 +1842 -24.483526 -21.850609 6.000000 5.000000 MEG 1842 +1841 -18.483526 -24.350609 6.000000 5.000000 MEG 1841 +1912 -25.866816 -40.850040 6.000000 5.000000 MEG 1912 +1913 -25.866816 -35.850040 6.000000 5.000000 MEG 1913 +1911 -19.866816 -38.350040 6.000000 5.000000 MEG 1911 +1923 -20.513481 -56.355225 6.000000 5.000000 MEG 1923 +1922 -20.513481 -51.355225 6.000000 5.000000 MEG 1922 +1921 -14.513481 -53.855225 6.000000 5.000000 MEG 1921 +1932 -23.428471 -67.375893 6.000000 5.000000 MEG 1932 +1933 -23.428471 -62.375893 6.000000 5.000000 MEG 1933 +1931 -17.428471 -64.875893 6.000000 5.000000 MEG 1931 +1943 -36.237587 -48.444530 6.000000 5.000000 MEG 1943 +1942 -36.237587 -43.444530 6.000000 5.000000 MEG 1942 +1941 -30.237587 -45.944530 6.000000 5.000000 MEG 1941 +2013 -10.441930 -34.308243 6.000000 5.000000 MEG 2013 +2012 -10.441930 -29.308243 6.000000 5.000000 MEG 2012 +2011 -4.441930 -31.808243 6.000000 5.000000 MEG 2011 +2023 4.357624 -34.289736 6.000000 5.000000 MEG 2023 +2022 4.357624 -29.289736 6.000000 5.000000 MEG 2022 +2021 10.357624 -31.789736 6.000000 5.000000 MEG 2021 +2032 4.645295 -46.290749 6.000000 5.000000 MEG 2032 +2033 4.645295 -41.290749 6.000000 5.000000 MEG 2033 +2031 10.645295 -43.790749 6.000000 5.000000 MEG 2031 +2042 -10.645079 -46.244335 6.000000 5.000000 MEG 2042 +2043 -10.645079 -41.244335 6.000000 5.000000 MEG 2043 +2041 -4.645079 -43.744335 6.000000 5.000000 MEG 2041 +2113 -3.052351 -58.889515 6.000000 5.000000 MEG 2113 +2112 -3.052351 -53.889515 6.000000 5.000000 MEG 2112 +2111 2.947649 -56.389515 6.000000 5.000000 MEG 2111 +2122 -2.999999 -70.362061 6.000000 5.000000 MEG 2122 +2123 -2.999999 -65.362061 6.000000 5.000000 MEG 2123 +2121 3.000001 -67.862061 6.000000 5.000000 MEG 2121 +2133 8.918572 -79.441826 6.000000 5.000000 MEG 2133 +2132 8.918572 -74.441826 6.000000 5.000000 MEG 2132 +2131 14.918572 -76.941826 6.000000 5.000000 MEG 2131 +2143 -14.987089 -79.428932 6.000000 5.000000 MEG 2143 +2142 -14.987089 -74.428932 6.000000 5.000000 MEG 2142 +2141 -8.987089 -76.928932 6.000000 5.000000 MEG 2141 +2212 15.641460 -12.579389 6.000000 5.000000 MEG 2212 +2213 15.641460 -7.579389 6.000000 5.000000 MEG 2213 +2211 21.641460 -10.079389 6.000000 5.000000 MEG 2211 +2223 27.786499 -13.669980 6.000000 5.000000 MEG 2223 +2222 27.786499 -8.669980 6.000000 5.000000 MEG 2222 +2221 33.786499 -11.169980 6.000000 5.000000 MEG 2221 +2233 18.501518 -26.949615 6.000000 5.000000 MEG 2233 +2232 18.501518 -21.949615 6.000000 5.000000 MEG 2232 +2231 24.501518 -24.449615 6.000000 5.000000 MEG 2231 +2242 3.641699 -22.206125 6.000000 
5.000000 MEG 2242 +2243 3.641699 -17.206125 6.000000 5.000000 MEG 2243 +2241 9.641699 -19.706125 6.000000 5.000000 MEG 2241 +2312 19.852789 -40.871220 6.000000 5.000000 MEG 2312 +2313 19.852789 -35.871220 6.000000 5.000000 MEG 2313 +2311 25.852789 -38.371220 6.000000 5.000000 MEG 2311 +2323 30.078903 -48.474960 6.000000 5.000000 MEG 2323 +2322 30.078903 -43.474960 6.000000 5.000000 MEG 2322 +2321 36.078903 -45.974960 6.000000 5.000000 MEG 2321 +2332 17.363274 -67.365387 6.000000 5.000000 MEG 2332 +2333 17.363274 -62.365387 6.000000 5.000000 MEG 2333 +2331 23.363274 -64.865387 6.000000 5.000000 MEG 2331 +2343 14.329920 -56.380260 6.000000 5.000000 MEG 2343 +2342 14.329920 -51.380260 6.000000 5.000000 MEG 2342 +2341 20.329920 -53.880260 6.000000 5.000000 MEG 2341 +2412 39.644810 -16.175139 6.000000 5.000000 MEG 2412 +2413 39.644810 -11.175139 6.000000 5.000000 MEG 2413 +2411 45.644810 -13.675139 6.000000 5.000000 MEG 2411 +2423 50.812263 -20.401899 6.000000 5.000000 MEG 2423 +2422 50.812263 -15.401899 6.000000 5.000000 MEG 2422 +2421 56.812263 -17.901899 6.000000 5.000000 MEG 2421 +2433 42.694180 -36.278580 6.000000 5.000000 MEG 2433 +2432 42.694180 -31.278580 6.000000 5.000000 MEG 2432 +2431 48.694180 -33.778580 6.000000 5.000000 MEG 2431 +2442 31.896111 -30.578348 6.000000 5.000000 MEG 2442 +2443 31.896111 -25.578348 6.000000 5.000000 MEG 2443 +2441 37.896111 -28.078348 6.000000 5.000000 MEG 2441 +2512 35.812634 -58.300888 6.000000 5.000000 MEG 2512 +2513 35.812634 -53.300888 6.000000 5.000000 MEG 2513 +2511 41.812634 -55.800888 6.000000 5.000000 MEG 2511 +2522 51.171906 -43.981274 6.000000 5.000000 MEG 2522 +2523 51.171906 -38.981274 6.000000 5.000000 MEG 2523 +2521 57.171906 -41.481274 6.000000 5.000000 MEG 2521 +2533 50.704624 -59.132656 6.000000 5.000000 MEG 2533 +2532 50.704624 -54.132656 6.000000 5.000000 MEG 2532 +2531 56.704624 -56.632656 6.000000 5.000000 MEG 2531 +2543 31.320171 -72.484848 6.000000 5.000000 MEG 2543 +2542 31.320171 -67.484848 6.000000 5.000000 MEG 2542 +2541 37.320171 -69.984848 6.000000 5.000000 MEG 2541 +2612 65.137360 -4.702045 6.000000 5.000000 MEG 2612 +2613 65.137360 0.297955 6.000000 5.000000 MEG 2613 +2611 71.137360 -2.202045 6.000000 5.000000 MEG 2611 +2623 73.822243 -17.329140 6.000000 5.000000 MEG 2623 +2622 73.822243 -12.329140 6.000000 5.000000 MEG 2622 +2621 79.822243 -14.829140 6.000000 5.000000 MEG 2621 +2633 65.490112 -40.332645 6.000000 5.000000 MEG 2633 +2632 65.490112 -35.332645 6.000000 5.000000 MEG 2632 +2631 71.490112 -37.832645 6.000000 5.000000 MEG 2631 +2642 61.220192 -25.385981 6.000000 5.000000 MEG 2642 +2643 61.220192 -20.385981 6.000000 5.000000 MEG 2643 +2641 67.220192 -22.885981 6.000000 5.000000 MEG 2641 diff --git a/python/libs/mne/channels/data/layouts/Vectorview-grad.lout b/python/libs/mne/channels/data/layouts/Vectorview-grad.lout new file mode 100644 index 0000000..1f133a1 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/Vectorview-grad.lout @@ -0,0 +1,205 @@ +-55.000000 55.000000 -65.000000 60.000000 +113 -48.186871 26.886379 6.000000 5.000000 MEG 0113 +112 -48.186871 31.886379 6.000000 5.000000 MEG 0112 +122 -39.322296 31.036510 6.000000 5.000000 MEG 0122 +123 -39.322296 36.036510 6.000000 5.000000 MEG 0123 +132 -44.722965 14.826612 6.000000 5.000000 MEG 0132 +133 -44.722965 19.826612 6.000000 5.000000 MEG 0133 +143 -52.785782 6.169280 6.000000 5.000000 MEG 0143 +142 -52.785782 11.169280 6.000000 5.000000 MEG 0142 +213 -37.392612 13.470296 6.000000 5.000000 MEG 0213 +212 -37.392612 18.470296 6.000000 5.000000 MEG 
0212 +222 -29.695013 13.899532 6.000000 5.000000 MEG 0222 +223 -29.695013 18.899532 6.000000 5.000000 MEG 0223 +232 -31.502516 -0.631914 6.000000 5.000000 MEG 0232 +233 -31.502516 4.368086 6.000000 5.000000 MEG 0233 +243 -39.115921 -2.709978 6.000000 5.000000 MEG 0243 +242 -39.115921 2.290022 6.000000 5.000000 MEG 0242 +313 -26.608879 38.351933 6.000000 5.000000 MEG 0313 +312 -26.608879 43.351933 6.000000 5.000000 MEG 0312 +322 -25.469093 26.356115 6.000000 5.000000 MEG 0322 +323 -25.469093 31.356115 6.000000 5.000000 MEG 0323 +333 -18.837411 23.164780 6.000000 5.000000 MEG 0333 +332 -18.837411 28.164780 6.000000 5.000000 MEG 0332 +343 -32.957949 27.427811 6.000000 5.000000 MEG 0343 +342 -32.957949 32.427811 6.000000 5.000000 MEG 0342 +413 -22.250046 12.315103 6.000000 5.000000 MEG 0413 +412 -22.250046 17.315103 6.000000 5.000000 MEG 0412 +422 -14.605187 10.962016 6.000000 5.000000 MEG 0422 +423 -14.605187 15.962016 6.000000 5.000000 MEG 0423 +432 -15.148193 -0.524500 6.000000 5.000000 MEG 0432 +433 -15.148193 4.475500 6.000000 5.000000 MEG 0433 +443 -23.422245 -0.157884 6.000000 5.000000 MEG 0443 +442 -23.422245 4.842116 6.000000 5.000000 MEG 0442 +513 -18.953902 44.905155 6.000000 5.000000 MEG 0513 +512 -18.953902 49.905155 6.000000 5.000000 MEG 0512 +523 -11.025696 48.325344 6.000000 5.000000 MEG 0523 +522 -11.025696 53.325344 6.000000 5.000000 MEG 0522 +532 -10.454178 39.888676 6.000000 5.000000 MEG 0532 +533 -10.454178 44.888676 6.000000 5.000000 MEG 0533 +542 -18.555386 35.433716 6.000000 5.000000 MEG 0542 +543 -18.555386 40.433716 6.000000 5.000000 MEG 0543 +613 -10.560901 30.757313 6.000000 5.000000 MEG 0613 +612 -10.560901 35.757313 6.000000 5.000000 MEG 0612 +622 -2.979000 21.849854 6.000000 5.000000 MEG 0622 +623 -2.979000 26.849854 6.000000 5.000000 MEG 0623 +633 -6.911079 11.573471 6.000000 5.000000 MEG 0633 +632 -6.911079 16.573471 6.000000 5.000000 MEG 0632 +642 -10.828249 21.334785 6.000000 5.000000 MEG 0642 +643 -10.828249 26.334785 6.000000 5.000000 MEG 0643 +713 -7.008664 0.931329 6.000000 5.000000 MEG 0713 +712 -7.008664 5.931329 6.000000 5.000000 MEG 0712 +723 1.052102 0.833180 6.000000 5.000000 MEG 0723 +722 1.052102 5.833180 6.000000 5.000000 MEG 0722 +733 1.098721 -8.987786 6.000000 5.000000 MEG 0733 +732 1.098721 -3.987786 6.000000 5.000000 MEG 0732 +743 -7.121645 -8.933109 6.000000 5.000000 MEG 0743 +742 -7.121645 -3.933109 6.000000 5.000000 MEG 0742 +813 -2.975877 49.460842 6.000000 5.000000 MEG 0813 +812 -2.975877 54.460842 6.000000 5.000000 MEG 0812 +822 -2.977890 40.979687 6.000000 5.000000 MEG 0822 +823 -2.977890 45.979687 6.000000 5.000000 MEG 0823 +913 5.024490 48.354298 6.000000 5.000000 MEG 0913 +912 5.024490 53.354298 6.000000 5.000000 MEG 0912 +923 13.021803 44.879852 6.000000 5.000000 MEG 0923 +922 13.021803 49.879852 6.000000 5.000000 MEG 0922 +932 12.564190 35.455193 6.000000 5.000000 MEG 0932 +933 12.564190 40.455193 6.000000 5.000000 MEG 0933 +942 4.483593 39.929310 6.000000 5.000000 MEG 0942 +943 4.483593 44.929310 6.000000 5.000000 MEG 0943 +1013 -2.978879 32.002693 6.000000 5.000000 MEG 1013 +1012 -2.978879 37.002693 6.000000 5.000000 MEG 1012 +1023 4.540760 30.762428 6.000000 5.000000 MEG 1023 +1022 4.540760 35.762428 6.000000 5.000000 MEG 1022 +1032 4.780051 21.348934 6.000000 5.000000 MEG 1032 +1033 4.780051 26.348934 6.000000 5.000000 MEG 1033 +1043 0.978956 11.650963 6.000000 5.000000 MEG 1043 +1042 0.978956 16.650963 6.000000 5.000000 MEG 1042 +1112 8.560405 10.928195 6.000000 5.000000 MEG 1112 +1113 8.560405 15.928195 6.000000 5.000000 
MEG 1113 +1123 16.224724 12.278107 6.000000 5.000000 MEG 1123 +1122 16.224724 17.278107 6.000000 5.000000 MEG 1122 +1133 17.379185 -0.268703 6.000000 5.000000 MEG 1133 +1132 17.379185 4.731297 6.000000 5.000000 MEG 1132 +1142 9.117422 -0.423700 6.000000 5.000000 MEG 1142 +1143 9.117422 4.576300 6.000000 5.000000 MEG 1143 +1213 20.716938 38.318100 6.000000 5.000000 MEG 1213 +1212 20.716938 43.318100 6.000000 5.000000 MEG 1212 +1223 27.111319 27.293877 6.000000 5.000000 MEG 1223 +1222 27.111319 32.293877 6.000000 5.000000 MEG 1222 +1232 19.469093 26.356115 6.000000 5.000000 MEG 1232 +1233 19.469093 31.356115 6.000000 5.000000 MEG 1233 +1243 12.786146 23.189396 6.000000 5.000000 MEG 1243 +1242 12.786146 28.189396 6.000000 5.000000 MEG 1242 +1312 23.695013 13.899529 6.000000 5.000000 MEG 1312 +1313 23.695013 18.899529 6.000000 5.000000 MEG 1313 +1323 31.369019 13.362624 6.000000 5.000000 MEG 1323 +1322 31.369019 18.362624 6.000000 5.000000 MEG 1322 +1333 33.205658 -2.836478 6.000000 5.000000 MEG 1333 +1332 33.205658 2.163522 6.000000 5.000000 MEG 1332 +1342 25.473745 -0.631941 6.000000 5.000000 MEG 1342 +1343 25.473745 4.368059 6.000000 5.000000 MEG 1343 +1412 33.387833 31.097027 6.000000 5.000000 MEG 1412 +1413 33.387833 36.097027 6.000000 5.000000 MEG 1413 +1423 41.996334 27.235786 6.000000 5.000000 MEG 1423 +1422 41.996334 32.235786 6.000000 5.000000 MEG 1422 +1433 46.693424 6.365705 6.000000 5.000000 MEG 1433 +1432 46.693424 11.365705 6.000000 5.000000 MEG 1432 +1442 38.636284 14.732794 6.000000 5.000000 MEG 1442 +1443 38.636284 19.732794 6.000000 5.000000 MEG 1443 +1512 -46.828197 -4.270524 6.000000 5.000000 MEG 1512 +1513 -46.828197 0.729476 6.000000 5.000000 MEG 1513 +1522 -44.250233 -20.875282 6.000000 5.000000 MEG 1522 +1523 -44.250233 -15.875282 6.000000 5.000000 MEG 1523 +1533 -47.087372 -32.702410 6.000000 5.000000 MEG 1533 +1532 -47.087372 -27.702410 6.000000 5.000000 MEG 1532 +1543 -52.352669 -14.466389 6.000000 5.000000 MEG 1543 +1542 -52.352669 -9.466389 6.000000 5.000000 MEG 1542 +1613 -37.598797 -16.787832 6.000000 5.000000 MEG 1613 +1612 -37.598797 -11.787832 6.000000 5.000000 MEG 1612 +1622 -30.357292 -13.585911 6.000000 5.000000 MEG 1622 +1623 -30.357292 -8.585911 6.000000 5.000000 MEG 1623 +1632 -25.393221 -25.022747 6.000000 5.000000 MEG 1632 +1633 -25.393221 -20.022747 6.000000 5.000000 MEG 1633 +1643 -32.428291 -29.512911 6.000000 5.000000 MEG 1643 +1642 -32.428291 -24.512911 6.000000 5.000000 MEG 1642 +1713 -37.521523 -47.886852 6.000000 5.000000 MEG 1713 +1712 -37.521523 -42.886852 6.000000 5.000000 MEG 1712 +1722 -37.773560 -35.834789 6.000000 5.000000 MEG 1722 +1723 -37.773560 -30.834789 6.000000 5.000000 MEG 1723 +1732 -27.964468 -47.242935 6.000000 5.000000 MEG 1732 +1733 -27.964468 -42.242935 6.000000 5.000000 MEG 1733 +1743 -25.080088 -58.608849 6.000000 5.000000 MEG 1743 +1742 -25.080088 -53.608849 6.000000 5.000000 MEG 1742 +1813 -22.765453 -11.539077 6.000000 5.000000 MEG 1813 +1812 -22.765453 -6.539077 6.000000 5.000000 MEG 1812 +1822 -14.990439 -10.617317 6.000000 5.000000 MEG 1822 +1823 -14.990439 -5.617317 6.000000 5.000000 MEG 1823 +1832 -7.235366 -18.294876 6.000000 5.000000 MEG 1832 +1833 -7.235366 -13.294876 6.000000 5.000000 MEG 1833 +1843 -16.786220 -22.032574 6.000000 5.000000 MEG 1843 +1842 -16.786220 -17.032574 6.000000 5.000000 MEG 1842 +1912 -17.673892 -33.262066 6.000000 5.000000 MEG 1912 +1913 -17.673892 -28.262066 6.000000 5.000000 MEG 1913 +1923 -14.238597 -45.699379 6.000000 5.000000 MEG 1923 +1922 -14.238597 -40.699379 6.000000 5.000000 
MEG 1922 +1932 -16.109179 -54.539486 6.000000 5.000000 MEG 1932 +1933 -16.109179 -49.539486 6.000000 5.000000 MEG 1933 +1943 -24.328934 -39.353901 6.000000 5.000000 MEG 1943 +1942 -24.328934 -34.353901 6.000000 5.000000 MEG 1942 +2013 -7.775570 -28.014633 6.000000 5.000000 MEG 2013 +2012 -7.775570 -23.014633 6.000000 5.000000 MEG 2012 +2023 1.721470 -27.999788 6.000000 5.000000 MEG 2023 +2022 1.721470 -22.999788 6.000000 5.000000 MEG 2022 +2032 1.906072 -37.626270 6.000000 5.000000 MEG 2032 +2033 1.906072 -32.626270 6.000000 5.000000 MEG 2033 +2042 -7.905933 -37.589039 6.000000 5.000000 MEG 2042 +2043 -7.905933 -32.589039 6.000000 5.000000 MEG 2043 +2113 -3.033595 -47.732231 6.000000 5.000000 MEG 2113 +2112 -3.033595 -42.732231 6.000000 5.000000 MEG 2112 +2122 -2.999999 -56.934807 6.000000 5.000000 MEG 2122 +2123 -2.999999 -51.934807 6.000000 5.000000 MEG 2123 +2133 4.648282 -64.218044 6.000000 5.000000 MEG 2133 +2132 4.648282 -59.218044 6.000000 5.000000 MEG 2132 +2143 -10.692250 -64.207703 6.000000 5.000000 MEG 2143 +2142 -10.692250 -59.207703 6.000000 5.000000 MEG 2142 +2212 8.962435 -10.585071 6.000000 5.000000 MEG 2212 +2213 8.962435 -5.585071 6.000000 5.000000 MEG 2213 +2223 16.756042 -11.459877 6.000000 5.000000 MEG 2223 +2222 16.756042 -6.459877 6.000000 5.000000 MEG 2222 +2233 10.797766 -22.111992 6.000000 5.000000 MEG 2233 +2232 10.797766 -17.111992 6.000000 5.000000 MEG 2232 +2242 1.262053 -18.307052 6.000000 5.000000 MEG 2242 +2243 1.262053 -13.307052 6.000000 5.000000 MEG 2243 +2312 11.664891 -33.279053 6.000000 5.000000 MEG 2312 +2313 11.664891 -28.279053 6.000000 5.000000 MEG 2313 +2323 18.227104 -39.378311 6.000000 5.000000 MEG 2323 +2322 18.227104 -34.378311 6.000000 5.000000 MEG 2322 +2332 10.067341 -54.531059 6.000000 5.000000 MEG 2332 +2333 10.067341 -49.531059 6.000000 5.000000 MEG 2333 +2343 8.120804 -45.719460 6.000000 5.000000 MEG 2343 +2342 8.120804 -40.719460 6.000000 5.000000 MEG 2342 +2412 24.365654 -13.469363 6.000000 5.000000 MEG 2412 +2413 24.365654 -8.469363 6.000000 5.000000 MEG 2413 +2423 31.531933 -16.859812 6.000000 5.000000 MEG 2423 +2422 31.531933 -11.859812 6.000000 5.000000 MEG 2422 +2433 26.322470 -29.595119 6.000000 5.000000 MEG 2433 +2432 26.322470 -24.595119 6.000000 5.000000 MEG 2432 +2442 19.393225 -25.022739 6.000000 5.000000 MEG 2442 +2443 19.393225 -20.022739 6.000000 5.000000 MEG 2443 +2512 21.906504 -47.260071 6.000000 5.000000 MEG 2512 +2513 21.906504 -42.260071 6.000000 5.000000 MEG 2513 +2522 31.762718 -35.773750 6.000000 5.000000 MEG 2522 +2523 31.762718 -30.773750 6.000000 5.000000 MEG 2523 +2533 31.462860 -47.927265 6.000000 5.000000 MEG 2533 +2532 31.462860 -42.927265 6.000000 5.000000 MEG 2532 +2543 19.023640 -58.637577 6.000000 5.000000 MEG 2543 +2542 19.023640 -53.637577 6.000000 5.000000 MEG 2542 +2612 40.724506 -4.266347 6.000000 5.000000 MEG 2612 +2613 40.724506 0.733653 6.000000 5.000000 MEG 2613 +2623 46.297695 -14.395032 6.000000 5.000000 MEG 2623 +2622 46.297695 -9.395032 6.000000 5.000000 MEG 2622 +2633 40.950874 -32.847042 6.000000 5.000000 MEG 2633 +2632 40.950874 -27.847042 6.000000 5.000000 MEG 2632 +2642 38.210819 -20.857738 6.000000 5.000000 MEG 2642 +2643 38.210819 -15.857738 6.000000 5.000000 MEG 2643 diff --git a/python/libs/mne/channels/data/layouts/Vectorview-grad_norm.lout b/python/libs/mne/channels/data/layouts/Vectorview-grad_norm.lout new file mode 100644 index 0000000..d06ce01 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/Vectorview-grad_norm.lout @@ -0,0 +1,103 @@ +-50.000000 50.000000 
-50.000000 38.000000 +11 -41.408840 17.090919 6.000000 5.000000 MEG 011X +12 -33.873951 19.857674 6.000000 5.000000 MEG 012X +13 -38.464523 9.051075 6.000000 5.000000 MEG 013X +14 -45.317917 3.279520 6.000000 5.000000 MEG 014X +21 -32.233719 8.146864 6.000000 5.000000 MEG 021X +22 -25.690760 8.433022 6.000000 5.000000 MEG 022X +23 -27.227139 -1.254610 6.000000 5.000000 MEG 023X +24 -33.698534 -2.642785 6.000000 5.000000 MEG 024X +31 -23.067547 24.734621 6.000000 5.000000 MEG 031X +32 -22.098728 16.737410 6.000000 5.000000 MEG 032X +33 -16.461800 14.609854 6.000000 5.000000 MEG 033X +34 -28.464256 17.451874 6.000000 5.000000 MEG 034X +41 -19.362539 7.376735 6.000000 5.000000 MEG 041X +42 -12.864409 6.474677 6.000000 5.000000 MEG 042X +43 -13.325964 -1.183000 6.000000 5.000000 MEG 043X +44 -20.358908 -0.938589 6.000000 5.000000 MEG 044X +51 -16.560817 29.103437 6.000000 5.000000 MEG 051X +52 -9.821842 31.383564 6.000000 5.000000 MEG 052X +53 -9.336051 25.759117 6.000000 5.000000 MEG 053X +54 -16.222077 22.789145 6.000000 5.000000 MEG 054X +61 -9.426766 19.671541 6.000000 5.000000 MEG 061X +62 -2.982150 13.733236 6.000000 5.000000 MEG 062X +63 -6.324418 6.882314 6.000000 5.000000 MEG 063X +64 -9.654012 13.389857 6.000000 5.000000 MEG 064X +71 -6.407364 -0.212448 6.000000 5.000000 MEG 071X +72 0.444286 -0.277880 6.000000 5.000000 MEG 072X +73 0.483912 -6.911695 6.000000 5.000000 MEG 073X +74 -6.503398 -6.874514 6.000000 5.000000 MEG 074X +81 -2.979496 32.140564 6.000000 5.000000 MEG 081X +82 -2.981206 26.486458 6.000000 5.000000 MEG 082X +91 3.820817 31.402866 6.000000 5.000000 MEG 091X +92 10.618533 29.086569 6.000000 5.000000 MEG 092X +93 10.229562 22.803463 6.000000 5.000000 MEG 093X +94 3.361053 25.786205 6.000000 5.000000 MEG 094X +101 -2.982047 20.501795 6.000000 5.000000 MEG 101X +102 3.409646 19.674952 6.000000 5.000000 MEG 102X +103 3.613043 13.399289 6.000000 5.000000 MEG 103X +104 0.382112 6.933975 6.000000 5.000000 MEG 104X +111 6.826344 6.452130 6.000000 5.000000 MEG 111X +112 13.341015 7.352071 6.000000 5.000000 MEG 112X +113 14.322306 -1.012468 6.000000 5.000000 MEG 113X +114 7.299809 -1.115800 6.000000 5.000000 MEG 114X +121 17.159397 24.712067 6.000000 5.000000 MEG 121X +122 22.594622 17.362583 6.000000 5.000000 MEG 122X +123 16.098728 16.737411 6.000000 5.000000 MEG 123X +124 10.418224 14.626265 6.000000 5.000000 MEG 124X +131 19.690762 8.433019 6.000000 5.000000 MEG 131X +132 26.213667 8.075083 6.000000 5.000000 MEG 132X +133 27.774809 -2.728805 6.000000 5.000000 MEG 133X +134 21.202684 -1.254627 6.000000 5.000000 MEG 134X +141 27.929657 19.898018 6.000000 5.000000 MEG 141X +142 35.246883 17.323858 6.000000 5.000000 MEG 142X +143 39.239410 3.410470 6.000000 5.000000 MEG 143X +144 32.390839 8.988529 6.000000 5.000000 MEG 144X +151 -40.253967 -3.703956 6.000000 5.000000 MEG 151X +152 -38.062698 -14.995193 6.000000 5.000000 MEG 152X +153 -40.474266 -23.037640 6.000000 5.000000 MEG 153X +154 -44.949768 -10.637144 6.000000 5.000000 MEG 154X +161 -32.408976 -12.215726 6.000000 5.000000 MEG 161X +162 -26.253698 -10.038419 6.000000 5.000000 MEG 162X +163 -22.034237 -17.815468 6.000000 5.000000 MEG 163X +164 -28.014048 -20.868780 6.000000 5.000000 MEG 164X +171 -32.343294 -33.363060 6.000000 5.000000 MEG 171X +172 -32.557526 -25.167658 6.000000 5.000000 MEG 172X +173 -24.219797 -32.925196 6.000000 5.000000 MEG 173X +174 -21.768074 -40.654018 6.000000 5.000000 MEG 174X +181 -19.800634 -8.646573 6.000000 5.000000 MEG 181X +182 -13.191874 -8.019776 6.000000 5.000000 MEG 182X +183 
-6.600061 -13.240516 6.000000 5.000000 MEG 183X +184 -14.718287 -15.782150 6.000000 5.000000 MEG 184X +191 -15.472808 -23.418205 6.000000 5.000000 MEG 191X +192 -12.552808 -31.875578 6.000000 5.000000 MEG 192X +193 -14.142802 -37.886852 6.000000 5.000000 MEG 193X +194 -21.129593 -27.560652 6.000000 5.000000 MEG 194X +201 -7.059234 -19.849951 6.000000 5.000000 MEG 201X +202 1.013249 -19.839857 6.000000 5.000000 MEG 202X +203 1.170161 -26.385864 6.000000 5.000000 MEG 203X +204 -7.170043 -26.360546 6.000000 5.000000 MEG 204X +211 -3.028555 -33.257917 6.000000 5.000000 MEG 211X +212 -3.000000 -39.515667 6.000000 5.000000 MEG 212X +213 3.501040 -44.468269 6.000000 5.000000 MEG 213X +214 -9.538412 -44.461239 6.000000 5.000000 MEG 214X +221 7.168070 -7.997848 6.000000 5.000000 MEG 221X +222 13.792637 -8.592716 6.000000 5.000000 MEG 222X +223 8.728101 -15.836154 6.000000 5.000000 MEG 223X +224 0.622745 -13.248796 6.000000 5.000000 MEG 224X +231 9.465158 -23.429756 6.000000 5.000000 MEG 231X +232 15.043037 -27.577251 6.000000 5.000000 MEG 232X +233 8.107240 -37.881119 6.000000 5.000000 MEG 233X +234 6.452683 -31.889233 6.000000 5.000000 MEG 234X +241 20.260805 -9.959167 6.000000 5.000000 MEG 241X +242 26.352144 -12.264672 6.000000 5.000000 MEG 242X +243 21.924099 -20.924681 6.000000 5.000000 MEG 243X +244 16.034241 -17.815463 6.000000 5.000000 MEG 244X +251 18.170528 -32.936850 6.000000 5.000000 MEG 251X +252 26.548311 -25.126150 6.000000 5.000000 MEG 252X +253 26.293430 -33.390539 6.000000 5.000000 MEG 253X +254 15.720093 -40.673553 6.000000 5.000000 MEG 254X +261 34.165833 -3.701116 6.000000 5.000000 MEG 261X +262 38.903042 -10.588621 6.000000 5.000000 MEG 262X +263 34.358242 -23.135988 6.000000 5.000000 MEG 263X +264 32.029198 -14.983262 6.000000 5.000000 MEG 264X diff --git a/python/libs/mne/channels/data/layouts/Vectorview-mag.lout b/python/libs/mne/channels/data/layouts/Vectorview-mag.lout new file mode 100644 index 0000000..c5f4c60 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/Vectorview-mag.lout @@ -0,0 +1,103 @@ +-50.000000 50.000000 -50.000000 38.000000 +111 -41.408840 17.090919 6.000000 5.000000 MEG 0111 +121 -33.873951 19.857674 6.000000 5.000000 MEG 0121 +131 -38.464523 9.051075 6.000000 5.000000 MEG 0131 +141 -45.317917 3.279520 6.000000 5.000000 MEG 0141 +211 -32.233719 8.146864 6.000000 5.000000 MEG 0211 +221 -25.690760 8.433022 6.000000 5.000000 MEG 0221 +231 -27.227139 -1.254610 6.000000 5.000000 MEG 0231 +241 -33.698534 -2.642785 6.000000 5.000000 MEG 0241 +311 -23.067547 24.734621 6.000000 5.000000 MEG 0311 +321 -22.098728 16.737410 6.000000 5.000000 MEG 0321 +331 -16.461800 14.609854 6.000000 5.000000 MEG 0331 +341 -28.464256 17.451874 6.000000 5.000000 MEG 0341 +411 -19.362539 7.376735 6.000000 5.000000 MEG 0411 +421 -12.864409 6.474677 6.000000 5.000000 MEG 0421 +431 -13.325964 -1.183000 6.000000 5.000000 MEG 0431 +441 -20.358908 -0.938589 6.000000 5.000000 MEG 0441 +511 -16.560817 29.103437 6.000000 5.000000 MEG 0511 +521 -9.821842 31.383564 6.000000 5.000000 MEG 0521 +531 -9.336051 25.759117 6.000000 5.000000 MEG 0531 +541 -16.222077 22.789145 6.000000 5.000000 MEG 0541 +611 -9.426766 19.671541 6.000000 5.000000 MEG 0611 +621 -2.982150 13.733236 6.000000 5.000000 MEG 0621 +631 -6.324418 6.882314 6.000000 5.000000 MEG 0631 +641 -9.654012 13.389857 6.000000 5.000000 MEG 0641 +711 -6.407364 -0.212448 6.000000 5.000000 MEG 0711 +721 0.444286 -0.277880 6.000000 5.000000 MEG 0721 +731 0.483912 -6.911695 6.000000 5.000000 MEG 0731 +741 -6.503398 -6.874514 6.000000 
5.000000 MEG 0741 +811 -2.979496 32.140564 6.000000 5.000000 MEG 0811 +821 -2.981206 26.486458 6.000000 5.000000 MEG 0821 +911 3.820817 31.402866 6.000000 5.000000 MEG 0911 +921 10.618533 29.086569 6.000000 5.000000 MEG 0921 +931 10.229562 22.803463 6.000000 5.000000 MEG 0931 +941 3.361053 25.786205 6.000000 5.000000 MEG 0941 +1011 -2.982047 20.501795 6.000000 5.000000 MEG 1011 +1021 3.409646 19.674952 6.000000 5.000000 MEG 1021 +1031 3.613043 13.399289 6.000000 5.000000 MEG 1031 +1041 0.382112 6.933975 6.000000 5.000000 MEG 1041 +1111 6.826344 6.452130 6.000000 5.000000 MEG 1111 +1121 13.341015 7.352071 6.000000 5.000000 MEG 1121 +1131 14.322306 -1.012468 6.000000 5.000000 MEG 1131 +1141 7.299809 -1.115800 6.000000 5.000000 MEG 1141 +1211 17.159397 24.712067 6.000000 5.000000 MEG 1211 +1221 22.594622 17.362583 6.000000 5.000000 MEG 1221 +1231 16.098728 16.737411 6.000000 5.000000 MEG 1231 +1241 10.418224 14.626265 6.000000 5.000000 MEG 1241 +1311 19.690762 8.433019 6.000000 5.000000 MEG 1311 +1321 26.213667 8.075083 6.000000 5.000000 MEG 1321 +1331 27.774809 -2.728805 6.000000 5.000000 MEG 1331 +1341 21.202684 -1.254627 6.000000 5.000000 MEG 1341 +1411 27.929657 19.898018 6.000000 5.000000 MEG 1411 +1421 35.246883 17.323858 6.000000 5.000000 MEG 1421 +1431 39.239410 3.410470 6.000000 5.000000 MEG 1431 +1441 32.390839 8.988529 6.000000 5.000000 MEG 1441 +1511 -40.253967 -3.703956 6.000000 5.000000 MEG 1511 +1521 -38.062698 -14.995193 6.000000 5.000000 MEG 1521 +1531 -40.474266 -23.037640 6.000000 5.000000 MEG 1531 +1541 -44.949768 -10.637144 6.000000 5.000000 MEG 1541 +1611 -32.408976 -12.215726 6.000000 5.000000 MEG 1611 +1621 -26.253698 -10.038419 6.000000 5.000000 MEG 1621 +1631 -22.034237 -17.815468 6.000000 5.000000 MEG 1631 +1641 -28.014048 -20.868780 6.000000 5.000000 MEG 1641 +1711 -32.343294 -33.363060 6.000000 5.000000 MEG 1711 +1721 -32.557526 -25.167658 6.000000 5.000000 MEG 1721 +1731 -24.219797 -32.925196 6.000000 5.000000 MEG 1731 +1741 -21.768074 -40.654018 6.000000 5.000000 MEG 1741 +1811 -19.800634 -8.646573 6.000000 5.000000 MEG 1811 +1821 -13.191874 -8.019776 6.000000 5.000000 MEG 1821 +1831 -6.600061 -13.240516 6.000000 5.000000 MEG 1831 +1841 -14.718287 -15.782150 6.000000 5.000000 MEG 1841 +1911 -15.472808 -23.418205 6.000000 5.000000 MEG 1911 +1921 -12.552808 -31.875578 6.000000 5.000000 MEG 1921 +1931 -14.142802 -37.886852 6.000000 5.000000 MEG 1931 +1941 -21.129593 -27.560652 6.000000 5.000000 MEG 1941 +2011 -7.059234 -19.849951 6.000000 5.000000 MEG 2011 +2021 1.013249 -19.839857 6.000000 5.000000 MEG 2021 +2031 1.170161 -26.385864 6.000000 5.000000 MEG 2031 +2041 -7.170043 -26.360546 6.000000 5.000000 MEG 2041 +2111 -3.028555 -33.257917 6.000000 5.000000 MEG 2111 +2121 -3.000000 -39.515667 6.000000 5.000000 MEG 2121 +2131 3.501040 -44.468269 6.000000 5.000000 MEG 2131 +2141 -9.538412 -44.461239 6.000000 5.000000 MEG 2141 +2211 7.168070 -7.997848 6.000000 5.000000 MEG 2211 +2221 13.792637 -8.592716 6.000000 5.000000 MEG 2221 +2231 8.728101 -15.836154 6.000000 5.000000 MEG 2231 +2241 0.622745 -13.248796 6.000000 5.000000 MEG 2241 +2311 9.465158 -23.429756 6.000000 5.000000 MEG 2311 +2321 15.043037 -27.577251 6.000000 5.000000 MEG 2321 +2331 8.107240 -37.881119 6.000000 5.000000 MEG 2331 +2341 6.452683 -31.889233 6.000000 5.000000 MEG 2341 +2411 20.260805 -9.959167 6.000000 5.000000 MEG 2411 +2421 26.352144 -12.264672 6.000000 5.000000 MEG 2421 +2431 21.924099 -20.924681 6.000000 5.000000 MEG 2431 +2441 16.034241 -17.815463 6.000000 5.000000 MEG 2441 +2511 
18.170528 -32.936850 6.000000 5.000000 MEG 2511 +2521 26.548311 -25.126150 6.000000 5.000000 MEG 2521 +2531 26.293430 -33.390539 6.000000 5.000000 MEG 2531 +2541 15.720093 -40.673553 6.000000 5.000000 MEG 2541 +2611 34.165833 -3.701116 6.000000 5.000000 MEG 2611 +2621 38.903042 -10.588621 6.000000 5.000000 MEG 2621 +2631 34.358242 -23.135988 6.000000 5.000000 MEG 2631 +2641 32.029198 -14.983262 6.000000 5.000000 MEG 2641 diff --git a/python/libs/mne/channels/data/layouts/biosemi.lay b/python/libs/mne/channels/data/layouts/biosemi.lay new file mode 100644 index 0000000..ca74816 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/biosemi.lay @@ -0,0 +1,64 @@ +1 -0.496189 1.527114 0.290000 0.230000 Fp1 +2 -0.943808 1.299041 0.290000 0.230000 AF7 +3 -0.545830 1.170536 0.290000 0.230000 AF3 +4 -0.326906 0.809121 0.290000 0.230000 F1 +5 -0.659023 0.813825 0.290000 0.230000 F3 +6 -0.987913 0.858779 0.290000 0.230000 F5 +7 -1.299041 0.943808 0.290000 0.230000 F7 +8 -1.527114 0.496189 0.290000 0.230000 FT7 +9 -1.173172 0.450338 0.290000 0.230000 FC5 +10 -0.770517 0.409691 0.290000 0.230000 FC3 +11 -0.394923 0.394923 0.290000 0.230000 FC1 +12 -0.401426 -0.000000 0.290000 0.230000 C1 +13 -0.802851 -0.000000 0.290000 0.230000 C3 +14 -1.204277 -0.000000 0.290000 0.230000 C5 +15 -1.605703 -0.000000 0.290000 0.230000 T7 +16 -1.527114 -0.496189 0.290000 0.230000 TP7 +17 -1.173172 -0.450338 0.290000 0.230000 CP5 +18 -0.770517 -0.409691 0.290000 0.230000 CP3 +19 -0.394923 -0.394923 0.290000 0.230000 CP1 +20 -0.326906 -0.809121 0.290000 0.230000 P1 +21 -0.659023 -0.813825 0.290000 0.230000 P3 +22 -0.987913 -0.858779 0.290000 0.230000 P5 +23 -1.299041 -0.943808 0.290000 0.230000 P7 +24 -1.537550 -1.290157 0.290000 0.230000 P9 +25 -0.943808 -1.299041 0.290000 0.230000 PO7 +26 -0.545830 -1.170536 0.290000 0.230000 PO3 +27 -0.496189 -1.527114 0.290000 0.230000 O1 +28 0.000000 -2.007129 0.290000 0.230000 Iz +29 0.000000 -1.605703 0.290000 0.230000 Oz +30 0.000000 -1.204277 0.290000 0.230000 POz +31 0.000000 -0.802851 0.290000 0.230000 Pz +32 0.000000 -0.401426 0.290000 0.230000 CPz +33 0.000000 1.605703 0.290000 0.230000 Fpz +34 0.496189 1.527114 0.290000 0.230000 Fp2 +35 0.943808 1.299041 0.290000 0.230000 AF8 +36 0.545830 1.170536 0.290000 0.230000 AF4 +37 0.000000 1.204277 0.290000 0.230000 AFz +38 0.000000 0.802851 0.290000 0.230000 Fz +39 0.326906 0.809121 0.290000 0.230000 F2 +40 0.659023 0.813825 0.290000 0.230000 F4 +41 0.987913 0.858779 0.290000 0.230000 F6 +42 1.299041 0.943808 0.290000 0.230000 F8 +43 1.527114 0.496189 0.290000 0.230000 FT8 +44 1.173172 0.450338 0.290000 0.230000 FC6 +45 0.770517 0.409691 0.290000 0.230000 FC4 +46 0.394923 0.394923 0.290000 0.230000 FC2 +47 0.000000 0.401426 0.290000 0.230000 FCz +48 0.000000 0.000000 0.290000 0.230000 Cz +49 0.401426 0.000000 0.290000 0.230000 C2 +50 0.802851 0.000000 0.290000 0.230000 C4 +51 1.204277 0.000000 0.290000 0.230000 C6 +52 1.605703 0.000000 0.290000 0.230000 T8 +53 1.527114 -0.496189 0.290000 0.230000 TP8 +54 1.173172 -0.450338 0.290000 0.230000 CP6 +55 0.770517 -0.409691 0.290000 0.230000 CP4 +56 0.394923 -0.394923 0.290000 0.230000 CP2 +57 0.326906 -0.809121 0.290000 0.230000 P2 +58 0.659023 -0.813825 0.290000 0.230000 P4 +59 0.987913 -0.858779 0.290000 0.230000 P6 +60 1.299041 -0.943808 0.290000 0.230000 P8 +61 1.537550 -1.290157 0.290000 0.230000 P10 +62 0.943808 -1.299041 0.290000 0.230000 PO8 +63 0.545830 -1.170536 0.290000 0.230000 PO4 +64 0.496189 -1.527114 0.290000 0.230000 O2 \ No newline at end of file diff --git 
a/python/libs/mne/channels/data/layouts/magnesWH3600.lout b/python/libs/mne/channels/data/layouts/magnesWH3600.lout new file mode 100644 index 0000000..577e953 --- /dev/null +++ b/python/libs/mne/channels/data/layouts/magnesWH3600.lout @@ -0,0 +1,249 @@ + -42.19 43.52 -41.70 28.71 +001 -1.28 -5.13 4.00 3.00 MEG 001 +002 -1.22 -1.43 4.00 3.00 MEG 002 +003 -1.37 2.53 4.00 3.00 MEG 003 +004 -1.36 5.90 4.00 3.00 MEG 004 +005 -1.45 9.27 4.00 3.00 MEG 005 +006 -4.89 9.36 4.00 3.00 MEG 006 +007 -5.20 5.86 4.00 3.00 MEG 007 +008 -5.26 2.40 4.00 3.00 MEG 008 +009 -5.34 -1.29 4.00 3.00 MEG 009 +010 -5.12 -5.08 4.00 3.00 MEG 010 +011 -4.73 -8.47 4.00 3.00 MEG 011 +012 -1.31 -8.81 4.00 3.00 MEG 012 +013 2.04 -8.49 4.00 3.00 MEG 013 +014 2.54 -5.16 4.00 3.00 MEG 014 +015 2.69 -1.43 4.00 3.00 MEG 015 +016 2.62 2.56 4.00 3.00 MEG 016 +017 2.50 5.89 4.00 3.00 MEG 017 +018 2.10 9.34 4.00 3.00 MEG 018 +019 -1.45 12.55 4.00 3.00 MEG 019 +020 -5.76 12.42 4.00 3.00 MEG 020 +021 -8.30 9.98 4.00 3.00 MEG 021 +022 -9.16 5.97 4.00 3.00 MEG 022 +023 -9.32 2.49 4.00 3.00 MEG 023 +024 -9.42 -1.32 4.00 3.00 MEG 024 +025 -9.13 -5.11 4.00 3.00 MEG 025 +026 -8.43 -9.18 4.00 3.00 MEG 026 +027 -5.45 -12.10 4.00 3.00 MEG 027 +028 -1.40 -12.51 4.00 3.00 MEG 028 +029 2.64 -12.08 4.00 3.00 MEG 029 +030 5.77 -9.29 4.00 3.00 MEG 030 +031 6.50 -5.19 4.00 3.00 MEG 031 +032 6.85 -1.37 4.00 3.00 MEG 032 +033 6.70 2.65 4.00 3.00 MEG 033 +034 6.46 6.18 4.00 3.00 MEG 034 +035 5.61 10.08 4.00 3.00 MEG 035 +036 2.95 12.49 4.00 3.00 MEG 036 +037 -1.47 15.77 4.00 3.00 MEG 037 +038 -5.48 15.52 4.00 3.00 MEG 038 +039 -8.97 13.31 4.00 3.00 MEG 039 +040 -11.91 10.42 4.00 3.00 MEG 040 +041 -12.96 6.84 4.00 3.00 MEG 041 +042 -13.39 3.21 4.00 3.00 MEG 042 +043 -13.58 -0.70 4.00 3.00 MEG 043 +044 -13.08 -4.42 4.00 3.00 MEG 044 +045 -12.52 -8.05 4.00 3.00 MEG 045 +046 -11.13 -11.34 4.00 3.00 MEG 046 +047 -8.45 -14.21 4.00 3.00 MEG 047 +048 -5.08 -15.56 4.00 3.00 MEG 048 +049 -1.60 -16.17 4.00 3.00 MEG 049 +050 2.22 -15.61 4.00 3.00 MEG 050 +051 5.63 -14.28 4.00 3.00 MEG 051 +052 8.38 -11.70 4.00 3.00 MEG 052 +053 9.89 -8.24 4.00 3.00 MEG 053 +054 10.43 -4.42 4.00 3.00 MEG 054 +055 10.94 -0.62 4.00 3.00 MEG 055 +056 10.72 3.35 4.00 3.00 MEG 056 +057 10.22 7.01 4.00 3.00 MEG 057 +058 9.04 10.61 4.00 3.00 MEG 058 +059 6.20 13.42 4.00 3.00 MEG 059 +060 2.52 15.65 4.00 3.00 MEG 060 +061 -1.53 18.91 4.00 3.00 MEG 061 +062 -5.68 18.61 4.00 3.00 MEG 062 +063 -9.46 16.89 4.00 3.00 MEG 063 +064 -12.95 14.48 4.00 3.00 MEG 064 +065 -15.67 11.24 4.00 3.00 MEG 065 +066 -17.06 7.05 4.00 3.00 MEG 066 +067 -17.65 3.16 4.00 3.00 MEG 067 +068 -17.98 -1.20 4.00 3.00 MEG 068 +069 -17.13 -5.53 4.00 3.00 MEG 069 +070 -16.60 -9.33 4.00 3.00 MEG 070 +071 -14.32 -12.91 4.00 3.00 MEG 071 +072 -11.85 -15.75 4.00 3.00 MEG 072 +073 -8.78 -17.93 4.00 3.00 MEG 073 +074 -5.30 -19.40 4.00 3.00 MEG 074 +075 -1.58 -19.85 4.00 3.00 MEG 075 +076 2.41 -19.42 4.00 3.00 MEG 076 +077 5.94 -18.13 4.00 3.00 MEG 077 +078 9.16 -15.98 4.00 3.00 MEG 078 +079 11.79 -13.08 4.00 3.00 MEG 079 +080 13.62 -9.59 4.00 3.00 MEG 080 +081 14.57 -5.64 4.00 3.00 MEG 081 +082 15.42 -1.35 4.00 3.00 MEG 082 +083 15.05 3.30 4.00 3.00 MEG 083 +084 14.29 7.20 4.00 3.00 MEG 084 +085 12.81 11.43 4.00 3.00 MEG 085 +086 9.96 14.67 4.00 3.00 MEG 086 +087 6.46 17.06 4.00 3.00 MEG 087 +088 2.60 18.73 4.00 3.00 MEG 088 +089 -1.60 22.21 4.00 3.00 MEG 089 +090 -5.83 21.82 4.00 3.00 MEG 090 +091 -9.75 20.43 4.00 3.00 MEG 091 +092 -13.45 18.45 4.00 3.00 MEG 092 +093 -16.67 15.62 4.00 3.00 MEG 093 +094 -19.33 12.13 4.00 3.00 
MEG 094 +095 -20.94 7.82 4.00 3.00 MEG 095 +096 -21.81 3.65 4.00 3.00 MEG 096 +097 -22.23 -1.27 4.00 3.00 MEG 097 +098 -21.14 -5.87 4.00 3.00 MEG 098 +099 -20.30 -9.97 4.00 3.00 MEG 099 +100 -18.46 -13.84 4.00 3.00 MEG 100 +101 -16.07 -17.08 4.00 3.00 MEG 101 +102 -12.88 -19.71 4.00 3.00 MEG 102 +103 -9.34 -21.89 4.00 3.00 MEG 103 +104 -5.64 -23.02 4.00 3.00 MEG 104 +105 -1.72 -23.54 4.00 3.00 MEG 105 +106 2.48 -23.24 4.00 3.00 MEG 106 +107 6.42 -22.00 4.00 3.00 MEG 107 +108 9.86 -20.19 4.00 3.00 MEG 108 +109 13.22 -17.32 4.00 3.00 MEG 109 +110 15.75 -14.15 4.00 3.00 MEG 110 +111 17.67 -10.19 4.00 3.00 MEG 111 +112 18.65 -6.08 4.00 3.00 MEG 112 +113 19.69 -1.27 4.00 3.00 MEG 113 +114 19.27 3.70 4.00 3.00 MEG 114 +115 18.30 8.05 4.00 3.00 MEG 115 +116 16.46 12.48 4.00 3.00 MEG 116 +117 13.74 15.93 4.00 3.00 MEG 117 +118 10.41 18.72 4.00 3.00 MEG 118 +119 6.64 20.69 4.00 3.00 MEG 119 +120 2.67 22.02 4.00 3.00 MEG 120 +121 -1.74 25.41 4.00 3.00 MEG 121 +122 -6.59 24.84 4.00 3.00 MEG 122 +123 -11.16 23.37 4.00 3.00 MEG 123 +124 -15.46 21.07 4.00 3.00 MEG 124 +125 -19.25 17.84 4.00 3.00 MEG 125 +126 -22.45 13.89 4.00 3.00 MEG 126 +127 -24.89 8.96 4.00 3.00 MEG 127 +128 -26.13 4.36 4.00 3.00 MEG 128 +129 -26.65 -1.22 4.00 3.00 MEG 129 +130 -25.30 -6.36 4.00 3.00 MEG 130 +131 -24.16 -11.45 4.00 3.00 MEG 131 +132 -21.98 -15.88 4.00 3.00 MEG 132 +133 -18.81 -19.82 4.00 3.00 MEG 133 +134 -15.20 -22.99 4.00 3.00 MEG 134 +135 -11.11 -25.29 4.00 3.00 MEG 135 +136 -6.51 -26.74 4.00 3.00 MEG 136 +137 -1.86 -27.28 4.00 3.00 MEG 137 +138 3.17 -26.90 4.00 3.00 MEG 138 +139 7.79 -25.55 4.00 3.00 MEG 139 +140 12.07 -23.15 4.00 3.00 MEG 140 +141 15.93 -20.09 4.00 3.00 MEG 141 +142 19.04 -16.25 4.00 3.00 MEG 142 +143 21.39 -11.67 4.00 3.00 MEG 143 +144 22.75 -6.58 4.00 3.00 MEG 144 +145 23.99 -1.23 4.00 3.00 MEG 145 +146 23.36 4.49 4.00 3.00 MEG 146 +147 22.02 9.37 4.00 3.00 MEG 147 +148 19.51 14.31 4.00 3.00 MEG 148 +149 16.20 18.23 4.00 3.00 MEG 149 +150 12.16 21.54 4.00 3.00 MEG 150 +151 7.85 23.69 4.00 3.00 MEG 151 +152 3.16 25.01 4.00 3.00 MEG 152 +153 -23.01 18.82 4.00 3.00 MEG 153 +154 -26.06 15.31 4.00 3.00 MEG 154 +155 -28.76 10.18 4.00 3.00 MEG 155 +156 -31.71 3.39 4.00 3.00 MEG 156 +157 -32.05 -2.89 4.00 3.00 MEG 157 +158 -31.42 -8.67 4.00 3.00 MEG 158 +159 -26.22 -15.24 4.00 3.00 MEG 159 +160 -23.31 -19.72 4.00 3.00 MEG 160 +161 -19.33 -23.66 4.00 3.00 MEG 161 +162 -14.75 -26.73 4.00 3.00 MEG 162 +163 -9.92 -28.91 4.00 3.00 MEG 163 +164 -4.52 -30.10 4.00 3.00 MEG 164 +165 1.25 -30.15 4.00 3.00 MEG 165 +166 6.17 -29.40 4.00 3.00 MEG 166 +167 11.43 -27.39 4.00 3.00 MEG 167 +168 16.20 -24.37 4.00 3.00 MEG 168 +169 20.37 -20.27 4.00 3.00 MEG 169 +170 23.54 -15.56 4.00 3.00 MEG 170 +171 28.66 -8.94 4.00 3.00 MEG 171 +172 29.46 -3.00 4.00 3.00 MEG 172 +173 29.04 3.51 4.00 3.00 MEG 173 +174 25.94 10.77 4.00 3.00 MEG 174 +175 23.08 15.80 4.00 3.00 MEG 175 +176 19.78 19.54 4.00 3.00 MEG 176 +177 -26.70 20.52 4.00 3.00 MEG 177 +178 -29.66 16.81 4.00 3.00 MEG 178 +179 -32.55 11.68 4.00 3.00 MEG 179 +180 -32.47 -13.23 4.00 3.00 MEG 180 +181 -27.63 -19.12 4.00 3.00 MEG 181 +182 -23.75 -23.89 4.00 3.00 MEG 182 +183 -18.94 -27.77 4.00 3.00 MEG 183 +184 -13.64 -30.59 4.00 3.00 MEG 184 +185 -7.93 -32.70 4.00 3.00 MEG 185 +186 -2.12 -33.31 4.00 3.00 MEG 186 +187 4.06 -32.74 4.00 3.00 MEG 187 +188 10.04 -31.14 4.00 3.00 MEG 188 +189 15.57 -28.41 4.00 3.00 MEG 189 +190 20.44 -24.69 4.00 3.00 MEG 190 +191 24.62 -19.81 4.00 3.00 MEG 191 +192 29.49 -13.87 4.00 3.00 MEG 192 +193 29.48 12.54 4.00 3.00 MEG 193 +194 26.49 
17.54 4.00 3.00 MEG 194 +195 23.28 21.40 4.00 3.00 MEG 195 +196 -36.84 4.15 4.00 3.00 MEG 196 +197 -37.22 -3.16 4.00 3.00 MEG 197 +198 -36.14 -9.68 4.00 3.00 MEG 198 +199 -28.42 -23.63 4.00 3.00 MEG 199 +200 -23.68 -28.05 4.00 3.00 MEG 200 +201 -18.03 -31.89 4.00 3.00 MEG 201 +202 -11.97 -34.42 4.00 3.00 MEG 202 +203 -5.32 -35.88 4.00 3.00 MEG 203 +204 1.03 -36.08 4.00 3.00 MEG 204 +205 7.92 -35.00 4.00 3.00 MEG 205 +206 13.99 -32.64 4.00 3.00 MEG 206 +207 19.78 -29.06 4.00 3.00 MEG 207 +208 24.79 -24.52 4.00 3.00 MEG 208 +209 33.39 -10.13 4.00 3.00 MEG 209 +210 34.62 -3.11 4.00 3.00 MEG 210 +211 34.23 4.57 4.00 3.00 MEG 211 +212 -32.38 19.14 4.00 3.00 MEG 212 +213 -35.90 13.21 4.00 3.00 MEG 213 +214 -36.70 -14.70 4.00 3.00 MEG 214 +215 -32.93 -22.44 4.00 3.00 MEG 215 +216 -28.17 -28.07 4.00 3.00 MEG 216 +217 -22.65 -32.41 4.00 3.00 MEG 217 +218 -16.53 -35.71 4.00 3.00 MEG 218 +219 -9.52 -37.92 4.00 3.00 MEG 219 +220 -2.58 -38.82 4.00 3.00 MEG 220 +221 4.65 -38.54 4.00 3.00 MEG 221 +222 11.78 -36.65 4.00 3.00 MEG 222 +223 18.43 -33.60 4.00 3.00 MEG 223 +224 24.26 -29.21 4.00 3.00 MEG 224 +225 29.52 -23.44 4.00 3.00 MEG 225 +226 33.73 -15.36 4.00 3.00 MEG 226 +227 33.02 14.20 4.00 3.00 MEG 227 +228 29.24 19.93 4.00 3.00 MEG 228 +229 -36.80 18.24 4.00 3.00 MEG 229 +230 -40.03 12.76 4.00 3.00 MEG 230 +231 -41.35 5.03 4.00 3.00 MEG 231 +232 -41.79 -3.17 4.00 3.00 MEG 232 +233 -40.48 -10.59 4.00 3.00 MEG 233 +234 -32.92 -26.79 4.00 3.00 MEG 234 +235 -27.40 -32.12 4.00 3.00 MEG 235 +236 -20.92 -36.72 4.00 3.00 MEG 236 +237 -14.11 -39.49 4.00 3.00 MEG 237 +238 -6.76 -41.18 4.00 3.00 MEG 238 +239 1.45 -41.40 4.00 3.00 MEG 239 +240 8.96 -40.25 4.00 3.00 MEG 240 +241 16.27 -37.84 4.00 3.00 MEG 241 +242 22.75 -33.68 4.00 3.00 MEG 242 +243 29.08 -28.20 4.00 3.00 MEG 243 +244 37.59 -11.05 4.00 3.00 MEG 244 +245 39.12 -3.16 4.00 3.00 MEG 245 +246 38.59 5.47 4.00 3.00 MEG 246 +247 37.16 13.60 4.00 3.00 MEG 247 +248 33.62 18.93 4.00 3.00 MEG 248 diff --git a/python/libs/mne/channels/data/neighbors/KIT-157_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-157_neighb.mat new file mode 100644 index 0000000..1cae3fc Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-157_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-208_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-208_neighb.mat new file mode 100644 index 0000000..81de840 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-208_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat new file mode 100644 index 0000000..700d193 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat new file mode 100644 index 0000000..f860666 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat new file mode 100644 index 0000000..19ad03c Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat new file mode 100644 index 0000000..c7ded3d Binary files /dev/null 
and b/python/libs/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat b/python/libs/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat new file mode 100644 index 0000000..55158e7 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/__init__.py b/python/libs/mne/channels/data/neighbors/__init__.py new file mode 100644 index 0000000..b49a56b --- /dev/null +++ b/python/libs/mne/channels/data/neighbors/__init__.py @@ -0,0 +1,9 @@ +"""Neighbor definitions for clustering permutation analysis.""" +# This is a selection of files from http://fieldtrip.fcdonders.nl/template +# Additional definitions can be obtained through the FieldTrip software. +# For additional information on how these definitions were computed, please +# consider the related fieldtrip documentation: +# http://fieldtrip.fcdonders.nl/template/neighbours. +# +# KIT neighbor files were computed with ft_prepare_neighbours using the +# triangulation method. diff --git a/python/libs/mne/channels/data/neighbors/biosemi16_neighb.mat b/python/libs/mne/channels/data/neighbors/biosemi16_neighb.mat new file mode 100644 index 0000000..56b7fb6 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/biosemi16_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/biosemi32_neighb.mat b/python/libs/mne/channels/data/neighbors/biosemi32_neighb.mat new file mode 100644 index 0000000..1c29040 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/biosemi32_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/biosemi64_neighb.mat b/python/libs/mne/channels/data/neighbors/biosemi64_neighb.mat new file mode 100644 index 0000000..4afbf6f Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/biosemi64_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/bti148_neighb.mat b/python/libs/mne/channels/data/neighbors/bti148_neighb.mat new file mode 100644 index 0000000..527e435 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/bti148_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/bti248_neighb.mat b/python/libs/mne/channels/data/neighbors/bti248_neighb.mat new file mode 100644 index 0000000..9bde76b Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/bti248_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/bti248grad_neighb.mat b/python/libs/mne/channels/data/neighbors/bti248grad_neighb.mat new file mode 100644 index 0000000..4e5d620 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/bti248grad_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/ctf151_neighb.mat b/python/libs/mne/channels/data/neighbors/ctf151_neighb.mat new file mode 100644 index 0000000..611a0bc Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/ctf151_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/ctf275_neighb.mat b/python/libs/mne/channels/data/neighbors/ctf275_neighb.mat new file mode 100644 index 0000000..91cf84e Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/ctf275_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/ctf64_neighb.mat b/python/libs/mne/channels/data/neighbors/ctf64_neighb.mat new file mode 100644 index 0000000..fd001e6 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/ctf64_neighb.mat 
differ diff --git a/python/libs/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat b/python/libs/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat new file mode 100644 index 0000000..020392d Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat b/python/libs/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat new file mode 100644 index 0000000..62c88f0 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat b/python/libs/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat new file mode 100644 index 0000000..e59536c Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycapM11_neighb.mat b/python/libs/mne/channels/data/neighbors/easycapM11_neighb.mat new file mode 100644 index 0000000..28131e7 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycapM11_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycapM14_neighb.mat b/python/libs/mne/channels/data/neighbors/easycapM14_neighb.mat new file mode 100644 index 0000000..be2ad3d Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycapM14_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycapM15_neighb.mat b/python/libs/mne/channels/data/neighbors/easycapM15_neighb.mat new file mode 100644 index 0000000..7dfa554 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycapM15_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/easycapM1_neighb.mat b/python/libs/mne/channels/data/neighbors/easycapM1_neighb.mat new file mode 100644 index 0000000..f60d60d Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/easycapM1_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/neuromag306mag_neighb.mat b/python/libs/mne/channels/data/neighbors/neuromag306mag_neighb.mat new file mode 100644 index 0000000..d7ffc98 Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/neuromag306mag_neighb.mat differ diff --git a/python/libs/mne/channels/data/neighbors/neuromag306planar_neighb.mat b/python/libs/mne/channels/data/neighbors/neuromag306planar_neighb.mat new file mode 100644 index 0000000..aa0529e Binary files /dev/null and b/python/libs/mne/channels/data/neighbors/neuromag306planar_neighb.mat differ diff --git a/python/libs/mne/channels/interpolation.py b/python/libs/mne/channels/interpolation.py new file mode 100644 index 0000000..35f3f1f --- /dev/null +++ b/python/libs/mne/channels/interpolation.py @@ -0,0 +1,245 @@ +# Authors: Denis Engemann +# +# License: BSD-3-Clause + +import numpy as np +from numpy.polynomial.legendre import legval + +from ..utils import logger, warn, verbose +from ..io.meas_info import _simplify_info +from ..io.pick import pick_types, pick_channels, pick_info +from ..surface import _normalize_vectors +from ..forward import _map_meg_or_eeg_channels +from ..utils import _check_option, _validate_type + + +def _calc_h(cosang, stiffness=4, n_legendre_terms=50): + """Calculate spherical spline h function between points on a sphere. + + Parameters + ---------- + cosang : array-like | float + cosine of angles between pairs of points on a spherical surface. 
This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline. Also referred to as ``m``.
+    n_legendre_terms : int
+        number of Legendre terms to evaluate.
+    """
+    factors = [(2 * n + 1) /
+               (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi)
+               for n in range(1, n_legendre_terms + 1)]
+    return legval(cosang, [0] + factors)
+
+
+def _calc_g(cosang, stiffness=4, n_legendre_terms=50):
+    """Calculate spherical spline g function between points on a sphere.
+
+    Parameters
+    ----------
+    cosang : array-like of float, shape(n_channels, n_channels)
+        cosine of angles between pairs of points on a spherical surface. This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline.
+    n_legendre_terms : int
+        number of Legendre terms to evaluate.
+
+    Returns
+    -------
+    G : np.ndarray of float, shape(n_channels, n_channels)
+        The G matrix.
+    """
+    factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness *
+                              4 * np.pi)
+               for n in range(1, n_legendre_terms + 1)]
+    return legval(cosang, [0] + factors)
+
+
+def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
+    """Compute interpolation matrix based on spherical splines.
+
+    Implementation based on [1].
+
+    Parameters
+    ----------
+    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
+        The positions to interpolate from.
+    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
+        The positions to interpolate.
+    alpha : float
+        Regularization parameter. Defaults to 1e-5.
+
+    Returns
+    -------
+    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
+        The interpolation matrix that maps good signals to the location
+        of bad signals.
+
+    References
+    ----------
+    [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).
+        Spherical splines for scalp potential and current density mapping.
+        Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7.
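For orientation, here is a minimal sketch of how this mapping is meant to be used (only _make_interpolation_matrix is from the code above; the positions and data are synthetic stand-ins): one row per bad sensor, one column per good sensor, applied by matrix multiplication.

import numpy as np

rng = np.random.default_rng(0)
pos_good = rng.normal(size=(60, 3))  # stand-ins for good-sensor positions
pos_bad = rng.normal(size=(4, 3))    # stand-ins for bad-sensor positions

# positions are normalized to the unit sphere inside the function,
# so raw 3D coordinates are acceptable input
mapping = _make_interpolation_matrix(pos_good, pos_bad)
assert mapping.shape == (4, 60)      # (n_bad, n_good), matching the assert below

good_data = rng.normal(size=(60, 1000))  # (n_good, n_times)
bad_data = mapping @ good_data           # interpolated (n_bad, n_times)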
+ """ + from scipy import linalg + pos_from = pos_from.copy() + pos_to = pos_to.copy() + n_from = pos_from.shape[0] + n_to = pos_to.shape[0] + + # normalize sensor positions to sphere + _normalize_vectors(pos_from) + _normalize_vectors(pos_to) + + # cosine angles between source positions + cosang_from = pos_from.dot(pos_from.T) + cosang_to_from = pos_to.dot(pos_from.T) + G_from = _calc_g(cosang_from) + G_to_from = _calc_g(cosang_to_from) + assert G_from.shape == (n_from, n_from) + assert G_to_from.shape == (n_to, n_from) + + if alpha is not None: + G_from.flat[::len(G_from) + 1] += alpha + + C = np.vstack([np.hstack([G_from, np.ones((n_from, 1))]), + np.hstack([np.ones((1, n_from)), [[0]]])]) + C_inv = linalg.pinv(C) + + interpolation = np.hstack([G_to_from, np.ones((n_to, 1))]) @ C_inv[:, :-1] + assert interpolation.shape == (n_to, n_from) + return interpolation + + +def _do_interp_dots(inst, interpolation, goods_idx, bads_idx): + """Dot product of channel mapping matrix to channel data.""" + from ..io.base import BaseRaw + from ..epochs import BaseEpochs + from ..evoked import Evoked + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst') + inst._data[..., bads_idx, :] = np.matmul( + interpolation, inst._data[..., goods_idx, :]) + + +@verbose +def _interpolate_bads_eeg(inst, origin, exclude=None, verbose=None): + if exclude is None: + exclude = list() + bads_idx = np.zeros(len(inst.ch_names), dtype=bool) + goods_idx = np.zeros(len(inst.ch_names), dtype=bool) + + picks = pick_types(inst.info, meg=False, eeg=True, exclude=exclude) + inst.info._check_consistency() + bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks] + + if len(picks) == 0 or bads_idx.sum() == 0: + return + + goods_idx[picks] = True + goods_idx[bads_idx] = False + + pos = inst._get_channel_positions(picks) + + # Make sure only EEG are used + bads_idx_pos = bads_idx[picks] + goods_idx_pos = goods_idx[picks] + + # test spherical fit + distance = np.linalg.norm(pos - origin, axis=-1) + distance = np.mean(distance / np.mean(distance)) + if np.abs(1. 
- distance) > 0.1: + warn('Your spherical fit is poor, interpolation results are ' + 'likely to be inaccurate.') + + pos_good = pos[goods_idx_pos] - origin + pos_bad = pos[bads_idx_pos] - origin + logger.info('Computing interpolation matrix from {} sensor ' + 'positions'.format(len(pos_good))) + interpolation = _make_interpolation_matrix(pos_good, pos_bad) + + logger.info('Interpolating {} sensors'.format(len(pos_bad))) + _do_interp_dots(inst, interpolation, goods_idx, bads_idx) + + +def _interpolate_bads_meg(inst, mode='accurate', origin=(0., 0., 0.04), + verbose=None, ref_meg=False): + return _interpolate_bads_meeg( + inst, mode, origin, ref_meg=ref_meg, eeg=False, verbose=verbose) + + +@verbose +def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), + meg=True, eeg=True, ref_meg=False, + exclude=(), verbose=None): + bools = dict(meg=meg, eeg=eeg) + info = _simplify_info(inst.info) + for ch_type, do in bools.items(): + if not do: + continue + kw = dict(meg=False, eeg=False) + kw[ch_type] = True + picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **kw) + picks_good = pick_types(info, ref_meg=ref_meg, exclude='bads', **kw) + use_ch_names = [inst.info['ch_names'][p] for p in picks_type] + bads_type = [ch for ch in inst.info['bads'] if ch in use_ch_names] + if len(bads_type) == 0 or len(picks_type) == 0: + continue + # select the bad channels to be interpolated + picks_bad = pick_channels(inst.info['ch_names'], bads_type, + exclude=[]) + if ch_type == 'eeg': + picks_to = picks_type + bad_sel = np.in1d(picks_type, picks_bad) + else: + picks_to = picks_bad + bad_sel = slice(None) + info_from = pick_info(inst.info, picks_good) + info_to = pick_info(inst.info, picks_to) + mapping = _map_meg_or_eeg_channels( + info_from, info_to, mode=mode, origin=origin) + mapping = mapping[bad_sel] + _do_interp_dots(inst, mapping, picks_good, picks_bad) + + +@verbose +def _interpolate_bads_nirs(inst, method='nearest', exclude=(), verbose=None): + from scipy.spatial.distance import pdist, squareform + from mne.preprocessing.nirs import _validate_nirs_info + + # Returns pick of all nirs and ensures channels are correctly ordered + picks_nirs = _validate_nirs_info(inst.info) + if len(picks_nirs) == 0: + return + + nirs_ch_names = [inst.info['ch_names'][p] for p in picks_nirs] + nirs_ch_names = [ch for ch in nirs_ch_names if ch not in exclude] + bads_nirs = [ch for ch in inst.info['bads'] if ch in nirs_ch_names] + if len(bads_nirs) == 0: + return + picks_bad = pick_channels(inst.info['ch_names'], bads_nirs, exclude=[]) + bads_mask = [p in picks_bad for p in picks_nirs] + + chs = [inst.info['chs'][i] for i in picks_nirs] + locs3d = np.array([ch['loc'][:3] for ch in chs]) + + _check_option('fnirs_method', method, ['nearest']) + + if method == 'nearest': + + dist = pdist(locs3d) + dist = squareform(dist) + + for bad in picks_bad: + dists_to_bad = dist[bad] + # Ignore distances to self + dists_to_bad[dists_to_bad == 0] = np.inf + # Ignore distances to other bad channels + dists_to_bad[bads_mask] = np.inf + # Find closest remaining channels for same frequency + closest_idx = np.argmin(dists_to_bad) + (bad % 2) + inst._data[bad] = inst._data[closest_idx] + + inst.info['bads'] = [ch for ch in inst.info['bads'] if ch in exclude] + + return inst diff --git a/python/libs/mne/channels/layout.py b/python/libs/mne/channels/layout.py new file mode 100644 index 0000000..d74e88f --- /dev/null +++ b/python/libs/mne/channels/layout.py @@ -0,0 +1,1072 @@ +# Authors: Alexandre Gramfort +# Denis Engemann 
+# Martin Luessi +# Eric Larson +# Marijn van Vliet +# Jona Sassenhagen +# Teon Brooks +# Robert Luke +# +# License: Simplified BSD + +import logging +from collections import defaultdict +from itertools import combinations +import os.path as op + +import numpy as np + +from ..transforms import _pol_to_cart, _cart_to_sph +from ..io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT +from ..io.constants import FIFF +from ..io.meas_info import Info +from ..utils import (_clean_names, warn, _check_ch_locs, fill_doc, + _check_option, _check_sphere, logger) +from .channels import _get_ch_info + + +class Layout(object): + """Sensor layouts. + + Layouts are typically loaded from a file using read_layout. Only use this + class directly if you're constructing a new layout. + + Parameters + ---------- + box : tuple of length 4 + The box dimension (x_min, x_max, y_min, y_max). + pos : array, shape=(n_channels, 4) + The unit-normalized positions of the channels in 2d + (x, y, width, height). + names : list + The channel names. + ids : list + The channel ids. + kind : str + The type of Layout (e.g. 'Vectorview-all'). + """ + + def __init__(self, box, pos, names, ids, kind): # noqa: D102 + self.box = box + self.pos = pos + self.names = names + self.ids = ids + self.kind = kind + + def save(self, fname): + """Save Layout to disk. + + Parameters + ---------- + fname : str + The file name (e.g. 'my_layout.lout'). + + See Also + -------- + read_layout + """ + x = self.pos[:, 0] + y = self.pos[:, 1] + width = self.pos[:, 2] + height = self.pos[:, 3] + if fname.endswith('.lout'): + out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box + elif fname.endswith('.lay'): + out_str = '' + else: + raise ValueError('Unknown layout type. Should be of type ' + '.lout or .lay.') + + for ii in range(x.shape[0]): + out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' + % (self.ids[ii], x[ii], y[ii], + width[ii], height[ii], self.names[ii])) + + f = open(fname, 'w') + f.write(out_str) + f.close() + + def __repr__(self): + """Return the string representation.""" + return '' % (self.kind, + ', '.join(self.names[:3])) + + @fill_doc + def plot(self, picks=None, show_axes=False, show=True): + """Plot the sensor positions. + + Parameters + ---------- + %(picks_nostr)s + show_axes : bool + Show layout axes if True. Defaults to False. + show : bool + Show figure if True. Defaults to True. + + Returns + ------- + fig : instance of matplotlib.figure.Figure + Figure containing the sensor topography. + + Notes + ----- + .. 
versionadded:: 0.12.0 + """ + from ..viz.topomap import plot_layout + return plot_layout(self, picks=picks, show_axes=show_axes, show=show) + + +def _read_lout(fname): + """Aux function.""" + with open(fname) as f: + box_line = f.readline() # first line contains box dimension + box = tuple(map(float, box_line.split())) + names, pos, ids = [], [], [] + for line in f: + splits = line.split() + if len(splits) == 7: + cid, x, y, dx, dy, chkind, nb = splits + name = chkind + ' ' + nb + else: + cid, x, y, dx, dy, name = splits + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) + names.append(name) + ids.append(int(cid)) + + pos = np.array(pos) + + return box, pos, names, ids + + +def _read_lay(fname): + """Aux function.""" + with open(fname) as f: + box = None + names, pos, ids = [], [], [] + for line in f: + splits = line.split() + if len(splits) == 7: + cid, x, y, dx, dy, chkind, nb = splits + name = chkind + ' ' + nb + else: + cid, x, y, dx, dy, name = splits + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) + names.append(name) + ids.append(int(cid)) + + pos = np.array(pos) + + return box, pos, names, ids + + +def read_layout(kind, path=None, scale=True): + """Read layout from a file. + + Parameters + ---------- + kind : str + The name of the .lout file (e.g. kind='Vectorview-all' for + 'Vectorview-all.lout'). + + path : str | None + The path of the folder containing the Layout file. Defaults to the + mne/channels/data/layouts folder inside your mne-python installation. + + scale : bool + Apply useful scaling for out the box plotting using layout.pos. + Defaults to True. + + Returns + ------- + layout : instance of Layout + The layout. + + See Also + -------- + Layout.save + """ + if path is None: + path = op.join(op.dirname(__file__), 'data', 'layouts') + if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')): + kind += '.lout' + elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')): + kind += '.lay' + + if kind.endswith('.lout'): + fname = op.join(path, kind) + kind = kind[:-5] + box, pos, names, ids = _read_lout(fname) + elif kind.endswith('.lay'): + fname = op.join(path, kind) + kind = kind[:-4] + box, pos, names, ids = _read_lay(fname) + kind.endswith('.lay') + else: + raise ValueError('Unknown layout type. Should be of type ' + '.lout or .lay.') + + if scale: + pos[:, 0] -= np.min(pos[:, 0]) + pos[:, 1] -= np.min(pos[:, 1]) + scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2] + pos /= scaling + pos[:, :2] += 0.03 + pos[:, :2] *= 0.97 / 1.03 + pos[:, 2:] *= 0.94 + + return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids) + + +@fill_doc +def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads', + csd=False): + """Create .lout file from EEG electrode digitization. + + Parameters + ---------- + %(info_not_none)s + radius : float + Viewport radius as a fraction of main figure height. Defaults to 0.5. + width : float | None + Width of sensor axes as a fraction of main figure height. By default, + this will be the maximum width possible without axes overlapping. + height : float | None + Height of sensor axes as a fraction of main figure height. By default, + this will be the maximum height possible without axes overlapping. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any. + If 'bads', exclude channels in info['bads'] (default). + csd : bool + Whether the channels contain current-source-density-transformed data. 
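As a quick usage sketch for read_layout above (assuming the vendored tree is importable as mne; 'Vectorview-all' names one of the .lout files bundled under mne/channels/data/layouts):

from mne.channels import read_layout

layout = read_layout('Vectorview-all')  # resolves to Vectorview-all.lout
print(layout.names[:3])                 # first few channel names
print(layout.pos.shape)                 # (n_channels, 4): x, y, width, height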
+ + Returns + ------- + layout : Layout + The generated Layout. + + See Also + -------- + make_grid_layout, generate_2d_layout + """ + if not (0 <= radius <= 0.5): + raise ValueError('The radius parameter should be between 0 and 0.5.') + if width is not None and not (0 <= width <= 1.0): + raise ValueError('The width parameter should be between 0 and 1.') + if height is not None and not (0 <= height <= 1.0): + raise ValueError('The height parameter should be between 0 and 1.') + + pick_kwargs = dict(meg=False, eeg=True, ref_meg=False, exclude=exclude) + if csd: + pick_kwargs.update(csd=True, eeg=False) + picks = pick_types(info, **pick_kwargs) + loc2d = _find_topomap_coords(info, picks) + names = [info['chs'][i]['ch_name'] for i in picks] + + # Scale [x, y] to be in the range [-0.5, 0.5] + # Don't mess with the origin or aspect ratio + scale = np.maximum(-np.min(loc2d, axis=0), np.max(loc2d, axis=0)).max() * 2 + loc2d /= scale + + # If no width or height specified, calculate the maximum value possible + # without axes overlapping. + if width is None or height is None: + width, height = _box_size(loc2d, width, height, padding=0.1) + + # Scale to viewport radius + loc2d *= 2 * radius + + # Some subplot centers will be at the figure edge. Shrink everything so it + # fits in the figure. + scaling = min(1 / (1. + width), 1 / (1. + height)) + loc2d *= scaling + width *= scaling + height *= scaling + + # Shift to center + loc2d += 0.5 + + n_channels = loc2d.shape[0] + pos = np.c_[loc2d[:, 0] - 0.5 * width, + loc2d[:, 1] - 0.5 * height, + width * np.ones(n_channels), + height * np.ones(n_channels)] + + box = (0, 1, 0, 1) + ids = 1 + np.arange(n_channels) + layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids) + return layout + + +@fill_doc +def make_grid_layout(info, picks=None, n_col=None): + """Generate .lout file for custom data, i.e., ICA sources. + + Parameters + ---------- + %(info_not_none)s + %(picks_base)s all good misc channels. + n_col : int | None + Number of columns to generate. If None, a square grid will be produced. + + Returns + ------- + layout : Layout + The generated layout. + + See Also + -------- + make_eeg_layout, generate_2d_layout + """ + picks = _picks_to_idx(info, picks, 'misc') + + names = [info['chs'][k]['ch_name'] for k in picks] + + if not names: + raise ValueError('No misc data channels found.') + + ids = list(range(len(picks))) + size = len(picks) + + if n_col is None: + # prepare square-like layout + n_row = n_col = np.sqrt(size) # try square + if n_col % 1: + # try n * (n-1) rectangle + n_col, n_row = int(n_col + 1), int(n_row) + + if n_col * n_row < size: # jump to the next full square + n_row += 1 + else: + n_row = int(np.ceil(size / float(n_col))) + + # setup position grid + x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col), + np.linspace(-0.5, 0.5, n_row)) + x, y = x.ravel()[:size], y.ravel()[:size] + width, height = _box_size(np.c_[x, y], padding=0.1) + + # Some axes will be at the figure edge. Shrink everything so it fits in the + # figure. Add 0.01 border around everything + border_x, border_y = (0.01, 0.01) + x_scaling = 1 / (1. + width + border_x) + y_scaling = 1 / (1. 
+ height + border_y) + x = x * x_scaling + y = y * y_scaling + width *= x_scaling + height *= y_scaling + + # Shift to center + x += 0.5 + y += 0.5 + + # calculate pos + pos = np.c_[x - 0.5 * width, y - 0.5 * height, + width * np.ones(size), height * np.ones(size)] + box = (0, 1, 0, 1) + + layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids) + return layout + + +@fill_doc +def find_layout(info, ch_type=None, exclude='bads'): + """Choose a layout based on the channels in the info 'chs' field. + + Parameters + ---------- + %(info_not_none)s + ch_type : {'mag', 'grad', 'meg', 'eeg'} | None + The channel type for selecting single channel layouts. + Defaults to None. Note, this argument will only be considered for + VectorView type layout. Use ``'meg'`` to force using the full layout + in situations where the info does only contain one sensor type. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any. + If 'bads', exclude channels in info['bads'] (default). + + Returns + ------- + layout : Layout instance | None + None if layout not found. + """ + _check_option('ch_type', ch_type, [None, 'mag', 'grad', 'meg', 'eeg', + 'csd']) + + (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, + has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, + has_eeg_coils_and_meg, has_eeg_coils_only, + has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) + has_vv_meg = has_vv_mag and has_vv_grad + has_vv_only_mag = has_vv_mag and not has_vv_grad + has_vv_only_grad = has_vv_grad and not has_vv_mag + if ch_type == "meg" and not has_any_meg: + raise RuntimeError('No MEG channels present. Cannot find MEG layout.') + + if ch_type == "eeg" and not has_eeg_coils: + raise RuntimeError('No EEG channels present. Cannot find EEG layout.') + + layout_name = None + if ((has_vv_meg and ch_type is None) or + (any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')): + layout_name = 'Vectorview-all' + elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'): + layout_name = 'Vectorview-mag' + elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'): + if info['ch_names'][0].endswith('X'): + layout_name = 'Vectorview-grad_norm' + else: + layout_name = 'Vectorview-grad' + elif has_neuromag_122_grad: + layout_name = 'Neuromag_122' + elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or + (has_eeg_coils_and_meg and ch_type == 'eeg')): + if not isinstance(info, (dict, Info)): + raise RuntimeError('Cannot make EEG layout, no measurement info ' + 'was passed to `find_layout`') + return make_eeg_layout(info, exclude=exclude) + elif has_csd_coils and ch_type in [None, 'csd']: + return make_eeg_layout(info, exclude=exclude, csd=True) + elif has_4D_mag: + layout_name = 'magnesWH3600' + elif has_CTF_grad: + layout_name = 'CTF-275' + elif n_kit_grads > 0: + layout_name = _find_kit_layout(info, n_kit_grads) + + # If no known layout is found, fall back on automatic layout + if layout_name is None: + picks = _picks_to_idx(info, 'data', exclude=(), with_ref_meg=False) + ch_names = [info['ch_names'][pick] for pick in picks] + xy = _find_topomap_coords(info, picks=picks, ignore_overlap=True) + return generate_2d_layout(xy, ch_names=ch_names, name='custom', + normalize=True) + + layout = read_layout(layout_name) + if not is_old_vv: + layout.names = _clean_names(layout.names, remove_whitespace=True) + if has_CTF_grad: + layout.names = _clean_names(layout.names, before_dash=True) + + # Apply mask for excluded channels. 
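    # Note: 'bads' is expanded to info['bads'] just below; names, pos, and
    # ids are then filtered with the same index list so the three Layout
    # attributes stay index-aligned.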
+ if exclude == 'bads': + exclude = info['bads'] + idx = [ii for ii, name in enumerate(layout.names) if name not in exclude] + layout.names = [layout.names[ii] for ii in idx] + layout.pos = layout.pos[idx] + layout.ids = [layout.ids[ii] for ii in idx] + + return layout + + +@fill_doc +def _find_kit_layout(info, n_grads): + """Determine the KIT layout. + + Parameters + ---------- + %(info_not_none)s + n_grads : int + Number of KIT-gradiometers in the info. + + Returns + ------- + kit_layout : str | None + String naming the detected KIT layout or ``None`` if layout is missing. + """ + if info['kit_system_id'] is not None: + # avoid circular import + from ..io.kit.constants import KIT_LAYOUT + return KIT_LAYOUT.get(info['kit_system_id']) + elif n_grads == 160: + return 'KIT-160' + elif n_grads == 125: + return 'KIT-125' + elif n_grads > 157: + return 'KIT-AD' + + # channels which are on the left hemisphere for NY and right for UMD + test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25', + 'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30', + 'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61', + 'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90', + 'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112', + 'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124', + 'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153', + 'MEG 154', 'MEG 155', 'MEG 156') + x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs] + if np.all(x): + return 'KIT-157' # KIT-NY + elif np.all(np.invert(x)): + raise NotImplementedError("Guessing sensor layout for legacy UMD " + "files is not implemented. Please convert " + "your files using MNE-Python 0.13 or " + "higher.") + else: + raise RuntimeError("KIT system could not be determined for data") + + +def _box_size(points, width=None, height=None, padding=0.0): + """Given a series of points, calculate an appropriate box size. + + Parameters + ---------- + points : array, shape (n_points, 2) + The centers of the axes as a list of (x, y) coordinate pairs. Normally + these are points in the range [0, 1] centered at 0.5. + width : float | None + An optional box width to enforce. When set, only the box height will be + calculated by the function. + height : float | None + An optional box height to enforce. When set, only the box width will be + calculated by the function. + padding : float + Portion of the box to reserve for padding. The value can range between + 0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding). + + Returns + ------- + width : float + Width of the box + height : float + Height of the box + """ + from scipy.spatial.distance import pdist + + def xdiff(a, b): + return np.abs(a[0] - b[0]) + + def ydiff(a, b): + return np.abs(a[1] - b[1]) + + points = np.asarray(points) + all_combinations = list(combinations(points, 2)) + + if width is None and height is None: + if len(points) <= 1: + # Trivial case first + width = 1.0 + height = 1.0 + else: + # Find the closest two points A and B. + a, b = all_combinations[np.argmin(pdist(points))] + + # The closest points define either the max width or max height. + w, h = xdiff(a, b), ydiff(a, b) + if w > h: + width = w + else: + height = h + + # At this point, either width or height is known, or both are known. + if height is None: + # Find all axes that could potentially overlap horizontally. + hdist = pdist(points, xdiff) + candidates = [all_combinations[i] for i, d in enumerate(hdist) + if d < width] + + if len(candidates) == 0: + # No axes overlap, take all the height you want. 
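            # (this branch keeps the full normalized height; the else-branch
            #  below instead caps the height at the smallest vertical gap
            #  among the axes that overlap horizontally)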
+ height = 1.0 + else: + # Find an appropriate height so all none of the found axes will + # overlap. + height = np.min([ydiff(*c) for c in candidates]) + + elif width is None: + # Find all axes that could potentially overlap vertically. + vdist = pdist(points, ydiff) + candidates = [all_combinations[i] for i, d in enumerate(vdist) + if d < height] + + if len(candidates) == 0: + # No axes overlap, take all the width you want. + width = 1.0 + else: + # Find an appropriate width so all none of the found axes will + # overlap. + width = np.min([xdiff(*c) for c in candidates]) + + # Add a bit of padding between boxes + width *= 1 - padding + height *= 1 - padding + + return width, height + + +@fill_doc +def _find_topomap_coords(info, picks, layout=None, ignore_overlap=False, + to_sphere=True, sphere=None): + """Guess the E/MEG layout and return appropriate topomap coordinates. + + Parameters + ---------- + %(info_not_none)s + picks : str | list | slice | None + None will choose all channels. + layout : None | instance of Layout + Enforce using a specific layout. With None, a new map is generated + and a layout is chosen based on the channels in the picks + parameter. + sphere : array-like | str + Definition of the head sphere. + + Returns + ------- + coords : array, shape = (n_chs, 2) + 2 dimensional coordinates for each sensor for a topomap plot. + """ + picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False) + + if layout is not None: + chs = [info['chs'][i] for i in picks] + pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs] + pos = np.asarray(pos) + else: + pos = _auto_topomap_coords( + info, picks, ignore_overlap=ignore_overlap, to_sphere=to_sphere, + sphere=sphere) + + return pos + + +@fill_doc +def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): + """Make a 2 dimensional sensor map from sensor positions in an info dict. + + The default is to use the electrode locations. The fallback option is to + attempt using digitization points of kind FIFFV_POINT_EEG. This only works + with EEG and requires an equal number of digitization points and sensors. + + Parameters + ---------- + %(info_not_none)s + picks : list | str | slice | None + None will pick all channels. + ignore_overlap : bool + Whether to ignore overlapping positions in the layout. If False and + positions overlap, an error is thrown. + to_sphere : bool + If True, the radial distance of spherical coordinates is ignored, in + effect fitting the xyz-coordinates to a sphere. + sphere : array-like | str + The head sphere definition. + + Returns + ------- + locs : array, shape = (n_sensors, 2) + An array of positions of the 2 dimensional map. + """ + from scipy.spatial.distance import pdist, squareform + sphere = _check_sphere(sphere, info) + logger.debug(f'Generating coords using: {sphere}') + + picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False) + chs = [info['chs'][i] for i in picks] + + # Use channel locations if available + locs3d = np.array([ch['loc'][:3] for ch in chs]) + + # If electrode locations are not available, use digization points + if not _check_ch_locs(info=info, picks=picks): + logging.warning('Did not find any electrode locations (in the info ' + 'object), will attempt to use digitization points ' + 'instead. However, if digitization points do not ' + 'correspond to the EEG electrodes, this will lead to ' + 'bad results. 
Please verify that the sensor locations ' + 'in the plot are accurate.') + + # MEG/EOG/ECG sensors don't have digitization points; all requested + # channels must be EEG + for ch in chs: + if ch['kind'] != FIFF.FIFFV_EEG_CH: + raise ValueError("Cannot determine location of MEG/EOG/ECG " + "channels using digitization points.") + + eeg_ch_names = [ch['ch_name'] for ch in info['chs'] + if ch['kind'] == FIFF.FIFFV_EEG_CH] + + # Get EEG digitization points + if info['dig'] is None or len(info['dig']) == 0: + raise RuntimeError('No digitization points found.') + + locs3d = np.array([point['r'] for point in info['dig'] + if point['kind'] == FIFF.FIFFV_POINT_EEG]) + + if len(locs3d) == 0: + raise RuntimeError('Did not find any digitization points of ' + 'kind FIFFV_POINT_EEG (%d) in the info.' + % FIFF.FIFFV_POINT_EEG) + + if len(locs3d) != len(eeg_ch_names): + raise ValueError("Number of EEG digitization points (%d) " + "doesn't match the number of EEG channels " + "(%d)" % (len(locs3d), len(eeg_ch_names))) + + # We no longer center digitization points on head origin, as we work + # in head coordinates always + + # Match the digitization points with the requested + # channels. + eeg_ch_locs = dict(zip(eeg_ch_names, locs3d)) + locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs]) + + # Sometimes we can get nans + locs3d[~np.isfinite(locs3d)] = 0. + + # Duplicate points cause all kinds of trouble during visualization + dist = pdist(locs3d) + if len(locs3d) > 1 and np.min(dist) < 1e-10 and not ignore_overlap: + problematic_electrodes = [ + chs[elec_i]['ch_name'] + for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0] + ] + + raise ValueError('The following electrodes have overlapping positions,' + ' which causes problems during visualization:\n' + + ', '.join(problematic_electrodes)) + + if to_sphere: + # translate to sphere origin, transform/flatten Z, translate back + locs3d -= sphere[:3] + # use spherical (theta, pol) as (r, theta) for polar->cartesian + cart_coords = _cart_to_sph(locs3d) + out = _pol_to_cart(cart_coords[:, 1:][:, ::-1]) + # scale from radians to mm + out *= cart_coords[:, [0]] / (np.pi / 2.) + out += sphere[:2] + else: + out = _pol_to_cart(_cart_to_sph(locs3d)) + return out + + +def _topo_to_sphere(pos, eegs): + """Transform xy-coordinates to sphere. + + Parameters + ---------- + pos : array-like, shape (n_channels, 2) + xy-oordinates to transform. + eegs : list of int + Indices of EEG channels that are included when calculating the sphere. + + Returns + ------- + coords : array, shape (n_channels, 3) + xyz-coordinates. + """ + xs, ys = np.array(pos).T + + sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2))) + xs /= sqs # Shape to a sphere and normalize + ys /= sqs + + xs += 0.5 - np.mean(xs[eegs]) # Center the points + ys += 0.5 - np.mean(ys[eegs]) + + xs = xs * 2. - 1. # Values ranging from -1 to 1 + ys = ys * 2. - 1. + + rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.) + alphas = np.arccos(rs) + zs = np.sin(alphas) + return np.column_stack([xs, ys, zs]) + + +@fill_doc +def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads', + raise_error=True): + """Find the picks for pairing grad channels. + + Parameters + ---------- + %(info_not_none)s + layout : Layout | None + The layout if available. Defaults to None. + topomap_coords : bool + Return the coordinates for a topomap plot along with the picks. If + False, only picks are returned. Defaults to True. + exclude : list of str | str + List of channels to exclude. 
If empty, do not exclude any. + If 'bads', exclude channels in info['bads']. Defaults to 'bads'. + raise_error : bool + Whether to raise an error when no pairs are found. If False, raises a + warning. + + Returns + ------- + picks : array of int + Picks for the grad channels, ordered in pairs. + coords : array, shape = (n_grad_channels, 3) + Coordinates for a topomap plot (optional, only returned if + topomap_coords == True). + """ + # find all complete pairs of grad channels + pairs = defaultdict(list) + grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude) + + _, has_vv_grad, *_, has_neuromag_122_grad, _ = _get_ch_info(info) + + for i in grad_picks: + ch = info['chs'][i] + name = ch['ch_name'] + if has_vv_grad and name.startswith('MEG'): + if name.endswith(('2', '3')): + key = name[-4:-1] + pairs[key].append(ch) + if has_neuromag_122_grad and name.startswith('MEG'): + key = (int(name[-3:]) - 1) // 2 + pairs[key].append(ch) + + pairs = [p for p in pairs.values() if len(p) == 2] + if len(pairs) == 0: + if raise_error: + raise ValueError("No 'grad' channel pairs found.") + else: + warn("No 'grad' channel pairs found.") + return list() + + # find the picks corresponding to the grad channels + grad_chs = sum(pairs, []) + ch_names = info['ch_names'] + picks = [ch_names.index(c['ch_name']) for c in grad_chs] + + if topomap_coords: + shape = (len(pairs), 2, -1) + coords = (_find_topomap_coords(info, picks, layout) + .reshape(shape).mean(axis=1)) + return picks, coords + else: + return picks + + +# this function is used to pair grad when info is not present +# it is the case of Projection that don't have the info. +def _pair_grad_sensors_ch_names_vectorview(ch_names): + """Find the indices for pairing grad channels in a Vectorview system. + + Parameters + ---------- + ch_names : list of str + A list of channel names. + + Returns + ------- + indexes : list of int + Indices of the grad channels, ordered in pairs. + """ + pairs = defaultdict(list) + for i, name in enumerate(ch_names): + if name.startswith('MEG'): + if name.endswith(('2', '3')): + key = name[-4:-1] + pairs[key].append(i) + + pairs = [p for p in pairs.values() if len(p) == 2] + + grad_chs = sum(pairs, []) + return grad_chs + + +# this function is used to pair grad when info is not present +# it is the case of Projection that don't have the info. +def _pair_grad_sensors_ch_names_neuromag122(ch_names): + """Find the indices for pairing grad channels in a Neuromag 122 system. + + Parameters + ---------- + ch_names : list of str + A list of channel names. + + Returns + ------- + indexes : list of int + Indices of the grad channels, ordered in pairs. + """ + pairs = defaultdict(list) + for i, name in enumerate(ch_names): + if name.startswith('MEG'): + key = (int(name[-3:]) - 1) // 2 + pairs[key].append(i) + + pairs = [p for p in pairs.values() if len(p) == 2] + + grad_chs = sum(pairs, []) + return grad_chs + + +def _merge_ch_data(data, ch_type, names, method='rms'): + """Merge data from channel pairs. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels, ordered in pairs. + ch_type : str + Channel type. + names : list + List of channel names. + method : str + Can be 'rms' or 'mean'. + + Returns + ------- + data : array, shape = (n_channels / 2, ..., n_times) + The root mean square or mean for each pair. + names : list + List of channel names. 
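The 'rms' rule used below collapses each gradiometer pair (a, b) to sqrt((a**2 + b**2) / 2); a quick numeric check with made-up values:

import numpy as np

pair = np.array([[3.0], [4.0]])               # one grad pair, one time point
rms = np.sqrt(np.sum(pair ** 2, axis=0) / 2)
print(rms)                                    # [3.5355...] == sqrt((9 + 16) / 2)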
+ """ + if ch_type == 'grad': + data = _merge_grad_data(data, method) + else: + assert ch_type in _FNIRS_CH_TYPES_SPLIT + data, names = _merge_nirs_data(data, names) + return data, names + + +def _merge_grad_data(data, method='rms'): + """Merge data from channel pairs using the RMS or mean. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels, ordered in pairs. + method : str + Can be 'rms' or 'mean'. + + Returns + ------- + data : array, shape = (n_channels / 2, ..., n_times) + The root mean square or mean for each pair. + """ + data, orig_shape = data.reshape((len(data) // 2, 2, -1)), data.shape + if method == 'mean': + data = np.mean(data, axis=1) + elif method == 'rms': + data = np.sqrt(np.sum(data ** 2, axis=1) / 2) + else: + raise ValueError('method must be "rms" or "mean", got %s.' % method) + return data.reshape(data.shape[:1] + orig_shape[1:]) + + +def _merge_nirs_data(data, merged_names): + """Merge data from multiple nirs channel using the mean. + + Channel names that have an x in them will be merged. The first channel in + the name is replaced with the mean of all listed channels. The other + channels are removed. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels. + merged_names : list + List of strings containing the channel names. Channels that are to be + merged contain an x between them. + + Returns + ------- + data : array + Data for channels with requested channels merged. Channels used in the + merge are removed from the array. + """ + to_remove = np.empty(0, dtype=np.int32) + for idx, ch in enumerate(merged_names): + if 'x' in ch: + indices = np.empty(0, dtype=np.int32) + channels = ch.split("x") + for sub_ch in channels[1:]: + indices = np.append(indices, merged_names.index(sub_ch)) + data[idx] = np.mean(data[np.append(idx, indices)], axis=0) + to_remove = np.append(to_remove, indices) + to_remove = np.unique(to_remove) + for rem in sorted(to_remove, reverse=True): + del merged_names[rem] + data = np.delete(data, rem, 0) + return data, merged_names + + +def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None, + ch_indices=None, name='ecog', bg_image=None, + normalize=True): + """Generate a custom 2D layout from xy points. + + Generates a 2-D layout for plotting with plot_topo methods and + functions. XY points will be normalized between 0 and 1, where + normalization extremes will be either the min/max of xy, or + the width/height of bg_image. + + Parameters + ---------- + xy : ndarray, shape (N, 2) + The xy coordinates of sensor locations. + w : float + The width of each sensor's axis (between 0 and 1). + h : float + The height of each sensor's axis (between 0 and 1). + pad : float + Portion of the box to reserve for padding. The value can range between + 0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding). + ch_names : list + The names of each channel. Must be a list of strings, with one + string per channel. + ch_indices : list + Index of each channel - must be a collection of unique integers, + one index per channel. + name : str + The name of this layout type. + bg_image : str | ndarray + The image over which sensor axes will be plotted. Either a path to an + image file, or an array that can be plotted with plt.imshow. If + provided, xy points will be normalized by the width/height of this + image. If not, xy points will be normalized by their own min/max. + normalize : bool + Whether to normalize the coordinates to run from 0 to 1. 
Defaults to + True. + + Returns + ------- + layout : Layout + A Layout object that can be plotted with plot_topo + functions and methods. + + See Also + -------- + make_eeg_layout, make_grid_layout + + Notes + ----- + .. versionadded:: 0.9.0 + """ + import matplotlib.pyplot as plt + if ch_indices is None: + ch_indices = np.arange(xy.shape[0]) + if ch_names is None: + ch_names = ['{}'.format(i) for i in ch_indices] + + if len(ch_names) != len(ch_indices): + raise ValueError('# channel names and indices must be equal') + if len(ch_names) != len(xy): + raise ValueError('# channel names and xy vals must be equal') + + x, y = xy.copy().astype(float).T + + # Normalize xy to 0-1 + if bg_image is not None: + # Normalize by image dimensions + img = plt.imread(bg_image) if isinstance(bg_image, str) else bg_image + x /= img.shape[1] + y /= img.shape[0] + elif normalize: + # Normalize x and y by their maxes + for i_dim in [x, y]: + i_dim -= i_dim.min(0) + i_dim /= (i_dim.max(0) - i_dim.min(0)) + + # Create box and pos variable + box = _box_size(np.vstack([x, y]).T, padding=pad) + box = (0, 0, box[0], box[1]) + w, h = [np.array([i] * x.shape[0]) for i in [w, h]] + loc_params = np.vstack([x, y, w, h]).T + + layout = Layout(box, loc_params, ch_names, ch_indices, name) + return layout diff --git a/python/libs/mne/channels/montage.py b/python/libs/mne/channels/montage.py new file mode 100644 index 0000000..70a1b98 --- /dev/null +++ b/python/libs/mne/channels/montage.py @@ -0,0 +1,1588 @@ +# Authors: Alexandre Gramfort +# Denis Engemann +# Martin Luessi +# Eric Larson +# Marijn van Vliet +# Jona Sassenhagen +# Teon Brooks +# Christian Brodbeck +# Stefan Appelhoff +# Joan Massich +# +# License: Simplified BSD + +from collections import OrderedDict +from copy import deepcopy +import os.path as op +import re + +import numpy as np + +from ..defaults import HEAD_SIZE_DEFAULT +from .._freesurfer import get_mni_fiducials +from ..viz import plot_montage +from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart, + _topo_to_sph, _frame_to_str, Transform, + _verbose_frames, _fit_matched_points, + _quat_to_affine, _ensure_trans) +from ..io._digitization import (_count_points_by_type, + _get_dig_eeg, _make_dig_points, write_dig, + _read_dig_fif, _format_dig_points, + _get_fid_coords, _coord_frame_const, + _get_data_as_dict_from_dig) +from ..io.meas_info import create_info +from ..io.open import fiff_open +from ..io.pick import pick_types, _picks_to_idx, channel_type +from ..io.constants import FIFF, CHANNEL_LOC_ALIASES +from ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose, + _check_option, _validate_type, _check_fname, _on_missing, + fill_doc, _docdict) + +from ._dig_montage_utils import _read_dig_montage_egi +from ._dig_montage_utils import _parse_brainvision_dig_montage + +_BUILT_IN_MONTAGES = [ + 'EGI_256', + 'GSN-HydroCel-128', 'GSN-HydroCel-129', 'GSN-HydroCel-256', + 'GSN-HydroCel-257', 'GSN-HydroCel-32', 'GSN-HydroCel-64_1.0', + 'GSN-HydroCel-65_1.0', + 'biosemi128', 'biosemi16', 'biosemi160', 'biosemi256', + 'biosemi32', 'biosemi64', + 'easycap-M1', 'easycap-M10', + 'mgh60', 'mgh70', + 'standard_1005', 'standard_1020', 'standard_alphabetic', + 'standard_postfixed', 'standard_prefixed', 'standard_primed', + 'artinis-octamon', 'artinis-brite23' +] + + +def _check_get_coord_frame(dig): + dig_coord_frames = sorted(set(d['coord_frame'] for d in dig)) + if len(dig_coord_frames) != 1: + raise RuntimeError( + 'Only a single coordinate frame in dig is supported, got ' + 
f'{dig_coord_frames}') + return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None + + +def get_builtin_montages(): + """Get a list of all builtin montages. + + Returns + ------- + montages : list + Names of all builtin montages that can be used by + :func:`make_standard_montage`. + """ + return _BUILT_IN_MONTAGES + + +def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None, + hsp=None, hpi=None, coord_frame='unknown'): + r"""Make montage from arrays. + + Parameters + ---------- + ch_pos : dict | None + Dictionary of channel positions. Keys are channel names and values + are 3D coordinates - array of shape (3,) - in native digitizer space + in m. + nasion : None | array, shape (3,) + The position of the nasion fiducial point. + This point is assumed to be in the native digitizer space in m. + lpa : None | array, shape (3,) + The position of the left periauricular fiducial point. + This point is assumed to be in the native digitizer space in m. + rpa : None | array, shape (3,) + The position of the right periauricular fiducial point. + This point is assumed to be in the native digitizer space in m. + hsp : None | array, shape (n_points, 3) + This corresponds to an array of positions of the headshape points in + 3d. These points are assumed to be in the native digitizer space in m. + hpi : None | array, shape (n_hpi, 3) + This corresponds to an array of HPI points in the native digitizer + space. They only necessary if computation of a ``compute_dev_head_t`` + is True. + coord_frame : str + The coordinate frame of the points. Usually this is ``'unknown'`` + for native digitizer space. + Other valid values are: ``'head'``, ``'meg'``, ``'mri'``, + ``'mri_voxel'``, ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, + ``'ctf_head'``, and ``'ctf_meg'``. + + .. note:: + For custom montages without fiducials, this parameter must be set + to ``'head'``. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_egi + read_dig_fif + read_dig_localite + read_dig_polhemus_isotrak + """ + _validate_type(ch_pos, (dict, None), 'ch_pos') + if ch_pos is None: + ch_names = None + else: + ch_names = list(ch_pos) + dig = _make_dig_points( + nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp, + dig_ch_pos=ch_pos, coord_frame=coord_frame + ) + + return DigMontage(dig=dig, ch_names=ch_names) + + +class DigMontage(object): + """Montage for digitized electrode and headshape position data. + + .. warning:: Montages are typically created using one of the helper + functions in the ``See Also`` section below instead of + instantiating this class directly. + + Parameters + ---------- + dig : list of dict + The object containing all the dig points. + ch_names : list of str + The names of the EEG channels. + + See Also + -------- + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + + def __init__(self, *, dig=None, ch_names=None): + dig = list() if dig is None else dig + _validate_type(item=dig, types=list, item_name='dig') + ch_names = list() if ch_names is None else ch_names + n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG]) + if n_eeg != len(ch_names): + raise ValueError( + 'The number of EEG channels (%d) does not match the number' + ' of channel names provided (%d)' % (n_eeg, len(ch_names)) + ) + + self.dig = dig + self.ch_names = ch_names + + def __repr__(self): + """Return string representation.""" + n_points = _count_points_by_type(self.dig) + return ('').format(**n_points) + + @copy_function_doc_to_method_doc(plot_montage) + def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True, + sphere=None, verbose=None): + return plot_montage(self, scale_factor=scale_factor, + show_names=show_names, kind=kind, show=show, + sphere=sphere) + + @fill_doc + def rename_channels(self, mapping, allow_duplicates=False): + """Rename the channels. + + Parameters + ---------- + %(mapping_rename_channels_duplicates)s + + Returns + ------- + inst : instance of DigMontage + The instance. Operates in-place. + """ + from .channels import rename_channels + temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg') + rename_channels(temp_info, mapping, allow_duplicates) + self.ch_names = temp_info['ch_names'] + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save digitization points to FIF. + + Parameters + ---------- + fname : str + The filename to use. Should end in .fif or .fif.gz. + %(overwrite)s + %(verbose)s + """ + coord_frame = _check_get_coord_frame(self.dig) + write_dig(fname, self.dig, coord_frame, overwrite=overwrite) + + def __iadd__(self, other): + """Add two DigMontages in place. + + Notes + ----- + Two DigMontages can only be added if there are no duplicated ch_names + and if fiducials are present they should share the same coordinate + system and location values. + """ + def is_fid_defined(fid): + return not( + fid.nasion is None and fid.lpa is None and fid.rpa is None + ) + + # Check for none duplicated ch_names + ch_names_intersection = set(self.ch_names).intersection(other.ch_names) + if ch_names_intersection: + raise RuntimeError(( + "Cannot add two DigMontage objects if they contain duplicated" + " channel names. Duplicated channel(s) found: {}." + ).format( + ', '.join(['%r' % v for v in sorted(ch_names_intersection)]) + )) + + # Check for unique matching fiducials + self_fid, self_coord = _get_fid_coords(self.dig) + other_fid, other_coord = _get_fid_coords(other.dig) + + if is_fid_defined(self_fid) and is_fid_defined(other_fid): + if self_coord != other_coord: + raise RuntimeError('Cannot add two DigMontage objects if ' + 'fiducial locations are not in the same ' + 'coordinate system.') + + for kk in self_fid: + if not np.array_equal(self_fid[kk], other_fid[kk]): + raise RuntimeError('Cannot add two DigMontage objects if ' + 'fiducial locations do not match ' + '(%s)' % kk) + + # keep self + self.dig = _format_dig_points( + self.dig + [d for d in other.dig + if d['kind'] != FIFF.FIFFV_POINT_CARDINAL] + ) + else: + self.dig = _format_dig_points(self.dig + other.dig) + + self.ch_names += other.ch_names + return self + + def copy(self): + """Copy the DigMontage object. + + Returns + ------- + dig : instance of DigMontage + The copied DigMontage instance. 
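Because __add__ (just below) copies self before delegating to __iadd__, two montages with disjoint channel names combine without mutating either operand. A small sketch (channel names and positions invented here):

import numpy as np
from mne.channels import make_dig_montage

left = make_dig_montage(ch_pos={'A1': np.array([-0.05, 0.0, 0.06])},
                        coord_frame='head')
right = make_dig_montage(ch_pos={'A2': np.array([0.05, 0.0, 0.06])},
                         coord_frame='head')
both = left + right
print(both.ch_names)   # ['A1', 'A2']; left and right are unchanged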
+ """ + return deepcopy(self) + + def __add__(self, other): + """Add two DigMontages.""" + out = self.copy() + out += other + return out + + def _get_ch_pos(self): + pos = [d['r'] for d in _get_dig_eeg(self.dig)] + assert len(self.ch_names) == len(pos) + return OrderedDict(zip(self.ch_names, pos)) + + def _get_dig_names(self): + NAMED_KIND = (FIFF.FIFFV_POINT_EEG,) + is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig]) + assert len(self.ch_names) == is_eeg.sum() + dig_names = [None] * len(self.dig) + for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]): + dig_names[dig_idx] = self.ch_names[ch_name_idx] + + return dig_names + + def get_positions(self): + """Get all channel and fiducial positions. + + Returns + ------- + positions : dict + A dictionary of the positions for channels (``ch_pos``), + coordinate frame (``coord_frame``), nasion (``nasion``), + left preauricular point (``lpa``), + right preauricular point (``rpa``), + Head Shape Polhemus (``hsp``), and + Head Position Indicator(``hpi``). + E.g.:: + + { + 'ch_pos': {'EEG061': [0, 0, 0]}, + 'nasion': [0, 0, 1], + 'coord_frame': 'mni_tal', + 'lpa': [0, 1, 0], + 'rpa': [1, 0, 0], + 'hsp': None, + 'hpi': None + } + """ + # get channel positions as dict + ch_pos = self._get_ch_pos() + + # get coordframe and fiducial coordinates + montage_bunch = _get_data_as_dict_from_dig(self.dig) + coord_frame = _frame_to_str.get(montage_bunch.coord_frame) + + # return dictionary + positions = dict( + ch_pos=ch_pos, + coord_frame=coord_frame, + nasion=montage_bunch.nasion, + lpa=montage_bunch.lpa, + rpa=montage_bunch.rpa, + hsp=montage_bunch.hsp, + hpi=montage_bunch.hpi, + ) + return positions + + @verbose + def apply_trans(self, trans, verbose=None): + """Apply a transformation matrix to the montage. + + Parameters + ---------- + trans : instance of mne.transforms.Transform + The transformation matrix to be applied. + %(verbose)s + """ + _validate_type(trans, Transform, 'trans') + coord_frame = self.get_positions()['coord_frame'] + trans = _ensure_trans(trans, fro=coord_frame, to=trans['to']) + for d in self.dig: + d['r'] = apply_trans(trans, d['r']) + d['coord_frame'] = trans['to'] + + @verbose + def add_estimated_fiducials(self, subject, subjects_dir=None, + verbose=None): + """Estimate fiducials based on FreeSurfer ``fsaverage`` subject. + + This takes a montage with the ``mri`` coordinate frame, + corresponding to the FreeSurfer RAS (xyz in the volume) T1w + image of the specific subject. It will call + :func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and + Nasion fiducial points. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + See Also + -------- + :ref:`tut-source-alignment` + + Notes + ----- + Since MNE uses the FIF data structure, it relies on the ``head`` + coordinate frame. Any coordinate frame can be transformed + to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are + defined. One can use this function to estimate those fiducials + and then use ``mne.channels.compute_native_head_t(montage)`` + to get the head <-> MRI transform. + """ + # get coordframe and fiducial coordinates + montage_bunch = _get_data_as_dict_from_dig(self.dig) + + # get the coordinate frame and check that it's MRI + if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + f'Montage should be in the "mri" coordinate frame ' + f'to use `add_estimated_fiducials`. 
The current coordinate ' + f'frame is {montage_bunch.coord_frame}') + + # estimate LPA, nasion, RPA from FreeSurfer fsaverage + fids_mri = list(get_mni_fiducials(subject, subjects_dir)) + + # add those digpoints to front of montage + self.dig = fids_mri + self.dig + return self + + @verbose + def add_mni_fiducials(self, subjects_dir=None, verbose=None): + """Add fiducials to a montage in MNI space. + + Parameters + ---------- + %(subjects_dir)s + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + Notes + ----- + ``fsaverage`` is in MNI space and so its fiducials can be + added to a montage in "mni_tal". MNI is an ACPC-aligned + coordinate system (the posterior commissure is the origin) + so since BIDS requires channel locations for ECoG, sEEG and + DBS to be in ACPC space, this function can be used to allow + those coordinate to be transformed to "head" space (origin + between LPA and RPA). + """ + montage_bunch = _get_data_as_dict_from_dig(self.dig) + + # get the coordinate frame and check that it's MNI TAL + if montage_bunch.coord_frame != FIFF.FIFFV_MNE_COORD_MNI_TAL: + raise RuntimeError( + f'Montage should be in the "mni_tal" coordinate frame ' + f'to use `add_estimated_fiducials`. The current coordinate ' + f'frame is {montage_bunch.coord_frame}') + + fids_mni = get_mni_fiducials('fsaverage', subjects_dir) + for fid in fids_mni: + # "mri" and "mni_tal" are equivalent for fsaverage + assert fid['coord_frame'] == FIFF.FIFFV_COORD_MRI + fid['coord_frame'] = FIFF.FIFFV_MNE_COORD_MNI_TAL + self.dig = fids_mni + self.dig + return self + + @verbose + def remove_fiducials(self, verbose=None): + """Remove the fiducial points from a montage. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + Notes + ----- + MNE will transform a montage to the internal "head" coordinate + frame if the fiducials are present. Under most circumstances, this + is ideal as it standardizes the coordinate frame for things like + plotting. However, in some circumstances, such as saving a ``raw`` + with intracranial data to BIDS format, the coordinate frame + should not be changed by removing fiducials. + """ + for d in self.dig.copy(): + if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: + self.dig.remove(d) + return self + + +VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1) + + +def _check_unit_and_get_scaling(unit): + _check_option('unit', unit, sorted(VALID_SCALES.keys())) + return VALID_SCALES[unit] + + +def transform_to_head(montage): + """Transform a DigMontage object into head coordinate. + + It requires that the LPA, RPA and Nasion fiducial + point are available. It requires that all fiducial + points are in the same coordinate e.g. 'unknown' + and it will convert all the point in this coordinate + system to Neuromag head coordinate system. + + Parameters + ---------- + montage : instance of DigMontage + The montage. + + Returns + ------- + montage : instance of DigMontage + The montage after transforming the points to head + coordinate system. 
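A small sketch of the requirement stated above (coordinates invented, roughly anatomical; both names are defined in this module):

import numpy as np
from mne.channels.montage import make_dig_montage, transform_to_head

montage = make_dig_montage(
    ch_pos={'Cz': np.array([0.0, 0.0, 0.12])},
    nasion=np.array([0.0, 0.10, 0.0]),
    lpa=np.array([-0.08, 0.0, 0.0]),
    rpa=np.array([0.08, 0.0, 0.0]),
    coord_frame='unknown')
head = transform_to_head(montage)            # returns a copy; input untouched
print(head.get_positions()['coord_frame'])   # 'head'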
+ """ + # Get fiducial points and their coord_frame + native_head_t = compute_native_head_t(montage) + montage = montage.copy() # to avoid inplace modification + if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD: + for d in montage.dig: + if d['coord_frame'] == native_head_t['from']: + d['r'] = apply_trans(native_head_t, d['r']) + d['coord_frame'] = FIFF.FIFFV_COORD_HEAD + return montage + + +def read_dig_dat(fname): + r"""Read electrode positions from a ``*.dat`` file. + + .. Warning:: + This function was implemented based on ``*.dat`` files available from + `Compumedics `__ and might not work as expected with novel + files. If it does not read your files correctly please contact the + mne-python developers. + + Parameters + ---------- + fname : path-like + File from which to read electrode locations. + + Returns + ------- + montage : DigMontage + The montage. + + See Also + -------- + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + ``*.dat`` files are plain text files and can be inspected and amended with + a plain text editor. + """ + from ._standard_montage_utils import _check_dupes_odict + fname = _check_fname(fname, overwrite='read', must_exist=True) + + with open(fname, 'r') as fid: + lines = fid.readlines() + + ch_names, poss = list(), list() + nasion = lpa = rpa = None + for i, line in enumerate(lines): + items = line.split() + if not items: + continue + elif len(items) != 5: + raise ValueError( + "Error reading %s, line %s has unexpected number of entries:\n" + "%s" % (fname, i, line.rstrip())) + num = items[1] + if num == '67': + continue # centroid + pos = np.array([float(item) for item in items[2:]]) + if num == '78': + nasion = pos + elif num == '76': + lpa = pos + elif num == '82': + rpa = pos + else: + ch_names.append(items[0]) + poss.append(pos) + electrodes = _check_dupes_odict(ch_names, poss) + return make_dig_montage(electrodes, nasion, lpa, rpa) + + +def read_dig_fif(fname): + r"""Read digitized points from a .fif file. + + Note that electrode names are not present in the .fif file so + they are here defined with the convention from VectorView + systems (EEG001, EEG002, etc.) + + Parameters + ---------- + fname : path-like + FIF file from which to read digitization locations. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_dat + read_dig_egi + read_dig_captrak + read_dig_polhemus_isotrak + read_dig_hpts + read_dig_localite + make_dig_montage + """ + _check_fname(fname, overwrite='read', must_exist=True) + # Load the dig data + f, tree = fiff_open(fname)[:2] + with f as fid: + dig = _read_dig_fif(fid, tree) + + ch_names = [] + for d in dig: + if d['kind'] == FIFF.FIFFV_POINT_EEG: + ch_names.append('EEG%03d' % d['ident']) + + montage = DigMontage(dig=dig, ch_names=ch_names) + return montage + + +def read_dig_hpts(fname, unit='mm'): + """Read historical .hpts mne-c files. + + Parameters + ---------- + fname : str + The filepath of .hpts file. + unit : 'm' | 'cm' | 'mm' + Unit of the positions. Defaults to 'mm'. + + Returns + ------- + montage : instance of DigMontage + The montage. 
+ + See Also + -------- + DigMontage + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + The hpts format digitzer data file may contain comment lines starting + with the pound sign (#) and data lines of the form:: + + <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*> + + where: + + ``<*category*>`` + defines the type of points. Allowed categories are: ``hpi``, + ``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to + head-position indicator coil locations, cardinal landmarks, EEG + electrode locations, and additional head surface points, + respectively. + + ``<*identifier*>`` + identifies the point. The identifiers are usually sequential + numbers. For cardinal landmarks, 1 = left auricular point, + 2 = nasion, and 3 = right auricular point. For EEG electrodes, + identifier = 0 signifies the reference electrode. + + ``<*x/mm*> , <*y/mm*> , <*z/mm*>`` + Location of the point, usually in the head coordinate system + in millimeters. If your points are in [m] then unit parameter can + be changed. + + For example:: + + cardinal 2 -5.6729 -12.3873 -30.3671 + cardinal 1 -37.6782 -10.4957 91.5228 + cardinal 3 -131.3127 9.3976 -22.2363 + hpi 1 -30.4493 -11.8450 83.3601 + hpi 2 -122.5353 9.2232 -28.6828 + hpi 3 -6.8518 -47.0697 -37.0829 + hpi 4 7.3744 -50.6297 -12.1376 + hpi 5 -33.4264 -43.7352 -57.7756 + eeg FP1 3.8676 -77.0439 -13.0212 + eeg FP2 -31.9297 -70.6852 -57.4881 + eeg F7 -6.1042 -68.2969 45.4939 + ... + """ + from ._standard_montage_utils import _str_names, _str + _scale = _check_unit_and_get_scaling(unit) + + out = np.genfromtxt(fname, comments='#', + dtype=(_str, _str, 'f8', 'f8', 'f8')) + kind, label = _str_names(out['f0']), _str_names(out['f1']) + kind = [k.lower() for k in kind] + xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T + xyz *= _scale + del _scale + fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'} + fid = {fid_idx_to_label[label[ii]]: this_xyz + for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'} + ch_pos = {label[ii]: this_xyz + for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'} + hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) + if kind[ii] == 'hpi']) + hpi.shape = (-1, 3) # in case it's empty + hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) + if kind[ii] == 'extra']) + hsp.shape = (-1, 3) # in case it's empty + return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) + + +def read_dig_egi(fname): + """Read electrode locations from EGI system. + + Parameters + ---------- + fname : path-like + EGI MFF XML coordinates file from which to read digitization locations. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_dat + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + """ + _check_fname(fname, overwrite='read', must_exist=True) + + data = _read_dig_montage_egi( + fname=fname, + _scaling=1., + _all_data_kwargs_are_none=True + ) + return make_dig_montage(**data) + + +def read_dig_captrak(fname): + """Read electrode locations from CapTrak Brain Products system. + + Parameters + ---------- + fname : path-like + BrainVision CapTrak coordinates file from which to read EEG electrode + locations. This is typically in XML format with the .bvct extension. + + Returns + ------- + montage : instance of DigMontage + The montage. 
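+
+    A minimal usage sketch (``electrodes.bvct`` is a hypothetical CapTrak
+    export)::
+
+        montage = read_dig_captrak('electrodes.bvct')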
+ + See Also + -------- + DigMontage + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + """ + _check_fname(fname, overwrite='read', must_exist=True) + data = _parse_brainvision_dig_montage(fname, scale=1e-3) + + return make_dig_montage(**data) + + +def read_dig_localite(fname, nasion=None, lpa=None, rpa=None): + """Read Localite .csv file. + + Parameters + ---------- + fname : path-like + File name. + nasion : str | None + Name of nasion fiducial point. + lpa : str | None + Name of left preauricular fiducial point. + rpa : str | None + Name of right preauricular fiducial point. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_polhemus_isotrak + make_dig_montage + """ + ch_pos = {} + with open(fname) as f: + f.readline() # skip first row + for row in f: + _, name, x, y, z = row.split(",") + ch_pos[name] = np.array((float(x), float(y), float(z))) / 1000 + + if nasion is not None: + nasion = ch_pos.pop(nasion) + if lpa is not None: + lpa = ch_pos.pop(lpa) + if rpa is not None: + rpa = ch_pos.pop(rpa) + + return make_dig_montage(ch_pos, nasion, lpa, rpa) + + +def _get_montage_in_head(montage): + coords = set([d['coord_frame'] for d in montage.dig]) + if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD: + return montage + else: + return transform_to_head(montage.copy()) + + +def _set_montage_fnirs(info, montage): + """Set the montage for fNIRS data. + + This needs to be different to electrodes as each channel has three + coordinates that need to be set. For each channel there is a source optode + location, a detector optode location, and a channel midpoint that must be + stored. This function modifies info['chs'][#]['loc'] and info['dig'] in + place. + """ + from ..preprocessing.nirs import _validate_nirs_info + # Validate that the fNIRS info is correctly formatted + picks = _validate_nirs_info(info) + + # Modify info['chs'][#]['loc'] in place + num_ficiduals = len(montage.dig) - len(montage.ch_names) + for ch_idx in picks: + ch = info['chs'][ch_idx]['ch_name'] + source, detector = ch.split(' ')[0].split('_') + source_pos = montage.dig[montage.ch_names.index(source) + + num_ficiduals]['r'] + detector_pos = montage.dig[montage.ch_names.index(detector) + + num_ficiduals]['r'] + + info['chs'][ch_idx]['loc'][3:6] = source_pos + info['chs'][ch_idx]['loc'][6:9] = detector_pos + midpoint = (source_pos + detector_pos) / 2 + info['chs'][ch_idx]['loc'][:3] = midpoint + info['chs'][ch_idx]['coord_frame'] = FIFF.FIFFV_COORD_HEAD + + # Modify info['dig'] in place + with info._unlock(): + info['dig'] = montage.dig + + +@fill_doc +def _set_montage(info, montage, match_case=True, match_alias=False, + on_missing='raise'): + """Apply montage to data. + + With a DigMontage, this function will replace the digitizer info with + the values specified for the particular montage. + + Usually, a montage is expected to contain the positions of all EEG + electrodes and a warning is raised when this is not the case. + + Parameters + ---------- + %(info_not_none)s + %(montage)s + %(match_case)s + %(match_alias)s + %(on_missing_montage)s + + Notes + ----- + This function will change the info variable in place. 
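+
+    The public entry point for this helper is ``inst.set_montage``; a
+    minimal sketch (``raw`` is assumed to be an existing Raw instance)::
+
+        montage = make_standard_montage('standard_1020')
+        raw.set_montage(montage, on_missing='warn')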
+ """ + _validate_type(montage, (DigMontage, None, str), 'montage') + if montage is None: + # Next line modifies info['dig'] in place + with info._unlock(): + info['dig'] = None + for ch in info['chs']: + # Next line modifies info['chs'][#]['loc'] in place + ch['loc'] = np.full(12, np.nan) + return + if isinstance(montage, str): # load builtin montage + _check_option('montage', montage, _BUILT_IN_MONTAGES) + montage = make_standard_montage(montage) + + mnt_head = _get_montage_in_head(montage) + del montage + + def _backcompat_value(pos, ref_pos): + if any(np.isnan(pos)): + return np.full(6, np.nan) + else: + return np.concatenate((pos, ref_pos)) + + # get the channels in the montage in head + ch_pos = mnt_head._get_ch_pos() + + # only get the eeg, seeg, dbs, ecog channels + picks = pick_types( + info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True, + exclude=()) + non_picks = np.setdiff1d(np.arange(info['nchan']), picks) + + # get the reference position from the loc[3:6] + chs = [info['chs'][ii] for ii in picks] + non_names = [info['chs'][ii]['ch_name'] for ii in non_picks] + del picks + ref_pos = [ch['loc'][3:6] for ch in chs] + + # keep reference location from EEG-like channels if they + # already exist and are all the same. + custom_eeg_ref_dig = False + # Note: ref position is an empty list for fieldtrip data + if ref_pos: + if all([np.equal(ref_pos[0], pos).all() for pos in ref_pos]) \ + and not np.equal(ref_pos[0], [0, 0, 0]).all(): + eeg_ref_pos = ref_pos[0] + # since we have an EEG reference position, we have + # to add it into the info['dig'] as EEG000 + custom_eeg_ref_dig = True + if not custom_eeg_ref_dig: + refs = set(ch_pos) & {'EEG000', 'REF'} + assert len(refs) <= 1 + eeg_ref_pos = np.zeros(3) if not(refs) else ch_pos.pop(refs.pop()) + + # This raises based on info being subset/superset of montage + info_names = [ch['ch_name'] for ch in chs] + dig_names = mnt_head._get_dig_names() + ref_names = [None, 'EEG000', 'REF'] + + if match_case: + info_names_use = info_names + dig_names_use = dig_names + non_names_use = non_names + else: + ch_pos_use = OrderedDict( + (name.lower(), pos) for name, pos in ch_pos.items()) + info_names_use = [name.lower() for name in info_names] + dig_names_use = [name.lower() if name is not None else name + for name in dig_names] + non_names_use = [name.lower() for name in non_names] + ref_names = [name.lower() if name is not None else name + for name in ref_names] + n_dup = len(ch_pos) - len(ch_pos_use) + if n_dup: + raise ValueError('Cannot use match_case=False as %s montage ' + 'name(s) require case sensitivity' % n_dup) + n_dup = len(info_names_use) - len(set(info_names_use)) + if n_dup: + raise ValueError('Cannot use match_case=False as %s channel ' + 'name(s) require case sensitivity' % n_dup) + ch_pos = ch_pos_use + del ch_pos_use + del dig_names + + # use lookup table to match unrecognized channel names to known aliases + if match_alias: + alias_dict = (match_alias if isinstance(match_alias, dict) else + CHANNEL_LOC_ALIASES) + if not match_case: + alias_dict = { + ch_name.lower(): ch_alias.lower() + for ch_name, ch_alias in alias_dict.items() + } + + # excluded ch_alias not in info, to prevent unnecessary mapping and + # warning messages based on aliases. 
+ alias_dict = { + ch_name: ch_alias + for ch_name, ch_alias in alias_dict.items() + } + info_names_use = [ + alias_dict.get(ch_name, ch_name) for ch_name in info_names_use + ] + non_names_use = [ + alias_dict.get(ch_name, ch_name) for ch_name in non_names_use + ] + + # warn user if there is not a full overlap of montage with info_chs + missing = np.where([use not in ch_pos for use in info_names_use])[0] + if len(missing): # DigMontage is subset of info + missing_names = [info_names[ii] for ii in missing] + missing_coord_msg = ( + 'DigMontage is only a subset of info. There are ' + f'{len(missing)} channel position{_pl(missing)} ' + 'not present in the DigMontage. The required channels are:\n\n' + f'{missing_names}.\n\nConsider using inst.set_channel_types ' + 'if these are not EEG channels, or use the on_missing ' + 'parameter if the channel positions are allowed to be unknown ' + 'in your analyses.' + ) + _on_missing(on_missing, missing_coord_msg) + + # set ch coordinates and names from digmontage or nan coords + for ii in missing: + ch_pos[info_names_use[ii]] = [np.nan] * 3 + del info_names + + assert len(non_names_use) == len(non_names) + # There are no issues here with fNIRS being in non_names_use because + # these names are like "D1_S1_760" and the ch_pos for a fNIRS montage + # will have entries "D1" and "S1". + extra = np.where([non in ch_pos for non in non_names_use])[0] + if len(extra): + types = '/'.join(sorted(set( + channel_type(info, non_picks[ii]) for ii in extra))) + names = [non_names[ii] for ii in extra] + warn(f'Not setting position{_pl(extra)} of {len(extra)} {types} ' + f'channel{_pl(extra)} found in montage:\n{names}\n' + 'Consider setting the channel types to be of ' + f'{_docdict["montage_types"]} ' + 'using inst.set_channel_types before calling inst.set_montage, ' + 'or omit these channels when creating your montage.') + + for ch, use in zip(chs, info_names_use): + # Next line modifies info['chs'][#]['loc'] in place + if use in ch_pos: + ch['loc'][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos) + ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + del ch_pos + + # XXX this is probably wrong as it uses the order from the montage + # rather than the order of our info['ch_names'] ... + digpoints = [ + mnt_head.dig[ii] for ii, name in enumerate(dig_names_use) + if name in (info_names_use + ref_names)] + + # get a copy of the old dig + if info['dig'] is not None: + old_dig = info['dig'].copy() + else: + old_dig = [] + + # determine if needed to add an extra EEG REF DigPoint + if custom_eeg_ref_dig: + # ref_name = 'EEG000' if match_case else 'eeg000' + ref_dig_dict = {'kind': FIFF.FIFFV_POINT_EEG, + 'r': eeg_ref_pos, + 'ident': 0, + 'coord_frame': info['dig'].pop()['coord_frame']} + ref_dig_point = _format_dig_points([ref_dig_dict])[0] + # only append the reference dig point if it was already + # in the old dig + if ref_dig_point in old_dig: + digpoints.append(ref_dig_point) + # Next line modifies info['dig'] in place + with info._unlock(): + info['dig'] = _format_dig_points(digpoints, enforce_order=True) + + # Handle fNIRS with source, detector and channel + fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True) + if len(fnirs_picks) > 0: + _set_montage_fnirs(info, mnt_head) + + +def _read_isotrak_elp_points(fname): + """Read Polhemus Isotrak digitizer data from a ``.elp`` file. + + Parameters + ---------- + fname : str + The filepath of .elp Polhemus Isotrak file. 
+ + Returns + ------- + out : dict of arrays + The dictionary containing locations for 'nasion', 'lpa', 'rpa' + and 'points'. + """ + value_pattern = r"\-?\d+\.?\d*e?\-?\d*" + coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern) + + with open(fname) as fid: + file_str = fid.read() + + points_str = [m.groups() for m in re.finditer(coord_pattern, file_str, + re.MULTILINE)] + points = np.array(points_str, dtype=float) + + return { + 'nasion': points[0], 'lpa': points[1], 'rpa': points[2], + 'points': points[3:] + } + + +def _read_isotrak_hsp_points(fname): + """Read Polhemus Isotrak digitizer data from a ``.hsp`` file. + + Parameters + ---------- + fname : str + The filepath of .hsp Polhemus Isotrak file. + + Returns + ------- + out : dict of arrays + The dictionary containing locations for 'nasion', 'lpa', 'rpa' + and 'points'. + """ + def get_hsp_fiducial(line): + return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t') + + with open(fname) as ff: + for line in ff: + if 'position of fiducials' in line.lower(): + break + + nasion = get_hsp_fiducial(ff.readline()) + lpa = get_hsp_fiducial(ff.readline()) + rpa = get_hsp_fiducial(ff.readline()) + + _ = ff.readline() + line = ff.readline() + if line: + n_points, n_cols = np.fromstring(line, dtype=int, sep='\t') + points = np.fromstring( + string=ff.read(), dtype=float, sep='\t', + ).reshape(-1, n_cols) + assert points.shape[0] == n_points + else: + points = np.empty((0, 3)) + + return { + 'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points + } + + +def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'): + """Read Polhemus digitizer data from a file. + + Parameters + ---------- + fname : str + The filepath of Polhemus ISOTrak formatted file. + File extension is expected to be '.hsp', '.elp' or '.eeg'. + ch_names : None | list of str + The names of the points. This will make the points + considered as EEG channels. If None, channels will be assumed + to be HPI if the extension is ``'.elp'``, and extra headshape + points otherwise. + unit : 'm' | 'cm' | 'mm' + Unit of the digitizer file. Polhemus ISOTrak systems data is usually + exported in meters. Defaults to 'm'. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + make_dig_montage + read_polhemus_fastscan + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_localite + """ + VALID_FILE_EXT = ('.hsp', '.elp', '.eeg') + _scale = _check_unit_and_get_scaling(unit) + + _, ext = op.splitext(fname) + _check_option('fname', ext, VALID_FILE_EXT) + + if ext == '.elp': + data = _read_isotrak_elp_points(fname) + else: + # Default case we read points as hsp since is the most likely scenario + data = _read_isotrak_hsp_points(fname) + + if _scale != 1: + data = {key: val * _scale for key, val in data.items()} + else: + pass # noqa + + if ch_names is None: + keyword = 'hpi' if ext == '.elp' else 'hsp' + data[keyword] = data.pop('points') + + else: + points = data.pop('points') + if points.shape[0] == len(ch_names): + data['ch_pos'] = OrderedDict(zip(ch_names, points)) + else: + raise ValueError(( + "Length of ``ch_names`` does not match the number of points" + " in {fname}. 
Expected ``ch_names`` length {n_points:d}," + " given {n_chnames:d}" + ).format( + fname=fname, n_points=points.shape[0], n_chnames=len(ch_names) + )) + + return make_dig_montage(**data) + + +def _is_polhemus_fastscan(fname): + header = '' + with open(fname, 'r') as fid: + for line in fid: + if not line.startswith('%'): + break + header += line + + return 'FastSCAN' in header + + +@verbose +def read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *, + verbose=None): + """Read Polhemus FastSCAN digitizer data from a ``.txt`` file. + + Parameters + ---------- + fname : str + The filepath of .txt Polhemus FastSCAN file. + unit : 'm' | 'cm' | 'mm' + Unit of the digitizer file. Polhemus FastSCAN systems data is usually + exported in millimeters. Defaults to 'mm'. + %(on_header_missing)s + %(verbose)s + + Returns + ------- + points : array, shape (n_points, 3) + The digitization points in digitizer coordinates. + + See Also + -------- + read_dig_polhemus_isotrak + make_dig_montage + """ + VALID_FILE_EXT = ['.txt'] + _scale = _check_unit_and_get_scaling(unit) + + _, ext = op.splitext(fname) + _check_option('fname', ext, VALID_FILE_EXT) + + if not _is_polhemus_fastscan(fname): + msg = "%s does not contain a valid Polhemus FastSCAN header" % fname + _on_missing(on_header_missing, msg) + + points = _scale * np.loadtxt(fname, comments='%', ndmin=2) + _check_dig_shape(points) + return points + + +def _read_eeglab_locations(fname): + ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist() + topo = np.loadtxt(fname, dtype=float, usecols=[1, 2]) + sph = _topo_to_sph(topo) + pos = _sph_to_cart(sph) + pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1] + + return ch_names, pos + + +def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): + """Read a montage from a file. + + Parameters + ---------- + fname : str + File extension is expected to be: + '.loc' or '.locs' or '.eloc' (for EEGLAB files), + '.sfp' (BESA/EGI files), '.csd', + '.elc', '.txt', '.csd', '.elp' (BESA spherical), + '.bvef' (BrainVision files), + '.csv', '.tsv', '.xyz' (XYZ coordinates). + head_size : float | None + The size of the head (radius, in [m]). If ``None``, returns the values + read from the montage file with no modification. Defaults to 0.095m. + coord_frame : str | None + The coordinate frame of the points. Usually this is "unknown" + for native digitizer space. Defaults to None, which is "unknown" for + most readers but "head" for EEGLAB. + + .. versionadded:: 0.20 + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + make_dig_montage + make_standard_montage + + Notes + ----- + The function is a helper to read electrode positions you may have + in various formats. Most of these format are weakly specified + in terms of units, coordinate systems. It implies that setting + a montage using a DigMontage produced by this function may + be problematic. If you use a standard/template (eg. 10/20, + 10/10 or 10/05) we recommend you use :func:`make_standard_montage`. + If you can have positions in memory you can also use + :func:`make_dig_montage` that takes arrays as input. 
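+
+    A minimal usage sketch (``electrodes.sfp`` is a hypothetical BESA/EGI
+    style file; ``raw`` an existing Raw instance)::
+
+        montage = read_custom_montage('electrodes.sfp', head_size=0.095)
+        raw.set_montage(montage)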
+ """ + from ._standard_montage_utils import ( + _read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc, + _read_elp_besa, _read_brainvision, _read_xyz + ) + SUPPORTED_FILE_EXT = { + 'eeglab': ('.loc', '.locs', '.eloc', ), + 'hydrocel': ('.sfp', ), + 'matlab': ('.csd', ), + 'asa electrode': ('.elc', ), + 'generic (Theta-phi in degrees)': ('.txt', ), + 'standard BESA spherical': ('.elp', ), # NB: not same as polhemus elp + 'brainvision': ('.bvef', ), + 'xyz': ('.csv', '.tsv', '.xyz'), + } + + _, ext = op.splitext(fname) + _check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ()))) + + if ext in SUPPORTED_FILE_EXT['eeglab']: + if head_size is None: + raise ValueError( + "``head_size`` cannot be None for '{}'".format(ext)) + ch_names, pos = _read_eeglab_locations(fname) + scale = head_size / np.median(np.linalg.norm(pos, axis=-1)) + pos *= scale + + montage = make_dig_montage( + ch_pos=OrderedDict(zip(ch_names, pos)), + coord_frame='head', + ) + + elif ext in SUPPORTED_FILE_EXT['hydrocel']: + montage = _read_sfp(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT['matlab']: + montage = _read_csd(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT['asa electrode']: + montage = _read_elc(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']: + if head_size is None: + raise ValueError( + "``head_size`` cannot be None for '{}'".format(ext)) + montage = _read_theta_phi_in_degrees(fname, head_size=head_size, + fid_names=('Nz', 'LPA', 'RPA')) + + elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']: + montage = _read_elp_besa(fname, head_size) + + elif ext in SUPPORTED_FILE_EXT['brainvision']: + montage = _read_brainvision(fname, head_size) + + elif ext in SUPPORTED_FILE_EXT['xyz']: + montage = _read_xyz(fname) + + if coord_frame is not None: + coord_frame = _coord_frame_const(coord_frame) + for d in montage.dig: + d['coord_frame'] = coord_frame + + return montage + + +def compute_dev_head_t(montage): + """Compute device to head transform from a DigMontage. + + Parameters + ---------- + montage : instance of DigMontage + The DigMontage must contain the fiducials in head + coordinate system and hpi points in both head and + meg device coordinate system. + + Returns + ------- + dev_head_t : instance of Transform + A Device-to-Head transformation matrix. + """ + _, coord_frame = _get_fid_coords(montage.dig) + if coord_frame != FIFF.FIFFV_COORD_HEAD: + raise ValueError('montage should have been set to head coordinate ' + 'system with transform_to_head function.') + + hpi_head = np.array( + [d['r'] for d in montage.dig + if (d['kind'] == FIFF.FIFFV_POINT_HPI and + d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float) + hpi_dev = np.array( + [d['r'] for d in montage.dig + if (d['kind'] == FIFF.FIFFV_POINT_HPI and + d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float) + + if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0): + raise ValueError(( + "To compute Device-to-Head transformation, the same number of HPI" + " points in device and head coordinates is required. (Got {dev}" + " points in device and {head} points in head coordinate systems)" + ).format(dev=len(hpi_dev), head=len(hpi_head))) + + trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0]) + return Transform(fro='meg', to='head', trans=trans) + + +def compute_native_head_t(montage): + """Compute the native-to-head transformation for a montage. 
+ + This uses the fiducials in the native space to transform to compute the + transform to the head coordinate frame. + + Parameters + ---------- + montage : instance of DigMontage + The montage. + + Returns + ------- + native_head_t : instance of Transform + A native-to-head transformation matrix. + """ + # Get fiducial points and their coord_frame + fid_coords, coord_frame = _get_fid_coords(montage.dig, raise_error=False) + if coord_frame is None: + coord_frame = FIFF.FIFFV_COORD_UNKNOWN + if coord_frame == FIFF.FIFFV_COORD_HEAD: + native_head_t = np.eye(4) + else: + fid_keys = ('nasion', 'lpa', 'rpa') + for key in fid_keys: + if fid_coords[key] is None: + warn('Fiducial point %s not found, assuming identity %s to ' + 'head transformation' + % (key, _verbose_frames[coord_frame],)) + native_head_t = np.eye(4) + break + else: + native_head_t = get_ras_to_neuromag_trans( + *[fid_coords[key] for key in fid_keys]) + return Transform(coord_frame, 'head', native_head_t) + + +def make_standard_montage(kind, head_size='auto'): + """Read a generic (built-in) montage. + + Parameters + ---------- + kind : str + The name of the montage to use. See notes for valid kinds. + head_size : float | None | str + The head size (radius, in meters) to use for spherical montages. + Can be None to not scale the read sizes. ``'auto'`` (default) will + use 95mm for all montages except the ``'standard*'``, ``'mgh*'``, and + ``'artinis*'``, which are already in fsaverage's MRI coordinates + (same as MNI). + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + make_dig_montage + read_custom_montage + + Notes + ----- + Individualized (digitized) electrode positions should be read in using + :func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`, + :func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`, + :func:`read_dig_hpts` or made with :func:`make_dig_montage`. + + Valid ``kind`` arguments are: + + =================== ===================================================== + Kind Description + =================== ===================================================== + standard_1005 Electrodes are named and positioned according to the + international 10-05 system (343+3 locations) + standard_1020 Electrodes are named and positioned according to the + international 10-20 system (94+3 locations) + standard_alphabetic Electrodes are named with LETTER-NUMBER combinations + (A1, B2, F4, ...) 
(65+3 locations) + standard_postfixed Electrodes are named according to the international + 10-20 system using postfixes for intermediate + positions (100+3 locations) + standard_prefixed Electrodes are named according to the international + 10-20 system using prefixes for intermediate + positions (74+3 locations) + standard_primed Electrodes are named according to the international + 10-20 system using prime marks (' and '') for + intermediate positions (100+3 locations) + + biosemi16 BioSemi cap with 16 electrodes (16+3 locations) + biosemi32 BioSemi cap with 32 electrodes (32+3 locations) + biosemi64 BioSemi cap with 64 electrodes (64+3 locations) + biosemi128 BioSemi cap with 128 electrodes (128+3 locations) + biosemi160 BioSemi cap with 160 electrodes (160+3 locations) + biosemi256 BioSemi cap with 256 electrodes (256+3 locations) + + easycap-M1 EasyCap with 10-05 electrode names (74 locations) + easycap-M10 EasyCap with numbered electrodes (61 locations) + + EGI_256 Geodesic Sensor Net (256 locations) + + GSN-HydroCel-32 HydroCel Geodesic Sensor Net and Cz (33+3 locations) + GSN-HydroCel-64_1.0 HydroCel Geodesic Sensor Net (64+3 locations) + GSN-HydroCel-65_1.0 HydroCel Geodesic Sensor Net and Cz (65+3 locations) + GSN-HydroCel-128 HydroCel Geodesic Sensor Net (128+3 locations) + GSN-HydroCel-129 HydroCel Geodesic Sensor Net and Cz (129+3 locations) + GSN-HydroCel-256 HydroCel Geodesic Sensor Net (256+3 locations) + GSN-HydroCel-257 HydroCel Geodesic Sensor Net and Cz (257+3 locations) + + mgh60 The (older) 60-channel cap used at + MGH (60+3 locations) + mgh70 The (newer) 70-channel BrainVision cap used at + MGH (70+3 locations) + + artinis-octamon Artinis OctaMon fNIRS (8 sources, 2 detectors) + + artinis-brite23 Artinis Brite23 fNIRS (11 sources, 7 detectors) + =================== ===================================================== + + .. versionadded:: 0.19.0 + """ + from ._standard_montage_utils import standard_montage_look_up_table + _validate_type(kind, str, 'kind') + _check_option('kind', kind, _BUILT_IN_MONTAGES) + _validate_type(head_size, ('numeric', str, None), 'head_size') + if isinstance(head_size, str): + _check_option('head_size', head_size, ('auto',), extra='when str') + if kind.startswith(('standard', 'mgh', 'artinis')): + head_size = None + else: + head_size = HEAD_SIZE_DEFAULT + return standard_montage_look_up_table[kind](head_size=head_size) + + +def _check_dig_shape(pts): + _validate_type(pts, np.ndarray, 'points') + if pts.ndim != 2 or pts.shape[-1] != 3: + raise ValueError( + f'Points must be of shape (n, 3) instead of {pts.shape}') diff --git a/python/libs/mne/channels/tests/__init__.py b/python/libs/mne/channels/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/channels/tests/test_channels.py b/python/libs/mne/channels/tests/test_channels.py new file mode 100644 index 0000000..7dd6c55 --- /dev/null +++ b/python/libs/mne/channels/tests/test_channels.py @@ -0,0 +1,539 @@ +# Author: Daniel G Wakeman +# Denis A. 
Engemann +# +# License: BSD-3-Clause + +import os.path as op + +from copy import deepcopy +from functools import partial + +import pytest +import numpy as np +from scipy.io import savemat +from numpy.testing import assert_array_equal, assert_equal, assert_allclose + +from mne.channels import (rename_channels, read_ch_adjacency, combine_channels, + find_ch_adjacency, make_1020_channel_selections, + read_custom_montage, equalize_channels) +from mne.channels.channels import (_ch_neighbor_adjacency, + _compute_ch_adjacency) +from mne.io import (read_info, read_raw_fif, read_raw_ctf, read_raw_bti, + read_raw_eeglab, read_raw_kit, RawArray) +from mne.io.constants import FIFF +from mne import (pick_types, pick_channels, EpochsArray, EvokedArray, + make_ad_hoc_cov, create_info, read_events, Epochs) +from mne.datasets import testing +from mne.utils import requires_pandas, requires_version + +io_dir = op.join(op.dirname(__file__), '..', '..', 'io') +base_dir = op.join(io_dir, 'tests', 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') +eve_fname = op.join(base_dir, 'test-eve.fif') +fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd') + +testing_path = testing.data_path(download=False) + + +@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize('proj', (True, False)) +def test_reorder_channels(preload, proj): + """Test reordering of channels.""" + raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj() + if proj: # a no-op but should test it + raw._projector = np.eye(len(raw.ch_names)) + if preload: + raw.load_data() + # with .reorder_channels + if proj and not preload: + with pytest.raises(RuntimeError, match='load data'): + raw.copy().reorder_channels(raw.ch_names[::-1]) + return + raw_new = raw.copy().reorder_channels(raw.ch_names[::-1]) + assert raw_new.ch_names == raw.ch_names[::-1] + if proj: + assert_allclose(raw_new._projector, raw._projector, atol=1e-12) + else: + assert raw._projector is None + assert raw_new._projector is None + assert_array_equal(raw[:][0], raw_new[:][0][::-1]) + raw_new.reorder_channels(raw_new.ch_names[::-1][1:-1]) + raw.drop_channels(raw.ch_names[:1] + raw.ch_names[-1:]) + assert_array_equal(raw[:][0], raw_new[:][0]) + with pytest.raises(ValueError, match='repeated'): + raw.reorder_channels(raw.ch_names[:1] + raw.ch_names[:1]) + # and with .pick + reord = [1, 0] + list(range(2, len(raw.ch_names))) + rev = np.argsort(reord) + raw_new = raw.copy().pick(reord) + assert_array_equal(raw[:][0], raw_new[rev][0]) + + +def test_rename_channels(): + """Test rename channels.""" + info = read_info(raw_fname) + # Error Tests + # Test channel name exists in ch_names + mapping = {'EEG 160': 'EEG060'} + pytest.raises(ValueError, rename_channels, info, mapping) + # Test improper mapping configuration + mapping = {'MEG 2641': 1.0} + pytest.raises(TypeError, rename_channels, info, mapping) + # Test non-unique mapping configuration + mapping = {'MEG 2641': 'MEG 2642'} + pytest.raises(ValueError, rename_channels, info, mapping) + # Test bad input + pytest.raises(ValueError, rename_channels, info, 1.) + pytest.raises(ValueError, rename_channels, info, 1.) 
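+    # (Illustrative aside, not upstream test code: ``rename_channels``
+    #  accepts either a mapping {'old': 'new'} or a callable, e.g.
+    #  rename_channels(info, lambda name: name.replace(' ', '')), which is
+    #  exercised below.)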
+ + # Test successful changes + # Test ch_name and ch_names are changed + info2 = deepcopy(info) # for consistency at the start of each test + info2['bads'] = ['EEG 060', 'EOG 061'] + mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'} + rename_channels(info2, mapping) + assert info2['chs'][374]['ch_name'] == 'EEG060' + assert info2['ch_names'][374] == 'EEG060' + assert info2['chs'][375]['ch_name'] == 'EOG061' + assert info2['ch_names'][375] == 'EOG061' + assert_array_equal(['EEG060', 'EOG061'], info2['bads']) + info2 = deepcopy(info) + rename_channels(info2, lambda x: x.replace(' ', '')) + assert info2['chs'][373]['ch_name'] == 'EEG059' + info2 = deepcopy(info) + info2['bads'] = ['EEG 060', 'EEG 060'] + rename_channels(info2, mapping) + assert_array_equal(['EEG060', 'EEG060'], info2['bads']) + + # test that keys in Raw._orig_units will be renamed, too + raw = read_raw_fif(raw_fname).crop(0, 0.1) + old, new = 'EEG 060', 'New' + raw._orig_units = {old: 'V'} + + raw.rename_channels({old: new}) + assert old not in raw._orig_units + assert new in raw._orig_units + + +def test_set_channel_types(): + """Test set_channel_types.""" + raw = read_raw_fif(raw_fname) + # Error Tests + # Test channel name exists in ch_names + mapping = {'EEG 160': 'EEG060'} + with pytest.raises(ValueError, match=r"name \(EEG 160\) doesn't exist"): + raw.set_channel_types(mapping) + # Test change to illegal channel type + mapping = {'EOG 061': 'xxx'} + with pytest.raises(ValueError, match='cannot change to this channel type'): + raw.set_channel_types(mapping) + # Test changing type if in proj + mapping = {'EEG 057': 'dbs', 'EEG 058': 'ecog', 'EEG 059': 'ecg', + 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', + 'MEG 2443': 'eeg', 'MEG 2442': 'hbo', 'EEG 001': 'resp'} + raw2 = read_raw_fif(raw_fname) + raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] + with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): + raw2.set_channel_types(mapping) # has prj + raw2.add_proj([], remove_existing=True) + with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): + raw2 = raw2.set_channel_types(mapping) + info = raw2.info + assert info['chs'][371]['ch_name'] == 'EEG 057' + assert info['chs'][371]['kind'] == FIFF.FIFFV_DBS_CH + assert info['chs'][371]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][371]['coil_type'] == FIFF.FIFFV_COIL_EEG + assert info['chs'][372]['ch_name'] == 'EEG 058' + assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH + assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG + assert info['chs'][373]['ch_name'] == 'EEG 059' + assert info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH + assert info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE + assert info['chs'][374]['ch_name'] == 'EEG 060' + assert info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH + assert info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE + assert info['chs'][375]['ch_name'] == 'EOG 061' + assert info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH + assert info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG + for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']): + assert info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH + assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG + idx = pick_channels(raw.ch_names, ['MEG 
2442'])[0] + assert info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH + assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL + assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO + + # resp channel type + idx = pick_channels(raw.ch_names, ['EEG 001'])[0] + assert info['chs'][idx]['kind'] == FIFF.FIFFV_RESP_CH + assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_NONE + + # Test meaningful error when setting channel type with unknown unit + raw.info['chs'][0]['unit'] = 0. + ch_types = {raw.ch_names[0]: 'misc'} + pytest.raises(ValueError, raw.set_channel_types, ch_types) + + +def test_read_ch_adjacency(tmp_path): + """Test reading channel adjacency templates.""" + tempdir = str(tmp_path) + a = partial(np.array, dtype=' ps + + # are channels in the correct selection? + fz_c3_c4 = [raw.ch_names.index(ch) for ch in ("Fz", "C3", "C4")] + for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")): + assert channel in sels[roi] + + +@testing.requires_testing_data +def test_find_ch_adjacency(): + """Test computing the adjacency matrix.""" + raw = read_raw_fif(raw_fname, preload=True) + sizes = {'mag': 828, 'grad': 1700, 'eeg': 384} + nchans = {'mag': 102, 'grad': 204, 'eeg': 60} + for ch_type in ['mag', 'grad', 'eeg']: + conn, ch_names = find_ch_adjacency(raw.info, ch_type) + # Silly test for checking the number of neighbors. + assert_equal(conn.getnnz(), sizes[ch_type]) + assert_equal(len(ch_names), nchans[ch_type]) + pytest.raises(ValueError, find_ch_adjacency, raw.info, None) + + # Test computing the conn matrix with gradiometers. + conn, ch_names = _compute_ch_adjacency(raw.info, 'grad') + assert_equal(conn.getnnz(), 2680) + + # Test ch_type=None. + raw.pick_types(meg='mag') + find_ch_adjacency(raw.info, None) + + bti_fname = op.join(testing_path, 'BTi', 'erm_HFH', 'c,rfDC') + bti_config_name = op.join(testing_path, 'BTi', 'erm_HFH', 'config') + raw = read_raw_bti(bti_fname, bti_config_name, None) + _, ch_names = find_ch_adjacency(raw.info, 'mag') + assert 'A1' in ch_names + + ctf_fname = op.join(testing_path, 'CTF', 'testdata_ctf_short.ds') + raw = read_raw_ctf(ctf_fname) + _, ch_names = find_ch_adjacency(raw.info, 'mag') + assert 'MLC11' in ch_names + + pytest.raises(ValueError, find_ch_adjacency, raw.info, 'eog') + + raw_kit = read_raw_kit(fname_kit_157) + neighb, ch_names = find_ch_adjacency(raw_kit.info, 'mag') + assert neighb.data.size == 1329 + assert ch_names[0] == 'MEG 001' + + +@testing.requires_testing_data +def test_neuromag122_adjacency(): + """Test computing the adjacency matrix of Neuromag122-Data.""" + nm122_fname = op.join(testing_path, 'misc', + 'neuromag122_test_file-raw.fif') + raw = read_raw_fif(nm122_fname, preload=True) + conn, ch_names = find_ch_adjacency(raw.info, 'grad') + assert conn.getnnz() == 1564 + assert len(ch_names) == 122 + assert conn.shape == (122, 122) + + +def test_drop_channels(): + """Test if dropping channels works with various arguments.""" + raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + raw.drop_channels(["MEG 0111"]) # list argument + raw.drop_channels("MEG 0112") # str argument + raw.drop_channels({"MEG 0132", "MEG 0133"}) # set argument + pytest.raises(ValueError, raw.drop_channels, ["MEG 0111", 5]) + pytest.raises(ValueError, raw.drop_channels, 5) # must be list or str + + +def test_pick_channels(): + """Test if picking channels works with various arguments.""" + raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + + # selected correctly 3 channels + 
raw.pick(['MEG 0113', 'MEG 0112', 'MEG 0111']) + assert len(raw.ch_names) == 3 + + # selected correctly 3 channels and ignored 'meg', and emit warning + with pytest.warns(RuntimeWarning, match='not present in the info'): + raw.pick(['MEG 0113', "meg", 'MEG 0112', 'MEG 0111']) + assert len(raw.ch_names) == 3 + + names_len = len(raw.ch_names) + raw.pick(['all']) # selected correctly all channels + assert len(raw.ch_names) == names_len + raw.pick('all') # selected correctly all channels + assert len(raw.ch_names) == names_len + + +def test_add_reference_channels(): + """Test if there is a new reference channel that consist of all zeros.""" + raw = read_raw_fif(raw_fname, preload=True) + n_raw_original_channels = len(raw.ch_names) + epochs = Epochs(raw, read_events(eve_fname)) + epochs.load_data() + epochs_original_shape = epochs._data.shape[1] + evoked = epochs.average() + n_evoked_original_channels = len(evoked.ch_names) + + # Raw object + raw.add_reference_channels(['REF 123']) + assert len(raw.ch_names) == n_raw_original_channels + 1 + assert np.all(raw.get_data()[-1] == 0) + + # Epochs object + epochs.add_reference_channels(['REF 123']) + assert epochs._data.shape[1] == epochs_original_shape + 1 + + # Evoked object + evoked.add_reference_channels(['REF 123']) + assert len(evoked.ch_names) == n_evoked_original_channels + 1 + assert np.all(evoked._data[-1] == 0) + + +def test_equalize_channels(): + """Test equalizing channels and their ordering.""" + # This function only tests the generic functionality of equalize_channels. + # Additional tests for each instance type are included in the accompanying + # test suite for each type. + pytest.raises(TypeError, equalize_channels, ['foo', 'bar'], + match='Instances to be modified must be an instance of') + + raw = RawArray([[1.], [2.], [3.], [4.]], + create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.)) + epochs = EpochsArray([[[1.], [2.], [3.]]], + create_info(['CH5', 'CH2', 'CH1'], sfreq=1.)) + cov = make_ad_hoc_cov(create_info(['CH2', 'CH1', 'CH8'], sfreq=1., + ch_types='eeg')) + cov['bads'] = ['CH1'] + ave = EvokedArray([[1.], [2.]], create_info(['CH1', 'CH2'], sfreq=1.)) + + raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave], + copy=True) + + # The Raw object was the first in the list, so should have been used as + # template for the ordering of the channels. No bad channels should have + # been dropped. + assert raw2.ch_names == ['CH1', 'CH2'] + assert_array_equal(raw2.get_data(), [[1.], [2.]]) + assert epochs2.ch_names == ['CH1', 'CH2'] + assert_array_equal(epochs2.get_data(), [[[3.], [2.]]]) + assert cov2.ch_names == ['CH1', 'CH2'] + assert cov2['bads'] == cov['bads'] + assert ave2.ch_names == ave.ch_names + assert_array_equal(ave2.data, ave.data) + + # All objects should have been copied, except for the Evoked object which + # did not have to be touched. 
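+    # (Illustrative aside, not upstream test code: with copy=True,
+    #  equalize_channels only copies the instances it actually modifies, so
+    #  the untouched Evoked comes back as the same object, as asserted
+    #  below.)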
+ assert raw is not raw2 + assert epochs is not epochs2 + assert cov is not cov2 + assert ave is ave2 + + # Test in-place operation + raw2, epochs2 = equalize_channels([raw, epochs], copy=False) + assert raw is raw2 + assert epochs is epochs2 + + +def test_combine_channels(): + """Test channel combination on Raw, Epochs, and Evoked.""" + raw = read_raw_fif(raw_fname, preload=True) + raw_ch_bad = read_raw_fif(raw_fname, preload=True) + raw_ch_bad.info['bads'] = ['MEG 0113', 'MEG 0112'] + epochs = Epochs(raw, read_events(eve_fname)) + evoked = epochs.average() + good = dict(foo=[0, 1, 3, 4], bar=[5, 2]) # good grad and mag + + # Test good cases + combine_channels(raw, good) + combined_epochs = combine_channels(epochs, good) + assert_array_equal(combined_epochs.events, epochs.events) + combine_channels(evoked, good) + combine_channels(raw, good, drop_bad=True) + combine_channels(raw_ch_bad, good, drop_bad=True) + + # Test with stimulus channels + combine_stim = combine_channels(raw, good, keep_stim=True) + target_nchan = len(good) + len(pick_types(raw.info, meg=False, stim=True)) + assert combine_stim.info['nchan'] == target_nchan + + # Test results with one ROI + good_single = dict(foo=[0, 1, 3, 4]) # good grad + combined_mean = combine_channels(raw, good_single, method='mean') + combined_median = combine_channels(raw, good_single, method='median') + combined_std = combine_channels(raw, good_single, method='std') + foo_mean = np.mean(raw.get_data()[good_single['foo']], axis=0) + foo_median = np.median(raw.get_data()[good_single['foo']], axis=0) + foo_std = np.std(raw.get_data()[good_single['foo']], axis=0) + assert_array_equal(combined_mean.get_data(), + np.expand_dims(foo_mean, axis=0)) + assert_array_equal(combined_median.get_data(), + np.expand_dims(foo_median, axis=0)) + assert_array_equal(combined_std.get_data(), + np.expand_dims(foo_std, axis=0)) + + # Test bad cases + bad1 = dict(foo=[0, 376], bar=[5, 2]) # out of bounds + bad2 = dict(foo=[0, 2], bar=[5, 2]) # type mix in same group + with pytest.raises(ValueError, match='"method" must be a callable, or'): + combine_channels(raw, good, method='bad_method') + with pytest.raises(TypeError, match='"keep_stim" must be of type bool'): + combine_channels(raw, good, keep_stim='bad_type') + with pytest.raises(TypeError, match='"drop_bad" must be of type bool'): + combine_channels(raw, good, drop_bad='bad_type') + with pytest.raises(ValueError, match='Some channel indices are out of'): + combine_channels(raw, bad1) + with pytest.raises(ValueError, match='Cannot combine sensors of diff'): + combine_channels(raw, bad2) + + # Test warnings + raw_no_stim = read_raw_fif(raw_fname, preload=True) + raw_no_stim.pick_types(meg=True, stim=False) + warn1 = dict(foo=[375, 375], bar=[5, 2]) # same channel in same group + warn2 = dict(foo=[375], bar=[5, 2]) # one channel (last channel) + warn3 = dict(foo=[0, 4], bar=[5, 2]) # one good channel left + with pytest.warns(RuntimeWarning, match='Could not find stimulus'): + combine_channels(raw_no_stim, good, keep_stim=True) + with pytest.warns(RuntimeWarning, match='Less than 2 channels') as record: + combine_channels(raw, warn1) + combine_channels(raw, warn2) + combine_channels(raw_ch_bad, warn3, drop_bad=True) + assert len(record) == 3 + + +@requires_pandas +def test_combine_channels_metadata(): + """Test if metadata is correctly retained in combined object.""" + import pandas as pd + + raw = read_raw_fif(raw_fname, preload=True) + epochs = Epochs(raw, read_events(eve_fname), preload=True) + + metadata = 
pd.DataFrame({"A": np.arange(len(epochs)), + "B": np.ones(len(epochs))}) + epochs.metadata = metadata + + good = dict(foo=[0, 1, 3, 4], bar=[5, 2]) # good grad and mag + combined_epochs = combine_channels(epochs, good) + pd.testing.assert_frame_equal(epochs.metadata, combined_epochs.metadata) diff --git a/python/libs/mne/channels/tests/test_interpolation.py b/python/libs/mne/channels/tests/test_interpolation.py new file mode 100644 index 0000000..37da056 --- /dev/null +++ b/python/libs/mne/channels/tests/test_interpolation.py @@ -0,0 +1,307 @@ +import os.path as op + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +import pytest +from itertools import compress + +from mne import io, pick_types, pick_channels, read_events, Epochs +from mne.channels.interpolation import _make_interpolation_matrix +from mne.datasets import testing +from mne.preprocessing.nirs import (optical_density, scalp_coupling_index, + beer_lambert_law) +from mne.io import read_raw_nirx +from mne.io.proj import _has_eeg_average_ref_proj +from mne.utils import _record_warnings, requires_version + +base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') +event_name = op.join(base_dir, 'test-eve.fif') +raw_fname_ctf = op.join(base_dir, 'test_ctf_raw.fif') + +testing_path = testing.data_path(download=False) + +event_id, tmin, tmax = 1, -0.2, 0.5 +event_id_2 = 2 + + +def _load_data(kind): + """Load data.""" + # It is more memory efficient to load data in a separate + # function so it's loaded on-demand + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + # subselect channels for speed + if kind == 'eeg': + picks = pick_types(raw.info, meg=False, eeg=True, exclude=[])[:15] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + preload=True, reject=dict(eeg=80e-6)) + else: + picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1:200:2] + assert kind == 'meg' + with pytest.warns(RuntimeWarning, match='projection'): + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + preload=True, + reject=dict(grad=1000e-12, mag=4e-12)) + return raw, epochs + + +@pytest.mark.parametrize('offset', (0., 0.1)) +@pytest.mark.parametrize('avg_proj, ctol', [ + (True, (0.86, 0.93)), + (False, (0.97, 0.99)), +]) +@pytest.mark.parametrize('method, atol', [ + pytest.param(None, 3e-6, marks=pytest.mark.slowtest), # slow on Azure + (dict(eeg='MNE'), 4e-6), +]) +@pytest.mark.filterwarnings('ignore:.*than 20 mm from head frame origin.*') +def test_interpolation_eeg(offset, avg_proj, ctol, atol, method): + """Test interpolation of EEG channels.""" + raw, epochs_eeg = _load_data('eeg') + epochs_eeg = epochs_eeg.copy() + assert not _has_eeg_average_ref_proj(epochs_eeg.info['projs']) + # Offsetting the coordinate frame should have no effect on the output + for inst in (raw, epochs_eeg): + for ch in inst.info['chs']: + if ch['kind'] == io.constants.FIFF.FIFFV_EEG_CH: + ch['loc'][:3] += offset + ch['loc'][3:6] += offset + for d in inst.info['dig']: + d['r'] += offset + + # check that interpolation does nothing if no bads are marked + epochs_eeg.info['bads'] = [] + evoked_eeg = epochs_eeg.average() + kw = dict(method=method) + with pytest.warns(RuntimeWarning, match='Doing nothing'): + evoked_eeg.interpolate_bads(**kw) + + # create good and bad channels for EEG + epochs_eeg.info['bads'] = [] + goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool) + goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False + 
bads_idx = ~goods_idx + pos = epochs_eeg._get_channel_positions() + + evoked_eeg = epochs_eeg.average() + if avg_proj: + evoked_eeg.set_eeg_reference(projection=True).apply_proj() + assert_allclose(evoked_eeg.data.mean(0), 0., atol=1e-20) + ave_before = evoked_eeg.data[bads_idx] + + # interpolate bad channels for EEG + epochs_eeg.info['bads'] = ['EEG 012'] + evoked_eeg = epochs_eeg.average() + if avg_proj: + evoked_eeg.set_eeg_reference(projection=True).apply_proj() + good_picks = pick_types(evoked_eeg.info, meg=False, eeg=True) + assert_allclose(evoked_eeg.data[good_picks].mean(0), 0., atol=1e-20) + evoked_eeg_bad = evoked_eeg.copy() + bads_picks = pick_channels( + epochs_eeg.ch_names, include=epochs_eeg.info['bads'], ordered=True + ) + evoked_eeg_bad.data[bads_picks, :] = 1e10 + + # Test first the exclude parameter + evoked_eeg_2_bads = evoked_eeg_bad.copy() + evoked_eeg_2_bads.info['bads'] = ['EEG 004', 'EEG 012'] + evoked_eeg_2_bads.data[ + pick_channels(evoked_eeg_bad.ch_names, ['EEG 004', 'EEG 012']) + ] = 1e10 + evoked_eeg_interp = evoked_eeg_2_bads.interpolate_bads( + origin=(0., 0., 0.), exclude=['EEG 004'], **kw) + assert evoked_eeg_interp.info['bads'] == ['EEG 004'] + assert np.all(evoked_eeg_interp.get_data('EEG 004') == 1e10) + assert np.all(evoked_eeg_interp.get_data('EEG 012') != 1e10) + + # Now test without exclude parameter + evoked_eeg_bad.info['bads'] = ['EEG 012'] + evoked_eeg_interp = evoked_eeg_bad.copy().interpolate_bads( + origin=(0., 0., 0.), **kw) + if avg_proj: + assert_allclose(evoked_eeg_interp.data.mean(0), 0., atol=1e-6) + interp_zero = evoked_eeg_interp.data[bads_idx] + if method is None: # using + pos_good = pos[goods_idx] + pos_bad = pos[bads_idx] + interpolation = _make_interpolation_matrix(pos_good, pos_bad) + assert interpolation.shape == (1, len(epochs_eeg.ch_names) - 1) + interp_manual = np.dot(interpolation, evoked_eeg_bad.data[goods_idx]) + assert_array_equal(interp_manual, interp_zero) + del interp_manual, interpolation, pos, pos_good, pos_bad + assert_allclose(ave_before, interp_zero, atol=atol) + assert ctol[0] < np.corrcoef(ave_before, interp_zero)[0, 1] < ctol[1] + interp_fit = evoked_eeg_bad.copy().interpolate_bads(**kw).data[bads_idx] + assert_allclose(ave_before, interp_fit, atol=2.5e-6) + assert ctol[1] < np.corrcoef(ave_before, interp_fit)[0, 1] # better + + # check that interpolation fails when preload is False + epochs_eeg.preload = False + with pytest.raises(RuntimeError, match='requires epochs data to be loade'): + epochs_eeg.interpolate_bads(**kw) + epochs_eeg.preload = True + + # check that interpolation changes the data in raw + raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info) + raw_before = raw_eeg._data[bads_idx] + raw_after = raw_eeg.interpolate_bads(**kw)._data[bads_idx] + assert not np.all(raw_before == raw_after) + + # check that interpolation fails when preload is False + for inst in [raw, epochs_eeg]: + assert hasattr(inst, 'preload') + inst.preload = False + inst.info['bads'] = [inst.ch_names[1]] + with pytest.raises(RuntimeError, match='requires.*data to be loaded'): + inst.interpolate_bads(**kw) + + # check that interpolation works with few channels + raw_few = raw.copy().crop(0, 0.1).load_data() + raw_few.pick_channels(raw_few.ch_names[:1] + raw_few.ch_names[3:4]) + assert len(raw_few.ch_names) == 2 + raw_few.del_proj() + raw_few.info['bads'] = [raw_few.ch_names[-1]] + orig_data = raw_few[1][0] + with _record_warnings() as w: + raw_few.interpolate_bads(reset_bads=False, **kw) + assert len([ww for ww 
in w if 'more than' not in str(ww.message)]) == 0 + new_data = raw_few[1][0] + assert (new_data == 0).mean() < 0.5 + assert np.corrcoef(new_data, orig_data)[0, 1] > 0.2 + + +@pytest.mark.slowtest +def test_interpolation_meg(): + """Test interpolation of MEG channels.""" + # speed accuracy tradeoff: channel subselection is faster but the + # correlation drops + thresh = 0.68 + + raw, epochs_meg = _load_data('meg') + + # check that interpolation works when non M/EEG channels are present + # before MEG channels + raw.crop(0, 0.1).load_data().pick_channels(epochs_meg.ch_names) + raw.info.normalize_proj() + with pytest.warns(RuntimeWarning, match='unit .* changed from .* to .*'): + raw.set_channel_types({raw.ch_names[0]: 'stim'}) + raw.info['bads'] = [raw.ch_names[1]] + raw.load_data() + raw.interpolate_bads(mode='fast') + del raw + + # check that interpolation works for MEG + epochs_meg.info['bads'] = ['MEG 0141'] + evoked = epochs_meg.average() + pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads']) + + # MEG -- raw + raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info) + raw_meg.info['bads'] = ['MEG 0141'] + data1 = raw_meg[pick, :][0][0] + + raw_meg.info.normalize_proj() + data2 = raw_meg.interpolate_bads(reset_bads=False, + mode='fast')[pick, :][0][0] + assert np.corrcoef(data1, data2)[0, 1] > thresh + # the same number of bads as before + assert len(raw_meg.info['bads']) == len(raw_meg.info['bads']) + + # MEG -- epochs + data1 = epochs_meg.get_data()[:, pick, :].ravel() + epochs_meg.info.normalize_proj() + epochs_meg.interpolate_bads(mode='fast') + data2 = epochs_meg.get_data()[:, pick, :].ravel() + assert np.corrcoef(data1, data2)[0, 1] > thresh + assert len(epochs_meg.info['bads']) == 0 + + # MEG -- evoked (plus auto origin) + data1 = evoked.data[pick] + evoked.info.normalize_proj() + data2 = evoked.interpolate_bads(origin='auto').data[pick] + assert np.corrcoef(data1, data2)[0, 1] > thresh + + # MEG -- with exclude + evoked.info['bads'] = ['MEG 0141', 'MEG 0121'] + pick = pick_channels(evoked.ch_names, evoked.info['bads'], ordered=True) + evoked.data[pick[-1]] = 1e10 + data1 = evoked.data[pick] + evoked.info.normalize_proj() + data2 = evoked.interpolate_bads( + origin='auto', exclude=['MEG 0121'] + ).data[pick] + assert np.corrcoef(data1[0], data2[0])[0, 1] > thresh + assert np.all(data2[1] == 1e10) + + +def _this_interpol(inst, ref_meg=False): + from mne.channels.interpolation import _interpolate_bads_meg + _interpolate_bads_meg(inst, ref_meg=ref_meg, mode='fast') + return inst + + +@pytest.mark.slowtest +def test_interpolate_meg_ctf(): + """Test interpolation of MEG channels from CTF system.""" + thresh = .85 + tol = .05 # assert the new interpol correlates at least .05 "better" + bad = 'MLC22-2622' # select a good channel to test the interpolation + + raw = io.read_raw_fif(raw_fname_ctf).crop(0, 1.0).load_data() # 3 secs + raw.apply_gradient_compensation(3) + + # Show that we have to exclude ref_meg for interpolating CTF MEG-channels + # (fixed in #5965): + raw.info['bads'] = [bad] + pick_bad = pick_channels(raw.info['ch_names'], raw.info['bads']) + data_orig = raw[pick_bad, :][0] + # mimic old behavior (the ref_meg-arg in _interpolate_bads_meg only serves + # this purpose): + data_interp_refmeg = _this_interpol(raw, ref_meg=True)[pick_bad, :][0] + # new: + data_interp_no_refmeg = _this_interpol(raw, ref_meg=False)[pick_bad, :][0] + + R = dict() + R['no_refmeg'] = np.corrcoef(data_orig, data_interp_no_refmeg)[0, 1] + R['with_refmeg'] = 
np.corrcoef(data_orig, data_interp_refmeg)[0, 1] + + print('Corrcoef of interpolated with original channel: ', R) + assert R['no_refmeg'] > R['with_refmeg'] + tol + assert R['no_refmeg'] > thresh + + +@testing.requires_testing_data +def test_interpolation_ctf_comp(): + """Test interpolation with compensated CTF data.""" + raw_fname = op.join(testing_path, 'CTF', 'somMDYO-18av.ds') + raw = io.read_raw_ctf(raw_fname, preload=True) + raw.info['bads'] = [raw.ch_names[5], raw.ch_names[-5]] + raw.interpolate_bads(mode='fast', origin=(0., 0., 0.04)) + assert raw.info['bads'] == [] + + +@requires_version('pymatreader') +@testing.requires_testing_data +def test_interpolation_nirs(): + """Test interpolating bad nirs channels.""" + fname = op.join(testing_path, + 'NIRx', 'nirscout', 'nirx_15_2_recording_w_overlap') + raw_intensity = read_raw_nirx(fname, preload=False) + raw_od = optical_density(raw_intensity) + sci = scalp_coupling_index(raw_od) + raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) + bad_0 = np.where([name == raw_od.info['bads'][0] for + name in raw_od.ch_names])[0][0] + bad_0_std_pre_interp = np.std(raw_od._data[bad_0]) + bads_init = list(raw_od.info['bads']) + raw_od.interpolate_bads(exclude=bads_init[:2]) + assert raw_od.info['bads'] == bads_init[:2] + raw_od.interpolate_bads() + assert raw_od.info['bads'] == [] + assert bad_0_std_pre_interp > np.std(raw_od._data[bad_0]) + raw_haemo = beer_lambert_law(raw_od, ppf=6) + raw_haemo.info['bads'] = raw_haemo.ch_names[2:4] + assert raw_haemo.info['bads'] == ['S1_D2 hbo', 'S1_D2 hbr'] + raw_haemo.interpolate_bads() + assert raw_haemo.info['bads'] == [] diff --git a/python/libs/mne/channels/tests/test_layout.py b/python/libs/mne/channels/tests/test_layout.py new file mode 100644 index 0000000..1d0f1c8 --- /dev/null +++ b/python/libs/mne/channels/tests/test_layout.py @@ -0,0 +1,371 @@ +# Authors: Alexandre Gramfort +# Denis Engemann +# Martin Luessi +# Eric Larson +# +# License: Simplified BSD + +import copy +import os.path as op + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, assert_equal) +import pytest +import matplotlib.pyplot as plt + +from mne.channels import (make_eeg_layout, make_grid_layout, read_layout, + find_layout, HEAD_SIZE_DEFAULT) +from mne.channels.layout import (_box_size, _find_topomap_coords, + generate_2d_layout) +from mne import pick_types, pick_info +from mne.io import read_raw_kit, _empty_info, read_info +from mne.io.constants import FIFF + +io_dir = op.join(op.dirname(__file__), '..', '..', 'io') +fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif') +lout_path = op.join(io_dir, 'tests', 'data') +bti_dir = op.join(io_dir, 'bti', 'tests', 'data') +fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif') +fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd') +fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd') + + +def _get_test_info(): + """Make test info.""" + test_info = _empty_info(1000) + loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.], + dtype=np.float32) + test_info['chs'] = [ + {'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0, + 'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1, + 'unit': -1, 'unit_mul': 0}, + {'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0, + 'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2, + 'unit': -1, 'unit_mul': 0}, + {'cal': 0.002142000012099743, 
'ch_name': 'EOG 061', 'coil_type': 1, + 'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61, + 'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}] + test_info._unlocked = False + test_info._update_redundant() + test_info._check_consistency() + return test_info + + +def test_io_layout_lout(tmp_path): + """Test IO with .lout files.""" + tempdir = str(tmp_path) + layout = read_layout('Vectorview-all', scale=False) + layout.save(op.join(tempdir, 'foobar.lout')) + layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./', + scale=False) + assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2) + assert layout.names == layout_read.names + print(layout) # test repr + + +def test_io_layout_lay(tmp_path): + """Test IO with .lay files.""" + tempdir = str(tmp_path) + layout = read_layout('CTF151', scale=False) + layout.save(op.join(tempdir, 'foobar.lay')) + layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./', + scale=False) + assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2) + assert layout.names == layout_read.names + + +def test_find_topomap_coords(): + """Test mapping of coordinates in 3D space to 2D.""" + info = read_info(fif_fname) + picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False) + + # Remove extra digitization point, so EEG digitization points match up + # with the EEG channels + del info['dig'][85] + + # Use channel locations + kwargs = dict(ignore_overlap=False, to_sphere=True, + sphere=HEAD_SIZE_DEFAULT) + l0 = _find_topomap_coords(info, picks, **kwargs) + + # Remove electrode position information, use digitization points from now + # on. + for ch in info['chs']: + ch['loc'].fill(np.nan) + + l1 = _find_topomap_coords(info, picks, **kwargs) + assert_allclose(l1, l0, atol=1e-3) + + for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.), + (0., HEAD_SIZE_DEFAULT, 0.)): + info['dig'][-1]['r'] = np.array(z_pt) + l1 = _find_topomap_coords(info, picks, **kwargs) + assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6) + + # Test plotting mag topomap without channel locations: it should fail + mag_picks = pick_types(info, meg='mag') + with pytest.raises(ValueError, match='Cannot determine location'): + _find_topomap_coords(info, mag_picks, **kwargs) + + # Test function with too many EEG digitization points: it should fail + info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG}) + with pytest.raises(ValueError, match='Number of EEG digitization points'): + _find_topomap_coords(info, picks, **kwargs) + + # Test function with too little EEG digitization points: it should fail + info._unlocked = True + info['dig'] = info['dig'][:-2] + with pytest.raises(ValueError, match='Number of EEG digitization points'): + _find_topomap_coords(info, picks, **kwargs) + + # Electrode positions must be unique + info['dig'].append(info['dig'][-1]) + with pytest.raises(ValueError, match='overlapping positions'): + _find_topomap_coords(info, picks, **kwargs) + + # Test function without EEG digitization points: it should fail + info['dig'] = [d for d in info['dig'] + if d['kind'] != FIFF.FIFFV_POINT_EEG] + with pytest.raises(RuntimeError, match='Did not find any digitization'): + _find_topomap_coords(info, picks, **kwargs) + + # Test function without any digitization points, it should fail + info['dig'] = None + with pytest.raises(RuntimeError, match='No digitization points found'): + _find_topomap_coords(info, picks, **kwargs) + info['dig'] = [] + with pytest.raises(RuntimeError, match='No digitization points found'): 
+ _find_topomap_coords(info, picks, **kwargs) + + +def test_make_eeg_layout(tmp_path): + """Test creation of EEG layout.""" + tempdir = str(tmp_path) + tmp_name = 'foo' + lout_name = 'test_raw' + lout_orig = read_layout(kind=lout_name, path=lout_path) + info = read_info(fif_fname) + info['bads'].append(info['ch_names'][360]) + layout = make_eeg_layout(info, exclude=[]) + assert_array_equal(len(layout.names), len([ch for ch in info['ch_names'] + if ch.startswith('EE')])) + layout.save(op.join(tempdir, tmp_name + '.lout')) + lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False) + assert_array_equal(lout_new.kind, tmp_name) + assert_allclose(layout.pos, lout_new.pos, atol=0.1) + assert_array_equal(lout_orig.names, lout_new.names) + + # Test input validation + pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1) + pytest.raises(ValueError, make_eeg_layout, info, radius=0.6) + pytest.raises(ValueError, make_eeg_layout, info, width=-0.1) + pytest.raises(ValueError, make_eeg_layout, info, width=1.1) + pytest.raises(ValueError, make_eeg_layout, info, height=-0.1) + pytest.raises(ValueError, make_eeg_layout, info, height=1.1) + + +def test_make_grid_layout(tmp_path): + """Test creation of grid layout.""" + tempdir = str(tmp_path) + tmp_name = 'bar' + lout_name = 'test_ica' + lout_orig = read_layout(kind=lout_name, path=lout_path) + layout = make_grid_layout(_get_test_info()) + layout.save(op.join(tempdir, tmp_name + '.lout')) + lout_new = read_layout(kind=tmp_name, path=tempdir) + assert_array_equal(lout_new.kind, tmp_name) + assert_array_equal(lout_orig.pos, lout_new.pos) + assert_array_equal(lout_orig.names, lout_new.names) + + # Test creating grid layout with specified number of columns + layout = make_grid_layout(_get_test_info(), n_col=2) + # Vertical positions should be equal + assert layout.pos[0, 1] == layout.pos[1, 1] + # Horizontal positions should be unequal + assert layout.pos[0, 0] != layout.pos[1, 0] + # Box sizes should be equal + assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:]) + + +def test_find_layout(): + """Test finding layout.""" + pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep') + + sample_info = read_info(fif_fname) + grads = pick_types(sample_info, meg='grad') + sample_info2 = pick_info(sample_info, grads) + + mags = pick_types(sample_info, meg='mag') + sample_info3 = pick_info(sample_info, mags) + + # mock new convention + sample_info4 = copy.deepcopy(sample_info) + for ii, name in enumerate(sample_info4['ch_names']): + new = name.replace(' ', '') + sample_info4['chs'][ii]['ch_name'] = new + + eegs = pick_types(sample_info, meg=False, eeg=True) + sample_info5 = pick_info(sample_info, eegs) + + lout = find_layout(sample_info, ch_type=None) + assert lout.kind == 'Vectorview-all' + assert all(' ' in k for k in lout.names) + + lout = find_layout(sample_info2, ch_type='meg') + assert_equal(lout.kind, 'Vectorview-all') + + # test new vector-view + lout = find_layout(sample_info4, ch_type=None) + assert_equal(lout.kind, 'Vectorview-all') + assert all(' ' not in k for k in lout.names) + + lout = find_layout(sample_info, ch_type='grad') + assert_equal(lout.kind, 'Vectorview-grad') + lout = find_layout(sample_info2) + assert_equal(lout.kind, 'Vectorview-grad') + lout = find_layout(sample_info2, ch_type='grad') + assert_equal(lout.kind, 'Vectorview-grad') + lout = find_layout(sample_info2, ch_type='meg') + assert_equal(lout.kind, 'Vectorview-all') + + lout = find_layout(sample_info, ch_type='mag') + assert_equal(lout.kind, 
'Vectorview-mag') + lout = find_layout(sample_info3) + assert_equal(lout.kind, 'Vectorview-mag') + lout = find_layout(sample_info3, ch_type='mag') + assert_equal(lout.kind, 'Vectorview-mag') + lout = find_layout(sample_info3, ch_type='meg') + assert_equal(lout.kind, 'Vectorview-all') + + lout = find_layout(sample_info, ch_type='eeg') + assert_equal(lout.kind, 'EEG') + lout = find_layout(sample_info5) + assert_equal(lout.kind, 'EEG') + lout = find_layout(sample_info5, ch_type='eeg') + assert_equal(lout.kind, 'EEG') + # no common layout, 'meg' option not supported + + lout = find_layout(read_info(fname_ctf_raw)) + assert_equal(lout.kind, 'CTF-275') + + fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif') + lout = find_layout(read_info(fname_bti_raw)) + assert_equal(lout.kind, 'magnesWH3600') + + raw_kit = read_raw_kit(fname_kit_157) + lout = find_layout(raw_kit.info) + assert_equal(lout.kind, 'KIT-157') + + raw_kit.info['bads'] = ['MEG 013', 'MEG 014', 'MEG 015', 'MEG 016'] + raw_kit.info._check_consistency() + lout = find_layout(raw_kit.info) + assert_equal(lout.kind, 'KIT-157') + # fallback for missing IDs + for val in (35, 52, 54, 1001): + with raw_kit.info._unlock(): + raw_kit.info['kit_system_id'] = val + lout = find_layout(raw_kit.info) + assert lout.kind == 'custom' + + raw_umd = read_raw_kit(fname_kit_umd) + lout = find_layout(raw_umd.info) + assert_equal(lout.kind, 'KIT-UMD-3') + + # Test plotting + lout.plot() + lout.plot(picks=np.arange(10)) + plt.close('all') + + +def test_box_size(): + """Test calculation of box sizes.""" + # No points. Box size should be 1,1. + assert_allclose(_box_size([]), (1.0, 1.0)) + + # Create one point. Box size should be 1,1. + point = [(0, 0)] + assert_allclose(_box_size(point), (1.0, 1.0)) + + # Create two points. Box size should be 0.5,1. + points = [(0.25, 0.5), (0.75, 0.5)] + assert_allclose(_box_size(points), (0.5, 1.0)) + + # Create three points. Box size should be (0.5, 0.5). + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_allclose(_box_size(points), (0.5, 0.5)) + + # Create a grid of points. Box size should be (0.1, 0.1). + x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11)) + x, y = x.ravel(), y.ravel() + assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1)) + + # Create a random set of points. This should never break the function. + rng = np.random.RandomState(42) + points = rng.rand(100, 2) + width, height = _box_size(points) + assert width is not None + assert height is not None + + # Test specifying an existing width. + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_allclose(_box_size(points, width=0.4), (0.4, 0.5)) + + # Test specifying an existing width that has influence on the calculated + # height. + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_allclose(_box_size(points, width=0.2), (0.2, 1.0)) + + # Test specifying an existing height. + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_allclose(_box_size(points, height=0.4), (0.5, 0.4)) + + # Test specifying an existing height that has influence on the calculated + # width. + points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)] + assert_allclose(_box_size(points, height=0.1), (1.0, 0.1)) + + # Test specifying both width and height. The function should simply return + # these. 
+ points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)] + assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1)) + + # Test specifying a width that will cause unfixable horizontal overlap and + # essentially breaks the function (height will be 0). + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_array_equal(_box_size(points, width=1), (1, 0)) + + # Test adding some padding. + # Create three points. Box size should be a little less than (0.5, 0.5). + points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)] + assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5)) + + +def test_generate_2d_layout(): + """Test creation of a layout from 2d points.""" + snobg = 10 + sbg = 15 + side = range(snobg) + bg_image = np.random.RandomState(42).randn(sbg, sbg) + w, h = [.2, .5] + + # Generate fake data + xy = np.array([(i, j) for i in side for j in side]) + lt = generate_2d_layout(xy, w=w, h=h) + + # Correct points ordering / minmaxing + comp_1, comp_2 = [(5, 0), (7, 0)] + assert lt.pos[:, :2].max() == 1 + assert lt.pos[:, :2].min() == 0 + with np.errstate(invalid='ignore'): # divide by zero + assert_allclose(xy[comp_2] / float(xy[comp_1]), + lt.pos[comp_2] / float(lt.pos[comp_1])) + assert_allclose(lt.pos[0, [2, 3]], [w, h]) + + # Correct number elements + assert lt.pos.shape[1] == 4 + assert len(lt.box) == 4 + + # Make sure background image normalizing is correct + lt_bg = generate_2d_layout(xy, bg_image=bg_image) + assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg)) diff --git a/python/libs/mne/channels/tests/test_montage.py b/python/libs/mne/channels/tests/test_montage.py new file mode 100644 index 0000000..fd6dad3 --- /dev/null +++ b/python/libs/mne/channels/tests/test_montage.py @@ -0,0 +1,1656 @@ +# Author: Teon Brooks +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from contextlib import nullcontext +from itertools import chain +import os +import os.path as op + +import pytest + +import numpy as np +from functools import partial +from string import ascii_lowercase + +from numpy.testing import (assert_array_equal, + assert_allclose, assert_equal) +import matplotlib.pyplot as plt + +from mne import __file__ as _mne_file, create_info, read_evokeds, pick_types +from mne.source_space import get_mni_fiducials +from mne.utils._testing import assert_object_equal +from mne.channels import (get_builtin_montages, DigMontage, read_dig_dat, + read_dig_egi, read_dig_captrak, read_dig_fif, + make_standard_montage, read_custom_montage, + compute_dev_head_t, make_dig_montage, + read_dig_polhemus_isotrak, compute_native_head_t, + read_polhemus_fastscan, read_dig_localite, + read_dig_hpts) +from mne.channels.montage import transform_to_head, _check_get_coord_frame +from mne.utils import assert_dig_allclose, _record_warnings +from mne.bem import _fit_sphere +from mne.io.constants import FIFF +from mne.io._digitization import (_format_dig_points, + _get_fid_coords, _get_dig_eeg, + _count_points_by_type) +from mne.transforms import (_ensure_trans, apply_trans, invert_transform, + _get_trans) +from mne.viz._3d import _fiducial_coords + +from mne.io.kit import read_mrk +from mne.io import (read_raw_brainvision, read_raw_egi, read_raw_fif, + read_fiducials, __file__ as _MNE_IO_FILE) + +from mne.io import RawArray +from mne.datasets import testing +from mne.io.brainvision import __file__ as _BRAINVISON_FILE + + +data_path = testing.data_path(download=False) +fif_dig_montage_fname = op.join(data_path, 'montage', 'eeganes07.fif') +egi_dig_montage_fname = op.join(data_path, 
'montage', 'coordinates.xml') +egi_raw_fname = op.join(data_path, 'montage', 'egi_dig_test.raw') +egi_fif_fname = op.join(data_path, 'montage', 'egi_dig_raw.fif') +bvct_dig_montage_fname = op.join(data_path, 'montage', 'captrak_coords.bvct') +bv_raw_fname = op.join(data_path, 'montage', 'bv_dig_test.vhdr') +bv_fif_fname = op.join(data_path, 'montage', 'bv_dig_raw.fif') +locs_montage_fname = op.join(data_path, 'EEGLAB', 'test_chans.locs') +evoked_fname = op.join(data_path, 'montage', 'level2_raw-ave.fif') +eeglab_fname = op.join(data_path, 'EEGLAB', 'test_raw.set') +bdf_fname1 = op.join(data_path, 'BDF', 'test_generator_2.bdf') +bdf_fname2 = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf') +egi_fname1 = op.join(data_path, 'EGI', 'test_egi.mff') +cnt_fname = op.join(data_path, 'CNT', 'scan41_short.cnt') +subjects_dir = op.join(data_path, 'subjects') + +io_dir = op.dirname(_MNE_IO_FILE) +kit_dir = op.join(io_dir, 'kit', 'tests', 'data') +elp = op.join(kit_dir, 'test_elp.txt') +hsp = op.join(kit_dir, 'test_hsp.txt') +hpi = op.join(kit_dir, 'test_mrk.sqd') +bv_fname = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr') +fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif') +edf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test.edf') +bdf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test_bdf_eeglab.mat') +egi_fname2 = op.join(io_dir, 'egi', 'tests', 'data', 'test_egi.raw') +vhdr_path = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr') +ctf_fif_fname = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif') +nicolet_fname = op.join(io_dir, 'nicolet', 'tests', 'data', + 'test_nicolet_raw.data') + + +def _make_toy_raw(n_channels): + return RawArray( + data=np.empty([n_channels, 1]), + info=create_info( + ch_names=list(ascii_lowercase[:n_channels]), + sfreq=1, ch_types='eeg' + ) + ) + + +def _make_toy_dig_montage(n_channels, **kwargs): + return make_dig_montage( + ch_pos=dict(zip( + list(ascii_lowercase[:n_channels]), + np.arange(n_channels * 3).reshape(n_channels, 3), + )), + **kwargs + ) + + +def _get_dig_montage_pos(montage): + return np.array([d['r'] for d in _get_dig_eeg(montage.dig)]) + + +def test_dig_montage_trans(tmp_path): + """Test getting a trans from and applying a trans to a montage.""" + nasion, lpa, rpa, *ch_pos = np.random.RandomState(0).randn(10, 3) + ch_pos = {f'EEG{ii:3d}': pos for ii, pos in enumerate(ch_pos, 1)} + montage = make_dig_montage(ch_pos, nasion=nasion, lpa=lpa, rpa=rpa, + coord_frame='mri') + trans = compute_native_head_t(montage) + _ensure_trans(trans) + # ensure that we can save and load it, too + fname = tmp_path / 'temp-mon.fif' + _check_roundtrip(montage, fname, 'mri') + # test applying a trans + position1 = montage.get_positions() + montage.apply_trans(trans) + assert montage.get_positions()['coord_frame'] == 'head' + montage.apply_trans(invert_transform(trans)) + position2 = montage.get_positions() + assert str(position1) == str(position2) # exactly equal + + +def test_fiducials(): + """Test handling of fiducials.""" + # Eventually the code used here should be unified with montage.py, but for + # now it uses code in odd places + for fname in (fif_fname, ctf_fif_fname): + fids, coord_frame = read_fiducials(fname) + points = _fiducial_coords(fids, coord_frame) + assert points.shape == (3, 3) + # Fids + assert_allclose(points[:, 2], 0., atol=1e-6) + assert_allclose(points[::2, 1], 0., atol=1e-6) + assert points[2, 0] > 0 # RPA + assert points[0, 0] < 0 # LPA + # Nasion + assert_allclose(points[1, 0], 0., atol=1e-6) + 
assert points[1, 1] > 0 + + +def test_documented(): + """Test that standard montages are documented.""" + docs = make_standard_montage.__doc__ + lines = [line[4:] for line in docs.splitlines()] + start = stop = None + for li, line in enumerate(lines): + if line.startswith('====') and li < len(lines) - 2 and \ + lines[li + 1].startswith('Kind') and\ + lines[li + 2].startswith('===='): + start = li + 3 + elif start is not None and li > start and line.startswith('===='): + stop = li + break + assert (start is not None) + assert (stop is not None) + kinds = [line.split(' ')[0] for line in lines[start:stop]] + kinds = [kind for kind in kinds if kind != ''] + montages = os.listdir(op.join(op.dirname(_mne_file), 'channels', 'data', + 'montages')) + montages = sorted(op.splitext(m)[0] for m in montages) + assert_equal(len(set(montages)), len(montages)) + assert_equal(len(set(kinds)), len(kinds), err_msg=str(sorted(kinds))) + assert_equal(set(montages), set(kinds)) + + +@pytest.mark.parametrize('reader, file_content, expected_dig, ext, warning', [ + pytest.param( + partial(read_custom_montage, head_size=None), + ('FidNz 0 9.071585155 -2.359754454\n' + 'FidT9 -6.711765 0.040402876 -3.251600355\n' + 'very_very_very_long_name -5.831241498 -4.494821698 4.955347697\n' + 'Cz 0 0 1\n' + 'Cz 0 0 8.899186843'), + make_dig_montage( + ch_pos={ + 'very_very_very_long_name': [-5.8312416, -4.4948215, 4.9553475], # noqa + 'Cz': [0., 0., 8.899187], + }, + nasion=[0., 9.071585, -2.3597546], + lpa=[-6.711765, 0.04040287, -3.2516003], + rpa=None, + ), + 'sfp', + (RuntimeWarning, r'Duplicate.*last will be used for Cz \(2\)'), + id='sfp_duplicate'), + + pytest.param( + partial(read_custom_montage, head_size=None), + ('FidNz 0 9.071585155 -2.359754454\n' + 'FidT9 -6.711765 0.040402876 -3.251600355\n' + 'headshape 1 2 3\n' + 'headshape 4 5 6\n' + 'Cz 0 0 8.899186843'), + make_dig_montage( + hsp=[ + [1, 2, 3], + [4, 5, 6], + ], + ch_pos={ + 'Cz': [0., 0., 8.899187], + }, + nasion=[0., 9.071585, -2.3597546], + lpa=[-6.711765, 0.04040287, -3.2516003], + rpa=None, + ), + 'sfp', + None, + id='sfp_headshape'), + + pytest.param( + partial(read_custom_montage, head_size=1), + ('1 0 0.50669 FPz\n' + '2 23 0.71 EOG1\n' + '3 -39.947 0.34459 F3\n' + '4 0 0.25338 Fz\n'), + make_dig_montage( + ch_pos={ + 'EOG1': [0.30873816, 0.72734152, -0.61290705], + 'F3': [-0.56705965, 0.67706631, 0.46906776], + 'FPz': [0., 0.99977915, -0.02101571], + 'Fz': [0., 0.71457525, 0.69955859], + }, + nasion=None, lpa=None, rpa=None, coord_frame='head', + ), + 'loc', + None, + id='EEGLAB'), + + pytest.param( + partial(read_custom_montage, head_size=None, coord_frame='mri'), + ('// MatLab Sphere coordinates [degrees] Cartesian coordinates\n' # noqa: E501 + '// Label Theta Phi Radius X Y Z off sphere surface\n' # noqa: E501 + 'E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\n' # noqa: E501 + 'E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\n' # noqa: E501 + 'E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\n' # noqa: E501 + 'E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022'), # noqa: E501 + make_dig_montage( + ch_pos={ + 'E1': [0.7677, 0.5934, -0.2419], + 'E3': [0.6084, 0.7704, 0.1908], + 'E31': [0., 0.9816, -0.1908], + 'E61': [-0.8857, 0.3579, -0.2957], + }, + nasion=None, lpa=None, rpa=None, coord_frame='mri', + ), + 'csd', + None, + id='matlab'), + + pytest.param( + partial(read_custom_montage, head_size=None), + ('# ASA electrode file\nReferenceLabel avg\nUnitPosition mm\n' + 
'NumberPositions= 68\n' + 'Positions\n' + '-86.0761 -19.9897 -47.9860\n' + '85.7939 -20.0093 -48.0310\n' + '0.0083 86.8110 -39.9830\n' + '-86.0761 -24.9897 -67.9860\n' + 'Labels\nLPA\nRPA\nNz\nDummy\n'), + make_dig_montage( + ch_pos={ + 'Dummy': [-0.0860761, -0.0249897, -0.067986], + }, + nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02], + lpa=[-0.0860761, -0.0199897, -0.047986], + rpa=[0.0857939, -0.0200093, -0.048031], + ), + 'elc', + None, + id='ASA electrode'), + + pytest.param( + partial(read_custom_montage, head_size=1), + ('Site Theta Phi\n' + 'Fp1 -92 -72\n' + 'Fp2 92 72\n' + 'very_very_very_long_name -92 72\n' + 'O2 92 -90\n'), + make_dig_montage( + ch_pos={ + 'Fp1': [-0.30882875, 0.95047716, -0.0348995], + 'Fp2': [0.30882875, 0.95047716, -0.0348995], + 'very_very_very_long_name': [-0.30882875, -0.95047716, -0.0348995], # noqa + 'O2': [6.11950389e-17, -9.99390827e-01, -3.48994967e-02] + }, + nasion=None, lpa=None, rpa=None, + ), + 'txt', + None, + id='generic theta-phi (txt)'), + + pytest.param( + partial(read_custom_montage, head_size=None), + ('346\n' # XXX: this should actually raise an error 346 != 4 + 'FID\t LPA\t -120.03\t 0\t 85\n' + 'FID\t RPA\t 120.03\t 0\t 85\n' + 'FID\t Nz\t 114.03\t 90\t 85\n' + 'EEG\t F3\t -62.027\t -50.053\t 85\n' + 'EEG\t Fz\t 45.608\t 90\t 85\n' + 'EEG\t F4\t 62.01\t 50.103\t 85\n' + 'EEG\t FCz\t 68.01\t 58.103\t 85\n'), + make_dig_montage( + ch_pos={ + 'F3': [-0.48200427, 0.57551063, 0.39869712], + 'Fz': [3.71915931e-17, 6.07384809e-01, 5.94629038e-01], + 'F4': [0.48142596, 0.57584026, 0.39891983], + 'FCz': [0.41645989, 0.66914889, 0.31827805], + }, + nasion=[4.75366562e-17, 7.76332511e-01, -3.46132681e-01], + lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01], + rpa=[0.73589896, 0., -0.42538537], + ), + 'elp', + None, + id='BESA spherical model'), + + pytest.param( + partial(read_dig_hpts, unit='m'), + ('eeg Fp1 -95.0 -3. 
-3.\n' + 'eeg AF7 -1 -1 -3\n' + 'eeg A3 -2 -2 2\n' + 'eeg A 0 0 0'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'hpts', + None, + id='legacy mne-c'), + + pytest.param( + read_custom_montage, + ('ch_name, x, y, z\n' + 'Fp1, -95.0, -3., -3.\n' + 'AF7, -1, -1, -3\n' + 'A3, -2, -2, 2\n' + 'A, 0, 0, 0'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'csv', + None, + id='CSV file'), + + pytest.param( + read_custom_montage, + ('1\t-95.0\t-3.\t-3.\tFp1\n' + '2\t-1\t-1\t-3\tAF7\n' + '3\t-2\t-2\t2\tA3\n' + '4\t0\t0\t0\tA'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'xyz', + None, + id='XYZ file'), + + pytest.param( + read_custom_montage, + ('ch_name\tx\ty\tz\n' + 'Fp1\t-95.0\t-3.\t-3.\n' + 'AF7\t-1\t-1\t-3\n' + 'A3\t-2\t-2\t2\n' + 'A\t0\t0\t0'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'tsv', + None, + id='TSV file'), + + pytest.param( + partial(read_custom_montage, head_size=None), + ('\n' + '\n' + '\n' + ' \n' + ' Fp1\n' + ' -90\n' + ' -72\n' + ' 1\n' + ' 1\n' + ' \n' + ' \n' + ' Fz\n' + ' 45\n' + ' 90\n' + ' 1\n' + ' 2\n' + ' \n' + ' \n' + ' F3\n' + ' -60\n' + ' -51\n' + ' 1\n' + ' 3\n' + ' \n' + ' \n' + ' F7\n' + ' -90\n' + ' -36\n' + ' 1\n' + ' 4\n' + ' \n' + ''), + make_dig_montage( + ch_pos={ + 'Fp1': [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17], + 'Fz': [4.32978028e-17, 7.07106781e-01, 7.07106781e-01], + 'F3': [-0.54500745, 0.67302815, 0.5], + 'F7': [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17], + }, + nasion=None, lpa=None, rpa=None, + ), + 'bvef', + None, + id='brainvision'), +]) +def test_montage_readers( + reader, file_content, expected_dig, ext, warning, tmp_path +): + """Test that we have an equivalent of read_montage for all file formats.""" + fname = op.join(str(tmp_path), 'test.{ext}'.format(ext=ext)) + with open(fname, 'w') as fid: + fid.write(file_content) + + if warning is None: + ctx = nullcontext() + else: + ctx = pytest.warns(warning[0], match=warning[1]) + with ctx: + dig_montage = reader(fname) + assert isinstance(dig_montage, DigMontage) + + actual_ch_pos = dig_montage._get_ch_pos() + expected_ch_pos = expected_dig._get_ch_pos() + for kk in actual_ch_pos: + assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5) + assert len(dig_montage.dig) == len(expected_dig.dig) + for d1, d2 in zip(dig_montage.dig, expected_dig.dig): + assert d1['coord_frame'] == d2['coord_frame'] + for key in ('coord_frame', 'ident', 'kind'): + assert isinstance(d1[key], int) + assert isinstance(d2[key], int) + with _record_warnings() as w: + xform = compute_native_head_t(dig_montage) + assert xform['to'] == FIFF.FIFFV_COORD_HEAD + assert xform['from'] == FIFF.FIFFV_COORD_UNKNOWN + n = int(np.allclose(xform['trans'], np.eye(4))) + assert len(w) == n + + +@testing.requires_testing_data +def test_read_locs(): + """Test reading EEGLAB locs.""" + data = read_custom_montage(locs_montage_fname)._get_ch_pos() + assert_allclose( + actual=np.stack( + [data[kk] for kk in ('FPz', 'EOG1', 'F3', 'Fz')] # 4 random chs + ), + desired=[[0., 0.094979, -0.001996], + [0.02933, 0.069097, 
-0.058226], + [-0.053871, 0.064321, 0.044561], + [0., 0.067885, 0.066458]], + atol=1e-6 + ) + + +def test_read_dig_dat(tmp_path): + """Test reading *.dat electrode locations.""" + rows = [ + ['Nasion', 78, 0.00, 1.00, 0.00], + ['Left', 76, -1.00, 0.00, 0.00], + ['Right', 82, 1.00, -0.00, 0.00], + ['O2', 69, -0.50, -0.90, 0.05], + ['O2', 68, 0.00, 0.01, 0.02], + ['Centroid', 67, 0.00, 0.00, 0.00], + ] + # write mock test.dat file + temp_dir = str(tmp_path) + fname_temp = op.join(temp_dir, 'test.dat') + with open(fname_temp, 'w') as fid: + for row in rows: + name = row[0].rjust(10) + data = '\t'.join(map(str, row[1:])) + fid.write("%s\t%s\n" % (name, data)) + # construct expected value + idents = { + 78: FIFF.FIFFV_POINT_NASION, + 76: FIFF.FIFFV_POINT_LPA, + 82: FIFF.FIFFV_POINT_RPA, + 68: 1, + 69: 1, + } + kinds = { + 78: FIFF.FIFFV_POINT_CARDINAL, + 76: FIFF.FIFFV_POINT_CARDINAL, + 82: FIFF.FIFFV_POINT_CARDINAL, + 69: FIFF.FIFFV_POINT_EEG, + 68: FIFF.FIFFV_POINT_EEG, + } + target = {row[0]: {'r': row[2:], 'ident': idents[row[1]], + 'kind': kinds[row[1]], 'coord_frame': 0} + for row in rows[:-1]} + assert_allclose(target['O2']['r'], [0, 0.01, 0.02]) + # read it + with pytest.warns(RuntimeWarning, match=r'Duplic.*for O2 \(2\)'): + dig = read_dig_dat(fname_temp) + assert set(dig.ch_names) == {'O2'} + keys = chain(['Left', 'Nasion', 'Right'], dig.ch_names) + target = [target[k] for k in keys] + assert dig.dig == target + + +def test_read_dig_montage_using_polhemus_fastscan(): + """Test FastScan.""" + N_EEG_CH = 10 + + my_electrode_positions = read_polhemus_fastscan( + op.join(kit_dir, 'test_elp.txt') + ) + + montage = make_dig_montage( + # EEG_CH + ch_pos=dict(zip(ascii_lowercase[:N_EEG_CH], + np.random.RandomState(0).rand(N_EEG_CH, 3))), + # NO NAMED points + nasion=my_electrode_positions[0], + lpa=my_electrode_positions[1], + rpa=my_electrode_positions[2], + hpi=my_electrode_positions[3:], + hsp=read_polhemus_fastscan(op.join(kit_dir, 'test_hsp.txt')), + + # Other defaults + coord_frame='unknown' + ) + + assert repr(montage) == ( + '' + ) + + assert set([d['coord_frame'] for d in montage.dig]) == { + FIFF.FIFFV_COORD_UNKNOWN + } + + EXPECTED_FID_IN_POLHEMUS = { + 'nasion': [0.001393, 0.0131613, -0.0046967], + 'lpa': [-0.0624997, -0.0737271, 0.07996], + 'rpa': [-0.0748957, 0.0873785, 0.0811943], + } + fiducials, fid_coordframe = _get_fid_coords(montage.dig) + assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN + for kk, val in fiducials.items(): + assert_allclose(val, EXPECTED_FID_IN_POLHEMUS[kk]) + + +def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): + """Test reading Polhemus FastSCAN errors.""" + with open(op.join(kit_dir, 'test_elp.txt')) as fid: + content = fid.read().replace('FastSCAN', 'XxxxXXXX') + + fname = tmp_path / 'faulty_FastSCAN.txt' + with open(fname, 'w') as fid: + fid.write(content) + + with pytest.raises(ValueError, match='not contain.*Polhemus FastSCAN'): + _ = read_polhemus_fastscan(fname) + + EXPECTED_ERR_MSG = "allowed value is '.txt', but got '.bar' instead" + with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): + _ = read_polhemus_fastscan(fname=tmp_path / 'foo.bar') + + +def test_read_dig_polhemus_isotrak_hsp(): + """Test reading Polhemus IsoTrak HSP file.""" + EXPECTED_FID_IN_POLHEMUS = { + 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), + 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + } + montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.hsp'), + 
ch_names=None) + assert repr(montage) == ( + '' + ) + + fiducials, fid_coordframe = _get_fid_coords(montage.dig) + + assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN + for kk, val in fiducials.items(): + assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk]) + + +def test_read_dig_polhemus_isotrak_elp(): + """Test reading Polhemus IsoTrak ELP file.""" + EXPECTED_FID_IN_POLHEMUS = { + 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), + 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + } + montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.elp'), + ch_names=None) + assert repr(montage) == ( + '' + ) + fiducials, fid_coordframe = _get_fid_coords(montage.dig) + + assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN + for kk, val in fiducials.items(): + assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk]) + + +@pytest.fixture(scope='module') +def isotrak_eeg(tmp_path_factory): + """Mock isotrak file with EEG positions.""" + _SEED = 42 + N_ROWS, N_COLS = 5, 3 + content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS) + + fname = tmp_path_factory.mktemp('data') / 'test.eeg' + with open(str(fname), 'w') as fid: + fid.write(( + '3 200\n' + '//Shape file\n' + '//Minor revision number\n' + '2\n' + '//Subject Name\n' + '%N Name \n' + '////Shape code, number of digitized points\n' + )) + fid.write('0 {rows:d}\n'.format(rows=N_ROWS)) + fid.write(( + '//Position of fiducials X+, Y+, Y- on the subject\n' + '%F 0.11056 -5.421e-19 0 \n' + '%F -0.00021075 0.080793 -7.5894e-19 \n' + '%F 0.00021075 -0.080793 -2.8731e-18 \n' + '//No of rows, no of columns; position of digitized points\n' + )) + fid.write('{rows:d} {cols:d}\n'.format(rows=N_ROWS, cols=N_COLS)) + for row in content: + fid.write('\t'.join('%0.18e' % cell for cell in row) + '\n') + + return str(fname) + + +def test_read_dig_polhemus_isotrak_eeg(isotrak_eeg): + """Test reading Polhemus IsoTrak EEG positions.""" + N_CHANNELS = 5 + _SEED = 42 + EXPECTED_FID_IN_POLHEMUS = { + 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), + 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + } + ch_names = ['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS)] + EXPECTED_CH_POS = dict(zip( + ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3))) + + montage = read_dig_polhemus_isotrak(fname=isotrak_eeg, ch_names=ch_names) + assert repr(montage) == ( + '' + ) + + fiducials, fid_coordframe = _get_fid_coords(montage.dig) + + assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN + for kk, val in fiducials.items(): + assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk]) + + for kk, dig_point in zip(montage.ch_names, _get_dig_eeg(montage.dig)): + assert_array_equal(dig_point['r'], EXPECTED_CH_POS[kk]) + assert dig_point['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN + + +def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmp_path): + """Test errors in reading Polhemus IsoTrak files. + + 1 - matching ch_names and number of points in isotrak file. + 2 - error for unsupported file extensions. 
+ """ + # Check ch_names + N_CHANNELS = 5 + EXPECTED_ERR_MSG = "not match the number of points.*Expected.*5, given 47" + with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): + _ = read_dig_polhemus_isotrak( + fname=isotrak_eeg, + ch_names=['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS + 42)] + ) + + # Check fname extensions + fname = op.join(tmp_path, 'foo.bar') + with pytest.raises( + ValueError, + match="Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead" + ): + _ = read_dig_polhemus_isotrak(fname=fname, ch_names=None) + + +def test_combining_digmontage_objects(): + """Test combining different DigMontage objects.""" + rng = np.random.RandomState(0) + fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3))) + + # hsp positions are [1X, 1X, 1X] + hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.)) + hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.)) + hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.)) + + # hpi positions are [2X, 2X, 2X] + hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.)) + hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.)) + hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.)) + + # channels have positions at 40s, 50s, and 60s. + ch_pos1 = make_dig_montage( + **fiducials, + ch_pos={'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43]} + ) + ch_pos2 = make_dig_montage( + **fiducials, + ch_pos={'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53]} + ) + ch_pos3 = make_dig_montage( + **fiducials, + ch_pos={'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63]} + ) + + montage = ( + DigMontage() + hsp1 + hsp2 + hsp3 + hpi1 + hpi2 + hpi3 + ch_pos1 + + ch_pos2 + ch_pos3 + ) + assert repr(montage) == ( + '' + ) + + EXPECTED_MONTAGE = make_dig_montage( + **fiducials, + hsp=np.concatenate([np.full((2, 3), 11.), np.full((2, 3), 12.), + np.full((2, 3), 13.)]), + hpi=np.concatenate([np.full((2, 3), 21.), np.full((2, 3), 22.), + np.full((2, 3), 23.)]), + ch_pos={ + 'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43], + 'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53], + 'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63], + } + ) + + # Do some checks to ensure they are the same DigMontage + assert len(montage.ch_names) == len(EXPECTED_MONTAGE.ch_names) + assert all([c in montage.ch_names for c in EXPECTED_MONTAGE.ch_names]) + actual_occurrences = _count_points_by_type(montage.dig) + expected_occurrences = _count_points_by_type(EXPECTED_MONTAGE.dig) + assert actual_occurrences == expected_occurrences + + +def test_combining_digmontage_forbiden_behaviors(): + """Test combining different DigMontage objects with repeated names.""" + rng = np.random.RandomState(0) + fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3))) + dig1 = make_dig_montage( + **fiducials, + ch_pos=dict(zip(list('abc'), rng.rand(3, 3))), + ) + dig2 = make_dig_montage( + **fiducials, + ch_pos=dict(zip(list('bcd'), rng.rand(3, 3))), + ) + dig2_wrong_fid = make_dig_montage( + nasion=rng.rand(3), lpa=rng.rand(3), rpa=rng.rand(3), + ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))), + ) + dig2_wrong_coordframe = make_dig_montage( + **fiducials, + ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))), + coord_frame='meg' + ) + + EXPECTED_ERR_MSG = "Cannot.*duplicated channel.*found: \'b\', \'c\'." 
+ with pytest.raises(RuntimeError, match=EXPECTED_ERR_MSG): + _ = dig1 + dig2 + + with pytest.raises(RuntimeError, match='fiducial locations do not match'): + _ = dig1 + dig2_wrong_fid + + with pytest.raises(RuntimeError, match='not in the same coordinate '): + _ = dig1 + dig2_wrong_coordframe + + +def test_set_dig_montage(): + """Test setting DigMontage with toy understandable points.""" + N_CHANNELS, N_HSP, N_HPI = 3, 2, 1 + ch_names = list(ascii_lowercase[:N_CHANNELS]) + ch_pos = dict(zip( + ch_names, + np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3), + )) + + montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame='head') + + assert repr(montage_ch_only) == ( + '' + ) + info = create_info(ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage_ch_only) + assert len(info['dig']) == len(montage_ch_only.dig) + + assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]), + desired=[[0., 1., 2., 0., 0., 0.], + [3., 4., 5., 0., 0., 0.], + [6., 7., 8., 0., 0., 0.]]) + + montage_full = make_dig_montage( + ch_pos=dict(**ch_pos, EEG000=np.full(3, 42)), # 4 = 3 egg + 1 eeg_ref + nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3], + hsp=np.full((N_HSP, 3), 4), + hpi=np.full((N_HPI, 3), 4), + coord_frame='head' + ) + + assert repr(montage_full) == ( + '' + ) + + info = create_info(ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage_full) + EXPECTED_LEN = sum({'hsp': 2, 'hpi': 1, 'fid': 3, 'eeg': 4}.values()) + assert len(info['dig']) == EXPECTED_LEN + assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]), + desired=[[0., 1., 2., 42., 42., 42.], + [3., 4., 5., 42., 42., 42.], + [6., 7., 8., 42., 42., 42.]]) + + +@testing.requires_testing_data +def test_fif_dig_montage(tmp_path): + """Test FIF dig montage support.""" + dig_montage = read_dig_fif(fif_dig_montage_fname) + + # test round-trip IO + temp_dir = str(tmp_path) + fname_temp = op.join(temp_dir, 'test.fif') + _check_roundtrip(dig_montage, fname_temp) + + # Make a BrainVision file like the one the user would have had + raw_bv = read_raw_brainvision(bv_fname, preload=True) + raw_bv_2 = raw_bv.copy() + mapping = dict() + for ii, ch_name in enumerate(raw_bv.ch_names): + mapping[ch_name] = 'EEG%03d' % (ii + 1,) + raw_bv.rename_channels(mapping) + for ii, ch_name in enumerate(raw_bv_2.ch_names): + mapping[ch_name] = 'EEG%03d' % (ii + 33,) + raw_bv_2.rename_channels(mapping) + raw_bv.add_channels([raw_bv_2]) + for ch in raw_bv.info['chs']: + ch['kind'] = FIFF.FIFFV_EEG_CH + + # Set the montage + raw_bv.set_montage(dig_montage) + + # Check the result + evoked = read_evokeds(evoked_fname)[0] + + # check info[chs] matches + assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1) + for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]): + assert_equal(ch_py['ch_name'], + ch_c['ch_name'].replace('EEG ', 'EEG')) + # C actually says it's unknown, but it's not (?): + # assert_equal(ch_py['coord_frame'], ch_c['coord_frame']) + assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD) + c_loc = ch_c['loc'].copy() + c_loc[c_loc == 0] = np.nan + assert_allclose(ch_py['loc'], c_loc, atol=1e-7) + + # check info[dig] + assert_dig_allclose(raw_bv.info, evoked.info) + + # Roundtrip of non-FIF start + montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp), + hpi=read_mrk(hpi)) + elp_points = read_polhemus_fastscan(elp) + ch_pos = {"EEG%03d" % (k + 1): pos for k, pos in enumerate(elp_points[8:])} + montage += make_dig_montage(nasion=elp_points[0], + lpa=elp_points[1], + rpa=elp_points[2], + 
ch_pos=ch_pos) + _check_roundtrip(montage, fname_temp, 'unknown') + montage = transform_to_head(montage) + _check_roundtrip(montage, fname_temp) + montage.dig[0]['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN + with pytest.raises(RuntimeError, match='Only a single coordinate'): + montage.save(fname_temp) + + +@testing.requires_testing_data +def test_egi_dig_montage(tmp_path): + """Test EGI MFF XML dig montage support.""" + dig_montage = read_dig_egi(egi_dig_montage_fname) + fid, coord = _get_fid_coords(dig_montage.dig) + + assert coord == FIFF.FIFFV_COORD_UNKNOWN + assert_allclose( + actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]), + desired=[[ 0. , 10.564, -2.051], # noqa + [-8.592, 0.498, -4.128], # noqa + [ 8.592, 0.498, -4.128]], # noqa + ) + + # Test accuracy and embedding within raw object + raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d') + + raw_egi.set_montage(dig_montage) + test_raw_egi = read_raw_fif(egi_fif_fname) + + assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names)) + for ch_raw, ch_test_raw in zip(raw_egi.info['chs'], + test_raw_egi.info['chs']): + assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name']) + assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD) + assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7) + + assert_dig_allclose(raw_egi.info, test_raw_egi.info) + + dig_montage_in_head = transform_to_head(dig_montage.copy()) + fid, coord = _get_fid_coords(dig_montage_in_head.dig) + assert coord == FIFF.FIFFV_COORD_HEAD + assert_allclose( + actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]), + desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]], + atol=1e-4, + ) + + # test round-trip IO + fname_temp = tmp_path / 'egi_test.fif' + _check_roundtrip(dig_montage, fname_temp, 'unknown') + _check_roundtrip(dig_montage_in_head, fname_temp) + + +def _pop_montage(dig_montage, ch_name): + # remove reference that was not used in old API + name_idx = dig_montage.ch_names.index(ch_name) + dig_idx = dig_montage._get_dig_names().index(ch_name) + + del dig_montage.dig[dig_idx] + del dig_montage.ch_names[name_idx] + for k in range(dig_idx, len(dig_montage.dig)): + dig_montage.dig[k]['ident'] -= 1 + + +@testing.requires_testing_data +def test_read_dig_captrak(tmp_path): + """Test reading a captrak montage file.""" + EXPECTED_CH_NAMES_OLD = [ + 'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1', + 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4', + 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6', + 'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2', + 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3', + 'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10', + 'TP7', 'TP8', 'TP9' + ] + EXPECTED_CH_NAMES = [ + 'T7', 'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5', + 'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2', + 'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3', + 'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4', + 'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4', + 'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8', + 'C6', 'FT8' + ] + assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD) + montage = read_dig_captrak( + fname=op.join(data_path, 'montage', 'captrak_coords.bvct') + ) + + assert montage.ch_names == EXPECTED_CH_NAMES + assert repr(montage) == ( + '' + ) + + montage = 
transform_to_head(montage) # transform_to_head has to be tested + _check_roundtrip(montage=montage, + fname=str(tmp_path / 'bvct_test.fif')) + + fid, _ = _get_fid_coords(montage.dig) + assert_allclose( + actual=np.array([fid.nasion, fid.lpa, fid.rpa]), + desired=[[0, 0.11309, 0], [-0.09189, 0, 0], [0.09240, 0, 0]], + atol=1e-5, + ) + + raw_bv = read_raw_brainvision(bv_raw_fname) + raw_bv.set_channel_types({"HEOG": 'eog', "VEOG": 'eog', "ECG": 'ecg'}) + + raw_bv.set_montage(montage) + + test_raw_bv = read_raw_fif(bv_fif_fname) + + # compare after set_montage using chs loc. + for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']): + assert_allclose(actual['loc'][:3], expected['loc'][:3]) + if actual['kind'] == FIFF.FIFFV_EEG_CH: + assert_allclose(actual['loc'][3:6], + [-0.005103, 0.05395, 0.144622], rtol=1e-04) + + +# https://gist.github.com/larsoner/2264fb5895070d29a8c9aa7c0dc0e8a6 +_MGH60 = [ + 'Fz', 'F2', 'AF4', 'Fpz', 'Fp1', 'AF8', 'FT9', 'F7', 'FC5', 'FC6', 'FT7', + 'F1', 'AF7', 'FT8', 'F6', 'F5', 'FC1', 'FC2', 'FT10', 'T9', 'Cz', 'F4', + 'T7', 'C2', 'C4', 'C1', 'C3', 'F8', 'F3', 'C5', 'Fp2', 'AF3', + 'CP2', 'P2', 'O2', 'Iz', 'Oz', 'PO4', 'O1', 'P8', 'PO8', 'P6', 'PO7', 'PO3', 'C6', 'TP9', 'TP8', 'CP4', 'P4', # noqa + 'CP3', 'CP1', 'TP7', 'P3', 'Pz', 'P1', 'P7', 'P5', 'TP10', 'T8', 'T10', +] + + +@pytest.mark.parametrize('rename', ('raw', 'montage', 'custom')) +def test_set_montage_mgh(rename): + """Test setting 'mgh60' montage to old fif.""" + raw = read_raw_fif(fif_fname) + eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude=()) + assert list(eeg_picks) == [ii for ii, name in enumerate(raw.ch_names) + if name.startswith('EEG')] + orig_pos = np.array([raw.info['chs'][pick]['loc'][:3] + for pick in eeg_picks]) + atol = 1e-6 + mon = None + if rename == 'raw': + raw.rename_channels(lambda x: x.replace('EEG ', 'EEG')) + raw.set_montage('mgh60') # test loading with string argument + elif rename == 'montage': + mon = make_standard_montage('mgh60') + mon.rename_channels(lambda x: x.replace('EEG', 'EEG ')) + assert [raw.ch_names[pick] for pick in eeg_picks] == mon.ch_names + raw.set_montage(mon) + else: + atol = 3e-3 # different subsets of channel locations + assert rename == 'custom' + assert len(_MGH60) == 60 + mon = make_standard_montage('standard_1020') + assert len(mon._get_ch_pos()) == 94 + + def renamer(x): + try: + return 'EEG %03d' % (_MGH60.index(x) + 1,) + except ValueError: + return x + + mon.rename_channels(renamer) + raw.set_montage(mon) + + if mon is not None: + # first two are 'Fz' and 'F2', take them from standard_1020.elc -- + # they should not be changed on load! 
+ want_pos = [[0.3122, 58.5120, 66.4620], [29.5142, 57.6019, 59.5400]] + got_pos = [mon.get_positions()['ch_pos'][f'EEG {x:03d}'] * 1000 + for x in range(1, 3)] + assert_allclose(want_pos, got_pos) + assert mon.dig[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI + trans = compute_native_head_t(mon) + trans_2 = _get_trans('fsaverage', 'mri', 'head')[0] + assert trans['to'] == trans_2['to'] + assert trans['from'] == trans_2['from'] + assert_allclose(trans['trans'], trans_2['trans'], atol=1e-6) + + new_pos = np.array([ch['loc'][:3] for ch in raw.info['chs'] + if ch['ch_name'].startswith('EEG')]) + assert ((orig_pos != new_pos).all()) + + r0 = _fit_sphere(new_pos)[1] + assert_allclose(r0, [-0.001021, 0.014554, 0.041404], atol=1e-4) + # spot check + assert_allclose(new_pos[:2], [[-0.001229, 0.093274, 0.102639], + [0.027968, 0.09187, 0.09578]], atol=atol) + + +# XXX: this does not check ch_names + it cannot work because of write_dig +def _check_roundtrip(montage, fname, coord_frame='head'): + """Check roundtrip writing.""" + montage.save(fname, overwrite=True) + montage_read = read_dig_fif(fname=fname) + + assert_equal(repr(montage), repr(montage_read)) + assert_equal(_check_get_coord_frame(montage_read.dig), coord_frame) + assert_dig_allclose(montage, montage_read) + + +def _fake_montage(ch_names): + pos = np.random.RandomState(42).randn(len(ch_names), 3) + return make_dig_montage(ch_pos=dict(zip(ch_names, pos)), + coord_frame='head') + + +cnt_ignore_warns = [ + pytest.mark.filterwarnings( + 'ignore:.*Could not parse meas date from the header. Setting to None.' + ), + pytest.mark.filterwarnings(( + 'ignore:.*Could not define the number of bytes automatically.' + ' Defaulting to 2.') + ), +] + + +def test_digmontage_constructor_errors(): + """Test proper error messaging.""" + with pytest.raises(ValueError, match='does not match the number'): + _ = DigMontage(ch_names=['foo', 'bar'], dig=list()) + + +def test_transform_to_head_and_compute_dev_head_t(): + """Test transform_to_head and compute_dev_head_t.""" + EXPECTED_DEV_HEAD_T = \ + [[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04], + [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02], + [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02], + [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]] + + EXPECTED_FID_IN_POLHEMUS = { + 'nasion': np.array([0.001393, 0.0131613, -0.0046967]), + 'lpa': np.array([-0.0624997, -0.0737271, 0.07996]), + 'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]), + } + + EXPECTED_FID_IN_HEAD = { + 'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]), + 'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]), + 'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]), + } + + hpi_dev = np.array( + [[ 2.13951493e-02, 8.47444056e-02, -5.65431188e-02], # noqa + [ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02], # noqa + [ 1.05916829e-01, 8.18485672e-05, 1.19928083e-02], # noqa + [ 9.26595105e-02, 4.64804385e-02, 8.45141253e-03], # noqa + [ 9.42554419e-02, -4.35206589e-02, 8.78999363e-03]] # noqa + ) + + hpi_polhemus = np.array( + [[-0.0595004, -0.0704836, 0.075893 ], # noqa + [-0.0646373, 0.0838228, 0.0762123], # noqa + [-0.0135035, 0.0072522, -0.0268405], # noqa + [-0.0202967, -0.0351498, -0.0129305], # noqa + [-0.0277519, 0.0452628, -0.0222407]] # noqa + ) + + montage_polhemus = make_dig_montage( + **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown' + ) + + montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg') + + # Test 
regular workflow to get dev_head_t + montage = montage_polhemus + montage_meg + fids, _ = _get_fid_coords(montage.dig) + for kk in fids: + assert_allclose(fids[kk], EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5) + + with pytest.raises(ValueError, match='set to head coordinate system'): + _ = compute_dev_head_t(montage) + + montage = transform_to_head(montage) + + fids, _ = _get_fid_coords(montage.dig) + for kk in fids: + assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5) + + dev_head_t = compute_dev_head_t(montage) + assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=5e-7) + + # Test errors when number of HPI points do not match + EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head' + with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): + _ = compute_dev_head_t(transform_to_head(montage_polhemus)) + + EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head' + with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): + _ = compute_dev_head_t(transform_to_head( + montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS) + )) + + EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head' + with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): + _ = compute_dev_head_t(transform_to_head( + DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) + + montage_polhemus + )) + + +def test_set_montage_with_mismatching_ch_names(): + """Test setting a DigMontage with mismatching ch_names.""" + raw = read_raw_fif(fif_fname) + montage = make_standard_montage('mgh60') + + # 'EEG 001' and 'EEG001' won't match + missing_err = '60 channel positions not present' + with pytest.raises(ValueError, match=missing_err): + raw.set_montage(montage) + + montage.ch_names = [ # modify the names in place + name.replace('EEG', 'EEG ') for name in montage.ch_names + ] + raw.set_montage(montage) # does not raise + + # Case sensitivity + raw.rename_channels(lambda x: x.lower()) + with pytest.raises(ValueError, match=missing_err): + raw.set_montage(montage) + # should work + raw.set_montage(montage, match_case=False) + raw.rename_channels(lambda x: x.upper()) # restore + assert 'EEG 001' in raw.ch_names and 'eeg 001' not in raw.ch_names + raw.rename_channels({'EEG 002': 'eeg 001'}) + assert 'EEG 001' in raw.ch_names and 'eeg 001' in raw.ch_names + raw.set_channel_types({'eeg 001': 'misc'}) + raw.set_montage(montage) + raw.set_channel_types({'eeg 001': 'eeg'}) + with pytest.raises(ValueError, match='1 channel position not present'): + raw.set_montage(montage) + with pytest.raises(ValueError, match='match_case=False as 1 channel name'): + raw.set_montage(montage, match_case=False) + info = create_info(['EEG 001'], 1000., 'eeg') + mon = make_dig_montage({'EEG 001': np.zeros(3), 'eeg 001': np.zeros(3)}, + nasion=[0, 1., 0], rpa=[1., 0, 0], lpa=[-1., 0, 0]) + info.set_montage(mon) + with pytest.raises(ValueError, match='match_case=False as 1 montage name'): + info.set_montage(mon, match_case=False) + + +def test_set_montage_with_sub_super_set_of_ch_names(): + """Test info and montage ch_names matching criteria.""" + N_CHANNELS = len('abcdef') + montage = _make_toy_dig_montage(N_CHANNELS, coord_frame='head') + + # montage and info match + info = create_info(ch_names=list('abcdef'), sfreq=1, ch_types='eeg') + info.set_montage(montage) + + # montage is a SUPERset of info + info = create_info(list('abc'), sfreq=1, ch_types='eeg') + info.set_montage(montage) + assert len(info['dig']) == len(list('abc')) + + # montage is a SUBset of info + _MSG = 'subset of info. 
There are 2 .* not present in the DigMontage' + info = create_info(ch_names=list('abcdfgh'), sfreq=1, ch_types='eeg') + with pytest.raises(ValueError, match=_MSG) as exc: + info.set_montage(montage) + # plus suggestions + assert exc.match('set_channel_types') + assert exc.match('on_missing') + + +def test_set_montage_with_known_aliases(): + """Test matching unrecognized channel locations to known aliases.""" + # montage and info match + mock_montage_ch_names = ['POO7', 'POO8'] + n_channels = len(mock_montage_ch_names) + + montage = make_dig_montage(ch_pos=dict( + zip( + mock_montage_ch_names, + np.arange(n_channels * 3).reshape(n_channels, 3), + )), + coord_frame='head') + + mock_info_ch_names = ['Cb1', 'Cb2'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage, match_alias=True) + + # work with match_case + mock_info_ch_names = ['cb1', 'cb2'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage, match_case=False, match_alias=True) + + # should warn user T1 instead of its alias T9 + mock_info_ch_names = ['Cb1', 'T1'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + with pytest.raises(ValueError, match='T1'): + info.set_montage(montage, match_case=False, match_alias=True) + + +def test_heterogeneous_ch_type(): + """Test ch_names matching criteria with heterogeneous ch_type.""" + VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg', 'dbs') + + montage = _make_toy_dig_montage( + n_channels=len(VALID_MONTAGE_NAMED_CHS), + coord_frame='head', + ) + + # Montage and info match + info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS)) + RawArray(np.zeros((4, 1)), info, copy=None).set_montage(montage) + + +def test_set_montage_coord_frame_in_head_vs_unknown(): + """Test set montage using head and unknown only.""" + N_CHANNELS, NaN = 3, np.nan + + raw = _make_toy_raw(N_CHANNELS) + montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame='head') + montage_in_unknown = _make_toy_dig_montage( + N_CHANNELS, coord_frame='unknown' + ) + montage_in_unknown_with_fid = _make_toy_dig_montage( + N_CHANNELS, coord_frame='unknown', + nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0], + ) + + assert_allclose( + actual=np.array([ch['loc'] for ch in raw.info['chs']]), + desired=np.full((N_CHANNELS, 12), np.nan) + ) + + raw.set_montage(montage_in_head) + assert_allclose( + actual=np.array([ch['loc'] for ch in raw.info['chs']]), + desired=[ + [0., 1., 2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + [3., 4., 5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + [6., 7., 8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + ] + ) + + with pytest.warns(RuntimeWarning, match='assuming identity'): + raw.set_montage(montage_in_unknown) + + raw.set_montage(montage_in_unknown_with_fid) + assert_allclose( + actual=np.array([ch['loc'] for ch in raw.info['chs']]), + desired=[ + [-0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + [-6., 7., -8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + ] + ) + + # check no collateral effects from transforming montage + assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == 'unknown' + assert_array_equal( + _get_dig_montage_pos(montage_in_unknown_with_fid), + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + ) + + +@testing.requires_testing_data +@pytest.mark.parametrize('ch_type', ('eeg', 'ecog', 'seeg', 'dbs')) +def test_montage_head_frame(ch_type): + """Test that head frame is set properly.""" 
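+    # The montage below is defined in the 'mri' (FreeSurfer surface RAS)
+    # frame with fsaverage fiducials; set_montage should map the channel
+    # positions into the 'head' frame, which is verified here against the
+    # mri->head transform returned by compute_native_head_t.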
+ # gh-9446 + data = np.random.randn(2, 100) + info = create_info(['a', 'b'], 512, ch_type) + for ch in info['chs']: + assert ch['coord_frame'] == FIFF.FIFFV_COORD_HEAD + raw = RawArray(data, info) + ch_pos = dict(a=[-0.00250136, 0.04913788, 0.05047056], + b=[-0.00528394, 0.05066484, 0.05061559]) + lpa, nasion, rpa = get_mni_fiducials( + 'fsaverage', subjects_dir=subjects_dir) + lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r'] + montage = make_dig_montage( + ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa) + mri_head_t = compute_native_head_t(montage) + raw.set_montage(montage) + pos = apply_trans(mri_head_t, np.array(list(ch_pos.values()))) + for p, ch in zip(pos, raw.info['chs']): + assert ch['coord_frame'] == FIFF.FIFFV_COORD_HEAD + assert_allclose(p, ch['loc'][:3]) + + # Also test that including channels in the montage that will not have their + # positions set will emit a warning + raw.set_channel_types(dict(a='misc')) + with pytest.warns(RuntimeWarning, match='Not setting .*of 1 misc channel'): + raw.set_montage(montage) + + # and with a bunch of bad types + raw = read_raw_fif(fif_fname) + ch_pos = {ch_name: np.zeros(3) for ch_name in raw.ch_names} + mon = make_dig_montage(ch_pos, coord_frame='head') + with pytest.warns(RuntimeWarning, match='316 eog/grad/mag/stim channels'): + raw.set_montage(mon) + + +def test_set_montage_with_missing_coordinates(): + """Test set montage with missing coordinates.""" + N_CHANNELS, NaN = 3, np.nan + + raw = _make_toy_raw(N_CHANNELS) + raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names}) + # don't include all the channels + ch_names = raw.ch_names[1:] + n_channels = len(ch_names) + ch_coords = np.arange(n_channels * 3).reshape(n_channels, 3) + montage_in_mri = make_dig_montage( + ch_pos=dict(zip(ch_names, ch_coords,)), + coord_frame='unknown', + nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0], + ) + + with pytest.raises(ValueError, match='DigMontage is ' + 'only a subset of info'): + raw.set_montage(montage_in_mri) + + with pytest.raises(ValueError, match='Invalid value'): + raw.set_montage(montage_in_mri, on_missing='foo') + + with pytest.raises(TypeError, match='must be an instance'): + raw.set_montage(montage_in_mri, on_missing=True) + + with pytest.warns(RuntimeWarning, match='DigMontage is ' + 'only a subset of info'): + raw.set_montage(montage_in_mri, on_missing='warn') + + raw.set_montage(montage_in_mri, on_missing='ignore') + assert_allclose( + actual=np.array([ch['loc'] for ch in raw.info['chs']]), + desired=[ + [NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], + [0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], + ] + ) + + +@testing.requires_testing_data +def test_get_montage(): + """Test get montage from Instance. + + Test with standard montage and then loaded in montage. + """ + # 1. 
read in testing data and assert montage roundtrip + # for testing dataset: 'test_raw.fif' + raw = read_raw_fif(fif_fname) + raw = raw.rename_channels(lambda name: name.replace('EEG ', 'EEG')) + raw2 = raw.copy() + # get montage and then set montage and + # it should be the same + montage = raw.get_montage() + raw.set_montage(montage, on_missing='raise') + test_montage = raw.get_montage() + assert_object_equal(raw.info['chs'], raw2.info['chs']) + assert_dig_allclose(raw2.info, raw.info) + assert_object_equal(raw2.info['dig'], raw.info['dig']) + + # the montage does not change + assert_object_equal(montage.dig, test_montage.dig) + + # the montage should fulfill a roundtrip with make_dig_montage + test2_montage = make_dig_montage(**montage.get_positions()) + assert_object_equal(test2_montage.dig, test_montage.dig) + + # 2. now do a standard montage + montage = make_standard_montage('mgh60') + # set the montage; note renaming to make standard montage map + raw.set_montage(montage) + + # get montage back and set it + # the channel locations should be the same + raw2 = raw.copy() + test_montage = raw.get_montage() + raw.set_montage(test_montage, on_missing='ignore') + + # the montage should fulfill a roundtrip with make_dig_montage + test2_montage = make_dig_montage(**test_montage.get_positions()) + assert_object_equal(test2_montage.dig, test_montage.dig) + + # chs should not change + assert_object_equal(raw2.info['chs'], raw.info['chs']) + # dig order might be different after set_montage + assert montage.ch_names == test_montage.ch_names + # note that test_montage will have different coordinate frame + # compared to standard montage + assert_dig_allclose(raw2.info, raw.info) + assert_object_equal(raw2.info['dig'], raw.info['dig']) + + # 3. if montage gets set to None + raw.set_montage(None) + assert raw.get_montage() is None + + # 4. 
read in BV test dataset and make sure montage + # fulfills roundtrip on non-standard montage + dig_montage = read_dig_fif(fif_dig_montage_fname) + + # Make a BrainVision file like the one the user would have had + # with testing dataset 'test.vhdr' + raw_bv = read_raw_brainvision(bv_fname, preload=True) + raw_bv_2 = raw_bv.copy() + + # rename channels to make it have the full set + # of channels + mapping = dict() + for ii, ch_name in enumerate(raw_bv.ch_names): + mapping[ch_name] = 'EEG%03d' % (ii + 1,) + raw_bv.rename_channels(mapping) + for ii, ch_name in enumerate(raw_bv_2.ch_names): + mapping[ch_name] = 'EEG%03d' % (ii + 33,) + raw_bv_2.rename_channels(mapping) + raw_bv.add_channels([raw_bv_2]) + for ch in raw_bv.info['chs']: + ch['kind'] = FIFF.FIFFV_EEG_CH + + # Set the montage and roundtrip + raw_bv.set_montage(dig_montage) + raw_bv2 = raw_bv.copy() + + # reset the montage + test_montage = raw_bv.get_montage() + raw_bv.set_montage(test_montage, on_missing='ignore') + # dig order might be different after set_montage + assert_object_equal(raw_bv2.info['dig'], raw_bv.info['dig']) + assert_dig_allclose(raw_bv2.info, raw_bv.info) + + # if dig is not set in the info, then montage returns None + with raw.info._unlock(): + raw.info['dig'] = None + assert raw.get_montage() is None + + # the montage should fulfill a roundtrip with make_dig_montage + test2_montage = make_dig_montage(**test_montage.get_positions()) + assert_object_equal(test2_montage.dig, test_montage.dig) + + +def test_read_dig_hpts(): + """Test reading .hpts file (from MNE legacy).""" + fname = op.join( + op.dirname(_BRAINVISON_FILE), 'tests', 'data', 'test.hpts' + ) + + montage = read_dig_hpts(fname) + assert repr(montage) == ( + '' + ) + + +def test_get_builtin_montages(): + """Test help function to obtain builtin montages.""" + EXPECTED_NUM = 26 + assert len(get_builtin_montages()) == EXPECTED_NUM + + +@testing.requires_testing_data +def test_plot_montage(): + """Test plotting montage.""" + # gh-8025 + montage = read_dig_captrak(bvct_dig_montage_fname) + montage.plot() + plt.close('all') + + +@testing.requires_testing_data +def test_montage_add_fiducials(): + """Test montage can add estimated fiducials for rpa, lpa, nas.""" + # get the fiducials from test file + subjects_dir = op.join(data_path, 'subjects') + subject = 'sample' + fid_fname = op.join(subjects_dir, subject, 'bem', + 'sample-fiducials.fif') + test_fids, test_coord_frame = read_fiducials(fid_fname) + test_fids = np.array([f['r'] for f in test_fids]) + + # create test montage and add estimated fiducials + test_ch_pos = {'A1': [0, 0, 0]} + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mri') + montage.add_estimated_fiducials(subject=subject, subjects_dir=subjects_dir) + + # check that adding MNI fiducials fails because we're in MRI + with pytest.raises(RuntimeError, match='Montage should be in the ' + '"mni_tal" coordinate frame'): + montage.add_mni_fiducials(subjects_dir=subjects_dir) + + # check that these fiducials are close to the estimated fiducials + ch_pos = montage.get_positions() + fids_est = [ch_pos['lpa'], ch_pos['nasion'], ch_pos['rpa']] + + dists = np.linalg.norm(test_fids - fids_est, axis=-1) * 1000. 
# -> mm + assert (dists < 8).all(), dists + + # an error should be raised if the montage is not in `mri` coord_frame + # which is the FreeSurfer RAS + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mni_tal') + with pytest.raises(RuntimeError, match='Montage should be in the ' + '"mri" coordinate frame'): + montage.add_estimated_fiducials(subject=subject, + subjects_dir=subjects_dir) + + # test that adding MNI fiducials works + montage.add_mni_fiducials(subjects_dir=subjects_dir) + test_fids = get_mni_fiducials('fsaverage', subjects_dir=subjects_dir) + for fid, test_fid in zip(montage.dig[:3], test_fids): + assert_array_equal(fid['r'], test_fid['r']) + + # test remove fiducials + montage.remove_fiducials() + assert all([d['kind'] != FIFF.FIFFV_POINT_CARDINAL for d in montage.dig]) + + +def test_read_dig_localite(tmp_path): + """Test reading Localite .csv file.""" + contents = """#,id,x,y,z + 1,Nasion,-2.016253511,6.243001715,34.63167712 + 2,LPA,71.96698724,-29.88835576,113.6703679 + 3,RPA,-82.77279316,-22.45928121,116.4005828 + 4,ch01,53.62814378,-91.37837488,29.69071863 + 5,ch02,54.02504821,-59.96228146,23.21714217 + 6,ch03,47.93261613,-29.99373786,24.56468867 + 7,ch04,29.04824633,-86.60006321,13.5073523 + 8,ch05,25.76285783,-58.1658606,3.854848377 + 9,ch06,25.39636794,-27.28186717,9.78490351 + 10,ch07,-5.181242819,-85.52115113,7.201882904 + 11,ch08,-4.995704801,-60.47053977,0.998486757 + 12,ch09,-2.680020493,-31.14357171,6.114621138 + 13,ch10,-33.65019131,-92.34198454,13.2326512 + 14,ch11,-36.22420417,-61.23822776,6.028649571 + 15,ch12,-33.21551039,-31.21772978,8.458854072 + 16,ch13,-61.38400606,-92.67546012,29.5783456 + 17,ch14,-61.16539571,-61.86866187,26.23986153 + 18,ch15,-55.82855386,-34.77319103,25.8083942""" + + fname = tmp_path / 'localite.csv' + with open(fname, 'w') as f: + for row in contents.split('\n'): + f.write(f'{row.lstrip()}\n') + montage = read_dig_localite(fname, nasion="Nasion", lpa="LPA", rpa="RPA") + s = '' + assert repr(montage) == s + assert montage.ch_names == [f'ch{i:02}' for i in range(1, 16)] diff --git a/python/libs/mne/channels/tests/test_standard_montage.py b/python/libs/mne/channels/tests/test_standard_montage.py new file mode 100644 index 0000000..9c85874 --- /dev/null +++ b/python/libs/mne/channels/tests/test_standard_montage.py @@ -0,0 +1,244 @@ +# Authors: Joan Massich +# Alexandre Gramfort +# +# License: BSD-3-Clause + + +import pytest + +import numpy as np + +from numpy.testing import (assert_allclose, assert_array_almost_equal, + assert_raises) + +from mne import create_info +from mne.channels import make_standard_montage, compute_native_head_t +from mne.channels.montage import get_builtin_montages, HEAD_SIZE_DEFAULT +from mne.io import RawArray +from mne.io._digitization import _get_dig_eeg, _get_fid_coords +from mne.io.constants import FIFF +from mne.preprocessing.nirs import optical_density, beer_lambert_law +from mne.transforms import _get_trans, _angle_between_quats, rot_to_quat + + +@pytest.mark.parametrize('kind', get_builtin_montages()) +def test_standard_montages_have_fids(kind): + """Test standard montage are all in unknown coord (have fids).""" + montage = make_standard_montage(kind) + fids, coord_frame = _get_fid_coords(montage.dig) + for k, v in fids.items(): + assert v is not None, k + for d in montage.dig: + if kind.startswith(('artinis', 'standard', 'mgh')): + want = FIFF.FIFFV_COORD_MRI + else: + want = FIFF.FIFFV_COORD_UNKNOWN + assert d['coord_frame'] == want + + +def test_standard_montage_errors(): + """Test error 
handling for wrong keys.""" + _msg = "Invalid value for the 'kind' parameter..*but got.*not-here" + with pytest.raises(ValueError, match=_msg): + _ = make_standard_montage('not-here') + + +@pytest.mark.parametrize('head_size', (HEAD_SIZE_DEFAULT, 0.05)) +@pytest.mark.parametrize('kind, tol', [ + ['EGI_256', 1e-5], + ['easycap-M1', 1e-8], + ['easycap-M10', 1e-8], + ['biosemi128', 1e-8], + ['biosemi16', 1e-8], + ['biosemi160', 1e-8], + ['biosemi256', 1e-8], + ['biosemi32', 1e-8], + ['biosemi64', 1e-8], +]) +def test_standard_montages_on_sphere(kind, tol, head_size): + """Test some standard montage are on sphere.""" + kwargs = dict() + if head_size != HEAD_SIZE_DEFAULT: + kwargs['head_size'] = head_size + montage = make_standard_montage(kind, **kwargs) + eeg_loc = np.array([ch['r'] for ch in _get_dig_eeg(montage.dig)]) + + assert_allclose( + actual=np.linalg.norm(eeg_loc, axis=1), + desired=np.full((eeg_loc.shape[0], ), head_size), + atol=tol, + ) + + +def test_standard_superset(): + """Test some properties that should hold for superset montages.""" + # new montages, tweaked to end up at the same size as the others + m_1005 = make_standard_montage('standard_1005', 0.0970) + m_1020 = make_standard_montage('standard_1020', 0.0991) + assert len(set(m_1005.ch_names) - set(m_1020.ch_names)) > 0 + # XXX weird that this is not a proper superset... + assert set(m_1020.ch_names) - set(m_1005.ch_names) == {'O10', 'O9'} + c_1005 = m_1005._get_ch_pos() + for key, value in m_1020._get_ch_pos().items(): + if key not in ('O10', 'O9'): + assert_allclose(c_1005[key], value, atol=1e-4, err_msg=key) + + +def _simulate_artinis_octamon(): + """Simulate artinis OctaMon channel data from numpy data. + + This is to test data that is imported with missing or incorrect montage + info. This data can then be used to test the set_montage function. + """ + np.random.seed(42) + data = np.absolute(np.random.normal(size=(16, 100))) + ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760', 'S2_D1 850', + 'S3_D1 760', 'S3_D1 850', 'S4_D1 760', 'S4_D1 850', + 'S5_D2 760', 'S5_D2 850', 'S6_D2 760', 'S6_D2 850', + 'S7_D2 760', 'S7_D2 850', 'S8_D2 760', 'S8_D2 850'] + ch_types = ['fnirs_cw_amplitude' for _ in ch_names] + sfreq = 10. # Hz + info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) + for i, ch_name in enumerate(ch_names): + info['chs'][i]['loc'][9] = int(ch_name.split(' ')[1]) + raw = RawArray(data, info) + + return raw + + +def _simulate_artinis_brite23(): + """Simulate artinis Brite23 channel data from numpy data. + + This is to test data that is imported with missing or incorrect montage + info. This data can then be used to test the set_montage function. + """ + np.random.seed(0) + data = np.random.normal(size=(46, 100)) + sd_names = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S3_D2', 'S4_D2', 'S5_D2', + 'S4_D3', 'S5_D3', 'S6_D3', 'S5_D4', 'S6_D4', 'S7_D4', 'S6_D5', + 'S7_D5', 'S8_D5', 'S7_D6', 'S8_D6', 'S9_D6', 'S8_D7', 'S9_D7', + 'S10_D7', 'S11_D7'] + ch_names = [] + ch_types = [] + for name in sd_names: + ch_names.append(name + ' hbo') + ch_types.append('hbo') + ch_names.append(name + ' hbr') + ch_types.append('hbr') + sfreq = 10. 
# Hz + info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) + raw = RawArray(data, info) + + return raw + + +@pytest.mark.parametrize('kind', ('octamon', 'brite23')) +def test_set_montage_artinis_fsaverage(kind): + """Test that artinis montages match fsaverage's head<->MRI transform.""" + # Compare OctaMon and Brite23 to fsaverage + trans_fs, _ = _get_trans('fsaverage') + montage = make_standard_montage(f'artinis-{kind}') + trans = compute_native_head_t(montage) + assert trans['to'] == trans_fs['to'] + assert trans['from'] == trans_fs['from'] + translation = 1000 * np.linalg.norm(trans['trans'][:3, 3] - + trans_fs['trans'][:3, 3]) + assert 0 < translation < 1 # mm + rotation = np.rad2deg( + _angle_between_quats(rot_to_quat(trans['trans'][:3, :3]), + rot_to_quat(trans_fs['trans'][:3, :3]))) + assert 0 < rotation < 1 # degrees + + +def test_set_montage_artinis_basic(): + """Test that OctaMon and Brite23 montages are set properly.""" + # Test OctaMon montage + montage_octamon = make_standard_montage('artinis-octamon') + montage_brite23 = make_standard_montage('artinis-brite23') + raw = _simulate_artinis_octamon() + raw_od = optical_density(raw) + old_info = raw.info.copy() + old_info_od = raw_od.info.copy() + raw.set_montage(montage_octamon) + raw_od.set_montage(montage_octamon) + raw_hb = beer_lambert_law(raw_od, ppf=6) # montage needed for BLL + # Check that the montage was actually modified + assert_raises(AssertionError, assert_array_almost_equal, + old_info['chs'][0]['loc'][:9], + raw.info['chs'][0]['loc'][:9]) + assert_raises(AssertionError, assert_array_almost_equal, + old_info_od['chs'][0]['loc'][:9], + raw_od.info['chs'][0]['loc'][:9]) + + # Check a known location + assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], + [0.054243, 0.081884, 0.054544]) + assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], + [-0.03013, 0.105097, 0.055894]) + assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], + [-0.055681, 0.086566, 0.055858]) + assert_array_almost_equal(raw_od.info['chs'][12]['loc'][:3], + [-0.055681, 0.086566, 0.055858]) + assert_array_almost_equal(raw_hb.info['chs'][12]['loc'][:3], + [-0.055681, 0.086566, 0.055858]) + # Check that locations are identical for a pair of channels (all elements + # except the 10th which is the wavelength if not hbo and hbr type) + assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], + raw.info['chs'][1]['loc'][:9]) + assert_array_almost_equal(raw_od.info['chs'][0]['loc'][:9], + raw_od.info['chs'][1]['loc'][:9]) + assert_array_almost_equal(raw_hb.info['chs'][0]['loc'][:9], + raw_hb.info['chs'][1]['loc'][:9]) + + # Test Brite23 montage + raw = _simulate_artinis_brite23() + old_info = raw.info.copy() + raw.set_montage(montage_brite23) + # Check that the montage was actually modified + assert_raises(AssertionError, assert_array_almost_equal, + old_info['chs'][0]['loc'][:9], + raw.info['chs'][0]['loc'][:9]) + # Check a known location + assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], + [0.068931, 0.046201, 0.072055]) + assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], + [0.055196, 0.082757, 0.052165]) + assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], + [0.033592, 0.102607, 0.047423]) + # Check that locations are identical for a pair of channels (all elements + # except the 10th which is the wavelength if not hbo and hbr type) + assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], + raw.info['chs'][1]['loc'][:9]) + + # Test channel variations + raw_old = _simulate_artinis_brite23() + # Raw 
missing some channels that are in the montage: pass + raw = raw_old.copy() + raw.pick(['S1_D1 hbo', 'S1_D1 hbr']) + raw.set_montage('artinis-brite23') + + # Unconventional channel pair: pass + raw = raw_old.copy() + info_new = create_info(['S11_D1 hbo', 'S11_D1 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + raw.set_montage('artinis-brite23') + + # Source not in montage: fail + raw = raw_old.copy() + info_new = create_info(['S12_D7 hbo', 'S12_D7 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + with pytest.raises(ValueError, match='is not in list'): + raw.set_montage('artinis-brite23') + + # Detector not in montage: fail + raw = raw_old.copy() + info_new = create_info(['S11_D8 hbo', 'S11_D8 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + with pytest.raises(ValueError, match='is not in list'): + raw.set_montage('artinis-brite23') diff --git a/python/libs/mne/chpi.py b/python/libs/mne/chpi.py new file mode 100644 index 0000000..6f6c48f --- /dev/null +++ b/python/libs/mne/chpi.py @@ -0,0 +1,1385 @@ +# -*- coding: utf-8 -*- +"""Functions for fitting head positions with (c)HPI coils.""" + +# Next, ``compute_head_pos`` can be used to: +# +# 1. Drop coils whose GOF are below ``gof_limit``. If fewer than 3 coils +# remain, abandon fitting for the chunk. +# 2. Fit dev_head_t quaternion (using ``_fit_chpi_quat_subset``), +# iteratively dropping coils (as long as 3 remain) to find the best GOF +# (using ``_fit_chpi_quat``). +# 3. If fewer than 3 coils meet the ``dist_limit`` criteria following +# projection of the fitted device coil locations into the head frame, +# abandon fitting for the chunk. +# +# The function ``filter_chpi`` uses the same linear model to filter cHPI +# and (optionally) line frequencies from the data. 
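+#
+# Taken together, a typical end-to-end use of the public functions below is
+# sketched here (the file names are illustrative only, not part of this
+# repository):
+#
+#     import mne
+#     from mne.chpi import (compute_chpi_amplitudes, compute_chpi_locs,
+#                           compute_head_pos, write_head_pos)
+#
+#     raw = mne.io.read_raw_fif('raw_with_chpi.fif', allow_maxshield='yes')
+#     amps = compute_chpi_amplitudes(raw)       # per-coil sinusoid amplitudes
+#     locs = compute_chpi_locs(raw.info, amps)  # coil positions over time
+#     pos = compute_head_pos(raw.info, locs)    # (N, 10) quaternion array
+#     write_head_pos('raw_with_chpi.pos', pos)  # MaxFilter-compatible .pos
+#
+# ``filter_chpi`` can then remove the cHPI (and optionally line) sinusoids
+# from the data before further processing.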
+ +# Authors: Eric Larson +# +# License: BSD-3-Clause + +from functools import partial + +import numpy as np +import itertools + +from .event import find_events +from .io.base import BaseRaw +from .io.kit.constants import KIT +from .io.kit.kit import RawKIT as _RawKIT +from .io.meas_info import _simplify_info, Info +from .io.pick import (pick_types, pick_channels, pick_channels_regexp, + pick_info, _picks_to_idx) +from .io.proj import Projection, setup_proj +from .io.constants import FIFF +from .io.ctf.trans import _make_ctf_coord_trans_set +from .forward import (_magnetic_dipole_field_vec, _create_meg_coils, + _concatenate_coils) +from .cov import make_ad_hoc_cov, compute_whitener +from .dipole import _make_guesses +from .fixes import jit +from .preprocessing.maxwell import (_sss_basis, _prep_mf_coils, + _regularize_out, _get_mf_picks_fix_mags) +from .transforms import (apply_trans, invert_transform, _angle_between_quats, + quat_to_rot, rot_to_quat, _fit_matched_points, + _quat_to_affine, als_ras_trans) +from .utils import (verbose, logger, use_log_level, _check_fname, warn, + _validate_type, ProgressBar, _check_option, _pl, + _on_missing) + +# Eventually we should add: +# hpicons +# high-passing of data during fits +# parsing cHPI coil information from acq pars, then to PSD if necessary + + +# ############################################################################ +# Reading from text or FIF file + +def read_head_pos(fname): + """Read MaxFilter-formatted head position parameters. + + Parameters + ---------- + fname : str + The filename to read. This can be produced by e.g., + ``maxfilter -headpos .pos``. + + Returns + ------- + pos : array, shape (N, 10) + The position and quaternion parameters from cHPI fitting. + + See Also + -------- + write_head_pos + head_pos_to_trans_rot_t + + Notes + ----- + .. versionadded:: 0.12 + """ + _check_fname(fname, must_exist=True, overwrite='read') + data = np.loadtxt(fname, skiprows=1) # first line is header, skip it + data.shape = (-1, 10) # ensure it's the right size even if empty + if np.isnan(data).any(): # make sure we didn't do something dumb + raise RuntimeError('positions could not be read properly from %s' + % fname) + return data + + +def write_head_pos(fname, pos): + """Write MaxFilter-formatted head position parameters. + + Parameters + ---------- + fname : str + The filename to write. + pos : array, shape (N, 10) + The position and quaternion parameters from cHPI fitting. + + See Also + -------- + read_head_pos + head_pos_to_trans_rot_t + + Notes + ----- + .. versionadded:: 0.12 + """ + _check_fname(fname, overwrite=True) + pos = np.array(pos, np.float64) + if pos.ndim != 2 or pos.shape[1] != 10: + raise ValueError('pos must be a 2D array of shape (N, 10)') + with open(fname, 'wb') as fid: + fid.write(' Time q1 q2 q3 q4 q5 ' + 'q6 g-value error velocity\n'.encode('ASCII')) + for p in pos: + fmts = ['% 9.3f'] + ['% 8.5f'] * 9 + fid.write(((' ' + ' '.join(fmts) + '\n') + % tuple(p)).encode('ASCII')) + + +def head_pos_to_trans_rot_t(quats): + """Convert Maxfilter-formatted head position quaternions. + + Parameters + ---------- + quats : ndarray, shape (N, 10) + MaxFilter-formatted position and quaternion parameters. + + Returns + ------- + translation : ndarray, shape (N, 3) + Translations at each time point. + rotation : ndarray, shape (N, 3, 3) + Rotations at each time point. + t : ndarray, shape (N,) + The time points. 
+
+    See Also
+    --------
+    read_head_pos
+    write_head_pos
+    """
+    t = quats[..., 0].copy()
+    rotation = quat_to_rot(quats[..., 1:4])
+    translation = quats[..., 4:7].copy()
+    return translation, rotation, t
+
+
+@verbose
+def extract_chpi_locs_ctf(raw, verbose=None):
+    r"""Extract cHPI locations from CTF data.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data with CTF cHPI information.
+    %(verbose)s
+
+    Returns
+    -------
+    %(chpi_locs)s
+
+    Notes
+    -----
+    CTF continuous head monitoring stores the x,y,z location (m) of each
+    cHPI coil as separate channels in the dataset:
+
+    - ``HLC001[123]\\*`` - nasion
+    - ``HLC002[123]\\*`` - lpa
+    - ``HLC003[123]\\*`` - rpa
+
+    This extracts these positions for use with
+    :func:`~mne.chpi.compute_head_pos`.
+
+    .. versionadded:: 0.20
+    """
+    # Pick channels corresponding to the cHPI positions
+    hpi_picks = pick_channels_regexp(raw.info['ch_names'], 'HLC00[123][123].*')
+
+    # make sure we get 9 channels
+    if len(hpi_picks) != 9:
+        raise RuntimeError('Could not find all 9 cHPI channels')
+
+    # get indices in alphabetical order
+    sorted_picks = np.array(sorted(hpi_picks,
+                                   key=lambda k: raw.info['ch_names'][k]))
+
+    # make picks to match order of dig cardinal ident codes.
+    # LPA (HPIC002[123]-*), NAS(HPIC001[123]-*), RPA(HPIC003[123]-*)
+    hpi_picks = sorted_picks[[3, 4, 5, 0, 1, 2, 6, 7, 8]]
+    del sorted_picks
+
+    # process the entire run
+    time_sl = slice(0, len(raw.times))
+    chpi_data = raw[hpi_picks, time_sl][0]
+
+    # transforms
+    tmp_trans = _make_ctf_coord_trans_set(None, None)
+    ctf_dev_dev_t = tmp_trans['t_ctf_dev_dev']
+    del tmp_trans
+
+    # find indices where chpi locations change
+    indices = [0]
+    indices.extend(np.where(np.all(np.diff(chpi_data, axis=1), axis=0))[0] + 1)
+    # data in channels are in ctf device coordinates (cm)
+    rrs = chpi_data[:, indices].T.reshape(len(indices), 3, 3)  # m
+    # map to mne device coords
+    rrs = apply_trans(ctf_dev_dev_t, rrs)
+    gofs = np.ones(rrs.shape[:2])  # not encoded, set all good
+    moments = np.zeros(rrs.shape)  # not encoded, set all zero
+    times = raw.times[indices] + raw._first_time
+    return dict(rrs=rrs, gofs=gofs, times=times, moments=moments)
+
+
+@verbose
+def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None):
+    """Extract cHPI locations from KIT data.
+
+    Parameters
+    ----------
+    raw : instance of RawKIT
+        Raw data with KIT cHPI information.
+    stim_channel : str
+        The stimulus channel that encodes HPI measurement intervals.
+    %(verbose)s
+
+    Returns
+    -------
+    %(chpi_locs)s
+
+    Notes
+    -----
+    ..
versionadded:: 0.23 + """ + _validate_type(raw, (_RawKIT,), 'raw') + stim_chs = [ + raw.info['ch_names'][pick] for pick in pick_types( + raw.info, stim=True, misc=True, ref_meg=False)] + _validate_type(stim_channel, str, 'stim_channel') + _check_option('stim_channel', stim_channel, stim_chs) + idx = raw.ch_names.index(stim_channel) + events_on = find_events( + raw, stim_channel=raw.ch_names[idx], output='onset', + verbose=False)[:, 0] + events_off = find_events( + raw, stim_channel=raw.ch_names[idx], output='offset', + verbose=False)[:, 0] + bad = False + if len(events_on) == 0 or len(events_off) == 0: + bad = True + else: + if events_on[-1] > events_off[-1]: + events_on = events_on[:-1] + if events_on.size != events_off.size or not \ + (events_on < events_off).all(): + bad = True + if bad: + raise RuntimeError( + f'Could not find appropriate cHPI intervals from {stim_channel}') + # use the midpoint for times + times = (events_on + events_off) / (2 * raw.info['sfreq']) + del events_on, events_off + # XXX remove first two rows. It is unknown currently if there is a way to + # determine from the con file the number of initial pulses that + # indicate the start of reading. The number is shown by opening the con + # file in MEG160, but I couldn't find the value in the .con file, so it + # may just always be 2... + times = times[2:] + n_coils = 5 # KIT always has 5 (hard-coded in reader) + header = raw._raw_extras[0]['dirs'][KIT.DIR_INDEX_CHPI_DATA] + dtype = np.dtype([('good', ' 0 else None + # grab codes indicating a coil is active + hpi_on = [coil['event_bits'][0] for coil in hpi_sub['hpi_coils']] + # not all HPI coils will actually be used + hpi_on = np.array([hpi_on[hc['number'] - 1] for hc in hpi_coils]) + # mask for coils that may be active + hpi_mask = np.array([event_bit != 0 for event_bit in hpi_on]) + hpi_on = hpi_on[hpi_mask] + hpi_freqs = hpi_freqs[hpi_mask] + else: + hpi_on = np.zeros(len(hpi_freqs)) + + return hpi_freqs, hpi_pick, hpi_on + + +@verbose +def _get_hpi_initial_fit(info, adjust=False, verbose=None): + """Get HPI fit locations from raw.""" + if info['hpi_results'] is None or len(info['hpi_results']) == 0: + raise RuntimeError('no initial cHPI head localization performed') + + hpi_result = info['hpi_results'][-1] + hpi_dig = sorted([d for d in info['dig'] + if d['kind'] == FIFF.FIFFV_POINT_HPI], + key=lambda x: x['ident']) # ascending (dig) order + if len(hpi_dig) == 0: # CTF data, probably + hpi_dig = sorted(hpi_result['dig_points'], key=lambda x: x['ident']) + if all(d['coord_frame'] in (FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_UNKNOWN) + for d in hpi_dig): + for dig in hpi_dig: + dig.update(r=apply_trans(info['dev_head_t'], dig['r']), + coord_frame=FIFF.FIFFV_COORD_HEAD) + + # zero-based indexing, dig->info + # CTF does not populate some entries so we use .get here + pos_order = hpi_result.get('order', np.arange(1, len(hpi_dig) + 1)) - 1 + used = hpi_result.get('used', np.arange(len(hpi_dig))) + dist_limit = hpi_result.get('dist_limit', 0.005) + good_limit = hpi_result.get('good_limit', 0.98) + goodness = hpi_result.get('goodness', np.ones(len(hpi_dig))) + + # this shouldn't happen, eventually we could add the transforms + # necessary to put it in head coords + if not all(d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig): + raise RuntimeError('cHPI coordinate frame incorrect') + # Give the user some info + logger.info('HPIFIT: %s coils digitized in order %s' + % (len(pos_order), ' '.join(str(o + 1) for o in pos_order))) + logger.debug('HPIFIT: %s coils 
accepted: %s' + % (len(used), ' '.join(str(h) for h in used))) + hpi_rrs = np.array([d['r'] for d in hpi_dig])[pos_order] + assert len(hpi_rrs) >= 3 + + # Fitting errors + hpi_rrs_fit = sorted([d for d in info['hpi_results'][-1]['dig_points']], + key=lambda x: x['ident']) + hpi_rrs_fit = np.array([d['r'] for d in hpi_rrs_fit]) + # hpi_result['dig_points'] are in FIFFV_COORD_UNKNOWN coords, but this + # is probably a misnomer because it should be FIFFV_COORD_DEVICE for this + # to work + assert hpi_result['coord_trans']['to'] == FIFF.FIFFV_COORD_HEAD + hpi_rrs_fit = apply_trans(hpi_result['coord_trans']['trans'], hpi_rrs_fit) + if 'moments' in hpi_result: + logger.debug('Hpi coil moments (%d %d):' + % hpi_result['moments'].shape[::-1]) + for moment in hpi_result['moments']: + logger.debug("%g %g %g" % tuple(moment)) + errors = np.linalg.norm(hpi_rrs - hpi_rrs_fit, axis=1) + logger.debug('HPIFIT errors: %s mm.' + % ', '.join('%0.1f' % (1000. * e) for e in errors)) + if errors.sum() < len(errors) * dist_limit: + logger.info('HPI consistency of isotrak and hpifit is OK.') + elif not adjust and (len(used) == len(hpi_dig)): + warn('HPI consistency of isotrak and hpifit is poor.') + else: + # adjust HPI coil locations using the hpifit transformation + for hi, (err, r_fit) in enumerate(zip(errors, hpi_rrs_fit)): + # transform to head frame + d = 1000 * err + if not adjust: + if err >= dist_limit: + warn('Discrepancy of HPI coil %d isotrak and hpifit is ' + '%.1f mm!' % (hi + 1, d)) + elif hi + 1 not in used: + if goodness[hi] >= good_limit: + logger.info('Note: HPI coil %d isotrak is adjusted by ' + '%.1f mm!' % (hi + 1, d)) + hpi_rrs[hi] = r_fit + else: + warn('Discrepancy of HPI coil %d isotrak and hpifit of ' + '%.1f mm was not adjusted!' % (hi + 1, d)) + logger.debug('HP fitting limits: err = %.1f mm, gval = %.3f.' + % (1000 * dist_limit, good_limit)) + + return hpi_rrs.astype(float) + + +def _magnetic_dipole_objective(x, B, B2, coils, whitener, too_close, + return_moment=False): + """Project data onto right eigenvectors of whitened forward.""" + fwd = _magnetic_dipole_field_vec(x[np.newaxis], coils, too_close) + out, u, s, one = _magnetic_dipole_delta(fwd, whitener, B, B2) + if return_moment: + one /= s + Q = np.dot(one, u.T) + out = (out, Q) + return out + + +@jit() +def _magnetic_dipole_delta(fwd, whitener, B, B2): + # Here we use .T to get whitener to Fortran order, which speeds things up + fwd = np.dot(fwd, whitener.T) + u, s, v = np.linalg.svd(fwd, full_matrices=False) + one = np.dot(v, B) + Bm2 = np.dot(one, one) + return B2 - Bm2, u, s, one + + +def _magnetic_dipole_delta_multi(whitened_fwd_svd, B, B2): + # Here we use .T to get whitener to Fortran order, which speeds things up + one = np.matmul(whitened_fwd_svd, B) + Bm2 = np.sum(one * one, axis=1) + return B2 - Bm2 + + +def _fit_magnetic_dipole(B_orig, x0, too_close, whitener, coils, guesses): + """Fit a single bit of data (x0 = pos).""" + from scipy.optimize import fmin_cobyla + B = np.dot(whitener, B_orig) + B2 = np.dot(B, B) + objective = partial(_magnetic_dipole_objective, B=B, B2=B2, + coils=coils, whitener=whitener, + too_close=too_close) + if guesses is not None: + res0 = objective(x0) + res = _magnetic_dipole_delta_multi( + guesses['whitened_fwd_svd'], B, B2) + assert res.shape == (guesses['rr'].shape[0],) + idx = np.argmin(res) + if res[idx] < res0: + x0 = guesses['rr'][idx] + x = fmin_cobyla(objective, x0, (), rhobeg=1e-3, rhoend=1e-5, disp=False) + gof, moment = objective(x, return_moment=True) + gof = 1. 
- gof / B2
+    return x, gof, moment
+
+
+@jit()
+def _chpi_objective(x, coil_dev_rrs, coil_head_rrs):
+    """Compute objective function."""
+    d = np.dot(coil_dev_rrs, quat_to_rot(x[:3]).T)
+    d += x[3:]
+    d -= coil_head_rrs
+    d *= d
+    return d.sum()
+
+
+def _fit_chpi_quat(coil_dev_rrs, coil_head_rrs):
+    """Fit rotation and translation (quaternion) parameters for cHPI coils."""
+    denom = np.linalg.norm(coil_head_rrs - np.mean(coil_head_rrs, axis=0))
+    denom *= denom
+    # We could try to solve it the analytic way:
+    # XXX someday we could choose to weight these points by their goodness
+    # of fit somehow.
+    quat = _fit_matched_points(coil_dev_rrs, coil_head_rrs)[0]
+    gof = 1. - _chpi_objective(quat, coil_dev_rrs, coil_head_rrs) / denom
+    return quat, gof
+
+
+def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, bias=True):
+    """Compute Device to Head transform allowing for permutations of points."""
+    id_quat = np.zeros(6)
+    best_order = None
+    best_g = -999
+    best_quat = id_quat
+    for this_order in itertools.permutations(np.arange(len(head_pnts))):
+        head_pnts_tmp = head_pnts[np.array(this_order)]
+        this_quat, g = _fit_chpi_quat(dev_pnts, head_pnts_tmp)
+        assert np.linalg.det(quat_to_rot(this_quat[:3])) > 0.9999
+        if bias:
+            # For symmetrical arrangements, flips can produce roughly
+            # equivalent g values. To avoid this, heavily penalize
+            # large rotations.
+            rotation = _angle_between_quats(this_quat[:3], np.zeros(3))
+            check_g = g * max(1. - rotation / np.pi, 0) ** 0.25
+        else:
+            check_g = g
+        if check_g > best_g:
+            out_g = g
+            best_g = check_g
+            best_order = np.array(this_order)
+            best_quat = this_quat
+
+    # Convert quaternion to transform
+    dev_head_t = _quat_to_affine(best_quat)
+    return dev_head_t, best_order, out_g
+
+
+@verbose
+def _setup_hpi_amplitude_fitting(info, t_window, remove_aliased=False,
+                                 ext_order=1, allow_empty=False, verbose=None):
+    """Generate HPI structure for HPI localization."""
+    # grab basic info.
+    on_missing = 'raise' if not allow_empty else 'ignore'
+    hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing=on_missing)
+
+    _validate_type(t_window, (str, 'numeric'), 't_window')
+    if info['line_freq'] is not None:
+        line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3.,
+                               info['line_freq'])
+    else:
+        line_freqs = np.zeros([0])
+    logger.info('Line interference frequencies: %s Hz'
+                % ' '.join(['%d' % lf for lf in line_freqs]))
+    # worry about resampled/filtered data.
+    # What to do e.g. if Raw has been resampled and some of our
+    # HPI freqs would now be aliased
+    highest = info.get('lowpass')
+    highest = info['sfreq'] / 2. if highest is None else highest
+    keepers = hpi_freqs <= highest
+    if remove_aliased:
+        hpi_freqs = hpi_freqs[keepers]
+        hpi_ons = hpi_ons[keepers]
+    elif not keepers.all():
+        raise RuntimeError('Found HPI frequencies %s above the lowpass '
+                           '(or Nyquist) frequency %0.1f'
+                           % (hpi_freqs[~keepers].tolist(), highest))
+    # calculate optimal window length.
+    if isinstance(t_window, str):
+        _check_option('t_window', t_window, ('auto',), extra='if a string')
+        if len(hpi_freqs):
+            all_freqs = np.concatenate((hpi_freqs, line_freqs))
+            delta_freqs = np.diff(np.unique(all_freqs))
+            t_window = max(5. / all_freqs.min(), 1.
/ delta_freqs.min()) + else: + t_window = 0.2 + t_window = float(t_window) + if t_window <= 0: + raise ValueError('t_window (%s) must be > 0' % (t_window,)) + logger.info('Using time window: %0.1f ms' % (1000 * t_window,)) + window_nsamp = np.rint(t_window * info['sfreq']).astype(int) + model = _setup_hpi_glm(hpi_freqs, line_freqs, info['sfreq'], window_nsamp) + inv_model = np.linalg.pinv(model) + inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs)) + proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order) + # include mag and grad picks separately, for SNR computations + mag_picks = _picks_to_idx(info, 'mag', allow_empty=True) + grad_picks = _picks_to_idx(info, 'grad', allow_empty=True) + # Set up magnetic dipole fits + hpi = dict( + meg_picks=meg_picks, mag_picks=mag_picks, grad_picks=grad_picks, + hpi_pick=hpi_pick, model=model, inv_model=inv_model, t_window=t_window, + inv_model_reord=inv_model_reord, on=hpi_ons, n_window=window_nsamp, + proj=proj, proj_op=proj_op, freqs=hpi_freqs, line_freqs=line_freqs) + return hpi + + +def _setup_hpi_glm(hpi_freqs, line_freqs, sfreq, window_nsamp): + """Initialize a general linear model for HPI amplitude estimation.""" + slope = np.linspace(-0.5, 0.5, window_nsamp)[:, np.newaxis] + radians_per_sec = 2 * np.pi * np.arange(window_nsamp, dtype=float) / sfreq + f_t = hpi_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] + l_t = line_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] + model = [np.sin(f_t), np.cos(f_t), # hpi freqs + np.sin(l_t), np.cos(l_t), # line freqs + slope, np.ones_like(slope)] # drift, DC + return np.hstack(model) + + +@jit() +def _reorder_inv_model(inv_model, n_freqs): + # Reorder for faster computation + idx = np.arange(2 * n_freqs).reshape(2, n_freqs).T.ravel() + return inv_model[idx] + + +def _setup_ext_proj(info, ext_order): + from scipy import linalg + meg_picks = pick_types(info, meg=True, eeg=False, exclude='bads') + info = pick_info(_simplify_info(info), meg_picks) # makes a copy + _, _, _, _, mag_or_fine = _get_mf_picks_fix_mags( + info, int_order=0, ext_order=ext_order, ignore_ref=True, + verbose='error') + mf_coils = _prep_mf_coils(info, verbose='error') + ext = _sss_basis( + dict(origin=(0., 0., 0.), int_order=0, ext_order=ext_order), + mf_coils).T + out_removes = _regularize_out(0, 1, mag_or_fine, []) + ext = ext[~np.in1d(np.arange(len(ext)), out_removes)] + ext = linalg.orth(ext.T).T + assert ext.shape[1] == len(meg_picks) + proj = Projection( + kind=FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD, desc='SSS', active=False, + data=dict(data=ext, ncol=info['nchan'], col_names=info['ch_names'], + nrow=len(ext))) + with info._unlock(): + info['projs'] = [proj] + proj_op, _ = setup_proj( + info, add_eeg_ref=False, activate=False, verbose=False) + assert proj_op.shape == (len(meg_picks),) * 2 + return proj, proj_op, meg_picks + + +def _time_prefix(fit_time): + """Format log messages.""" + return (' t=%0.3f:' % fit_time).ljust(17) + + +def _fit_chpi_amplitudes(raw, time_sl, hpi, snr=False): + """Fit amplitudes for each channel from each of the N cHPI sinusoids. + + Returns + ------- + sin_fit : ndarray, shape (n_freqs, n_channels) + The sin amplitudes matching each cHPI frequency. + Will be all nan if this time window should be skipped. + snr : ndarray, shape (n_freqs, 2) + Estimated SNR for this window, separately for mag and grad channels. 
+ """ + # No need to detrend the data because our model has a DC term + with use_log_level(False): + # loads good channels + this_data = raw[hpi['meg_picks'], time_sl][0] + + # which HPI coils to use + if hpi['hpi_pick'] is not None: + with use_log_level(False): + # loads hpi_stim channel + chpi_data = raw[hpi['hpi_pick'], time_sl][0] + + ons = (np.round(chpi_data).astype(np.int64) & + hpi['on'][:, np.newaxis]).astype(bool) + n_on = ons.all(axis=-1).sum(axis=0) + if not (n_on >= 3).all(): + return None + if snr: + return _fast_fit_snr( + this_data, len(hpi['freqs']), hpi['model'], hpi['inv_model'], + hpi['mag_picks'], hpi['grad_picks']) + return _fast_fit(this_data, hpi['proj_op'], len(hpi['freqs']), + hpi['model'], hpi['inv_model_reord']) + + +@jit() +def _fast_fit(this_data, proj, n_freqs, model, inv_model_reord): + # first or last window + if this_data.shape[1] != model.shape[0]: + model = model[:this_data.shape[1]] + inv_model_reord = _reorder_inv_model(np.linalg.pinv(model), n_freqs) + proj_data = proj @ this_data + X = inv_model_reord @ proj_data.T + + sin_fit = np.zeros((n_freqs, X.shape[1])) + for fi in range(n_freqs): + # use SVD across all sensors to estimate the sinusoid phase + u, s, vt = np.linalg.svd(X[2 * fi:2 * fi + 2], full_matrices=False) + # the first component holds the predominant phase direction + # (so ignore the second, effectively doing s[1] = 0): + sin_fit[fi] = vt[0] * s[0] + return sin_fit + + +@jit() +def _fast_fit_snr(this_data, n_freqs, model, inv_model, mag_picks, grad_picks): + # first or last window + if this_data.shape[1] != model.shape[0]: + model = model[:this_data.shape[1]] + inv_model = np.linalg.pinv(model) + coefs = np.ascontiguousarray(inv_model) @ np.ascontiguousarray(this_data.T) + # average sin & cos terms (special property of sinusoids: power=A²/2) + hpi_power = (coefs[:n_freqs] ** 2 + coefs[n_freqs:(2 * n_freqs)] ** 2) / 2 + resid = this_data - np.ascontiguousarray((model @ coefs).T) + # can't use np.var(..., axis=1) with Numba, so do it manually: + resid_mean = np.atleast_2d(resid.sum(axis=1) / resid.shape[1]).T + squared_devs = np.abs(resid - resid_mean) ** 2 + resid_var = squared_devs.sum(axis=1) / squared_devs.shape[1] + # output array will be (n_freqs, 3 * n_ch_types). The 3 columns for each + # channel type are the SNR, the mean cHPI power and the residual variance + # (which gets tiled to shape (n_freqs,) because it's a scalar). 
+ snrs = np.empty((n_freqs, 0)) + # average power & compute residual variance separately for each ch type + for _picks in (mag_picks, grad_picks): + if len(_picks): + avg_power = hpi_power[:, _picks].sum(axis=1) / len(_picks) + avg_resid = resid_var[_picks].mean() * np.ones(n_freqs) + snr = 10 * np.log10(avg_power / avg_resid) + snrs = np.hstack((snrs, np.stack((snr, avg_power, avg_resid), 1))) + return snrs + + +def _check_chpi_param(chpi_, name): + if name == 'chpi_locs': + want_ndims = dict(times=1, rrs=3, moments=3, gofs=2) + extra_keys = list() + else: + assert name == 'chpi_amplitudes' + want_ndims = dict(times=1, slopes=3) + extra_keys = ['proj'] + + _validate_type(chpi_, dict, name) + want_keys = list(want_ndims.keys()) + extra_keys + if set(want_keys).symmetric_difference(chpi_): + raise ValueError('%s must be a dict with entries %s, got %s' + % (name, want_keys, sorted(chpi_.keys()))) + n_times = None + for key, want_ndim in want_ndims.items(): + key_str = '%s[%s]' % (name, key) + val = chpi_[key] + _validate_type(val, np.ndarray, key_str) + shape = val.shape + if val.ndim != want_ndim: + raise ValueError('%s must have ndim=%d, got %d' + % (key_str, want_ndim, val.ndim)) + if n_times is None and key != 'proj': + n_times = shape[0] + if n_times != shape[0] and key != 'proj': + raise ValueError('%s have inconsistent number of time ' + 'points in %s' % (name, want_keys)) + if name == 'chpi_locs': + n_coils = chpi_['rrs'].shape[1] + for key in ('gofs', 'moments'): + val = chpi_[key] + if val.shape[1] != n_coils: + raise ValueError('chpi_locs["rrs"] had values for %d coils but' + ' chpi_locs["%s"] had values for %d coils' + % (n_coils, key, val.shape[1])) + for key in ('rrs', 'moments'): + val = chpi_[key] + if val.shape[2] != 3: + raise ValueError('chpi_locs["%s"].shape[2] must be 3, got ' + 'shape %s' % (key, shape)) + else: + assert name == 'chpi_amplitudes' + slopes, proj = chpi_['slopes'], chpi_['proj'] + _validate_type(proj, Projection, 'chpi_amplitudes["proj"]') + n_ch = len(proj['data']['col_names']) + if slopes.shape[0] != n_times or slopes.shape[2] != n_ch: + raise ValueError('slopes must have shape[0]==%d and shape[2]==%d,' + ' got shape %s' % (n_times, n_ch, slopes.shape)) + + +@verbose +def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, + adjust_dig=False, verbose=None): + """Compute time-varying head positions. + + Parameters + ---------- + %(info_not_none)s + %(chpi_locs)s + Typically obtained by :func:`~mne.chpi.compute_chpi_locs` or + :func:`~mne.chpi.extract_chpi_locs_ctf`. + dist_limit : float + Minimum distance (m) to accept for coil position fitting. + gof_limit : float + Minimum goodness of fit to accept for each coil. + %(adjust_dig_chpi)s + %(verbose)s + + Returns + ------- + quats : ndarray, shape (n_pos, 10) + The ``[t, q1, q2, q3, x, y, z, gof, err, v]`` for each fit. + + See Also + -------- + compute_chpi_locs + extract_chpi_locs_ctf + read_head_pos + write_head_pos + + Notes + ----- + .. 
versionadded:: 0.20 + """ + _check_chpi_param(chpi_locs, 'chpi_locs') + _validate_type(info, Info, 'info') + hpi_dig_head_rrs = _get_hpi_initial_fit(info, adjust=adjust_dig, + verbose='error') + n_coils = len(hpi_dig_head_rrs) + coil_dev_rrs = apply_trans(invert_transform(info['dev_head_t']), + hpi_dig_head_rrs) + dev_head_t = info['dev_head_t']['trans'] + pos_0 = dev_head_t[:3, 3] + last = dict(quat_fit_time=-0.1, coil_dev_rrs=coil_dev_rrs, + quat=np.concatenate([rot_to_quat(dev_head_t[:3, :3]), + dev_head_t[:3, 3]])) + del coil_dev_rrs + quats = [] + for fit_time, this_coil_dev_rrs, g_coils in zip( + *(chpi_locs[key] for key in ('times', 'rrs', 'gofs'))): + use_idx = np.where(g_coils >= gof_limit)[0] + + # + # 1. Check number of good ones + # + if len(use_idx) < 3: + msg = (_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' + 'determine the transformation (%s GOF)!' + % (len(use_idx), n_coils, + ', '.join('%0.2f' % g for g in g_coils))) + warn(msg) + continue + + # + # 2. Fit the head translation and rotation params (minimize error + # between coil positions and the head coil digitization + # positions) iteratively using different sets of coils. + # + this_quat, g, use_idx = _fit_chpi_quat_subset( + this_coil_dev_rrs, hpi_dig_head_rrs, use_idx) + + # + # 3. Stop if < 3 good + # + + # Convert quaterion to transform + this_dev_head_t = _quat_to_affine(this_quat) + est_coil_head_rrs = apply_trans(this_dev_head_t, this_coil_dev_rrs) + errs = np.linalg.norm(hpi_dig_head_rrs - est_coil_head_rrs, axis=1) + n_good = ((g_coils >= gof_limit) & (errs < dist_limit)).sum() + if n_good < 3: + warn(_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' + 'determine the transformation (%s mm/GOF)!' + % (n_good, n_coils, + ', '.join(f'{1000 * e:0.1f}::{g:0.2f}' + for e, g in zip(errs, g_coils)))) + continue + + # velocities, in device coords, of HPI coils + dt = fit_time - last['quat_fit_time'] + vs = tuple(1000. 
* np.linalg.norm(last['coil_dev_rrs'] - + this_coil_dev_rrs, axis=1) / dt) + logger.info(_time_prefix(fit_time) + + ('%s/%s good HPI fits, movements [mm/s] = ' + + ' / '.join(['% 8.1f'] * n_coils)) + % ((n_good, n_coils) + vs)) + + # Log results + # MaxFilter averages over a 200 ms window for display, but we don't + for ii in range(n_coils): + if ii in use_idx: + start, end = ' ', '/' + else: + start, end = '(', ')' + log_str = (' ' + start + + '{0:6.1f} {1:6.1f} {2:6.1f} / ' + + '{3:6.1f} {4:6.1f} {5:6.1f} / ' + + 'g = {6:0.3f} err = {7:4.1f} ' + + end) + vals = np.concatenate((1000 * hpi_dig_head_rrs[ii], + 1000 * est_coil_head_rrs[ii], + [g_coils[ii], 1000 * errs[ii]])) + if len(use_idx) >= 3: + if ii <= 2: + log_str += '{8:6.3f} {9:6.3f} {10:6.3f}' + vals = np.concatenate( + (vals, this_dev_head_t[ii, :3])) + elif ii == 3: + log_str += '{8:6.1f} {9:6.1f} {10:6.1f}' + vals = np.concatenate( + (vals, this_dev_head_t[:3, 3] * 1000.)) + logger.debug(log_str.format(*vals)) + + # resulting errors in head coil positions + d = np.linalg.norm(last['quat'][3:] - this_quat[3:]) # m + r = _angle_between_quats(last['quat'][:3], this_quat[:3]) / dt + v = d / dt # m/sec + d = 100 * np.linalg.norm(this_quat[3:] - pos_0) # dis from 1st + logger.debug(' #t = %0.3f, #e = %0.2f cm, #g = %0.3f, ' + '#v = %0.2f cm/s, #r = %0.2f rad/s, #d = %0.2f cm' + % (fit_time, 100 * errs.mean(), g, 100 * v, r, d)) + logger.debug(' #t = %0.3f, #q = %s ' + % (fit_time, ' '.join(map('{:8.5f}'.format, this_quat)))) + + quats.append(np.concatenate(([fit_time], this_quat, [g], + [errs[use_idx].mean()], [v]))) + last['quat_fit_time'] = fit_time + last['quat'] = this_quat + last['coil_dev_rrs'] = this_coil_dev_rrs + quats = np.array(quats, np.float64) + quats = np.zeros((0, 10)) if quats.size == 0 else quats + return quats + + +def _fit_chpi_quat_subset(coil_dev_rrs, coil_head_rrs, use_idx): + quat, g = _fit_chpi_quat(coil_dev_rrs[use_idx], coil_head_rrs[use_idx]) + out_idx = use_idx.copy() + if len(use_idx) > 3: # try dropping one (recursively) + for di in range(len(use_idx)): + this_use_idx = list(use_idx[:di]) + list(use_idx[di + 1:]) + this_quat, this_g, this_use_idx = _fit_chpi_quat_subset( + coil_dev_rrs, coil_head_rrs, this_use_idx) + if this_g > g: + quat, g, out_idx = this_quat, this_g, this_use_idx + return quat, g, np.array(out_idx, int) + + +@jit() +def _unit_quat_constraint(x): + """Constrain our 3 quaternion rot params (ignoring w) to have norm <= 1.""" + return 1 - (x * x).sum() + + +@verbose +def compute_chpi_snr(raw, t_step_min=0.01, t_window='auto', ext_order=1, + tmin=0, tmax=None, verbose=None): + """Compute time-varying estimates of cHPI SNR. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. + t_step_min : float + Minimum time step to use. + %(t_window_chpi_t)s + %(ext_order_chpi)s + %(tmin_raw)s + %(tmax_raw)s + %(verbose)s + + Returns + ------- + chpi_snrs : dict + The time-varying cHPI SNR estimates, with entries "times", "freqs", + "snr_mag", "power_mag", and "resid_mag" (and/or "snr_grad", + "power_grad", and "resid_grad", depending on which channel types are + present in ``raw``). + + See Also + -------- + mne.chpi.compute_chpi_locs, mne.chpi.compute_chpi_amplitudes + + Notes + ----- + .. 
versionadded:: 0.24 + """ + return _compute_chpi_amp_or_snr(raw, t_step_min, t_window, ext_order, + tmin, tmax, verbose, snr=True) + + +@verbose +def compute_chpi_amplitudes(raw, t_step_min=0.01, t_window='auto', + ext_order=1, tmin=0, tmax=None, verbose=None): + """Compute time-varying cHPI amplitudes. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. + t_step_min : float + Minimum time step to use. + %(t_window_chpi_t)s + %(ext_order_chpi)s + %(tmin_raw)s + %(tmax_raw)s + %(verbose)s + + Returns + ------- + %(chpi_amplitudes)s + + See Also + -------- + mne.chpi.compute_chpi_locs, mne.chpi.compute_chpi_snr + + Notes + ----- + This function will: + + 1. Get HPI frequencies, HPI status channel, HPI status bits, + and digitization order using ``_setup_hpi_amplitude_fitting``. + 2. Window data using ``t_window`` (half before and half after ``t``) and + ``t_step_min``. + 3. Use a linear model (DC + linear slope + sin + cos terms) to fit + sinusoidal amplitudes to MEG channels. + It uses SVD to determine the phase/amplitude of the sinusoids. + + In "auto" mode, ``t_window`` will be set to the longer of: + + 1. Five cycles of the lowest HPI or line frequency. + Ensures that the frequency estimate is stable. + 2. The reciprocal of the smallest difference between HPI and line freqs. + Ensures that neighboring frequencies can be disambiguated. + + The output is meant to be used with :func:`~mne.chpi.compute_chpi_locs`. + + .. versionadded:: 0.20 + """ + return _compute_chpi_amp_or_snr(raw, t_step_min, t_window, ext_order, + tmin, tmax, verbose) + + +def _compute_chpi_amp_or_snr(raw, t_step_min=0.01, t_window='auto', + ext_order=1, tmin=0, tmax=None, verbose=None, + snr=False): + """Compute cHPI amplitude or SNR. + + See compute_chpi_amplitudes for parameter descriptions. One additional + boolean parameter ``snr`` signals whether to return SNR instead of + amplitude. + """ + hpi = _setup_hpi_amplitude_fitting(raw.info, t_window, ext_order=ext_order) + tmin, tmax = raw._tmin_tmax_to_start_stop(tmin, tmax) + tmin = tmin / raw.info['sfreq'] + tmax = tmax / raw.info['sfreq'] + need_win = hpi['t_window'] / 2. + fit_idxs = raw.time_as_index(np.arange( + tmin + need_win, tmax, t_step_min), use_rounding=True) + logger.info('Fitting %d HPI coil locations at up to %s time points ' + '(%0.1f sec duration)' + % (len(hpi['freqs']), len(fit_idxs), tmax - tmin)) + del tmin, tmax + sin_fits = dict() + sin_fits['proj'] = hpi['proj'] + sin_fits['times'] = np.round(fit_idxs + raw.first_samp - + hpi['n_window'] / 2.) / raw.info['sfreq'] + n_times = len(sin_fits['times']) + n_freqs = len(hpi['freqs']) + n_chans = len(sin_fits['proj']['data']['col_names']) + if snr: + del sin_fits['proj'] + sin_fits['freqs'] = hpi['freqs'] + ch_types = raw.get_channel_types() + grad_offset = 3 if 'mag' in ch_types else 0 + for ch_type in ('mag', 'grad'): + if ch_type in ch_types: + for key in ('snr', 'power', 'resid'): + cols = 1 if key == 'resid' else n_freqs + sin_fits[f'{ch_type}_{key}'] = np.empty((n_times, cols)) + else: + sin_fits['slopes'] = np.empty((n_times, n_freqs, n_chans)) + message = f"cHPI {'SNRs' if snr else 'amplitudes'}" + for mi, midpt in enumerate(ProgressBar(fit_idxs, mesg=message)): + # + # 0. determine samples to fit. + # + time_sl = midpt - hpi['n_window'] // 2 + time_sl = slice(max(time_sl, 0), + min(time_sl + hpi['n_window'], len(raw.times))) + + # + # 1. 
Fit amplitudes for each channel from each of the N sinusoids + # + amps_or_snrs = _fit_chpi_amplitudes(raw, time_sl, hpi, snr) + if snr: + # unpack the SNR estimates. mag & grad are returned in one array + # (because of Numba) so take care with which column is which. + # note that mean residual is a scalar (same for all HPI freqs) but + # is returned as a (tiled) vector (again, because Numba) so that's + # why below we take amps_or_snrs[0, 2] instead of [:, 2] + ch_types = raw.get_channel_types() + if 'mag' in ch_types: + sin_fits['mag_snr'][mi] = amps_or_snrs[:, 0] # SNR + sin_fits['mag_power'][mi] = amps_or_snrs[:, 1] # mean power + sin_fits['mag_resid'][mi] = amps_or_snrs[0, 2] # mean resid + if 'grad' in ch_types: + sin_fits['grad_snr'][mi] = amps_or_snrs[:, grad_offset] + sin_fits['grad_power'][mi] = amps_or_snrs[:, grad_offset + 1] + sin_fits['grad_resid'][mi] = amps_or_snrs[0, grad_offset + 2] + else: + sin_fits['slopes'][mi] = amps_or_snrs + return sin_fits + + +@verbose +def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', + adjust_dig=False, verbose=None): + """Compute the location of each cHPI coil over time. + + Parameters + ---------- + %(info_not_none)s + %(chpi_amplitudes)s + Typically obtained by :func:`mne.chpi.compute_chpi_amplitudes`. + t_step_max : float + Maximum time step to use. + too_close : str + How to handle HPI positions too close to the sensors, + can be 'raise' (default), 'warning', or 'info'. + %(adjust_dig_chpi)s + %(verbose)s + + Returns + ------- + %(chpi_locs)s + + See Also + -------- + compute_chpi_amplitudes + compute_head_pos + read_head_pos + write_head_pos + extract_chpi_locs_ctf + + Notes + ----- + This function is designed to take the output of + :func:`mne.chpi.compute_chpi_amplitudes` and: + + 1. Get HPI coil locations (as digitized in ``info['dig']``) in head coords. + 2. If the amplitudes are 98%% correlated with last position + (and Δt < t_step_max), skip fitting. + 3. Fit magnetic dipoles using the amplitudes for each coil frequency. + + The number of fitted points ``n_pos`` will depend on the velocity of head + movements as well as ``t_step_max`` (and ``t_step_min`` from + :func:`mne.chpi.compute_chpi_amplitudes`). + + ..
versionadded:: 0.20 + """ + # Set up magnetic dipole fits + _check_option('too_close', too_close, ['raise', 'warning', 'info']) + _check_chpi_param(chpi_amplitudes, 'chpi_amplitudes') + _validate_type(info, Info, 'info') + sin_fits = chpi_amplitudes # use the old name below + del chpi_amplitudes + proj = sin_fits['proj'] + meg_picks = pick_channels( + info['ch_names'], proj['data']['col_names'], ordered=True) + info = pick_info(info, meg_picks) # makes a copy + with info._unlock(): + info['projs'] = [proj] + del meg_picks, proj + meg_coils = _concatenate_coils(_create_meg_coils(info['chs'], 'accurate')) + + # Set up external model for interference suppression + cov = make_ad_hoc_cov(info, verbose=False) + whitener, _ = compute_whitener(cov, info, verbose=False) + + # Make some location guesses (1 cm grid) + R = np.linalg.norm(meg_coils[0], axis=1).min() + guesses = _make_guesses(dict(R=R, r0=np.zeros(3)), 0.01, 0., 0.005, + verbose=False)[0]['rr'] + logger.info('Computing %d HPI location guesses (1 cm grid in a %0.1f cm ' + 'sphere)' % (len(guesses), R * 100)) + fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) + fwd = np.dot(fwd, whitener.T) + fwd.shape = (guesses.shape[0], 3, -1) + fwd = np.linalg.svd(fwd, full_matrices=False)[2] + guesses = dict(rr=guesses, whitened_fwd_svd=fwd) + del fwd, R + + iter_ = list(zip(sin_fits['times'], sin_fits['slopes'])) + chpi_locs = dict(times=[], rrs=[], gofs=[], moments=[]) + # setup last iteration structure + hpi_dig_dev_rrs = apply_trans( + invert_transform(info['dev_head_t'])['trans'], + _get_hpi_initial_fit(info, adjust=adjust_dig)) + last = dict(sin_fit=None, coil_fit_time=sin_fits['times'][0] - 1, + coil_dev_rrs=hpi_dig_dev_rrs) + n_hpi = len(hpi_dig_dev_rrs) + del hpi_dig_dev_rrs + for fit_time, sin_fit in ProgressBar(iter_, mesg='cHPI locations '): + # skip this window if bad + if not np.isfinite(sin_fit).all(): + continue + + # check if data has sufficiently changed + if last['sin_fit'] is not None: # first iteration + corrs = np.array( + [np.corrcoef(s, l)[0, 1] + for s, l in zip(sin_fit, last['sin_fit'])]) + corrs *= corrs + # check to see if we need to continue + if fit_time - last['coil_fit_time'] <= t_step_max - 1e-7 and \ + (corrs > 0.98).sum() >= 3: + # don't need to refit data + continue + + # update 'last' sin_fit *before* inplace sign mult + last['sin_fit'] = sin_fit.copy() + + # + # 2. 
Fit magnetic dipole for each coil to obtain coil positions + # in device coordinates + # + coil_fits = [_fit_magnetic_dipole(f, x0, too_close, whitener, + meg_coils, guesses) + for f, x0 in zip(sin_fit, last['coil_dev_rrs'])] + rrs, gofs, moments = zip(*coil_fits) + chpi_locs['times'].append(fit_time) + chpi_locs['rrs'].append(rrs) + chpi_locs['gofs'].append(gofs) + chpi_locs['moments'].append(moments) + last['coil_fit_time'] = fit_time + last['coil_dev_rrs'] = rrs + n_times = len(chpi_locs['times']) + shapes = dict( + times=(n_times,), + rrs=(n_times, n_hpi, 3), + gofs=(n_times, n_hpi), + moments=(n_times, n_hpi, 3), + ) + for key, val in chpi_locs.items(): + chpi_locs[key] = np.array(val, float).reshape(shapes[key]) + return chpi_locs + + +def _chpi_locs_to_times_dig(chpi_locs): + """Reformat chpi_locs as list of dig (dict).""" + dig = list() + for rrs, gofs in zip(*(chpi_locs[key] for key in ('rrs', 'gofs'))): + dig.append([{'r': rr, 'ident': idx, 'gof': gof, + 'kind': FIFF.FIFFV_POINT_HPI, + 'coord_frame': FIFF.FIFFV_COORD_DEVICE} + for idx, (rr, gof) in enumerate(zip(rrs, gofs), 1)]) + return chpi_locs['times'], dig + + +@verbose +def filter_chpi(raw, include_line=True, t_step=0.01, t_window='auto', + ext_order=1, allow_line_only=False, verbose=None): + """Remove cHPI and line noise from data. + + .. note:: This function will only work properly if cHPI was on + during the recording. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. Must be preloaded. Operates in-place. + include_line : bool + If True, also filter line noise. + t_step : float + Time step to use for estimation, default is 0.01 (10 ms). + %(t_window_chpi_t)s + %(ext_order_chpi)s + allow_line_only : bool + If True, allow filtering line noise only. The default is False, + which only allows the function to run when cHPI information is present. + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw data. + + Notes + ----- + cHPI signals are in general not stationary, because head movements act + like amplitude modulators on cHPI signals. Thus it is recommended to + use this procedure, which uses an iterative fitting method, to + remove cHPI signals, as opposed to notch filtering. + + ..
versionadded:: 0.12 + """ + _validate_type(raw, BaseRaw, 'raw') + if not raw.preload: + raise RuntimeError('raw data must be preloaded') + t_step = float(t_step) + if t_step <= 0: + raise ValueError('t_step (%s) must be > 0' % (t_step,)) + n_step = int(np.ceil(t_step * raw.info['sfreq'])) + if include_line and raw.info['line_freq'] is None: + raise RuntimeError('include_line=True but raw.info["line_freq"] is ' + 'None, consider setting it to the line frequency') + hpi = _setup_hpi_amplitude_fitting( + raw.info, t_window, remove_aliased=True, ext_order=ext_order, + allow_empty=allow_line_only, verbose=False) + + fit_idxs = np.arange(0, len(raw.times) + hpi['n_window'] // 2, n_step) + n_freqs = len(hpi['freqs']) + n_remove = 2 * n_freqs + meg_picks = pick_types(raw.info, meg=True, exclude=()) # filter all chs + n_times = len(raw.times) + + msg = 'Removing %s cHPI' % n_freqs + if include_line: + n_remove += 2 * len(hpi['line_freqs']) + msg += ' and %s line harmonic' % len(hpi['line_freqs']) + msg += ' frequencies from %s MEG channels' % len(meg_picks) + + recon = np.dot(hpi['model'][:, :n_remove], hpi['inv_model'][:n_remove]).T + logger.info(msg) + chunks = list() # the chunks to subtract + last_endpt = 0 + pb = ProgressBar(fit_idxs, mesg='Filtering') + for ii, midpt in enumerate(pb): + left_edge = midpt - hpi['n_window'] // 2 + time_sl = slice(max(left_edge, 0), + min(left_edge + hpi['n_window'], len(raw.times))) + this_len = time_sl.stop - time_sl.start + if this_len == hpi['n_window']: + this_recon = recon + else: # first or last window + model = hpi['model'][:this_len] + inv_model = np.linalg.pinv(model) + this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T + this_data = raw._data[meg_picks, time_sl] + subt_pt = min(midpt + n_step, n_times) + if last_endpt != subt_pt: + fit_left_edge = left_edge - time_sl.start + hpi['n_window'] // 2 + fit_sl = slice(fit_left_edge, + fit_left_edge + (subt_pt - last_endpt)) + chunks.append((subt_pt, np.dot(this_data, this_recon[:, fit_sl]))) + last_endpt = subt_pt + + # Consume (trailing) chunks that are now safe to remove because + # our windows will no longer touch them + if ii < len(fit_idxs) - 1: + next_left_edge = fit_idxs[ii + 1] - hpi['n_window'] // 2 + else: + next_left_edge = np.inf + while len(chunks) > 0 and chunks[0][0] <= next_left_edge: + right_edge, chunk = chunks.pop(0) + raw._data[meg_picks, + right_edge - chunk.shape[1]:right_edge] -= chunk + return raw + + +def _compute_good_distances(hpi_coil_dists, new_pos, dist_limit=0.005): + """Compute good coils based on distances.""" + from scipy.spatial.distance import cdist + these_dists = cdist(new_pos, new_pos) + these_dists = np.abs(hpi_coil_dists - these_dists) + # there is probably a better algorithm for finding the bad ones... + good = False + use_mask = np.ones(len(hpi_coil_dists), bool) + while not good: + d = these_dists[use_mask][:, use_mask] + d_bad = (d > dist_limit) + good = not d_bad.any() + if not good: + if use_mask.sum() == 2: + use_mask[:] = False + break # failure + # exclude next worst point + badness = (d * d_bad).sum(axis=0) + exclude_coils = np.where(use_mask)[0][np.argmax(badness)] + use_mask[exclude_coils] = False + return use_mask, these_dists diff --git a/python/libs/mne/commands/__init__.py b/python/libs/mne/commands/__init__.py new file mode 100644 index 0000000..62eef81 --- /dev/null +++ b/python/libs/mne/commands/__init__.py @@ -0,0 +1,3 @@ +"""Command-line utilities.""" + +from . 
import utils diff --git a/python/libs/mne/commands/mne_anonymize.py b/python/libs/mne/commands/mne_anonymize.py new file mode 100644 index 0000000..3e5e096 --- /dev/null +++ b/python/libs/mne/commands/mne_anonymize.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# Authors : Dominik Krzeminski +# Luke Bloy + +"""Anonymize raw fif file. + +To anonymize other file types call :func:`mne.io.anonymize_info` on their +:class:`~mne.Info` objects and resave to disk. + +Examples +-------- +.. code-block:: console + + $ mne anonymize -f sample_audvis_raw.fif + +""" + +import sys +import mne +import os.path as op + +ANONYMIZE_FILE_PREFIX = 'anon' + + +def mne_anonymize(fif_fname, out_fname, keep_his, daysback, overwrite): + """Call *anonymize_info* on fif file and save. + + Parameters + ---------- + fif_fname : str + Raw fif File + out_fname : str | None + Output file name + relative paths are saved relative to parent dir of fif_fname + None will save to parent dir of fif_fname with default prefix + daysback : int | None + Number of days to subtract from all dates. + If None will default to move date of service to Jan 1 2000 + keep_his : bool + If True his_id of subject_info will NOT be overwritten. + defaults to False + overwrite : bool + Overwrite output file if it already exists + """ + raw = mne.io.read_raw_fif(fif_fname, allow_maxshield=True) + raw.anonymize(daysback=daysback, keep_his=keep_his) + + # determine out_fname + dir_name = op.split(fif_fname)[0] + if out_fname is None: + fif_bname = op.basename(fif_fname) + out_fname = op.join(dir_name, + "{}-{}".format(ANONYMIZE_FILE_PREFIX, fif_bname)) + elif not op.isabs(out_fname): + out_fname = op.join(dir_name, out_fname) + + raw.save(out_fname, overwrite=overwrite) + + +def run(): + """Run *mne_anonymize* command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-f", "--file", type="string", dest="file", + help="Name of file to modify.", metavar="FILE", + default=None) + parser.add_option("-o", "--output", type="string", dest="output", + help="Name of anonymized output file." + "`anon-` prefix is added to FILE if not given", + metavar="OUTFILE", default=None) + parser.add_option("--keep_his", dest="keep_his", action="store_true", + help="Keep the HIS tag (not advised)", default=False) + parser.add_option("-d", "--daysback", type="int", dest="daysback", + help="Move dates in file backwards by this many days.", + metavar="N_DAYS", default=None) + parser.add_option("--overwrite", dest="overwrite", action="store_true", + help="Overwrite input file.", default=False) + + options, args = parser.parse_args() + if options.file is None: + parser.print_help() + sys.exit(1) + + fname = options.file + out_fname = options.output + keep_his = options.keep_his + daysback = options.daysback + overwrite = options.overwrite + if not fname.endswith('.fif'): + raise ValueError('%s does not seem to be a .fif file.' % fname) + + mne_anonymize(fname, out_fname, keep_his, daysback, overwrite) + + +is_main = (__name__ == '__main__') +if is_main: + run() diff --git a/python/libs/mne/commands/mne_browse_raw.py b/python/libs/mne/commands/mne_browse_raw.py new file mode 100644 index 0000000..79f9665 --- /dev/null +++ b/python/libs/mne/commands/mne_browse_raw.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python +r"""Browse raw data. + +This uses :func:`mne.io.read_raw` so it supports the same formats +(without keyword arguments). + +Examples +-------- +.. 
code-block:: console + + $ mne browse_raw sample_audvis_raw.fif \ + --proj sample_audvis_ecg-proj.fif \ + --eve sample_audvis_raw-eve.fif +""" + +# Authors : Eric Larson, PhD + +import sys +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.viz import _RAW_CLIP_DEF + + parser = get_optparser(__file__, usage='usage: %prog raw [options]') + + parser.add_option("--raw", dest="raw_in", + help="Input raw FIF file (can also be specified " + "directly as an argument without the --raw prefix)", + metavar="FILE") + parser.add_option("--proj", dest="proj_in", + help="Projector file", metavar="FILE", + default='') + parser.add_option("--projoff", dest="proj_off", + help="Disable all projectors", + default=False, action="store_true") + parser.add_option("--eve", dest="eve_in", + help="Events file", metavar="FILE", + default='') + parser.add_option("-d", "--duration", dest="duration", type="float", + help="Time window for plotting (sec)", + default=10.0) + parser.add_option("-t", "--start", dest="start", type="float", + help="Initial start time for plotting", + default=0.0) + parser.add_option("-n", "--n_channels", dest="n_channels", type="int", + help="Number of channels to plot at a time", + default=20) + parser.add_option("-o", "--order", dest="group_by", + help="Order to use for grouping during plotting " + "('type' or 'original')", default='type') + parser.add_option("-p", "--preload", dest="preload", + help="Preload raw data (for faster navigation)", + default=False, action="store_true") + parser.add_option("-s", "--show_options", dest="show_options", + help="Show projection options dialog", + default=False) + parser.add_option("--allowmaxshield", dest="maxshield", + help="Allow loading MaxShield processed data", + action="store_true") + parser.add_option("--highpass", dest="highpass", type="float", + help="Display high-pass filter corner frequency", + default=-1) + parser.add_option("--lowpass", dest="lowpass", type="float", + help="Display low-pass filter corner frequency", + default=-1) + parser.add_option("--filtorder", dest="filtorder", type="int", + help="Display filtering IIR order (or 0 to use FIR)", + default=4) + parser.add_option("--clipping", dest="clipping", + help="Enable trace clipping mode, either 'clamp' or " + "'transparent'", default=_RAW_CLIP_DEF) + parser.add_option("--filterchpi", dest="filterchpi", + help="Enable filtering cHPI signals.", default=None, + action="store_true") + _add_verbose_flag(parser) + options, args = parser.parse_args() + + if len(args): + raw_in = args[0] + else: + raw_in = options.raw_in + duration = options.duration + start = options.start + n_channels = options.n_channels + group_by = options.group_by + preload = options.preload + show_options = options.show_options + proj_in = options.proj_in + proj_off = options.proj_off + eve_in = options.eve_in + maxshield = options.maxshield + highpass = options.highpass + lowpass = options.lowpass + filtorder = options.filtorder + clipping = options.clipping + if isinstance(clipping, str): + if clipping.lower() == 'none': + clipping = None + else: + try: + clipping = float(clipping) # allow float and convert it + except ValueError: + pass + filterchpi = options.filterchpi + verbose = options.verbose + + if raw_in is None: + parser.print_help() + sys.exit(1) + + kwargs = dict(preload=preload) + if maxshield: + kwargs.update(allow_maxshield='yes') + raw = mne.io.read_raw(raw_in, **kwargs) + if len(proj_in) > 0: + projs =
mne.read_proj(proj_in) + raw.info['projs'] = projs + if len(eve_in) > 0: + events = mne.read_events(eve_in) + else: + events = None + + if filterchpi: + if not preload: + raise RuntimeError( + 'Raw data must be preloaded for chpi, use --preload') + raw = mne.chpi.filter_chpi(raw) + + highpass = None if highpass < 0 or filtorder < 0 else highpass + lowpass = None if lowpass < 0 or filtorder < 0 else lowpass + raw.plot(duration=duration, start=start, n_channels=n_channels, + group_by=group_by, show_options=show_options, events=events, + highpass=highpass, lowpass=lowpass, filtorder=filtorder, + clipping=clipping, proj=not proj_off, verbose=verbose, + show=True, block=True) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_bti2fiff.py b/python/libs/mne/commands/mne_bti2fiff.py new file mode 100644 index 0000000..db3c37f --- /dev/null +++ b/python/libs/mne/commands/mne_bti2fiff.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +r"""Import BTi / 4D MagnesWH3600 data to fif file. + +Notes +----- +1. Currently direct inclusion of reference channel weights + is not supported. Please use \'mne_create_comp_data\' to include + the weights or use the low level functions from this module to + include them by yourself. +2. The informed guess for the 4D name is E31 for the ECG channel and + E63, E64 for the EOG channels. Please check and adjust if those channels + are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't + appear in the channel names of the raw object. + +Examples +-------- +.. code-block:: console + + $ mne bti2fiff --pdf C,rfDC -o my_raw.fif + +""" + +# Authors: Denis A. Engemann +# Martin Luessi +# Alexandre Gramfort +# Matti Hämäläinen +# Yuval Harpaz +# +# simplified bsd-3 license + + +import sys + +import mne +from mne.io import read_raw_bti + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option('-p', '--pdf', dest='pdf_fname', + help='Input data file name', metavar='FILE') + parser.add_option('-c', '--config', dest='config_fname', + help='Input config file name', metavar='FILE', + default='config') + parser.add_option('--head_shape', dest='head_shape_fname', + help='Headshape file name', metavar='FILE', + default='hs_file') + parser.add_option('-o', '--out_fname', dest='out_fname', + help='Name of the resulting fiff file', + default='as_data_fname') + parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float', + help='Compensatory rotation about Neuromag x axis, deg', + default=2.0) + parser.add_option('-T', '--translation', dest='translation', type='str', + help='Default translation, meter', + default=(0.00, 0.02, 0.11)) + parser.add_option('--ecg_ch', dest='ecg_ch', type='str', + help='4D ECG channel name', + default='E31') + parser.add_option('--eog_ch', dest='eog_ch', type='str', + help='4D EOG channel names', + default='E63,E64') + + options, args = parser.parse_args() + + pdf_fname = options.pdf_fname + if pdf_fname is None: + parser.print_help() + sys.exit(1) + + config_fname = options.config_fname + head_shape_fname = options.head_shape_fname + out_fname = options.out_fname + rotation_x = options.rotation_x + translation = options.translation + ecg_ch = options.ecg_ch + eog_ch = options.eog_ch.split(',') + + if out_fname == 'as_data_fname': + out_fname = pdf_fname + '_raw.fif' + + raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname, + head_shape_fname=head_shape_fname, + rotation_x=rotation_x, translation=translation, 
ecg_ch=ecg_ch, eog_ch=eog_ch) + + raw.save(out_fname) + raw.close() + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_clean_eog_ecg.py b/python/libs/mne/commands/mne_clean_eog_ecg.py new file mode 100644 index 0000000..d326252 --- /dev/null +++ b/python/libs/mne/commands/mne_clean_eog_ecg.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +"""Clean a raw file from EOG and ECG artifacts with PCA (ie SSP). + +Examples +-------- +.. code-block:: console + + $ mne clean_eog_ecg -i in_raw.fif -o clean_raw.fif -e -c + +""" +# Authors : Dr Engr. Sheraz Khan, P.Eng, Ph.D. +# Engr. Nandita Shetty, MS. +# Alexandre Gramfort, Ph.D. + + +import sys + +import mne + + +def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True, + ecg_proj_fname=None, eog_proj_fname=None, + ecg_event_fname=None, eog_event_fname=None, in_path='.', + quiet=False): + """Clean ECG from raw fif file. + + Parameters + ---------- + in_fif_fname : str + Raw fif File + eog_event_fname : str + name of EOG event file required. + eog : bool + Reject or not EOG artifacts. + ecg : bool + Reject or not ECG artifacts. + ecg_event_fname : str + name of ECG event file required. + in_path : str + Path where all the files are. + """ + if not eog and not ecg: + raise Exception("EOG and ECG cannot be both disabled") + + # Reading fif File + raw_in = mne.io.read_raw_fif(in_fif_fname) + + if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'): + prefix = in_fif_fname[:-8] + else: + prefix = in_fif_fname[:-4] + + if out_fif_fname is None: + out_fif_fname = prefix + '_clean_ecg_eog_raw.fif' + if ecg_proj_fname is None: + ecg_proj_fname = prefix + '_ecg-proj.fif' + if eog_proj_fname is None: + eog_proj_fname = prefix + '_eog-proj.fif' + if ecg_event_fname is None: + ecg_event_fname = prefix + '_ecg-eve.fif' + if eog_event_fname is None: + eog_event_fname = prefix + '_eog-eve.fif' + + print('Implementing ECG and EOG artifact rejection on data') + + kwargs = dict() if quiet else dict(stdout=None, stderr=None) + if ecg: + ecg_events, _, _ = mne.preprocessing.find_ecg_events( + raw_in, reject_by_annotation=True) + print("Writing ECG events in %s" % ecg_event_fname) + mne.write_events(ecg_event_fname, ecg_events) + print('Computing ECG projector') + command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, + '--events', ecg_event_fname, '--makeproj', + '--projtmin', '-0.08', '--projtmax', '0.08', + '--saveprojtag', '_ecg-proj', '--projnmag', '2', + '--projngrad', '1', '--projevent', '999', '--highpass', '5', + '--lowpass', '35', '--projmagrej', '4000', + '--projgradrej', '3000') + mne.utils.run_subprocess(command, **kwargs) + if eog: + eog_events = mne.preprocessing.find_eog_events(raw_in) + print("Writing EOG events in %s" % eog_event_fname) + mne.write_events(eog_event_fname, eog_events) + print('Computing EOG projector') + command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, + '--events', eog_event_fname, '--makeproj', + '--projtmin', '-0.15', '--projtmax', '0.15', + '--saveprojtag', '_eog-proj', '--projnmag', '2', + '--projngrad', '2', '--projevent', '998', '--lowpass', '35', + '--projmagrej', '4000', '--projgradrej', '3000') + mne.utils.run_subprocess(command, **kwargs) + + if out_fif_fname is not None: + # Applying the ECG EOG projector + print('Applying ECG EOG projector') + command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, + '--proj', in_fif_fname, '--projoff', '--save', + out_fif_fname, '--filteroff', + '--proj', ecg_proj_fname, '--proj', 
eog_proj_fname) + mne.utils.run_subprocess(command, **kwargs) + print('Done removing artifacts.') + print("Cleaned raw data saved in: %s" % out_fif_fname) + print('IMPORTANT : Please eye-ball the data !!') + else: + print('Projection not applied to raw data.') + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-i", "--in", dest="raw_in", + help="Input raw FIF file", metavar="FILE") + parser.add_option("-o", "--out", dest="raw_out", + help="Output raw FIF file", metavar="FILE", + default=None) + parser.add_option("-e", "--no-eog", dest="eog", action="store_false", + help="Remove EOG", default=True) + parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false", + help="Remove ECG", default=True) + parser.add_option("-q", "--quiet", dest="quiet", action="store_true", + help="Suppress mne_process_raw output", default=False) + + options, args = parser.parse_args() + + if options.raw_in is None: + parser.print_help() + sys.exit(1) + + raw_in = options.raw_in + raw_out = options.raw_out + eog = options.eog + ecg = options.ecg + quiet = options.quiet + + clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_compare_fiff.py b/python/libs/mne/commands/mne_compare_fiff.py new file mode 100644 index 0000000..b616a3e --- /dev/null +++ b/python/libs/mne/commands/mne_compare_fiff.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +"""Compare FIFF files. + +Examples +-------- +.. code-block:: console + + $ mne compare_fiff test_raw.fif test_raw_sss.fif + +""" + +# Authors : Eric Larson, PhD + +import sys +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser( + __file__, usage='mne compare_fiff <file_a> <file_b>') + options, args = parser.parse_args() + if len(args) != 2: + parser.print_help() + sys.exit(1) + mne.viz.compare_fiff(args[0], args[1]) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_compute_proj_ecg.py b/python/libs/mne/commands/mne_compute_proj_ecg.py new file mode 100644 index 0000000..c42798b --- /dev/null +++ b/python/libs/mne/commands/mne_compute_proj_ecg.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python +r"""Compute SSP/PCA projections for ECG artifacts. + +Examples +-------- +.. code-block:: console + + $ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" -a \ + --l-freq 1 --h-freq 100 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 + +""" +# Authors : Alexandre Gramfort, Ph.D. +# Martin Luessi, Ph.D.
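+# For scripted use, roughly the same projections can be computed with the
+# Python API that this command wraps (a minimal sketch, assuming placeholder
+# file names; see the call to compute_proj_ecg in run() below):
+#
+#     import mne
+#     raw = mne.io.read_raw_fif('my_raw.fif', preload=True)
+#     projs, events = mne.preprocessing.compute_proj_ecg(
+#         raw, n_grad=2, n_mag=2, n_eeg=2, average=False)
+#     mne.write_proj('my_ecg-proj.fif', projs)  # same outputs the CLI writes
+#     mne.write_events('my_ecg-eve.fif', events)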
+ +import os +import sys +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-i", "--in", dest="raw_in", + help="Input raw FIF file", metavar="FILE") + parser.add_option("--tmin", dest="tmin", type="float", + help="Time before event in seconds", + default=-0.2) + parser.add_option("--tmax", dest="tmax", type="float", + help="Time after event in seconds", + default=0.4) + parser.add_option("-g", "--n-grad", dest="n_grad", type="int", + help="Number of SSP vectors for gradiometers", + default=2) + parser.add_option("-m", "--n-mag", dest="n_mag", type="int", + help="Number of SSP vectors for magnetometers", + default=2) + parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int", + help="Number of SSP vectors for EEG", + default=2) + parser.add_option("--l-freq", dest="l_freq", type="float", + help="Filter low cut-off frequency in Hz", + default=1) + parser.add_option("--h-freq", dest="h_freq", type="float", + help="Filter high cut-off frequency in Hz", + default=100) + parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float", + help="Filter low cut-off frequency in Hz used " + "for ECG event detection", + default=5) + parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float", + help="Filter high cut-off frequency in Hz used " + "for ECG event detection", + default=35) + parser.add_option("-p", "--preload", dest="preload", + help="Temporary file used during computation " + "(to save memory)", + default=True) + parser.add_option("-a", "--average", dest="average", action="store_true", + help="Compute SSP after averaging", + default=False) + parser.add_option("--proj", dest="proj", + help="Use SSP projections from a fif file.", + default=None) + parser.add_option("--filtersize", dest="filter_length", type="int", + help="Number of taps to use for filtering", + default=2048) + parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int", + help="Number of jobs to run in parallel", + default=1) + parser.add_option("-c", "--channel", dest="ch_name", + help="Channel to use for ECG detection " + "(Required if no ECG found)", + default=None) + parser.add_option("--rej-grad", dest="rej_grad", type="float", + help="Gradiometers rejection parameter " + "in fT/cm (peak to peak amplitude)", + default=2000) + parser.add_option("--rej-mag", dest="rej_mag", type="float", + help="Magnetometers rejection parameter " + "in fT (peak to peak amplitude)", + default=3000) + parser.add_option("--rej-eeg", dest="rej_eeg", type="float", + help="EEG rejection parameter in µV " + "(peak to peak amplitude)", + default=50) + parser.add_option("--rej-eog", dest="rej_eog", type="float", + help="EOG rejection parameter in µV " + "(peak to peak amplitude)", + default=250) + parser.add_option("--avg-ref", dest="avg_ref", action="store_true", + help="Add EEG average reference proj", + default=False) + parser.add_option("--no-proj", dest="no_proj", action="store_true", + help="Exclude the SSP projectors currently " + "in the fiff file", + default=False) + parser.add_option("--bad", dest="bad_fname", + help="Text file containing bad channels list " + "(one per line)", + default=None) + parser.add_option("--event-id", dest="event_id", type="int", + help="ID to use for events", + default=999) + parser.add_option("--event-raw", dest="raw_event_fname", + help="raw file to use for event detection", + default=None) + parser.add_option("--tstart", dest="tstart", type="float", + help="Start artifact detection after 
tstart seconds", + default=0.) + parser.add_option("--qrsthr", dest="qrs_threshold", type="string", + help="QRS detection threshold. Between 0 and 1. Can " + "also be 'auto' for automatic selection", + default='auto') + + options, args = parser.parse_args() + + raw_in = options.raw_in + + if raw_in is None: + parser.print_help() + sys.exit(1) + + tmin = options.tmin + tmax = options.tmax + n_grad = options.n_grad + n_mag = options.n_mag + n_eeg = options.n_eeg + l_freq = options.l_freq + h_freq = options.h_freq + ecg_l_freq = options.ecg_l_freq + ecg_h_freq = options.ecg_h_freq + average = options.average + preload = options.preload + filter_length = options.filter_length + n_jobs = options.n_jobs + ch_name = options.ch_name + reject = dict(grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog)) + avg_ref = options.avg_ref + no_proj = options.no_proj + bad_fname = options.bad_fname + event_id = options.event_id + proj_fname = options.proj + raw_event_fname = options.raw_event_fname + tstart = options.tstart + qrs_threshold = options.qrs_threshold + if qrs_threshold != 'auto': + try: + qrs_threshold = float(qrs_threshold) + except ValueError: + raise ValueError('qrsthr must be "auto" or a float') + + if bad_fname is not None: + with open(bad_fname, 'r') as fid: + bads = [w.rstrip() for w in fid.readlines()] + print('Bad channels read : %s' % bads) + else: + bads = [] + + if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'): + prefix = raw_in[:-8] + else: + prefix = raw_in[:-4] + + ecg_event_fname = prefix + '_ecg-eve.fif' + + if average: + ecg_proj_fname = prefix + '_ecg_avg-proj.fif' + else: + ecg_proj_fname = prefix + '_ecg-proj.fif' + + raw = mne.io.read_raw_fif(raw_in, preload=preload) + + if raw_event_fname is not None: + raw_event = mne.io.read_raw_fif(raw_event_fname) + else: + raw_event = raw + + flat = None + projs, events = mne.preprocessing.compute_proj_ecg( + raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg, l_freq, h_freq, + average, filter_length, n_jobs, ch_name, reject, flat, bads, avg_ref, + no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart, qrs_threshold, + copy=False) + + raw.close() + + if raw_event_fname is not None: + raw_event.close() + + if proj_fname is not None: + print('Including SSP projections from : %s' % proj_fname) + # append the ecg projs, so they are last in the list + projs = mne.read_proj(proj_fname) + projs + + if isinstance(preload, str) and os.path.exists(preload): + os.remove(preload) + + print("Writing ECG projections in %s" % ecg_proj_fname) + mne.write_proj(ecg_proj_fname, projs) + + print("Writing ECG events in %s" % ecg_event_fname) + mne.write_events(ecg_event_fname, events) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_compute_proj_eog.py b/python/libs/mne/commands/mne_compute_proj_eog.py new file mode 100644 index 0000000..3494ffa --- /dev/null +++ b/python/libs/mne/commands/mne_compute_proj_eog.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +r"""Compute SSP/PCA projections for EOG artifacts. + +Examples +-------- +.. code-block:: console + + $ mne compute_proj_eog -i sample_audvis_raw.fif -a \ + --l-freq 1 --h-freq 35 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 + +or + +.. 
code-block:: console + + $ mne compute_proj_eog -i sample_audvis_raw.fif -a \ + --l-freq 1 --h-freq 35 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 \ + --proj sample_audvis_ecg-proj.fif + +to exclude ECG artifacts from projection computation. +""" +# Authors : Alexandre Gramfort, Ph.D. +# Martin Luessi, Ph.D. + +import os +import sys +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-i", "--in", dest="raw_in", + help="Input raw FIF file", metavar="FILE") + parser.add_option("--tmin", dest="tmin", type="float", + help="Time before event in seconds", default=-0.2) + parser.add_option("--tmax", dest="tmax", type="float", + help="Time after event in seconds", default=0.2) + parser.add_option("-g", "--n-grad", dest="n_grad", type="int", + help="Number of SSP vectors for gradiometers", + default=2) + parser.add_option("-m", "--n-mag", dest="n_mag", type="int", + help="Number of SSP vectors for magnetometers", + default=2) + parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int", + help="Number of SSP vectors for EEG", default=2) + parser.add_option("--l-freq", dest="l_freq", type="float", + help="Filter low cut-off frequency in Hz", + default=1) + parser.add_option("--h-freq", dest="h_freq", type="float", + help="Filter high cut-off frequency in Hz", + default=35) + parser.add_option("--eog-l-freq", dest="eog_l_freq", type="float", + help="Filter low cut-off frequency in Hz used for " + "EOG event detection", default=1) + parser.add_option("--eog-h-freq", dest="eog_h_freq", type="float", + help="Filter high cut-off frequency in Hz used for " + "EOG event detection", default=10) + parser.add_option("-p", "--preload", dest="preload", + help="Temporary file used during computation (to " + "save memory)", default=True) + parser.add_option("-a", "--average", dest="average", action="store_true", + help="Compute SSP after averaging", + default=False) + parser.add_option("--proj", dest="proj", + help="Use SSP projections from a fif file.", + default=None) + parser.add_option("--filtersize", dest="filter_length", type="int", + help="Number of taps to use for filtering", + default=2048) + parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int", + help="Number of jobs to run in parallel", default=1) + parser.add_option("--rej-grad", dest="rej_grad", type="float", + help="Gradiometers rejection parameter in fT/cm (peak " + "to peak amplitude)", default=2000) + parser.add_option("--rej-mag", dest="rej_mag", type="float", + help="Magnetometers rejection parameter in fT (peak to " + "peak amplitude)", default=3000) + parser.add_option("--rej-eeg", dest="rej_eeg", type="float", + help="EEG rejection parameter in µV (peak to peak " + "amplitude)", default=50) + parser.add_option("--rej-eog", dest="rej_eog", type="float", + help="EOG rejection parameter in µV (peak to peak " + "amplitude)", default=1e9) + parser.add_option("--avg-ref", dest="avg_ref", action="store_true", + help="Add EEG average reference proj", + default=False) + parser.add_option("--no-proj", dest="no_proj", action="store_true", + help="Exclude the SSP projectors currently in the " + "fiff file", default=False) + parser.add_option("--bad", dest="bad_fname", + help="Text file containing bad channels list " + "(one per line)", default=None) + parser.add_option("--event-id", dest="event_id", type="int", + help="ID to use for events", default=998) + parser.add_option("--event-raw", dest="raw_event_fname", + help="raw file to use 
for event detection", default=None) + parser.add_option("--tstart", dest="tstart", type="float", + help="Start artifact detection after tstart seconds", + default=0.) + parser.add_option("-c", "--channel", dest="ch_name", type="string", + help="Custom EOG channel(s), comma separated", + default=None) + + options, args = parser.parse_args() + + raw_in = options.raw_in + + if raw_in is None: + parser.print_help() + sys.exit(1) + + tmin = options.tmin + tmax = options.tmax + n_grad = options.n_grad + n_mag = options.n_mag + n_eeg = options.n_eeg + l_freq = options.l_freq + h_freq = options.h_freq + eog_l_freq = options.eog_l_freq + eog_h_freq = options.eog_h_freq + average = options.average + preload = options.preload + filter_length = options.filter_length + n_jobs = options.n_jobs + reject = dict(grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog)) + avg_ref = options.avg_ref + no_proj = options.no_proj + bad_fname = options.bad_fname + event_id = options.event_id + proj_fname = options.proj + raw_event_fname = options.raw_event_fname + tstart = options.tstart + ch_name = options.ch_name + + if bad_fname is not None: + with open(bad_fname, 'r') as fid: + bads = [w.rstrip() for w in fid.readlines()] + print('Bad channels read : %s' % bads) + else: + bads = [] + + if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'): + prefix = raw_in[:-8] + else: + prefix = raw_in[:-4] + + eog_event_fname = prefix + '_eog-eve.fif' + + if average: + eog_proj_fname = prefix + '_eog_avg-proj.fif' + else: + eog_proj_fname = prefix + '_eog-proj.fif' + + raw = mne.io.read_raw_fif(raw_in, preload=preload) + + if raw_event_fname is not None: + raw_event = mne.io.read_raw_fif(raw_event_fname) + else: + raw_event = raw + + flat = None + projs, events = mne.preprocessing.compute_proj_eog( + raw=raw, raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad, + n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq, + average=average, filter_length=filter_length, + n_jobs=n_jobs, reject=reject, flat=flat, bads=bads, + avg_ref=avg_ref, no_proj=no_proj, event_id=event_id, + eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq, + tstart=tstart, ch_name=ch_name, copy=False) + + raw.close() + + if raw_event_fname is not None: + raw_event.close() + + if proj_fname is not None: + print('Including SSP projections from : %s' % proj_fname) + # append the eog projs, so they are last in the list + projs = mne.read_proj(proj_fname) + projs + + if isinstance(preload, str) and os.path.exists(preload): + os.remove(preload) + + print("Writing EOG projections in %s" % eog_proj_fname) + mne.write_proj(eog_proj_fname, projs) + + print("Writing EOG events in %s" % eog_event_fname) + mne.write_events(eog_event_fname, events) + + +is_main = (__name__ == '__main__') +if is_main: + run() diff --git a/python/libs/mne/commands/mne_coreg.py b/python/libs/mne/commands/mne_coreg.py new file mode 100644 index 0000000..c561889 --- /dev/null +++ b/python/libs/mne/commands/mne_coreg.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# Authors: Christian Brodbeck + +"""Open the coregistration GUI. + +Examples +-------- +.. 
code-block:: console + + $ mne coreg + +""" + +import os.path as op + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + default=None, help="Subjects directory") + parser.add_option("-s", "--subject", dest="subject", default=None, + help="Subject name") + parser.add_option("-f", "--fiff", dest="inst", default=None, + help="FIFF file with digitizer data for coregistration") + parser.add_option("-t", "--tabbed", dest="tabbed", action="store_true", + default=False, help="Option for small screens: Combine " + "the data source panel and the coregistration panel " + "into a single panel with tabs.") + parser.add_option("--no-guess-mri", dest="guess_mri_subject", + action='store_false', default=None, + help="Prevent the GUI from automatically guessing and " + "changing the MRI subject when a new head shape source " + "file is selected.") + parser.add_option("--head-opacity", type=float, default=None, + dest="head_opacity", + help="The opacity of the head surface, in the range " + "[0, 1].") + parser.add_option("--high-res-head", + action='store_true', default=False, dest="high_res_head", + help="Use a high-resolution head surface.") + parser.add_option("--low-res-head", + action='store_true', default=False, dest="low_res_head", + help="Use a low-resolution head surface.") + parser.add_option('--trans', dest='trans', default=None, + help='Head<->MRI transform FIF file ("-trans.fif")') + parser.add_option('--interaction', + type=str, default=None, dest='interaction', + help='Interaction style to use, can be "trackball" or ' + '"terrain".') + parser.add_option('--scale', + type=float, default=None, dest='scale', + help='Scale factor for the scene.') + parser.add_option('--simple-rendering', action='store_false', + dest='advanced_rendering', + help='Use simplified OpenGL rendering') + _add_verbose_flag(parser) + + options, args = parser.parse_args() + + if options.low_res_head: + if options.high_res_head: + raise ValueError("Can't specify --high-res-head and " + "--low-res-head at the same time.") + head_high_res = False + elif options.high_res_head: + head_high_res = True + else: + head_high_res = None + + # expanduser allows ~ for --subjects-dir + subjects_dir = options.subjects_dir + if subjects_dir is not None: + subjects_dir = op.expanduser(subjects_dir) + trans = options.trans + if trans is not None: + trans = op.expanduser(trans) + import faulthandler + faulthandler.enable() + mne.gui.coregistration( + options.tabbed, inst=options.inst, subject=options.subject, + subjects_dir=subjects_dir, + guess_mri_subject=options.guess_mri_subject, + head_opacity=options.head_opacity, head_high_res=head_high_res, + trans=trans, scrollable=True, + interaction=options.interaction, + scale=options.scale, + advanced_rendering=options.advanced_rendering, + verbose=options.verbose) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_flash_bem.py b/python/libs/mne/commands/mne_flash_bem.py new file mode 100644 index 0000000..58cd77d --- /dev/null +++ b/python/libs/mne/commands/mne_flash_bem.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +"""Create 3-layer BEM model from Flash MRI images. + +Examples +-------- +.. code-block:: console + + $ mne flash_bem --subject=sample + +Notes +----- +This program assumes that FreeSurfer and MNE are installed and +sourced properly. 
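+For scripted pipelines, the same steps are exposed through the Python helpers
+this command calls (a minimal sketch; ``sample`` is a placeholder subject
+name):
+
+.. code-block:: python
+
+    from mne.bem import convert_flash_mris, make_flash_bem
+
+    # convert the 5- and 30-degree flip-angle series to mgz volumes
+    convert_flash_mris(subject='sample', flash30=True, convert=True)
+    # extract the inner skull, outer skull, and outer skin surfaces
+    make_flash_bem(subject='sample', overwrite=False, show=True)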
+ +This function extracts the BEM surfaces (outer skull, inner skull, and +outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30 +degrees. The multiecho FLASH data are provided in DICOM format. +This function assumes that the FreeSurfer segmentation of the subject +has been completed. In particular, the T1.mgz and brain.mgz MRI volumes +should be, as usual, in the subject's mri directory. + +Before running this script do the following: +(unless the --noconvert option is specified) + +1. Copy all of your FLASH images in a single directory <source> and + create a directory <dest> to hold the output of mne_organize_dicom +2. cd to <dest> and run + $ mne_organize_dicom <source> + to create an appropriate directory structure +3. Create symbolic links to make flash05 and flash30 point to the + appropriate series: + $ ln -s <FLASH 5 series dir> flash05 + $ ln -s <FLASH 30 series dir> flash30 + Some partition formats (e.g. FAT32) do not support symbolic links. + In this case, copy the file to the appropriate series: + $ cp <FLASH 5 series dir> flash05 + $ cp <FLASH 30 series dir> flash30 +4. cd to the directory where flash05 and flash30 links are +5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately +6. Run this script +""" +# Authors: Lorenzo De Santis + +import mne +from mne.bem import convert_flash_mris, make_flash_bem + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-s", "--subject", dest="subject", + help="Subject name", default=None) + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + help="Subjects directory", default=None) + parser.add_option("-3", "--noflash30", dest="noflash30", + action="store_true", default=False, + help=("Skip the 30-degree flip angle data"),) + parser.add_option("-n", "--noconvert", dest="noconvert", + action="store_true", default=False, + help=("Assume that the Flash MRI images have already " + "been converted to mgz files")) + parser.add_option("-u", "--unwarp", dest="unwarp", + action="store_true", default=False, + help=("Run grad_unwarp with -unwarp option on " + "each of the converted data sets")) + parser.add_option("-o", "--overwrite", dest="overwrite", + action="store_true", default=False, + help="Write over existing .surf files in bem folder") + parser.add_option("-v", "--view", dest="show", action="store_true", + help="Show BEM model in 3D for visual inspection", + default=False) + parser.add_option("--copy", dest="copy", + help="Use copies instead of symlinks for surfaces", + action="store_true") + parser.add_option("-p", "--flash-path", dest="flash_path", + default=None, + help="The directory containing flash05.mgz and " + "flash30.mgz files (defaults to " + "$SUBJECTS_DIR/$SUBJECT/mri/flash/parameter_maps)") + + options, args = parser.parse_args() + + subject = options.subject + subjects_dir = options.subjects_dir + flash30 = not options.noflash30 + convert = not options.noconvert + unwarp = options.unwarp + overwrite = options.overwrite + show = options.show + flash_path = options.flash_path + copy = options.copy + + if options.subject is None: + parser.print_help() + raise RuntimeError('The subject argument must be set') + + convert_flash_mris(subject=subject, subjects_dir=subjects_dir, + flash30=flash30, convert=convert, unwarp=unwarp, + verbose=True) + make_flash_bem(subject=subject, subjects_dir=subjects_dir, + overwrite=overwrite, show=show, flash_path=flash_path, + copy=copy, verbose=True) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_freeview_bem_surfaces.py 
b/python/libs/mne/commands/mne_freeview_bem_surfaces.py new file mode 100644 index 0000000..fefb0c5 --- /dev/null +++ b/python/libs/mne/commands/mne_freeview_bem_surfaces.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +"""View the 3-layer BEM model using Freeview. + +Examples +-------- +.. code-block:: console + + $ mne freeview_bem_surfaces -s sample + +""" +# Authors: Alexandre Gramfort + +import sys +import os +import os.path as op + +import mne +from mne.utils import run_subprocess, get_subjects_dir + + +def freeview_bem_surfaces(subject, subjects_dir, method): + """View the 3-layer BEM model with Freeview. + + Parameters + ---------- + subject : string + Subject name + subjects_dir : string + Directory containing subjects data (Freesurfer SUBJECTS_DIR) + method : string + Can be 'flash' or 'watershed'. + """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + + if subject is None: + raise ValueError("subject argument is None.") + + subject_dir = op.join(subjects_dir, subject) + + if not op.isdir(subject_dir): + raise ValueError("Wrong path: '{}'. Check subjects-dir or " + "subject argument.".format(subject_dir)) + + env = os.environ.copy() + env['SUBJECT'] = subject + env['SUBJECTS_DIR'] = subjects_dir + + if 'FREESURFER_HOME' not in env: + raise RuntimeError('The FreeSurfer environment needs to be set up.') + + mri_dir = op.join(subject_dir, 'mri') + bem_dir = op.join(subject_dir, 'bem') + mri = op.join(mri_dir, 'T1.mgz') + + if method == 'watershed': + bem_dir = op.join(bem_dir, 'watershed') + outer_skin = op.join(bem_dir, '%s_outer_skin_surface' % subject) + outer_skull = op.join(bem_dir, '%s_outer_skull_surface' % subject) + inner_skull = op.join(bem_dir, '%s_inner_skull_surface' % subject) + else: + if method == 'flash': + bem_dir = op.join(bem_dir, 'flash') + outer_skin = op.join(bem_dir, 'outer_skin.surf') + outer_skull = op.join(bem_dir, 'outer_skull.surf') + inner_skull = op.join(bem_dir, 'inner_skull.surf') + + # put together the command + cmd = ['freeview'] + cmd += ["--volume", mri] + cmd += ["--surface", "%s:color=red:edgecolor=red" % inner_skull] + cmd += ["--surface", "%s:color=yellow:edgecolor=yellow" % outer_skull] + cmd += ["--surface", + "%s:color=255,170,127:edgecolor=255,170,127" % outer_skin] + + run_subprocess(cmd, env=env, stdout=sys.stdout) + print("[done]") + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + subject = os.environ.get('SUBJECT') + subjects_dir = get_subjects_dir() + + parser.add_option("-s", "--subject", dest="subject", + help="Subject name", default=subject) + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + help="Subjects directory", default=subjects_dir) + parser.add_option("-m", "--method", dest="method", + help=("Method used to generate the BEM model. " + "Can be flash or watershed.")) + + options, args = parser.parse_args() + + subject = options.subject + subjects_dir = options.subjects_dir + method = options.method + + freeview_bem_surfaces(subject, subjects_dir, method) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_kit2fiff.py b/python/libs/mne/commands/mne_kit2fiff.py new file mode 100644 index 0000000..b7c11f7 --- /dev/null +++ b/python/libs/mne/commands/mne_kit2fiff.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# Authors: Teon Brooks + +"""Import KIT / NYU data to fif file. + +Examples +-------- +..
code-block:: console + + $ mne kit2fiff --input input.sqd --output output.fif + +Use without arguments to invoke GUI: + +.. code-block:: console + + $ mne kit2fiff + +""" + +import sys + +import mne +from mne.io import read_raw_kit + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option('--input', dest='input_fname', + help='Input data file name', metavar='filename') + parser.add_option('--mrk', dest='mrk_fname', + help='MEG Marker file name', metavar='filename') + parser.add_option('--elp', dest='elp_fname', + help='Headshape points file name', metavar='filename') + parser.add_option('--hsp', dest='hsp_fname', + help='Headshape file name', metavar='filename') + parser.add_option('--stim', dest='stim', + help='Colon Separated Stimulus Trigger Channels', + metavar='chs') + parser.add_option('--slope', dest='slope', help='Slope direction', + metavar='slope') + parser.add_option('--stimthresh', dest='stimthresh', default=1, + help='Threshold value for trigger channels', + metavar='value') + parser.add_option('--output', dest='out_fname', + help='Name of the resulting fiff file', + metavar='filename') + parser.add_option('--debug', dest='debug', action='store_true', + default=False, + help='Set logging level for terminal output to debug') + + options, args = parser.parse_args() + + if options.debug: + mne.set_log_level('debug') + + input_fname = options.input_fname + if input_fname is None: + try: + from mne_kit_gui import kit2fiff # noqa + except ImportError: + raise ImportError( + 'The mne_kit_gui package is required, install it using ' + 'conda or pip') from None + kit2fiff() + sys.exit(0) + + hsp_fname = options.hsp_fname + elp_fname = options.elp_fname + mrk_fname = options.mrk_fname + stim = options.stim + slope = options.slope + stimthresh = options.stimthresh + out_fname = options.out_fname + + if isinstance(stim, str): + stim = map(int, stim.split(':')) + + raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname, + hsp=hsp_fname, stim=stim, slope=slope, + stimthresh=stimthresh) + + raw.save(out_fname) + raw.close() + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_make_scalp_surfaces.py b/python/libs/mne/commands/mne_make_scalp_surfaces.py new file mode 100644 index 0000000..1e0e4cb --- /dev/null +++ b/python/libs/mne/commands/mne_make_scalp_surfaces.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Authors: Denis A. Engemann +# Alexandre Gramfort +# Matti Hämäläinen +# +# simplified bsd-3 license + +"""Create high-resolution head surfaces for coordinate alignment. + +Examples +-------- +..
code-block:: console + + $ mne make_scalp_surfaces --overwrite --subject sample + +""" +import os +import sys + +import mne +from mne.bem import make_scalp_surfaces + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + subjects_dir = mne.get_config('SUBJECTS_DIR') + + parser.add_option('-o', '--overwrite', dest='overwrite', + action='store_true', + help='Overwrite previously computed surface') + parser.add_option('-s', '--subject', dest='subject', + help='The name of the subject', type='str') + parser.add_option('-f', '--force', dest='force', action='store_true', + help='Force creation of the surface even if it has ' + 'some topological defects.') + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + help="Subjects directory", default=subjects_dir) + parser.add_option("-n", "--no-decimate", dest="no_decimate", + help="Disable medium and sparse decimations " + "(dense only)", action='store_true') + _add_verbose_flag(parser) + options, args = parser.parse_args() + + subject = vars(options).get('subject', os.getenv('SUBJECT')) + subjects_dir = options.subjects_dir + if subject is None or subjects_dir is None: + parser.print_help() + sys.exit(1) + make_scalp_surfaces( + subject=subject, + subjects_dir=subjects_dir, + force=options.force, + overwrite=options.overwrite, + no_decimate=options.no_decimate, + verbose=options.verbose) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_maxfilter.py b/python/libs/mne/commands/mne_maxfilter.py new file mode 100644 index 0000000..70e4d6c --- /dev/null +++ b/python/libs/mne/commands/mne_maxfilter.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +"""Apply MaxFilter. + +Examples +-------- +.. code-block:: console + + $ mne maxfilter -i sample_audvis_raw.fif --st + +This will apply MaxFilter with the MaxSt extension. The origin used +by MaxFilter is computed by mne-python by fitting a sphere to the +headshape points. +""" + +# Authors : Martin Luessi + +import sys +import os +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-i", "--in", dest="in_fname", + help="Input raw FIF file", metavar="FILE") + parser.add_option("-o", dest="out_fname", + help="Output FIF file (if not set, suffix '_sss' will " + "be used)", metavar="FILE", default=None) + parser.add_option("--origin", dest="origin", + help="Head origin in mm, or a filename to read the " + "origin from. 
If not set it will be estimated from " + "headshape points", default=None) + parser.add_option("--origin-out", dest="origin_out", + help="Filename to use for computed origin", default=None) + parser.add_option("--frame", dest="frame", type="string", + help="Coordinate frame for head center ('device' or " + "'head')", default="device") + parser.add_option("--bad", dest="bad", type="string", + help="List of static bad channels", + default=None) + parser.add_option("--autobad", dest="autobad", type="string", + help="Set automated bad channel detection ('on', 'off', " + "'n')", default="off") + parser.add_option("--skip", dest="skip", + help="Skips raw data sequences, time intervals pairs in " + "sec, e.g.: 0 30 120 150", default=None) + parser.add_option("--force", dest="force", action="store_true", + help="Ignore program warnings", + default=False) + parser.add_option("--st", dest="st", action="store_true", + help="Apply the time-domain MaxST extension", + default=False) + parser.add_option("--buflen", dest="st_buflen", type="float", + help="MaxSt buffer length in sec", + default=16.0) + parser.add_option("--corr", dest="st_corr", type="float", + help="MaxSt subspace correlation", + default=0.96) + parser.add_option("--trans", dest="mv_trans", + help="Transforms the data into the coil definitions of " + "in_fname, or into the default frame", default=None) + parser.add_option("--movecomp", dest="mv_comp", action="store_true", + help="Estimates and compensates head movements in " + "continuous raw data", default=False) + parser.add_option("--headpos", dest="mv_headpos", action="store_true", + help="Estimates and stores head position parameters, " + "but does not compensate movements", default=False) + parser.add_option("--hp", dest="mv_hp", type="string", + help="Stores head position data in an ascii file", + default=None) + parser.add_option("--hpistep", dest="mv_hpistep", type="float", + help="Sets head position update interval in ms", + default=None) + parser.add_option("--hpisubt", dest="mv_hpisubt", type="string", + help="Subtracts hpi signals: sine amplitudes, amp + " + "baseline, or switch off", default=None) + parser.add_option("--nohpicons", dest="mv_hpicons", action="store_false", + help="Do not check initial consistency isotrak vs " + "hpifit", default=True) + parser.add_option("--linefreq", dest="linefreq", type="float", + help="Sets the basic line interference frequency (50 or " + "60 Hz)", default=None) + parser.add_option("--nooverwrite", dest="overwrite", action="store_false", + help="Do not overwrite output file if it already exists", + default=True) + parser.add_option("--args", dest="mx_args", type="string", + help="Additional command line arguments to pass to " + "MaxFilter", default="") + + options, args = parser.parse_args() + + in_fname = options.in_fname + + if in_fname is None: + parser.print_help() + sys.exit(1) + + out_fname = options.out_fname + origin = options.origin + origin_out = options.origin_out + frame = options.frame + bad = options.bad + autobad = options.autobad + skip = options.skip + force = options.force + st = options.st + st_buflen = options.st_buflen + st_corr = options.st_corr + mv_trans = options.mv_trans + mv_comp = options.mv_comp + mv_headpos = options.mv_headpos + mv_hp = options.mv_hp + mv_hpistep = options.mv_hpistep + mv_hpisubt = options.mv_hpisubt + mv_hpicons = options.mv_hpicons + linefreq = options.linefreq + overwrite = options.overwrite + mx_args = options.mx_args + + if in_fname.endswith('_raw.fif') or in_fname.endswith('-raw.fif'): 
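+ # build the output prefix by stripping the conventional 8-character '_raw.fif'/'-raw.fif' suffix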
+ prefix = in_fname[:-8] + else: + prefix = in_fname[:-4] + + if out_fname is None: + if st: + out_fname = prefix + '_tsss.fif' + else: + out_fname = prefix + '_sss.fif' + + if origin is not None and os.path.exists(origin): + with open(origin, 'r') as fid: + origin = fid.readlines()[0].strip() + + origin = mne.preprocessing.apply_maxfilter( + in_fname, out_fname, origin, frame, + bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans, + mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons, + linefreq, mx_args, overwrite) + + if origin_out is not None: + with open(origin_out, 'w') as fid: + fid.write(origin + '\n') + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_prepare_bem_model.py b/python/libs/mne/commands/mne_prepare_bem_model.py new file mode 100644 index 0000000..da308bb --- /dev/null +++ b/python/libs/mne/commands/mne_prepare_bem_model.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +"""Create a BEM solution using the linear collocation approach. + +Examples +-------- +.. code-block:: console + + $ mne prepare_bem_model --bem sample-5120-5120-5120-bem.fif + +""" + +import sys +import os +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + + parser.add_option('--bem', dest='bem_fname', + help='The name of the file containing the ' + 'triangulations of the BEM surfaces and the ' + 'conductivities of the compartments. The standard ' + 'ending for this file is -bem.fif.', + metavar="FILE") + parser.add_option('--sol', dest='bem_sol_fname', + help='The name of the resulting file containing BEM ' + 'solution (geometry matrix). It uses the linear ' + 'collocation approach. The file should end with ' + '-bem-sol.fif.', + metavar='FILE', default=None) + _add_verbose_flag(parser) + + options, args = parser.parse_args() + bem_fname = options.bem_fname + bem_sol_fname = options.bem_sol_fname + verbose = True if options.verbose is not None else False + + if bem_fname is None: + parser.print_help() + sys.exit(1) + + if bem_sol_fname is None: + base, _ = os.path.splitext(bem_fname) + bem_sol_fname = base + '-sol.fif' + + bem_model = mne.read_bem_surfaces(bem_fname, patch_stats=False, + verbose=verbose) + bem_solution = mne.make_bem_solution(bem_model, verbose=verbose) + mne.write_bem_solution(bem_sol_fname, bem_solution) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_report.py b/python/libs/mne/commands/mne_report.py new file mode 100644 index 0000000..2d96570 --- /dev/null +++ b/python/libs/mne/commands/mne_report.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +r"""Create an MNE report for a folder. + +Examples +-------- +Before getting started with ``mne report``, make sure the files you want to +render follow the filename conventions defined by MNE: + +.. highlight:: console + +.. cssclass:: table-bordered +.. 
rst-class:: midvalign + +============ ============================================================== +Data object Filename convention (ends with) +============ ============================================================== +raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), + _meg.fif(.gz), _eeg.fif(.gz), _ieeg.fif(.gz) +events -eve.fif(.gz) +epochs -epo.fif(.gz) +evoked -ave.fif(.gz) +covariance -cov.fif(.gz) +trans -trans.fif(.gz) +forward -fwd.fif(.gz) +inverse -inv.fif(.gz) +============ ============================================================== + +To generate a barebones report from all the \*.fif files in the sample +dataset, invoke the following command in a system (e.g., Bash) shell:: + + $ mne report --path MNE-sample-data/ --verbose + +On successful creation of the report, it will open the HTML in a new tab in +the browser. To disable this, use the ``--no-browser`` option. + +To generate a report for a single subject, give the ``SUBJECT`` name and +the ``SUBJECTS_DIR`` and this will generate the MRI slices (with BEM +contours overlaid on top if available):: + + $ mne report --path MNE-sample-data/ --subject sample --subjects-dir \ + MNE-sample-data/subjects --verbose + +To properly render ``trans`` and ``covariance`` files, add the measurement +information:: + + $ mne report --path MNE-sample-data/ \ + --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ + --subject sample --subjects-dir MNE-sample-data/subjects --verbose + +To render whitened ``evoked`` files with baseline correction, add the noise +covariance file:: + + $ mne report --path MNE-sample-data/ \ + --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ + --cov MNE-sample-data/MEG/sample/sample_audvis-cov.fif --bmax 0 \ + --subject sample --subjects-dir MNE-sample-data/subjects --verbose + +To generate the report in parallel:: + + $ mne report --path MNE-sample-data/ \ + --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ + --subject sample --subjects-dir MNE-sample-data/subjects \ + --verbose --jobs 6 + +For help on all the available options, do:: + + $ mne report --help +""" + +import sys +import time + +import mne +from mne.report import Report +from mne.utils import verbose, logger + + +@verbose +def log_elapsed(t, verbose=None): + """Log elapsed time.""" + logger.info('Report complete in %s seconds' % round(t, 1)) + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + + parser.add_option("-p", "--path", dest="path", + help="Path to folder for which the MNE-Report must be " + "created") + parser.add_option("-i", "--info", dest="info_fname", + help="File from which info dictionary is to be read", + metavar="FILE") + parser.add_option("-c", "--cov", dest="cov_fname", + help="File from which noise covariance is to be read", + metavar="FILE") + parser.add_option("--bmin", dest="bmin", + help="Time at which baseline correction starts for " + "evokeds", default=None) + parser.add_option("--bmax", dest="bmax", + help="Time at which baseline correction stops for " + "evokeds", default=None) + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + help="The subjects directory") + parser.add_option("-s", "--subject", dest="subject", + help="The subject name") + parser.add_option("--no-browser", dest="no_browser", action='store_false', + help="Do not open MNE-Report in browser") + parser.add_option("--overwrite", dest="overwrite", action='store_false', + help="Overwrite HTML report if it already exists") + 
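+ # Both flags above use action='store_false' with an unset (None)
+ # default, so the parsing below only checks whether each attribute is
+ # still None (flag absent), not its boolean value.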
parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to" + " run in parallel") + parser.add_option("-m", "--mri-decim", type="int", dest="mri_decim", + default=2, help="Integer factor used to decimate " + "BEM plots") + parser.add_option("--image-format", type="str", dest="image_format", + default='png', help="Image format to use " + "(can be 'png' or 'svg')") + _add_verbose_flag(parser) + + options, args = parser.parse_args() + path = options.path + if path is None: + parser.print_help() + sys.exit(1) + info_fname = options.info_fname + cov_fname = options.cov_fname + subjects_dir = options.subjects_dir + subject = options.subject + image_format = options.image_format + mri_decim = int(options.mri_decim) + verbose = True if options.verbose is not None else False + open_browser = False if options.no_browser is not None else True + overwrite = True if options.overwrite is not None else False + n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1 + + bmin = float(options.bmin) if options.bmin is not None else None + bmax = float(options.bmax) if options.bmax is not None else None + # XXX: this means (None, None) cannot be specified through command line + if bmin is None and bmax is None: + baseline = None + else: + baseline = (bmin, bmax) + + t0 = time.time() + report = Report(info_fname, subjects_dir=subjects_dir, + subject=subject, baseline=baseline, + cov_fname=cov_fname, verbose=verbose, + image_format=image_format) + report.parse_folder(path, verbose=verbose, n_jobs=n_jobs, + mri_decim=mri_decim) + log_elapsed(time.time() - t0, verbose=verbose) + report.save(open_browser=open_browser, overwrite=overwrite) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_setup_forward_model.py b/python/libs/mne/commands/mne_setup_forward_model.py new file mode 100644 index 0000000..2f3c577 --- /dev/null +++ b/python/libs/mne/commands/mne_setup_forward_model.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +"""Create a BEM model for a subject. + +Examples +-------- +.. code-block:: console + + $ mne setup_forward_model -s 'sample' + +""" + +import sys +import os +import mne +from mne.utils import get_subjects_dir, warn + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + + parser.add_option("-s", "--subject", + dest="subject", + help="Subject name (required)", + default=None) + parser.add_option("--model", + dest="model", + help="Output file name. Use a name /-bem.fif", + default=None, + type='string') + parser.add_option('--ico', + dest='ico', + help='The surface ico downsampling to use, e.g. ' + ' 5=20484, 4=5120, 3=1280. If None, no subsampling' + ' is applied.', + default=None, + type='int') + parser.add_option('--brainc', + dest='brainc', + help='Defines the brain compartment conductivity. ' + 'The default value is 0.3 S/m.', + default=0.3, + type='float') + parser.add_option('--skullc', + dest='skullc', + help='Defines the skull compartment conductivity. ' + 'The default value is 0.006 S/m.', + default=None, + type='float') + parser.add_option('--scalpc', + dest='scalpc', + help='Defines the scalp compartment conductivity. ' + 'The default value is 0.3 S/m.', + default=None, + type='float') + parser.add_option('--homog', + dest='homog', + help='Use a single compartment model (brain only) ' + 'instead a three layer one (scalp, skull, and ' + ' brain). 
If this flag is specified, the options ' + '--skullc and --scalpc are irrelevant.', + default=None, action="store_true") + parser.add_option('-d', '--subjects-dir', + dest='subjects_dir', + help='Subjects directory', + default=None) + _add_verbose_flag(parser) + options, args = parser.parse_args() + + if options.subject is None: + parser.print_help() + sys.exit(1) + + subject = options.subject + fname = options.model + subjects_dir = options.subjects_dir + ico = options.ico + brainc = options.brainc + skullc = options.skullc + scalpc = options.scalpc + homog = True if options.homog is not None else False + verbose = True if options.verbose is not None else False + # Parse conductivity option + if homog is True: + if skullc is not None: + warn('Trying to set the skull conductivity for a single layer ' + 'model. To use a 3 layer model, do not set the --homog flag.') + if scalpc is not None: + warn('Trying to set the scalp conductivity for a single layer ' + 'model. To use a 3 layer model, do not set the --homog flag.') + # Single layer + conductivity = [brainc] + else: + if skullc is None: + skullc = 0.006 + if scalpc is None: + scalpc = 0.3 + conductivity = [brainc, skullc, scalpc] + # Create source space + bem_model = mne.make_bem_model(subject, + ico=ico, + conductivity=conductivity, + subjects_dir=subjects_dir, + verbose=verbose) + # Generate filename + if fname is None: + n_faces = list(str(len(surface['tris'])) for surface in bem_model) + fname = subject + '-' + '-'.join(n_faces) + '-bem.fif' + else: + if not (fname.endswith('-bem.fif') or fname.endswith('_bem.fif')): + fname = fname + "-bem.fif" + # Save to subject's directory + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + fname = os.path.join(subjects_dir, subject, "bem", fname) + # Save source space to file + mne.write_bem_surfaces(fname, bem_model) + # Compute the solution + sol_fname = os.path.splitext(fname)[0] + '-sol.fif' + bem_sol = mne.make_bem_solution(bem_model, verbose=verbose) + mne.write_bem_solution(sol_fname, bem_sol) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_setup_source_space.py b/python/libs/mne/commands/mne_setup_source_space.py new file mode 100644 index 0000000..e8b14b7 --- /dev/null +++ b/python/libs/mne/commands/mne_setup_source_space.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +"""Set up bilateral hemisphere surface-based source space with subsampling. + +Examples +-------- +.. code-block:: console + + $ mne setup_source_space --subject sample + + + .. note:: Only one of the --ico, --oct, or --spacing options can be set at + the same time. Defaults to oct6. + +""" + +import sys + +import mne +from mne.utils import _check_option + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + parser = get_optparser(__file__) + + parser.add_option('-s', '--subject', + dest='subject', + help='Subject name (required)', + default=None) + parser.add_option('--src', dest='fname', + help='Output file name. Use a name <dir>/<name>-src.fif', + metavar='FILE', default=None) + parser.add_option('--morph', + dest='subject_to', + help='Morph the source space to this subject', + default=None) + parser.add_option('--surf', + dest='surface', + help='The surface to use (defaults to white).', + default='white', + type='string') + parser.add_option('--spacing', + dest='spacing', + help='Specifies the approximate grid spacing of the ' + 'source space in mm. 
(defaults to 7 mm)', + default=None, + type='int') + parser.add_option('--ico', + dest='ico', + help='Use the recursively subdivided icosahedron ' + 'to create the source space.', + default=None, + type='int') + parser.add_option('--oct', + dest='oct', + help='Use the recursively subdivided octahedron ' + 'to create the source space.', + default=None, + type='int') + parser.add_option('-d', '--subjects-dir', + dest='subjects_dir', + help='Subjects directory', + default=None) + parser.add_option('-n', '--n-jobs', + dest='n_jobs', + help='The number of jobs to run in parallel ' + '(default 1). Requires the joblib package. ' + 'Will use at most 2 jobs' + ' (one for each hemisphere).', + default=1, + type='int') + parser.add_option('--add-dist', + dest='add_dist', + help='Add distances. Can be "True", "False", or "patch" ' + 'to only compute cortical patch statistics (like the ' + '--cps option in MNE-C; requires SciPy >= 1.3)', + default='True') + parser.add_option('-o', '--overwrite', + dest='overwrite', + help='Write over existing files', + default=None, action="store_true") + _add_verbose_flag(parser) + + options, args = parser.parse_args() + + if options.subject is None: + parser.print_help() + sys.exit(1) + + subject = options.subject + subject_to = options.subject_to + fname = options.fname + subjects_dir = options.subjects_dir + spacing = options.spacing + ico = options.ico + oct = options.oct + surface = options.surface + n_jobs = options.n_jobs + add_dist = options.add_dist + _check_option('add_dist', add_dist, ('True', 'False', 'patch')) + add_dist = {'True': True, 'False': False, 'patch': 'patch'}[add_dist] + verbose = True if options.verbose is not None else False + overwrite = True if options.overwrite is not None else False + + # Parse source spacing option + spacing_options = [ico, oct, spacing] + n_options = len([x for x in spacing_options if x is not None]) + if n_options > 1: + raise ValueError('Only one spacing option can be set at the same time') + elif n_options == 0: + # Default to oct6 + use_spacing = 'oct6' + elif n_options == 1: + if ico is not None: + use_spacing = "ico" + str(ico) + elif oct is not None: + use_spacing = "oct" + str(oct) + elif spacing is not None: + use_spacing = spacing + # Generate filename + if fname is None: + if subject_to is None: + fname = subject + '-' + str(use_spacing) + '-src.fif' + else: + fname = (subject_to + '-' + subject + '-' + + str(use_spacing) + '-src.fif') + else: + if not (fname.endswith('_src.fif') or fname.endswith('-src.fif')): + fname = fname + "-src.fif" + # Create source space + src = mne.setup_source_space(subject=subject, spacing=use_spacing, + surface=surface, subjects_dir=subjects_dir, + n_jobs=n_jobs, add_dist=add_dist, + verbose=verbose) + # Morph source space if --morph is set + if subject_to is not None: + src = mne.morph_source_spaces(src, subject_to=subject_to, + subjects_dir=subjects_dir, + surf=surface, verbose=verbose) + + # Save source space to file + src.save(fname=fname, overwrite=overwrite) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_show_fiff.py b/python/libs/mne/commands/mne_show_fiff.py new file mode 100644 index 0000000..be31cde --- /dev/null +++ b/python/libs/mne/commands/mne_show_fiff.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +"""Show the contents of a FIFF file. + +Examples +-------- +.. code-block:: console + + $ mne show_fiff test_raw.fif + + +To see only tag 102: + +.. 
code-block:: console + + $ mne show_fiff test_raw.fif --tag=102 + +""" + +# Authors : Eric Larson, PhD + +import sys +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser( + __file__, usage='mne show_fiff <file>') + parser.add_option("-t", "--tag", dest="tag", + help="provide information about this tag", metavar="TAG") + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(1) + msg = mne.io.show_fiff(args[0], tag=options.tag).strip() + print(msg) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_show_info.py b/python/libs/mne/commands/mne_show_info.py new file mode 100644 index 0000000..44e1fa7 --- /dev/null +++ b/python/libs/mne/commands/mne_show_info.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +"""Show measurement info from .fif file. + +Examples
-------- +.. code-block:: console + + $ mne show_info sample_audvis_raw.fif + +""" + +# Authors : Alexandre Gramfort, Ph.D. + +import sys +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser( + __file__, usage='mne show_info <file>') + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(1) + + fname = args[0] + + if not fname.endswith('.fif'): + raise ValueError('%s does not seem to be a .fif file.' % fname) + + info = mne.io.read_info(fname) + print("File : %s" % fname) + print(info) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_surf2bem.py b/python/libs/mne/commands/mne_surf2bem.py new file mode 100644 index 0000000..4cb5ade --- /dev/null +++ b/python/libs/mne/commands/mne_surf2bem.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +r"""Convert surface to BEM FIF file. + +Examples +-------- +.. code-block:: console + + $ mne surf2bem --surf ${SUBJECTS_DIR}/${SUBJECT}/surf/lh.seghead \ + --fif ${SUBJECTS_DIR}/${SUBJECT}/bem/${SUBJECT}-head.fif \ + --id=4 + +""" +# Authors: Alexandre Gramfort +# +# License: BSD-3-Clause + +import sys + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option("-s", "--surf", dest="surf", + help="Surface in Freesurfer format", metavar="FILE") + parser.add_option("-f", "--fif", dest="fif", + help="FIF file produced", metavar="FILE") + parser.add_option("-i", "--id", dest="id", default=4, + help=("Surface Id (e.g. 4 for head surface)")) + + options, args = parser.parse_args() + + if options.surf is None: + parser.print_help() + sys.exit(1) + + print("Converting %s to BEM FIF file." % options.surf) + surf = mne.bem._surfaces_to_bem([options.surf], [int(options.id)], + sigmas=[1]) + mne.write_bem_surfaces(options.fif, surf) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_sys_info.py b/python/libs/mne/commands/mne_sys_info.py new file mode 100644 index 0000000..5827e41 --- /dev/null +++ b/python/libs/mne/commands/mne_sys_info.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +"""Show system information. + +Examples +-------- +.. 
code-block:: console + + $ mne sys_info + +""" + +# Authors : Eric Larson + +import sys +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser(__file__, usage='mne sys_info') + parser.add_option('-p', '--show-paths', dest='show_paths', + help='Show module paths', action='store_true') + parser.add_option('-d', '--developer', dest='developer', + help='Show additional developer module information', + action='store_true') + options, args = parser.parse_args() + dependencies = 'developer' if options.developer else 'user' + if len(args) != 0: + parser.print_help() + sys.exit(1) + + mne.sys_info(show_paths=options.show_paths, dependencies=dependencies) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_watershed_bem.py b/python/libs/mne/commands/mne_watershed_bem.py new file mode 100644 index 0000000..b69a280 --- /dev/null +++ b/python/libs/mne/commands/mne_watershed_bem.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# Authors: Lorenzo De Santis +"""Create BEM surfaces using the watershed algorithm included with FreeSurfer. + +Examples +-------- +.. code-block:: console + + $ mne watershed_bem -s sample + +""" + +import sys + +import mne +from mne.bem import make_watershed_bem +from mne.utils import _check_option + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser, _add_verbose_flag + + parser = get_optparser(__file__) + + parser.add_option("-s", "--subject", dest="subject", + help="Subject name (required)", default=None) + parser.add_option("-d", "--subjects-dir", dest="subjects_dir", + help="Subjects directory", default=None) + parser.add_option("-o", "--overwrite", dest="overwrite", + help="Write over existing files", action="store_true") + parser.add_option("-v", "--volume", dest="volume", + help="Defaults to T1", default='T1') + parser.add_option("-a", "--atlas", dest="atlas", + help="Specify the --atlas option for mri_watershed", + default=False, action="store_true") + parser.add_option("-g", "--gcaatlas", dest="gcaatlas", + help="Specify the --brain_atlas option for " + "mri_watershed", default=False, action="store_true") + parser.add_option("-p", "--preflood", dest="preflood", + help="Change the preflood height", default=None) + parser.add_option("--copy", dest="copy", + help="Use copies instead of symlinks for surfaces", + action="store_true") + parser.add_option("-t", "--T1", dest="T1", + help="Whether or not to pass the -T1 flag " + "(can be true, false, 0, or 1). 
" + "By default it takes the same value as gcaatlas.", + default=None) + parser.add_option("-b", "--brainmask", dest="brainmask", + help="The filename for the brainmask output file " + "relative to the " + "$SUBJECTS_DIR/$SUBJECT/bem/watershed/ directory.", + default="ws") + _add_verbose_flag(parser) + + options, args = parser.parse_args() + + if options.subject is None: + parser.print_help() + sys.exit(1) + + subject = options.subject + subjects_dir = options.subjects_dir + overwrite = options.overwrite + volume = options.volume + atlas = options.atlas + gcaatlas = options.gcaatlas + preflood = options.preflood + copy = options.copy + brainmask = options.brainmask + T1 = options.T1 + if T1 is not None: + T1 = T1.lower() + _check_option("--T1", T1, ('true', 'false', '0', '1')) + T1 = T1 in ('true', '1') + verbose = options.verbose + + make_watershed_bem(subject=subject, subjects_dir=subjects_dir, + overwrite=overwrite, volume=volume, atlas=atlas, + gcaatlas=gcaatlas, preflood=preflood, copy=copy, + T1=T1, brainmask=brainmask, verbose=verbose) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/mne_what.py b/python/libs/mne/commands/mne_what.py new file mode 100644 index 0000000..5d281fa --- /dev/null +++ b/python/libs/mne/commands/mne_what.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python +r"""Check type of FIF file. + +Examples +-------- +.. code-block:: console + + $ mne what sample_audvis_raw.fif + raw +""" + +# Authors : Eric Larson, PhD + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + parser = get_optparser(__file__, usage='usage: %prog fname [fname2 ...]') + options, args = parser.parse_args() + for arg in args: + print(mne.what(arg)) + + +mne.utils.run_command_if_main() diff --git a/python/libs/mne/commands/tests/__init__.py b/python/libs/mne/commands/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/commands/tests/test_commands.py b/python/libs/mne/commands/tests/test_commands.py new file mode 100644 index 0000000..f742732 --- /dev/null +++ b/python/libs/mne/commands/tests/test_commands.py @@ -0,0 +1,393 @@ +# -*- coding: utf-8 -*- +import glob +import os +from os import path as op +import shutil + +import numpy as np +import pytest +from numpy.testing import assert_equal, assert_allclose + +from mne import (concatenate_raws, read_bem_surfaces, read_surface, + read_source_spaces, read_bem_solution) +from mne.bem import ConductorModel +from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg, + mne_compute_proj_ecg, mne_compute_proj_eog, + mne_coreg, mne_kit2fiff, + mne_make_scalp_surfaces, mne_maxfilter, + mne_report, mne_surf2bem, mne_watershed_bem, + mne_compare_fiff, mne_flash_bem, mne_show_fiff, + mne_show_info, mne_what, mne_setup_source_space, + mne_setup_forward_model, mne_anonymize, + mne_prepare_bem_model, mne_sys_info) +from mne.datasets import testing +from mne.io import read_raw_fif, read_info +from mne.utils import (requires_mne, requires_vtk, requires_freesurfer, + requires_nibabel, ArgvSetter, + _stamp_to_dt, _record_warnings) + +base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') + +testing_path = testing.data_path(download=False) +subjects_dir = op.join(testing_path, 'subjects') +bem_model_fname = op.join(testing_path, 'subjects', + 'sample', 'bem', 'sample-320-320-320-bem.fif') + + +def check_usage(module, force_help=False): + """Ensure we print usage.""" + args = 
('--help',) if force_help else () + with ArgvSetter(args) as out: + try: + module.run() + except SystemExit: + pass + assert 'Usage: ' in out.stdout.getvalue() + + +@pytest.mark.slowtest +def test_browse_raw(): + """Test mne browse_raw.""" + check_usage(mne_browse_raw) + with ArgvSetter(('--raw', raw_fname)): + with _record_warnings(): # mpl show warning + mne_browse_raw.run() + + +def test_what(): + """Test mne what.""" + check_usage(mne_what) + with ArgvSetter((raw_fname,)) as out: + mne_what.run() + assert 'raw' == out.stdout.getvalue().strip() + + +def test_bti2fiff(): + """Test mne bti2fiff.""" + check_usage(mne_bti2fiff) + + +def test_compare_fiff(): + """Test mne compare_fiff.""" + check_usage(mne_compare_fiff) + + +def test_show_fiff(): + """Test mne show_fiff.""" + check_usage(mne_show_fiff) + with ArgvSetter((raw_fname,)): + mne_show_fiff.run() + with ArgvSetter((raw_fname, '--tag=102')): + mne_show_fiff.run() + + +@requires_mne +def test_clean_eog_ecg(tmp_path): + """Test mne clean_eog_ecg.""" + check_usage(mne_clean_eog_ecg) + tempdir = str(tmp_path) + raw = concatenate_raws([read_raw_fif(f) + for f in [raw_fname, raw_fname, raw_fname]]) + raw.info['bads'] = ['MEG 2443'] + use_fname = op.join(tempdir, op.basename(raw_fname)) + raw.save(use_fname) + with ArgvSetter(('-i', use_fname, '--quiet')): + mne_clean_eog_ecg.run() + for key, count in (('proj', 2), ('-eve', 3)): + fnames = glob.glob(op.join(tempdir, '*%s.fif' % key)) + assert len(fnames) == count + + +@pytest.mark.slowtest +@pytest.mark.parametrize('fun', (mne_compute_proj_ecg, mne_compute_proj_eog)) +def test_compute_proj_exg(tmp_path, fun): + """Test mne compute_proj_ecg/eog.""" + check_usage(fun) + tempdir = str(tmp_path) + use_fname = op.join(tempdir, op.basename(raw_fname)) + bad_fname = op.join(tempdir, 'bads.txt') + with open(bad_fname, 'w') as fid: + fid.write('MEG 2443\n') + shutil.copyfile(raw_fname, use_fname) + with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname, + '--rej-eeg', '150')): + with _record_warnings(): # samples, sometimes + fun.run() + fnames = glob.glob(op.join(tempdir, '*proj.fif')) + assert len(fnames) == 1 + fnames = glob.glob(op.join(tempdir, '*-eve.fif')) + assert len(fnames) == 1 + + +def test_coreg(): + """Test mne coreg.""" + assert hasattr(mne_coreg, 'run') + + +def test_kit2fiff(): + """Test mne kit2fiff.""" + # Can't check + check_usage(mne_kit2fiff, force_help=True) + + +@pytest.mark.slowtest +@pytest.mark.ultraslowtest +@requires_vtk +@testing.requires_testing_data +def test_make_scalp_surfaces(tmp_path, monkeypatch): + """Test mne make_scalp_surfaces.""" + check_usage(mne_make_scalp_surfaces) + has = 'SUBJECTS_DIR' in os.environ + # Copy necessary files to avoid FreeSurfer call + tempdir = str(tmp_path) + surf_path = op.join(subjects_dir, 'sample', 'surf') + surf_path_new = op.join(tempdir, 'sample', 'surf') + os.mkdir(op.join(tempdir, 'sample')) + os.mkdir(surf_path_new) + subj_dir = op.join(tempdir, 'sample', 'bem') + os.mkdir(subj_dir) + shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new) + + cmd = ('-s', 'sample', '--subjects-dir', tempdir) + monkeypatch.setenv('_MNE_TESTING_SCALP', 'true') + dense_fname = op.join(subj_dir, 'sample-head-dense.fif') + medium_fname = op.join(subj_dir, 'sample-head-medium.fif') + with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False): + monkeypatch.delenv('FREESURFER_HOME', None) + with pytest.raises(RuntimeError, match='The FreeSurfer environ'): + mne_make_scalp_surfaces.run() + 
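+ # Any existing directory suffices as FREESURFER_HOME from here on:
+ # lh.seghead was pre-copied and _MNE_TESTING_SCALP is set, so no real
+ # FreeSurfer call is made once the environment check passes.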
monkeypatch.setenv('FREESURFER_HOME', tempdir) + mne_make_scalp_surfaces.run() + assert op.isfile(dense_fname) + assert op.isfile(medium_fname) + with pytest.raises(IOError, match='overwrite'): + mne_make_scalp_surfaces.run() + # actually check the outputs + head_py = read_bem_surfaces(dense_fname) + assert_equal(len(head_py), 1) + head_py = head_py[0] + head_c = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem', + 'sample-head-dense.fif'))[0] + assert_allclose(head_py['rr'], head_c['rr']) + if not has: + assert 'SUBJECTS_DIR' not in os.environ + + +def test_maxfilter(): + """Test mne maxfilter.""" + check_usage(mne_maxfilter) + with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60', + '--trans', raw_fname)) as out: + with pytest.warns(RuntimeWarning, match="Don't use"): + os.environ['_MNE_MAXFILTER_TEST'] = 'true' + try: + mne_maxfilter.run() + finally: + del os.environ['_MNE_MAXFILTER_TEST'] + out = out.stdout.getvalue() + for check in ('maxfilter', '-trans', '-movecomp'): + assert check in out, check + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_report(tmp_path): + """Test mne report.""" + check_usage(mne_report) + tempdir = str(tmp_path) + use_fname = op.join(tempdir, op.basename(raw_fname)) + shutil.copyfile(raw_fname, use_fname) + with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir, + '-s', 'sample', '--no-browser', '-m', '30')): + with _record_warnings(): # contour levels + mne_report.run() + fnames = glob.glob(op.join(tempdir, '*.html')) + assert len(fnames) == 1 + + +def test_surf2bem(): + """Test mne surf2bem.""" + check_usage(mne_surf2bem) + + +@pytest.mark.timeout(900) # took ~400 sec on a local test +@pytest.mark.slowtest +@pytest.mark.ultraslowtest +@requires_nibabel() +@requires_freesurfer('mri_watershed') +@testing.requires_testing_data +def test_watershed_bem(tmp_path): + """Test mne watershed bem.""" + check_usage(mne_watershed_bem) + # from T1.mgz + Mdc = np.array([[-1, 0, 0], [0, 0, -1], [0, 1, 0]]) + Pxyz_c = np.array([-5.273613, 9.039085, -27.287964]) + # Copy necessary files to tempdir + tempdir = str(tmp_path) + mridata_path = op.join(subjects_dir, 'sample', 'mri') + subject_path_new = op.join(tempdir, 'sample') + mridata_path_new = op.join(subject_path_new, 'mri') + os.makedirs(mridata_path_new) + new_fname = op.join(mridata_path_new, 'T1.mgz') + shutil.copyfile(op.join(mridata_path, 'T1.mgz'), new_fname) + old_mode = os.stat(new_fname).st_mode + os.chmod(new_fname, 0) + args = ('-d', tempdir, '-s', 'sample', '-o') + with pytest.raises(PermissionError, match=r'read permissions.*T1\.mgz'): + with ArgvSetter(args): + mne_watershed_bem.run() + os.chmod(new_fname, old_mode) + for s in ('outer_skin', 'outer_skull', 'inner_skull'): + assert not op.isfile(op.join(subject_path_new, 'bem', '%s.surf' % s)) + with ArgvSetter(args): + mne_watershed_bem.run() + + kwargs = dict(rtol=1e-5, atol=1e-5) + for s in ('outer_skin', 'outer_skull', 'inner_skull'): + rr, tris, vol_info = read_surface(op.join(subject_path_new, 'bem', + '%s.surf' % s), + read_metadata=True) + assert_equal(len(tris), 20480) + assert_equal(tris.min(), 0) + assert_equal(rr.shape[0], tris.max() + 1) + # compare the volume info to the mgz header + assert_allclose(vol_info['xras'], Mdc[0], **kwargs) + assert_allclose(vol_info['yras'], Mdc[1], **kwargs) + assert_allclose(vol_info['zras'], Mdc[2], **kwargs) + assert_allclose(vol_info['cras'], Pxyz_c, **kwargs) + + +@pytest.mark.timeout(120) # took ~70 sec locally +@pytest.mark.slowtest 
+@pytest.mark.ultraslowtest +@requires_freesurfer +@testing.requires_testing_data +def test_flash_bem(tmp_path): + """Test mne flash_bem.""" + check_usage(mne_flash_bem, force_help=True) + # Copy necessary files to tempdir + tempdir = str(tmp_path) + mridata_path = op.join(subjects_dir, 'sample', 'mri') + subject_path_new = op.join(tempdir, 'sample') + mridata_path_new = op.join(subject_path_new, 'mri') + os.makedirs(op.join(mridata_path_new, 'flash')) + os.makedirs(op.join(subject_path_new, 'bem')) + shutil.copyfile(op.join(mridata_path, 'T1.mgz'), + op.join(mridata_path_new, 'T1.mgz')) + shutil.copyfile(op.join(mridata_path, 'brain.mgz'), + op.join(mridata_path_new, 'brain.mgz')) + # Copy the available mri/flash/mef*.mgz files from the dataset + flash_path = op.join(mridata_path_new, 'flash') + for kind in (5, 30): + in_fname = op.join(mridata_path, 'flash', 'mef%02d.mgz' % kind) + shutil.copyfile(in_fname, op.join(flash_path, op.basename(in_fname))) + # Test mne flash_bem with --noconvert option + # (since there are no DICOM Flash images in dataset) + for s in ('outer_skin', 'outer_skull', 'inner_skull'): + assert not op.isfile(op.join(subject_path_new, 'bem', '%s.surf' % s)) + with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'), + disable_stdout=False, disable_stderr=False): + mne_flash_bem.run() + + kwargs = dict(rtol=1e-5, atol=1e-5) + for s in ('outer_skin', 'outer_skull', 'inner_skull'): + rr, tris = read_surface(op.join(subject_path_new, 'bem', + '%s.surf' % s)) + assert_equal(len(tris), 5120) + assert_equal(tris.min(), 0) + assert_equal(rr.shape[0], tris.max() + 1) + # compare to the testing flash surfaces + rr_c, tris_c = read_surface(op.join(subjects_dir, 'sample', 'bem', + '%s.surf' % s)) + assert_allclose(rr, rr_c, **kwargs) + assert_allclose(tris, tris_c, **kwargs) + + +@testing.requires_testing_data +def test_setup_source_space(tmp_path): + """Test mne setup_source_space.""" + check_usage(mne_setup_source_space, force_help=True) + # Using the sample dataset + use_fname = op.join(tmp_path, "sources-src.fif") + # Test command + with ArgvSetter(('--src', use_fname, '-d', subjects_dir, + '-s', 'sample', '--morph', 'sample', + '--add-dist', 'False', '--ico', '3', '--verbose')): + mne_setup_source_space.run() + src = read_source_spaces(use_fname) + assert len(src) == 2 + with pytest.raises(Exception): + with ArgvSetter(('--src', use_fname, '-d', subjects_dir, + '-s', 'sample', '--ico', '3', '--oct', '3')): + assert mne_setup_source_space.run() + with pytest.raises(Exception): + with ArgvSetter(('--src', use_fname, '-d', subjects_dir, + '-s', 'sample', '--ico', '3', '--spacing', '10')): + assert mne_setup_source_space.run() + with pytest.raises(Exception): + with ArgvSetter(('--src', use_fname, '-d', subjects_dir, + '-s', 'sample', '--ico', '3', '--spacing', '10', + '--oct', '3')): + assert mne_setup_source_space.run() + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_setup_forward_model(tmp_path): + """Test mne setup_forward_model.""" + check_usage(mne_setup_forward_model, force_help=True) + # Using the sample dataset + use_fname = op.join(tmp_path, "model-bem.fif") + # Test command + with ArgvSetter(('--model', use_fname, '-d', subjects_dir, '--homog', + '-s', 'sample', '--ico', '3', '--verbose')): + mne_setup_forward_model.run() + model = read_bem_surfaces(use_fname) + assert len(model) == 1 + sol_fname = op.splitext(use_fname)[0] + '-sol.fif' + read_bem_solution(sol_fname) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def 
test_mne_prepare_bem_model(tmp_path): + """Test mne prepare_bem_model.""" + check_usage(mne_prepare_bem_model, force_help=True) + # Using the sample dataset + bem_solution_fname = op.join(tmp_path, "bem_solution-bem-sol.fif") + # Test command + with ArgvSetter(('--bem', bem_model_fname, '--sol', bem_solution_fname, + '--verbose')): + mne_prepare_bem_model.run() + bem_solution = read_bem_solution(bem_solution_fname) + assert isinstance(bem_solution, ConductorModel) + + +def test_show_info(): + """Test mne show_info.""" + check_usage(mne_show_info) + with ArgvSetter((raw_fname,)): + mne_show_info.run() + + +def test_sys_info(): + """Test mne sys_info.""" + check_usage(mne_sys_info, force_help=True) + with ArgvSetter((raw_fname,)): + with pytest.raises(SystemExit, match='1'): + mne_sys_info.run() + with ArgvSetter() as out: + mne_sys_info.run() + assert 'numpy' in out.stdout.getvalue() + + +def test_anonymize(tmp_path): + """Test mne anonymize.""" + check_usage(mne_anonymize) + out_fname = op.join(tmp_path, 'anon_test_raw.fif') + with ArgvSetter(('-f', raw_fname, '-o', out_fname)): + mne_anonymize.run() + info = read_info(out_fname) + assert op.exists(out_fname) + assert info['meas_date'] == _stamp_to_dt((946684800, 0)) diff --git a/python/libs/mne/commands/utils.py b/python/libs/mne/commands/utils.py new file mode 100644 index 0000000..415f513 --- /dev/null +++ b/python/libs/mne/commands/utils.py @@ -0,0 +1,107 @@ +"""Some utility functions for commands (e.g., for cmdline handling).""" + +# Authors: Yaroslav Halchenko +# Stefan Appelhoff +# +# License: BSD-3-Clause + +import glob +import importlib +import os +import os.path as op +from optparse import OptionParser +import sys + +import mne + + +def _add_verbose_flag(parser): + parser.add_option("--verbose", dest='verbose', + help="Enable verbose mode (printing of log messages).", + default=None, action="store_true") + + +def load_module(name, path): + """Load module from .py/.pyc file. + + Parameters + ---------- + name : str + Name of the module. + path : str + Path to .py/.pyc file. + + Returns + ------- + mod : module + Imported module. 
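+ 
+ Examples
+ --------
+ Used by get_optparser below to read a command's docstring; a minimal
+ sketch (the path here is hypothetical)::
+ 
+ mod = load_module('__temp', '/path/to/mne_report.py')
+ print(mod.__doc__.splitlines()[0])
+ 
+ """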
+ + """ + from importlib.util import spec_from_file_location, module_from_spec + spec = spec_from_file_location(name, path) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + +def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None): + """Create OptionParser with cmd specific settings (e.g., prog value).""" + # Fetch description + mod = load_module('__temp', cmdpath) + if mod.__doc__: + doc, description, epilog = mod.__doc__, None, None + + doc_lines = doc.split('\n') + description = doc_lines[0] + if len(doc_lines) > 1: + epilog = '\n'.join(doc_lines[1:]) + + # Get the name of the command + command = os.path.basename(cmdpath) + command, _ = os.path.splitext(command) + command = command[len(prog_prefix) + 1:] # +1 is for `_` character + + # Set prog + prog = prog_prefix + ' {}'.format(command) + + # Set version + if version is None: + version = mne.__version__ + + # monkey patch OptionParser to not wrap epilog + OptionParser.format_epilog = lambda self, formatter: self.epilog + parser = OptionParser(prog=prog, + version=version, + description=description, + epilog=epilog, usage=usage) + + return parser + + +def main(): + """Entrypoint for mne usage.""" + mne_bin_dir = op.dirname(op.dirname(__file__)) + valid_commands = sorted(glob.glob(op.join(mne_bin_dir, + 'commands', 'mne_*.py'))) + valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands] + + def print_help(): # noqa + print("Usage : mne command options\n") + print("Accepted commands :\n") + for c in valid_commands: + print("\t- %s" % c) + print("\nExample : mne browse_raw --raw sample_audvis_raw.fif") + print("\nGetting help example : mne compute_proj_eog -h") + + if len(sys.argv) == 1 or "help" in sys.argv[1] or "-h" in sys.argv[1]: + print_help() + elif sys.argv[1] == "--version": + print("MNE %s" % mne.__version__) + elif sys.argv[1] not in valid_commands: + print('Invalid command: "%s"\n' % sys.argv[1]) + print_help() + else: + cmd = sys.argv[1] + cmd = importlib.import_module('.mne_%s' % (cmd,), 'mne.commands') + sys.argv = sys.argv[1:] + cmd.run() diff --git a/python/libs/mne/conftest.py b/python/libs/mne/conftest.py new file mode 100644 index 0000000..5b8e23a --- /dev/null +++ b/python/libs/mne/conftest.py @@ -0,0 +1,898 @@ +# -*- coding: utf-8 -*- +# Author: Eric Larson +# +# License: BSD-3-Clause + +from contextlib import contextmanager +import inspect +from textwrap import dedent +import gc +import os +import os.path as op +from pathlib import Path +import shutil +import sys +import warnings +import pytest +from unittest import mock + +import numpy as np + +import mne +from mne import read_events, pick_types, Epochs +from mne.channels import read_layout +from mne.datasets import testing +from mne.fixes import has_numba, _compare_version +from mne.io import read_raw_fif, read_raw_ctf +from mne.stats import cluster_level +from mne.utils import (_pl, _assert_no_instances, numerics, Bunch, + _check_pyqt5_version, _TempDir) + +# data from sample dataset +from mne.viz._figure import use_browser_backend + +test_path = testing.data_path(download=False) +s_path = op.join(test_path, 'MEG', 'sample') +fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif') +fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif') +fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') +bem_path = op.join(test_path, 'subjects', 'sample', 'bem') +fname_bem = op.join(bem_path, 'sample-1280-bem.fif') 
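+# Note: test_path comes from testing.data_path(download=False), which
+# returns the dataset location without fetching it, so these files exist
+# only if the MNE testing dataset was downloaded beforehand (tests guard
+# this with testing.requires_testing_data).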
+fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz') +subjects_dir = op.join(test_path, 'subjects') +fname_src = op.join(bem_path, 'sample-oct-4-src.fif') +fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif') + +ctf_dir = op.join(test_path, 'CTF') +fname_ctf_continuous = op.join(ctf_dir, 'testdata_ctf.ds') + +# data from mne.io.tests.data +base_dir = op.join(op.dirname(__file__), 'io', 'tests', 'data') +fname_raw_io = op.join(base_dir, 'test_raw.fif') +fname_event_io = op.join(base_dir, 'test-eve.fif') +fname_cov_io = op.join(base_dir, 'test-cov.fif') +fname_evoked_io = op.join(base_dir, 'test-ave.fif') +event_id, tmin, tmax = 1, -0.1, 1.0 +vv_layout = read_layout('Vectorview-all') + +collect_ignore = ['export/_eeglab.py', 'export/_edf.py'] + + +def pytest_configure(config): + """Configure pytest options.""" + # Markers + for marker in ('slowtest', 'ultraslowtest', 'pgtest'): + config.addinivalue_line('markers', marker) + + # Fixtures + for fixture in ('matplotlib_config', 'close_all', 'check_verbose', + 'qt_config', 'protect_config'): + config.addinivalue_line('usefixtures', fixture) + + # Warnings + # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0) + # we should remove them from here. + # - This list should also be considered alongside reset_warnings in + # doc/conf.py. + warning_lines = r""" + error:: + ignore:.*deprecated and ignored since IPython.*:DeprecationWarning + ignore::ImportWarning + ignore:the matrix subclass:PendingDeprecationWarning + ignore:numpy.dtype size changed:RuntimeWarning + ignore:.*takes no parameters:DeprecationWarning + ignore:joblib not installed:RuntimeWarning + ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning + ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning + ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning + ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning + ignore:The oldnumeric module will be dropped:DeprecationWarning + ignore:Collection picker None could not be converted to float:UserWarning + ignore:covariance is not positive-semidefinite:RuntimeWarning + ignore:Can only plot ICA components:RuntimeWarning + ignore:Matplotlib is building the font cache using fc-list:UserWarning + ignore:Using or importing the ABCs from 'collections':DeprecationWarning + ignore:`formatargspec` is deprecated:DeprecationWarning + # This is only necessary until sklearn updates their wheels for NumPy 1.16 + ignore:numpy.ufunc size changed:RuntimeWarning + ignore:.*mne-realtime.*:DeprecationWarning + ignore:.*imp.*:DeprecationWarning + ignore:Exception creating Regex for oneOf.*:SyntaxWarning + ignore:scipy\.gradient is deprecated.*:DeprecationWarning + ignore:The sklearn.*module.*deprecated.*:FutureWarning + ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning + ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning + ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.long` is a deprecated alias:DeprecationWarning + ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning + ignore:.*sphinx\.util\.smartypants is deprecated.*: + ignore:.*pandas\.util\.testing is deprecated.*: + ignore:.*tostring.*is 
deprecated.*:DeprecationWarning + ignore:.*QDesktopWidget\.availableGeometry.*:DeprecationWarning + ignore:Unable to enable faulthandler.*:UserWarning + ignore:Fetchers from the nilearn.*:FutureWarning + ignore:SelectableGroups dict interface is deprecated\. Use select\.:DeprecationWarning + always:.*get_data.* is deprecated in favor of.*:DeprecationWarning + ignore:.*rcParams is deprecated.*global_theme.*:DeprecationWarning + ignore:.*distutils\.sysconfig module is deprecated.*:DeprecationWarning + ignore:.*numpy\.dual is deprecated.*:DeprecationWarning + ignore:.*`np.typeDict` is a deprecated.*:DeprecationWarning + ignore:.*Creating an ndarray from ragged.*:numpy.VisibleDeprecationWarning + ignore:^Please use.*scipy\..*:DeprecationWarning + ignore:.*Passing a schema to Validator.*:DeprecationWarning + ignore:.*Found the following unknown channel type.*:RuntimeWarning + ignore:.*np\.MachAr.*:DeprecationWarning + ignore:.*Passing unrecognized arguments to super.*:DeprecationWarning + ignore:.*numpy.ndarray size changed.*: + ignore:.*There is no current event loop.*:DeprecationWarning + # present in nilearn v 0.8.1, fixed in nilearn main + ignore:.*distutils Version classes are deprecated.*:DeprecationWarning + ignore:.*pandas\.Int64Index is deprecated.*:FutureWarning + always::ResourceWarning + # Jupyter notebook stuff + ignore:.*unclosed context <zmq\.asyncio\..*:ResourceWarning + """ + for warning_line in warning_lines.split('\n'): + warning_line = warning_line.strip() + if warning_line and not warning_line.startswith('#'): + config.addinivalue_line('filterwarnings', warning_line) + + +@pytest.fixture +def garbage_collect(): + """Garbage collect on teardown.""" + yield + gc.collect() + + +def _check_pyqtgraph(request): + # Check Qt version + if not _compare_version(_check_pyqt5_version(), '>=', '5.12'): + pytest.skip(f'PyQt5 has version {_check_pyqt5_version()} ' + f'but pyqtgraph needs >= 5.12!') + # Check mne-qt-browser + try: + import mne_qt_browser # noqa: F401 + # Check mne-qt-browser version + lower_2_0 = _compare_version(mne_qt_browser.__version__, '<', '0.2.0') + m_name = request.function.__module__ + f_name = request.function.__name__ + if lower_2_0 and m_name in pre_2_0_skip_modules: + pytest.skip(f'Test-Module "{m_name}" was skipped for' + f' mne-qt-browser < 0.2.0') + elif lower_2_0 and f_name in pre_2_0_skip_funcs: + pytest.skip(f'Test "{f_name}" was skipped for ' + f'mne-qt-browser < 0.2.0') + except Exception: + pytest.skip('Requires mne_qt_browser') + + +@pytest.mark.pgtest +@pytest.fixture +def pg_backend(request, garbage_collect): + """Use for pyqtgraph-specific test-functions.""" + _check_pyqtgraph(request) + with use_browser_backend('qt') as backend: + backend._close_all() + yield backend + backend._close_all() + # This shouldn't be necessary, but let's make sure nothing is stale + import mne_qt_browser + mne_qt_browser._browser_instances.clear() + + +@pytest.fixture(params=[ + 'matplotlib', + pytest.param('qt', marks=pytest.mark.pgtest), +]) +def browser_backend(request, garbage_collect, monkeypatch): + """Parametrizes the name of the browser backend.""" + backend_name = request.param + if backend_name == 'qt': + _check_pyqtgraph(request) + with use_browser_backend(backend_name) as backend: + backend._close_all() + monkeypatch.setenv('MNE_BROWSE_RAW_SIZE', '10,10') + yield backend + backend._close_all() + if backend_name == 'qt': + # This shouldn't be necessary, but let's make sure nothing is stale + import mne_qt_browser + mne_qt_browser._browser_instances.clear() + + +@pytest.fixture(params=["pyvistaqt"]) +def renderer(request, options_3d, garbage_collect): + """Yield the 3D backends.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + +@pytest.fixture(params=["pyvistaqt"]) +def renderer_pyvistaqt(request, options_3d, garbage_collect): + """Yield the PyVista backend.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + 
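+ # A parametrized fixture reruns every test that requests it once per
+ # entry in params. A hypothetical consumer of the fixture above:
+ #
+ # def test_render_smoke(renderer_pyvistaqt):
+ #     renderer_pyvistaqt.backend._close_all()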
+@pytest.fixture(params=["notebook"]) +def renderer_notebook(request, options_3d): + """Yield the 3D notebook renderer.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + +@pytest.fixture(scope="module", params=["pyvistaqt"]) +def renderer_interactive_pyvistaqt(request, options_3d): + """Yield the interactive PyVista backend.""" + with _use_backend(request.param, interactive=True) as renderer: + yield renderer + + +@pytest.fixture(scope="module", params=["pyvistaqt"]) +def renderer_interactive(request, options_3d): + """Yield the interactive 3D backends.""" + with _use_backend(request.param, interactive=True) as renderer: + yield renderer + + +@contextmanager +def _use_backend(backend_name, interactive): + from mne.viz.backends.renderer import _use_test_3d_backend + _check_skip_backend(backend_name) + with _use_test_3d_backend(backend_name, interactive=interactive): + from mne.viz.backends import renderer + try: + yield renderer + finally: + renderer.backend._close_all() + + +def _check_skip_backend(name): + from mne.viz.backends.tests._utils import (has_pyvista, + has_pyqt5, has_imageio_ffmpeg, + has_pyvistaqt) + if name in ('pyvistaqt', 'notebook'): + if not has_pyvista(): + pytest.skip("Test skipped, requires pyvista.") + if not has_imageio_ffmpeg(): + pytest.skip("Test skipped, requires imageio-ffmpeg") + if name == 'pyvistaqt' and not has_pyqt5(): + pytest.skip("Test skipped, requires PyQt5.") + if name == 'pyvistaqt' and not has_pyvistaqt(): + pytest.skip("Test skipped, requires pyvistaqt") + + +@pytest.fixture(scope='session') +def pixel_ratio(): + """Get the pixel ratio.""" + from mne.viz.backends.tests._utils import has_pyvista, has_pyqt5 + if not has_pyvista() or not has_pyqt5(): + return 1. + from PyQt5.QtWidgets import QApplication, QMainWindow + _ = QApplication.instance() or QApplication([]) + window = QMainWindow() + ratio = float(window.devicePixelRatio()) + window.close() + return ratio + + +@pytest.fixture(scope='function', params=[testing._pytest_param()]) +def subjects_dir_tmp(tmp_path): + """Copy MNE-testing-data subjects_dir to a temp dir for manipulation.""" + for key in ('sample', 'fsaverage'): + shutil.copytree(op.join(subjects_dir, key), str(tmp_path / key)) + return str(tmp_path) + + +# Scoping these as session will make things faster, but need to make sure +# not to modify them in-place in the tests, so keep them private +@pytest.fixture(scope='session', params=[testing._pytest_param()]) +def _evoked_cov_sphere(_evoked): + """Compute a small evoked/cov/sphere combo for use with forwards.""" + evoked = _evoked.copy().pick_types(meg=True) + evoked.pick_channels(evoked.ch_names[::4]) + assert len(evoked.ch_names) == 77 + cov = mne.read_cov(fname_cov) + sphere = mne.make_sphere_model('auto', 'auto', evoked.info) + return evoked, cov, sphere + + +@pytest.fixture(scope='session') +def _fwd_surf(_evoked_cov_sphere): + """Compute a forward for a surface source space.""" + evoked, cov, sphere = _evoked_cov_sphere + src_surf = mne.read_source_spaces(fname_src) + return mne.make_forward_solution( + evoked.info, fname_trans, src_surf, sphere, mindist=5.0) + + +@pytest.fixture(scope='session') +def _fwd_subvolume(_evoked_cov_sphere): + """Compute a forward for a surface source space.""" + pytest.importorskip('nibabel') + evoked, cov, sphere = _evoked_cov_sphere + volume_labels = ['Left-Cerebellum-Cortex', 'right-Cerebellum-Cortex'] + with pytest.raises(ValueError, + match=r"Did you mean one of \['Right-Cere"): + 
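+ # The first call deliberately passes the mis-cased label
+ # 'right-Cerebellum-Cortex' to trigger the did-you-mean ValueError;
+ # the label is re-capitalized below and the call is repeated for real.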
mne.setup_volume_source_space( + 'sample', pos=20., volume_label=volume_labels, + subjects_dir=subjects_dir) + volume_labels[1] = 'R' + volume_labels[1][1:] + src_vol = mne.setup_volume_source_space( + 'sample', pos=20., volume_label=volume_labels, + subjects_dir=subjects_dir, add_interpolator=False) + return mne.make_forward_solution( + evoked.info, fname_trans, src_vol, sphere, mindist=5.0) + + +@pytest.fixture(scope='session') +def _all_src_types_fwd(_fwd_surf, _fwd_subvolume): + """Create all three forward types (surf, vol, mixed).""" + fwds = dict(surface=_fwd_surf, volume=_fwd_subvolume) + with pytest.raises(RuntimeError, + match='Invalid source space with kinds'): + fwds['volume']['src'] + fwds['surface']['src'] + + # mixed (4) + fwd = fwds['surface'].copy() + f2 = fwds['volume'] + for keys, axis in [(('source_rr',), 0), + (('source_nn',), 0), + (('sol', 'data'), 1), + (('_orig_sol',), 1)]: + a, b = fwd, f2 + key = keys[0] + if len(keys) > 1: + a, b = a[key], b[key] + key = keys[1] + a[key] = np.concatenate([a[key], b[key]], axis=axis) + fwd['sol']['ncol'] = fwd['sol']['data'].shape[1] + fwd['nsource'] = fwd['sol']['ncol'] // 3 + fwd['src'] = fwd['src'] + f2['src'] + fwds['mixed'] = fwd + + return fwds + + +@pytest.fixture(scope='session') +def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd): + """Compute inverses for all source types.""" + evoked, cov, _ = _evoked_cov_sphere + invs = dict() + for kind, fwd in _all_src_types_fwd.items(): + assert fwd['src'].kind == kind + with pytest.warns(RuntimeWarning, match='has been reduced'): + invs[kind] = mne.minimum_norm.make_inverse_operator( + evoked.info, fwd, cov) + return invs, evoked + + +@pytest.fixture(scope='function') +def all_src_types_inv_evoked(_all_src_types_inv_evoked): + """All source types of inverses, allowing for possible modification.""" + invs, evoked = _all_src_types_inv_evoked + invs = {key: val.copy() for key, val in invs.items()} + evoked = evoked.copy() + return invs, evoked + + +@pytest.fixture(scope='function') +def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd): + """Compute inverses for all source types.""" + evoked, cov, _ = _evoked_cov_sphere + return _all_src_types_fwd['mixed'].copy(), cov.copy(), evoked.copy() + + +@pytest.fixture(scope='session') +@pytest.mark.slowtest +@pytest.mark.parametrize(params=[testing._pytest_param()]) +def src_volume_labels(): + """Create a 7mm source space with labels.""" + pytest.importorskip('nibabel') + volume_labels = mne.get_volume_labels_from_aseg(fname_aseg) + with pytest.warns(RuntimeWarning, match='Found no usable.*Left-vessel.*'): + src = mne.setup_volume_source_space( + 'sample', 7., mri='aseg.mgz', volume_label=volume_labels, + add_interpolator=False, bem=fname_bem, + subjects_dir=subjects_dir) + lut, _ = mne.read_freesurfer_lut() + assert len(volume_labels) == 46 + assert volume_labels[0] == 'Unknown' + assert lut['Unknown'] == 0 # it will be excluded during label gen + return src, tuple(volume_labels), lut + + +def _fail(*args, **kwargs): + __tracebackhide__ = True + raise AssertionError('Test should not download') + + +@pytest.fixture(scope='function') +def download_is_error(monkeypatch): + """Prevent downloading by raising an error when it's attempted.""" + import pooch + monkeypatch.setattr(pooch, 'retrieve', _fail) + + +# We can't use monkeypatch because its scope (function-level) conflicts with +# the requests fixture (module-level), so we live with a module-scoped version +# that uses mock +@pytest.fixture(scope='module') +def 
options_3d(): + """Disable advanced 3d rendering.""" + with mock.patch.dict( + os.environ, { + "MNE_3D_OPTION_ANTIALIAS": "false", + "MNE_3D_OPTION_DEPTH_PEELING": "false", + "MNE_3D_OPTION_SMOOTH_SHADING": "false", + } + ): + yield + + +@pytest.fixture(scope='session') +def protect_config(): + """Protect ~/.mne.""" + temp = _TempDir() + with mock.patch.dict(os.environ, {"_MNE_FAKE_HOME_DIR": temp}): + yield + + +@pytest.fixture() +def brain_gc(request): + """Ensure that brain can be properly garbage collected.""" + keys = ( + 'renderer_interactive', + 'renderer_interactive_pyvistaqt', + 'renderer', + 'renderer_pyvistaqt', + 'renderer_notebook', + ) + assert set(request.fixturenames) & set(keys) != set() + for key in keys: + if key in request.fixturenames: + is_pv = \ + request.getfixturevalue(key)._get_3d_backend() == 'pyvistaqt' + close_func = request.getfixturevalue(key).backend._close_all + break + if not is_pv: + yield + return + from mne.viz import Brain + ignore = set(id(o) for o in gc.get_objects()) + yield + close_func() + # no need to warn if the test itself failed, pytest-harvest helps us here + try: + outcome = request.node.harvest_rep_call + except Exception: + outcome = 'failed' + if outcome != 'passed': + return + _assert_no_instances(Brain, 'after') + # Check VTK + objs = gc.get_objects() + bad = list() + for o in objs: + try: + name = o.__class__.__name__ + except Exception: # old Python, probably + pass + else: + if name.startswith('vtk') and id(o) not in ignore: + bad.append(name) + del o + del objs, ignore, Brain + assert len(bad) == 0, 'VTK objects linger:\n' + '\n'.join(bad) + + +def pytest_sessionfinish(session, exitstatus): + """Handle the end of the session.""" + n = session.config.option.durations + if n is None: + return + print('\n') + try: + import pytest_harvest + except ImportError: + print('Module-level timings require pytest-harvest') + return + from py.io import TerminalWriter + # get the number to print + res = pytest_harvest.get_session_synthesis_dct(session) + files = dict() + for key, val in res.items(): + parts = Path(key.split(':')[0]).parts + # split mne/tests/test_whatever.py into separate categories since these + # are essentially submodule-level tests. 
Keeping just [:3] works,
+        # except for mne/viz where we want level-4 granularity
+        split_submodules = (('mne', 'viz'), ('mne', 'preprocessing'))
+        parts = parts[:4 if parts[:2] in split_submodules else 3]
+        if not parts[-1].endswith('.py'):
+            parts = parts + ('',)
+        file_key = '/'.join(parts)
+        files[file_key] = files.get(file_key, 0) + val['pytest_duration_s']
+    files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
+    # print
+    files = files[:n]
+    if len(files):
+        writer = TerminalWriter()
+        writer.line()  # newline
+        writer.sep('=', f'slowest {n} test module{_pl(n)}')
+        names, timings = zip(*files)
+        timings = [f'{timing:0.2f}s total' for timing in timings]
+        rjust = max(len(timing) for timing in timings)
+        timings = [timing.rjust(rjust) for timing in timings]
+        for name, timing in zip(names, timings):
+            writer.line(f'{timing.ljust(15)}{name}')
+
+
+@pytest.fixture(scope="function", params=('Numba', 'NumPy'))
+def numba_conditional(monkeypatch, request):
+    """Test both code paths on machines that have Numba."""
+    assert request.param in ('Numba', 'NumPy')
+    if request.param == 'NumPy' and has_numba:
+        monkeypatch.setattr(
+            cluster_level, '_get_buddies', cluster_level._get_buddies_fallback)
+        monkeypatch.setattr(
+            cluster_level, '_get_selves', cluster_level._get_selves_fallback)
+        monkeypatch.setattr(
+            cluster_level, '_where_first', cluster_level._where_first_fallback)
+        monkeypatch.setattr(
+            numerics, '_arange_div', numerics._arange_div_fallback)
+    if request.param == 'Numba' and not has_numba:
+        pytest.skip('Numba not installed')
+    yield request.param
+
+
+# Create one nbclient and reuse it
+@pytest.fixture(scope='session')
+def _nbclient():
+    try:
+        import nbformat
+        from jupyter_client import AsyncKernelManager
+        from nbclient import NotebookClient
+        from ipywidgets import Button  # noqa
+        import ipyvtklink  # noqa
+    except Exception as exc:
+        return pytest.skip(f'Skipping Notebook test: {exc}')
+    km = AsyncKernelManager(config=None)
+    nb = nbformat.reads("""
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata":{},
+   "outputs": [],
+   "source":[]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version":3},
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}""", as_version=4)
+    client = NotebookClient(nb, km=km)
+    yield client
+    client._cleanup_kernel()
+
+
+@pytest.fixture(scope='function')
+def nbexec(_nbclient):
+    """Execute Python code in a notebook."""
+    # Adapted/simplified from nbclient/client.py (BSD-3-Clause)
+    _nbclient._cleanup_kernel()
+
+    def execute(code, reset=False):
+        _nbclient.reset_execution_trackers()
+        with _nbclient.setup_kernel():
+            assert _nbclient.kc is not None
+            cell = Bunch(cell_type='code', metadata={}, source=dedent(code))
+            _nbclient.execute_cell(cell, 0, execution_count=0)
+            _nbclient.set_widgets_metadata()
+
+    yield execute
+
+
+def pytest_runtest_call(item):
+    """Run notebook code written in Python."""
+    if 'nbexec' in getattr(item, 'fixturenames', ()):
+        nbexec = item.funcargs['nbexec']
+        code = inspect.getsource(getattr(item.module, item.name.split('[')[0]))
+        code = code.splitlines()
+        ci = 0
+        for ci, c in enumerate(code):
+            if c.startswith(' '):  # actual content
+                break
+        code = '\n'.join(code[ci:])
+
+        def run(nbexec=nbexec, code=code):
+            nbexec(code)
+
+        item.runtest = run
+    return
diff --git
a/python/libs/mne/coreg.py b/python/libs/mne/coreg.py new file mode 100644 index 0000000..e06a4a2 --- /dev/null +++ b/python/libs/mne/coreg.py @@ -0,0 +1,2075 @@ +# -*- coding: utf-8 -*- +"""Coregistration between different coordinate frames.""" + +# Authors: Christian Brodbeck +# +# License: BSD-3-Clause + +import configparser +import fnmatch +from glob import glob, iglob +import os +import os.path as op +import stat +import sys +import re +import shutil +from functools import reduce + +import numpy as np + +from .io import read_fiducials, write_fiducials, read_info +from .io.constants import FIFF +from .io.meas_info import Info +from .io._digitization import _get_data_as_dict_from_dig +# keep get_mni_fiducials for backward compat (no burden to keep in this +# namespace, too) +from ._freesurfer import (_read_mri_info, get_mni_fiducials, # noqa: F401 + estimate_head_mri_t) # noqa: F401 +from .label import read_label, Label +from .source_space import (add_source_space_distances, read_source_spaces, # noqa: E501,F401 + write_source_spaces) +from .surface import (read_surface, write_surface, _normalize_vectors, + complete_surface_info, decimate_surface, + _DistanceQuery) +from .bem import read_bem_surfaces, write_bem_surfaces +from .transforms import (rotation, rotation3d, scaling, translation, Transform, + _read_fs_xfm, _write_fs_xfm, invert_transform, + combine_transforms, _quat_to_euler, + _fit_matched_points, apply_trans, + rot_to_quat, _angle_between_quats) +from .channels import make_dig_montage +from .utils import (get_config, get_subjects_dir, logger, pformat, verbose, + warn, has_nibabel, fill_doc, _validate_type, + _check_subject, _check_option) +from .viz._3d import _fiducial_coords + +# some path templates +trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif') +subject_dirname = os.path.join('{subjects_dir}', '{subject}') +bem_dirname = os.path.join(subject_dirname, 'bem') +mri_dirname = os.path.join(subject_dirname, 'mri') +mri_transforms_dirname = os.path.join(subject_dirname, 'mri', 'transforms') +surf_dirname = os.path.join(subject_dirname, 'surf') +bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif") +head_bem_fname = pformat(bem_fname, name='head') +fid_fname = pformat(bem_fname, name='fiducials') +fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif") +src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif') +_head_fnames = (os.path.join(bem_dirname, 'outer_skin.surf'), + head_bem_fname) +_high_res_head_fnames = (os.path.join(bem_dirname, '{subject}-head-dense.fif'), + os.path.join(surf_dirname, 'lh.seghead'), + os.path.join(surf_dirname, 'lh.smseghead')) + + +def _map_fid_name_to_idx(name: str) -> int: + """Map a fiducial name to its index in the DigMontage.""" + name = name.lower() + + if name == 'lpa': + return 0 + elif name == 'nasion': + return 1 + else: + assert name == 'rpa' + return 2 + + +def _make_writable(fname): + """Make a file writable.""" + os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write + + +def _make_writable_recursive(path): + """Recursively set writable.""" + if sys.platform.startswith('win'): + return # can't safely set perms + for root, dirs, files in os.walk(path, topdown=False): + for f in dirs + files: + _make_writable(os.path.join(root, f)) + + +def _find_head_bem(subject, subjects_dir, high_res=False): + """Find a high resolution head.""" + # XXX this should be refactored with mne.surface.get_head_surf ... 
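+    # The candidate lists defined above encode the search order: for
+    # high_res, the dense head ({subject}-head-dense.fif) is tried before
+    # the FreeSurfer seghead surfaces; otherwise bem/outer_skin.surf is
+    # tried before {subject}-head.fif. The first existing path is returned.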
+    fnames = _high_res_head_fnames if high_res else _head_fnames
+    for fname in fnames:
+        path = fname.format(subjects_dir=subjects_dir, subject=subject)
+        if os.path.exists(path):
+            return path
+
+
+@fill_doc
+def coregister_fiducials(info, fiducials, tol=0.01):
+    """Create a head-MRI transform by aligning 3 fiducial points.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    fiducials : str | list of dict
+        Fiducials in MRI coordinate space (either path to a ``*-fiducials.fif``
+        file or list of fiducials as returned by :func:`read_fiducials`).
+    tol : float
+        Error tolerance for the fiducial fit, passed to
+        :func:`fit_matched_points`.
+
+    Returns
+    -------
+    trans : Transform
+        The head-MRI transform.
+
+    .. note:: The :class:`mne.Info` object fiducials must be in the
+        head coordinate space.
+    """
+    if isinstance(info, str):
+        info = read_info(info)
+    if isinstance(fiducials, str):
+        fiducials, coord_frame_to = read_fiducials(fiducials)
+    else:
+        coord_frame_to = FIFF.FIFFV_COORD_MRI
+    frames_from = {d['coord_frame'] for d in info['dig']}
+    if len(frames_from) > 1:
+        raise ValueError("info contains fiducials from different coordinate "
+                         "frames")
+    else:
+        coord_frame_from = frames_from.pop()
+    coords_from = _fiducial_coords(info['dig'])
+    coords_to = _fiducial_coords(fiducials, coord_frame_to)
+    trans = fit_matched_points(coords_from, coords_to, tol=tol)
+    return Transform(coord_frame_from, coord_frame_to, trans)
+
+
+@verbose
+def create_default_subject(fs_home=None, update=False, subjects_dir=None,
+                           verbose=None):
+    """Create an average brain subject for subjects without structural MRI.
+
+    Create a copy of fsaverage from the Freesurfer directory in subjects_dir
+    and add auxiliary files from the mne package.
+
+    Parameters
+    ----------
+    fs_home : None | str
+        The freesurfer home directory (only needed if FREESURFER_HOME is not
+        specified as environment variable).
+    update : bool
+        In cases where a copy of the fsaverage brain already exists in the
+        subjects_dir, this option allows to only copy files that don't already
+        exist in the fsaverage directory.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']) as destination for the new subject.
+    %(verbose)s
+
+    Notes
+    -----
+    When no structural MRI is available for a subject, an average brain can be
+    substituted. Freesurfer comes with such an average brain model, and MNE
+    comes with some auxiliary files which make coregistration easier.
+    :py:func:`create_default_subject` copies the relevant
+    files from Freesurfer into the current subjects_dir, and also adds the
+    auxiliary files provided by MNE.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if fs_home is None:
+        fs_home = get_config('FREESURFER_HOME', fs_home)
+        if fs_home is None:
+            raise ValueError(
+                "FREESURFER_HOME environment variable not found. Please "
+                "specify the fs_home parameter in your call to "
+                "create_default_subject().")
+
+    # make sure freesurfer files exist
+    fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
+    if not os.path.exists(fs_src):
+        raise IOError('fsaverage not found at %r. Is fs_home specified '
+                      'correctly?' % fs_src)
+    for name in ('label', 'mri', 'surf'):
+        dirname = os.path.join(fs_src, name)
+        if not os.path.isdir(dirname):
+            raise IOError("Freesurfer fsaverage seems to be incomplete: No "
+                          "directory named %s found in %s" % (name, fs_src))
+
+    # make sure destination does not already exist
+    dest = os.path.join(subjects_dir, 'fsaverage')
+    if dest == fs_src:
+        raise IOError(
+            "Your subjects_dir points to the freesurfer subjects_dir (%r). "
" + "The default subject can not be created in the freesurfer " + "installation directory; please specify a different " + "subjects_dir." % subjects_dir) + elif (not update) and os.path.exists(dest): + raise IOError( + "Can not create fsaverage because %r already exists in " + "subjects_dir %r. Delete or rename the existing fsaverage " + "subject folder." % ('fsaverage', subjects_dir)) + + # copy fsaverage from freesurfer + logger.info("Copying fsaverage subject from freesurfer directory...") + if (not update) or not os.path.exists(dest): + shutil.copytree(fs_src, dest) + _make_writable_recursive(dest) + + # copy files from mne + source_fname = os.path.join(os.path.dirname(__file__), 'data', 'fsaverage', + 'fsaverage-%s.fif') + dest_bem = os.path.join(dest, 'bem') + if not os.path.exists(dest_bem): + os.mkdir(dest_bem) + logger.info("Copying auxiliary fsaverage files from mne...") + dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif') + _make_writable_recursive(dest_bem) + for name in ('fiducials', 'head', 'inner_skull-bem', 'trans'): + if not os.path.exists(dest_fname % name): + shutil.copy(source_fname % name, dest_bem) + + +def _decimate_points(pts, res=10): + """Decimate the number of points using a voxel grid. + + Create a voxel grid with a specified resolution and retain at most one + point per voxel. For each voxel, the point closest to its center is + retained. + + Parameters + ---------- + pts : array, shape (n_points, 3) + The points making up the head shape. + res : scalar + The resolution of the voxel space (side length of each voxel). + + Returns + ------- + pts : array, shape = (n_points, 3) + The decimated points. + """ + from scipy.spatial.distance import cdist + pts = np.asarray(pts) + + # find the bin edges for the voxel space + xmin, ymin, zmin = pts.min(0) - res / 2. + xmax, ymax, zmax = pts.max(0) + res + xax = np.arange(xmin, xmax, res) + yax = np.arange(ymin, ymax, res) + zax = np.arange(zmin, zmax, res) + + # find voxels containing one or more point + H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False) + X, Y, Z = pts.T + xbins, ybins, zbins = np.nonzero(H) + x = xax[xbins] + y = yax[ybins] + z = zax[zbins] + mids = np.c_[x, y, z] + res / 2. + + # each point belongs to at most one voxel center, so figure those out + # (cKDTree faster than BallTree for these small problems) + tree = _DistanceQuery(mids, method='cKDTree') + _, mid_idx = tree.query(pts) + + # then figure out which to actually use based on proximity + # (take advantage of sorting the mid_idx to get our mapping of + # pts to nearest voxel midpoint) + sort_idx = np.argsort(mid_idx) + bounds = np.cumsum( + np.concatenate([[0], np.bincount(mid_idx, minlength=len(mids))])) + assert len(bounds) == len(mids) + 1 + out = list() + for mi, mid in enumerate(mids): + # Now we do this: + # + # use_pts = pts[mid_idx == mi] + # + # But it's faster for many points than making a big boolean indexer + # over and over (esp. since each point can only belong to a single + # voxel). + use_pts = pts[sort_idx[bounds[mi]:bounds[mi + 1]]] + if not len(use_pts): + out.append([np.inf] * 3) + else: + out.append( + use_pts[np.argmin(cdist(use_pts, mid[np.newaxis])[:, 0])]) + out = np.array(out, float).reshape(-1, 3) + out = out[np.abs(out - mids).max(axis=1) < res / 2.] + # """ + + return out + + +def _trans_from_params(param_info, params): + """Convert transformation parameters into a transformation matrix. 
+ + Parameters + ---------- + param_info : tuple, len = 3 + Tuple describing the parameters in x (do_translate, do_rotate, + do_scale). + params : tuple + The transformation parameters. + + Returns + ------- + trans : array, shape = (4, 4) + Transformation matrix. + """ + do_rotate, do_translate, do_scale = param_info + i = 0 + trans = [] + + if do_rotate: + x, y, z = params[:3] + trans.append(rotation(x, y, z)) + i += 3 + + if do_translate: + x, y, z = params[i:i + 3] + trans.insert(0, translation(x, y, z)) + i += 3 + + if do_scale == 1: + s = params[i] + trans.append(scaling(s, s, s)) + elif do_scale == 3: + x, y, z = params[i:i + 3] + trans.append(scaling(x, y, z)) + + trans = reduce(np.dot, trans) + return trans + + +_ALLOW_ANALITICAL = True + + +# XXX this function should be moved out of coreg as used elsewhere +def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True, + scale=False, tol=None, x0=None, out='trans', + weights=None): + """Find a transform between matched sets of points. + + This minimizes the squared distance between two matching sets of points. + + Uses :func:`scipy.optimize.leastsq` to find a transformation involving + a combination of rotation, translation, and scaling (in that order). + + Parameters + ---------- + src_pts : array, shape = (n, 3) + Points to which the transform should be applied. + tgt_pts : array, shape = (n, 3) + Points to which src_pts should be fitted. Each point in tgt_pts should + correspond to the point in src_pts with the same index. + rotate : bool + Allow rotation of the ``src_pts``. + translate : bool + Allow translation of the ``src_pts``. + scale : bool + Number of scaling parameters. With False, points are not scaled. With + True, points are scaled by the same factor along all axes. + tol : scalar | None + The error tolerance. If the distance between any of the matched points + exceeds this value in the solution, a RuntimeError is raised. With + None, no error check is performed. + x0 : None | tuple + Initial values for the fit parameters. + out : 'params' | 'trans' + In what format to return the estimate: 'params' returns a tuple with + the fit parameters; 'trans' returns a transformation matrix of shape + (4, 4). + + Returns + ------- + trans : array, shape (4, 4) + Transformation that, if applied to src_pts, minimizes the squared + distance to tgt_pts. Only returned if out=='trans'. + params : array, shape (n_params, ) + A single tuple containing the rotation, translation, and scaling + parameters in that order (as applicable). 
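+
+    Examples
+    --------
+    A minimal illustrative sketch (synthetic points, pure translation;
+    values chosen for demonstration only):
+
+    >>> import numpy as np
+    >>> src = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
+    >>> tgt = src + np.array([0.01, 0.02, 0.03])
+    >>> trans = fit_matched_points(src, tgt)  # doctest: +SKIP
+    >>> trans[:3, 3]  # recovered translation  # doctest: +SKIP
+    array([0.01, 0.02, 0.03])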
+ """ + src_pts = np.atleast_2d(src_pts) + tgt_pts = np.atleast_2d(tgt_pts) + if src_pts.shape != tgt_pts.shape: + raise ValueError("src_pts and tgt_pts must have same shape (got " + "{}, {})".format(src_pts.shape, tgt_pts.shape)) + if weights is not None: + weights = np.asarray(weights, src_pts.dtype) + if weights.ndim != 1 or weights.size not in (src_pts.shape[0], 1): + raise ValueError("weights (shape=%s) must be None or have shape " + "(%s,)" % (weights.shape, src_pts.shape[0],)) + weights = weights[:, np.newaxis] + + param_info = (bool(rotate), bool(translate), int(scale)) + del rotate, translate, scale + + # very common use case, rigid transformation (maybe with one scale factor, + # with or without weighted errors) + if param_info in ((True, True, 0), (True, True, 1)) and _ALLOW_ANALITICAL: + src_pts = np.asarray(src_pts, float) + tgt_pts = np.asarray(tgt_pts, float) + if weights is not None: + weights = np.asarray(weights, float) + x, s = _fit_matched_points( + src_pts, tgt_pts, weights, bool(param_info[2])) + x[:3] = _quat_to_euler(x[:3]) + x = np.concatenate((x, [s])) if param_info[2] else x + else: + x = _generic_fit(src_pts, tgt_pts, param_info, weights, x0) + + # re-create the final transformation matrix + if (tol is not None) or (out == 'trans'): + trans = _trans_from_params(param_info, x) + + # assess the error of the solution + if tol is not None: + src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1)))) + est_pts = np.dot(src_pts, trans.T)[:, :3] + err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1)) + if np.any(err > tol): + raise RuntimeError("Error exceeds tolerance. Error = %r" % err) + + if out == 'params': + return x + elif out == 'trans': + return trans + else: + raise ValueError("Invalid out parameter: %r. Needs to be 'params' or " + "'trans'." 
% out)
+
+
+def _generic_fit(src_pts, tgt_pts, param_info, weights, x0):
+    from scipy.optimize import leastsq
+    if param_info[1]:  # translate
+        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
+
+    if param_info == (True, False, 0):
+        def error(x):
+            rx, ry, rz = x
+            trans = rotation3d(rx, ry, rz)
+            est = np.dot(src_pts, trans.T)
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+        if x0 is None:
+            x0 = (0, 0, 0)
+    elif param_info == (True, True, 0):
+        def error(x):
+            rx, ry, rz, tx, ty, tz = x
+            trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0)
+    elif param_info == (True, True, 1):
+        def error(x):
+            rx, ry, rz, tx, ty, tz, s = x
+            trans = reduce(np.dot, (translation(tx, ty, tz),
+                                    rotation(rx, ry, rz),
+                                    scaling(s, s, s)))
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0, 1)
+    elif param_info == (True, True, 3):
+        def error(x):
+            rx, ry, rz, tx, ty, tz, sx, sy, sz = x
+            trans = reduce(np.dot, (translation(tx, ty, tz),
+                                    rotation(rx, ry, rz),
+                                    scaling(sx, sy, sz)))
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0, 1, 1, 1)
+    else:
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate=%r, translate=%r, scale=%r" % param_info)
+
+    x, _, _, _, _ = leastsq(error, x0, full_output=True)
+    return x
+
+
+def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
+    """Find paths to label files in a subject's label directory.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    pattern : str | None
+        Pattern for finding the labels relative to the label directory in the
+        MRI subject directory (e.g., "aparc/*.label" will find all labels
+        in the "subject/label/aparc" directory). With None, find all labels.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']).
+
+    Returns
+    -------
+    paths : list
+        List of paths relative to the subject's label directory.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = os.path.join(subjects_dir, subject)
+    lbl_dir = os.path.join(subject_dir, 'label')
+
+    if pattern is None:
+        paths = []
+        for dirpath, _, filenames in os.walk(lbl_dir):
+            rel_dir = os.path.relpath(dirpath, lbl_dir)
+            for filename in fnmatch.filter(filenames, '*.label'):
+                path = os.path.join(rel_dir, filename)
+                paths.append(path)
+    else:
+        paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
+
+    return paths
+
+
+def _find_mri_paths(subject, skip_fiducials, subjects_dir):
+    """Find all files of an mri relevant for source transformation.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    skip_fiducials : bool
+        Do not scale the MRI fiducials. If False, an IOError will be raised
+        if no fiducials file can be found.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']).
+
+    Returns
+    -------
+    paths : dict
+        Dictionary whose keys are relevant file type names (str), and whose
+        values are lists of paths.
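+        The keys filled in below are 'dirs', 'surf', 'bem', 'fid',
+        'duplicate', 'transforms', 'src' and 'mri'.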
+ """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + paths = {} + + # directories to create + paths['dirs'] = [bem_dirname, surf_dirname] + + # surf/ files + paths['surf'] = [] + surf_fname = os.path.join(surf_dirname, '{name}') + surf_names = ('inflated', 'white', 'orig', 'orig_avg', 'inflated_avg', + 'inflated_pre', 'pial', 'pial_avg', 'smoothwm', 'white_avg', + 'seghead', 'smseghead') + if os.getenv('_MNE_FEW_SURFACES', '') == 'true': # for testing + surf_names = surf_names[:4] + for surf_name in surf_names: + for hemi in ('lh.', 'rh.'): + name = hemi + surf_name + path = surf_fname.format(subjects_dir=subjects_dir, + subject=subject, name=name) + if os.path.exists(path): + paths['surf'].append(pformat(surf_fname, name=name)) + surf_fname = os.path.join(bem_dirname, '{name}') + surf_names = ('inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf') + for surf_name in surf_names: + path = surf_fname.format(subjects_dir=subjects_dir, + subject=subject, name=surf_name) + if os.path.exists(path): + paths['surf'].append(pformat(surf_fname, name=surf_name)) + del surf_names, surf_name, path, hemi + + # BEM files + paths['bem'] = bem = [] + path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject) + if os.path.exists(path): + bem.append('head') + bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir, + subject=subject, name='*-bem') + re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject, + name='(.+)').replace('\\', '\\\\') + for path in iglob(bem_pattern): + match = re.match(re_pattern, path) + name = match.group(1) + bem.append(name) + del bem, path, bem_pattern, re_pattern + + # fiducials + if skip_fiducials: + paths['fid'] = [] + else: + paths['fid'] = _find_fiducials_files(subject, subjects_dir) + # check that we found at least one + if len(paths['fid']) == 0: + raise IOError("No fiducials file found for %s. The fiducials " + "file should be named " + "{subject}/bem/{subject}-fiducials.fif. In " + "order to scale an MRI without fiducials set " + "skip_fiducials=True." 
% subject) + + # duplicate files (curvature and some surfaces) + paths['duplicate'] = [] + path = os.path.join(surf_dirname, '{name}') + surf_fname = os.path.join(surf_dirname, '{name}') + surf_dup_names = ('curv', 'sphere', 'sphere.reg', 'sphere.reg.avg') + for surf_dup_name in surf_dup_names: + for hemi in ('lh.', 'rh.'): + name = hemi + surf_dup_name + path = surf_fname.format(subjects_dir=subjects_dir, + subject=subject, name=name) + if os.path.exists(path): + paths['duplicate'].append(pformat(surf_fname, name=name)) + del surf_dup_name, name, path, hemi + + # transform files (talairach) + paths['transforms'] = [] + transform_fname = os.path.join(mri_transforms_dirname, 'talairach.xfm') + path = transform_fname.format(subjects_dir=subjects_dir, subject=subject) + if os.path.exists(path): + paths['transforms'].append(transform_fname) + del transform_fname, path + + # find source space files + paths['src'] = src = [] + bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject) + fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif') + prefix = subject + '-' + for fname in fnames: + if fname.startswith(prefix): + fname = "{subject}-%s" % fname[len(prefix):] + path = os.path.join(bem_dirname, fname) + src.append(path) + + # find MRIs + mri_dir = mri_dirname.format(subjects_dir=subjects_dir, subject=subject) + fnames = fnmatch.filter(os.listdir(mri_dir), '*.mgz') + paths['mri'] = [os.path.join(mri_dir, f) for f in fnames] + + return paths + + +def _find_fiducials_files(subject, subjects_dir): + """Find fiducial files.""" + fid = [] + # standard fiducials + if os.path.exists(fid_fname.format(subjects_dir=subjects_dir, + subject=subject)): + fid.append(fid_fname) + # fiducials with subject name + pattern = pformat(fid_fname_general, subjects_dir=subjects_dir, + subject=subject, head='*') + regex = pformat(fid_fname_general, subjects_dir=subjects_dir, + subject=subject, head='(.+)').replace('\\', '\\\\') + for path in iglob(pattern): + match = re.match(regex, path) + head = match.group(1).replace(subject, '{subject}') + fid.append(pformat(fid_fname_general, head=head)) + return fid + + +def _is_mri_subject(subject, subjects_dir=None): + """Check whether a directory in subjects_dir is an mri subject directory. + + Parameters + ---------- + subject : str + Name of the potential subject/directory. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + + Returns + ------- + is_mri_subject : bool + Whether ``subject`` is an mri subject. + """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + return bool(_find_head_bem(subject, subjects_dir) or + _find_head_bem(subject, subjects_dir, high_res=True)) + + +def _is_scaled_mri_subject(subject, subjects_dir=None): + """Check whether a directory in subjects_dir is a scaled mri subject. + + Parameters + ---------- + subject : str + Name of the potential subject/directory. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + + Returns + ------- + is_scaled_mri_subject : bool + Whether ``subject`` is a scaled mri subject. + """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + if not _is_mri_subject(subject, subjects_dir): + return False + fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg') + return os.path.exists(fname) + + +def _mri_subject_has_bem(subject, subjects_dir=None): + """Check whether an mri subject has a file matching the bem pattern. + + Parameters + ---------- + subject : str + Name of the subject. 
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    has_bem_file : bool
+        Whether ``subject`` has a bem file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
+                               name='*-bem')
+    fnames = glob(pattern)
+    return bool(len(fnames))
+
+
+def read_mri_cfg(subject, subjects_dir=None):
+    """Read information from the cfg file of a scaled MRI brain.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the scaled MRI subject.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    cfg : dict
+        Dictionary with entries from the MRI's cfg file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
+
+    if not os.path.exists(fname):
+        raise IOError("%r does not seem to be a scaled mri subject: %r does "
+                      "not exist." % (subject, fname))
+
+    logger.info("Reading MRI cfg file %s" % fname)
+    config = configparser.RawConfigParser()
+    config.read(fname)
+    n_params = config.getint("MRI Scaling", 'n_params')
+    if n_params == 1:
+        scale = config.getfloat("MRI Scaling", 'scale')
+    elif n_params == 3:
+        scale_str = config.get("MRI Scaling", 'scale')
+        scale = np.array([float(s) for s in scale_str.split()])
+    else:
+        raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
+
+    out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
+           'n_params': n_params, 'scale': scale}
+    return out
+
+
+def _write_mri_config(fname, subject_from, subject_to, scale):
+    """Write the cfg file describing a scaled MRI subject.
+
+    Parameters
+    ----------
+    fname : str
+        Target file.
+    subject_from : str
+        Name of the source MRI subject.
+    subject_to : str
+        Name of the scaled MRI subject.
+    scale : float | array_like, shape = (3,)
+        The scaling parameter.
+    """
+    scale = np.asarray(scale)
+    if np.isscalar(scale) or scale.shape == ():
+        n_params = 1
+    else:
+        n_params = 3
+
+    config = configparser.RawConfigParser()
+    config.add_section("MRI Scaling")
+    config.set("MRI Scaling", 'subject_from', subject_from)
+    config.set("MRI Scaling", 'subject_to', subject_to)
+    config.set("MRI Scaling", 'n_params', str(n_params))
+    if n_params == 1:
+        config.set("MRI Scaling", 'scale', str(scale))
+    else:
+        config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
+    config.set("MRI Scaling", 'version', '1')
+    with open(fname, 'w') as fid:
+        config.write(fid)
+
+
+def _scale_params(subject_to, subject_from, scale, subjects_dir):
+    """Assemble parameters for scaling.
+
+    Returns
+    -------
+    subjects_dir : str
+        Subjects directory.
+    subject_from : str
+        Name of the source subject.
+    scale : array
+        Scaling factor, either shape=() for uniform scaling or shape=(3,) for
+        non-uniform scaling.
+    uniform : bool
+        Whether scaling is uniform.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if (subject_from is None) != (scale is None):
+        raise TypeError("Need to provide either both subject_from and scale "
+                        "parameters, or neither.")
+
+    if subject_from is None:
+        cfg = read_mri_cfg(subject_to, subjects_dir)
+        subject_from = cfg['subject_from']
+        n_params = cfg['n_params']
+        assert n_params in (1, 3)
+        scale = cfg['scale']
+    scale = np.atleast_1d(scale)
+    if scale.ndim != 1 or scale.shape[0] not in (1, 3):
+        raise ValueError("Invalid shape for scale parameter. Need scalar "
+                         "or array of length 3. Got shape %s."
+ % (scale.shape,)) + n_params = len(scale) + return subjects_dir, subject_from, scale, n_params == 1 + + +@verbose +def scale_bem(subject_to, bem_name, subject_from=None, scale=None, + subjects_dir=None, *, on_defects='raise', verbose=None): + """Scale a bem file. + + Parameters + ---------- + subject_to : str + Name of the scaled MRI subject (the destination mri subject). + bem_name : str + Name of the bem file. For example, to scale + ``fsaverage-inner_skull-bem.fif``, the bem_name would be + "inner_skull-bem". + subject_from : None | str + The subject from which to read the source space. If None, subject_from + is read from subject_to's config file. + scale : None | float | array, shape = (3,) + Scaling factor. Has to be specified if subjects_from is specified, + otherwise it is read from subject_to's config file. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + %(on_defects)s + + .. versionadded:: 1.0 + %(verbose)s + """ + subjects_dir, subject_from, scale, uniform = \ + _scale_params(subject_to, subject_from, scale, subjects_dir) + + src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from, + name=bem_name) + dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to, + name=bem_name) + + if os.path.exists(dst): + raise IOError("File already exists: %s" % dst) + + surfs = read_bem_surfaces(src, on_defects=on_defects) + for surf in surfs: + surf['rr'] *= scale + if not uniform: + assert len(surf['nn']) > 0 + surf['nn'] /= scale + _normalize_vectors(surf['nn']) + write_bem_surfaces(dst, surfs) + + +def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None, + scale=None, subjects_dir=None): + r"""Scale labels to match a brain that was previously created by scaling. + + Parameters + ---------- + subject_to : str + Name of the scaled MRI subject (the destination brain). + pattern : str | None + Pattern for finding the labels relative to the label directory in the + MRI subject directory (e.g., "lh.BA3a.label" will scale + "fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels + in the "fsaverage/label/aparc" directory). With None, scale all labels. + overwrite : bool + Overwrite any label file that already exists for subject_to (otherwise + existing labels are skipped). + subject_from : None | str + Name of the original MRI subject (the brain that was scaled to create + subject_to). If None, the value is read from subject_to's cfg file. + scale : None | float | array_like, shape = (3,) + Scaling parameter. If None, the value is read from subject_to's cfg + file. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. 
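+
+    Examples
+    --------
+    An illustrative sketch (the scaled subject name is hypothetical and must
+    have been created with :func:`scale_mri` first):
+
+    >>> scale_labels('fsaverage_small', pattern='lh.BA3a.label',
+    ...              subject_from='fsaverage', scale=0.9)  # doctest: +SKIP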
+ """ + subjects_dir, subject_from, scale, _ = _scale_params( + subject_to, subject_from, scale, subjects_dir) + + # find labels + paths = _find_label_paths(subject_from, pattern, subjects_dir) + if not paths: + return + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + src_root = os.path.join(subjects_dir, subject_from, 'label') + dst_root = os.path.join(subjects_dir, subject_to, 'label') + + # scale labels + for fname in paths: + dst = os.path.join(dst_root, fname) + if not overwrite and os.path.exists(dst): + continue + + dirname = os.path.dirname(dst) + if not os.path.exists(dirname): + os.makedirs(dirname) + + src = os.path.join(src_root, fname) + l_old = read_label(src) + pos = l_old.pos * scale + l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi, + l_old.comment, subject=subject_to) + l_new.save(dst) + + +@verbose +def scale_mri(subject_from, subject_to, scale, overwrite=False, + subjects_dir=None, skip_fiducials=False, labels=True, + annot=False, *, on_defects='raise', verbose=None): + """Create a scaled copy of an MRI subject. + + Parameters + ---------- + subject_from : str + Name of the subject providing the MRI. + subject_to : str + New subject name for which to save the scaled MRI. + scale : float | array_like, shape = (3,) + The scaling factor (one or 3 parameters). + overwrite : bool + If an MRI already exists for subject_to, overwrite it. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + skip_fiducials : bool + Do not scale the MRI fiducials. If False (default), an IOError will be + raised if no fiducials file can be found. + labels : bool + Also scale all labels (default True). + annot : bool + Copy ``*.annot`` files to the new location (default False). + %(on_defects)s + + .. versionadded:: 1.0 + %(verbose)s + + See Also + -------- + scale_bem : Add a scaled BEM to a scaled MRI. + scale_labels : Add labels to a scaled MRI. + scale_source_space : Add a source space to a scaled MRI. + + Notes + ----- + This function will automatically call :func:`scale_bem`, + :func:`scale_labels`, and :func:`scale_source_space` based on expected + filename patterns in the subject directory. 
+ """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + paths = _find_mri_paths(subject_from, skip_fiducials, subjects_dir) + scale = np.atleast_1d(scale) + if scale.shape == (3,): + if np.isclose(scale[1], scale[0]) and np.isclose(scale[2], scale[0]): + scale = scale[0] # speed up scaling conditionals using a singleton + elif scale.shape != (1,): + raise ValueError('scale must have shape (3,) or (1,), got %s' + % (scale.shape,)) + + # make sure we have an empty target directory + dest = subject_dirname.format(subject=subject_to, + subjects_dir=subjects_dir) + if os.path.exists(dest): + if not overwrite: + raise IOError("Subject directory for %s already exists: %r" + % (subject_to, dest)) + shutil.rmtree(dest) + + logger.debug('create empty directory structure') + for dirname in paths['dirs']: + dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir) + os.makedirs(dir_) + + logger.debug('save MRI scaling parameters') + fname = os.path.join(dest, 'MRI scaling parameters.cfg') + _write_mri_config(fname, subject_from, subject_to, scale) + + logger.debug('surf files [in mm]') + for fname in paths['surf']: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + src = os.path.realpath(src) + dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) + pts, tri = read_surface(src) + write_surface(dest, pts * scale, tri) + + logger.debug('BEM files [in m]') + for bem_name in paths['bem']: + scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir, + on_defects=on_defects, verbose=False) + + logger.debug('fiducials [in m]') + for fname in paths['fid']: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + src = os.path.realpath(src) + pts, cframe = read_fiducials(src, verbose=False) + for pt in pts: + pt['r'] = pt['r'] * scale + dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) + write_fiducials(dest, pts, cframe, overwrite=True, verbose=False) + + logger.debug('MRIs [nibabel]') + os.mkdir(mri_dirname.format(subjects_dir=subjects_dir, + subject=subject_to)) + for fname in paths['mri']: + mri_name = os.path.basename(fname) + _scale_mri(subject_to, mri_name, subject_from, scale, subjects_dir) + + logger.debug('Transforms') + for mri_name in paths['mri']: + if mri_name.endswith('T1.mgz'): + os.mkdir(mri_transforms_dirname.format(subjects_dir=subjects_dir, + subject=subject_to)) + for fname in paths['transforms']: + xfm_name = os.path.basename(fname) + _scale_xfm(subject_to, xfm_name, mri_name, + subject_from, scale, subjects_dir) + break + + logger.debug('duplicate files') + for fname in paths['duplicate']: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) + shutil.copyfile(src, dest) + + logger.debug('source spaces') + for fname in paths['src']: + src_name = os.path.basename(fname) + scale_source_space(subject_to, src_name, subject_from, scale, + subjects_dir, verbose=False) + + logger.debug('labels [in m]') + os.mkdir(os.path.join(subjects_dir, subject_to, 'label')) + if labels: + scale_labels(subject_to, subject_from=subject_from, scale=scale, + subjects_dir=subjects_dir) + + logger.debug('copy *.annot files') + # they don't contain scale-dependent information + if annot: + src_pattern = os.path.join(subjects_dir, subject_from, 'label', + '*.annot') + dst_dir = os.path.join(subjects_dir, subject_to, 'label') + for src_file in iglob(src_pattern): + shutil.copy(src_file, dst_dir) + + +@verbose +def 
scale_source_space(subject_to, src_name, subject_from=None, scale=None, + subjects_dir=None, n_jobs=1, verbose=None): + """Scale a source space for an mri created with scale_mri(). + + Parameters + ---------- + subject_to : str + Name of the scaled MRI subject (the destination mri subject). + src_name : str + Source space name. Can be a spacing parameter (e.g., ``'7'``, + ``'ico4'``, ``'oct6'``) or a file name of a source space file relative + to the bem directory; if the file name contains the subject name, it + should be indicated as "{subject}" in ``src_name`` (e.g., + ``"{subject}-my_source_space-src.fif"``). + subject_from : None | str + The subject from which to read the source space. If None, subject_from + is read from subject_to's config file. + scale : None | float | array, shape = (3,) + Scaling factor. Has to be specified if subjects_from is specified, + otherwise it is read from subject_to's config file. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + n_jobs : int + Number of jobs to run in parallel if recomputing distances (only + applies if scale is an array of length 3, and will not use more cores + than there are source spaces). + %(verbose)s + + Notes + ----- + When scaling volume source spaces, the source (vertex) locations are + scaled, but the reference to the MRI volume is left unchanged. Transforms + are updated so that source estimates can be plotted on the original MRI + volume. + """ + subjects_dir, subject_from, scale, uniform = \ + _scale_params(subject_to, subject_from, scale, subjects_dir) + # if n_params==1 scale is a scalar; if n_params==3 scale is a (3,) array + + # find the source space file names + if src_name.isdigit(): + spacing = src_name # spacing in mm + src_pattern = src_fname + else: + match = re.match(r"(oct|ico|vol)-?(\d+)$", src_name) + if match: + spacing = '-'.join(match.groups()) + src_pattern = src_fname + else: + spacing = None + src_pattern = os.path.join(bem_dirname, src_name) + + src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from, + spacing=spacing) + dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to, + spacing=spacing) + + # read and scale the source space [in m] + sss = read_source_spaces(src) + logger.info("scaling source space %s: %s -> %s", spacing, subject_from, + subject_to) + logger.info("Scale factor: %s", scale) + add_dist = False + for ss in sss: + ss['subject_his_id'] = subject_to + ss['rr'] *= scale + # additional tags for volume source spaces + for key in ('vox_mri_t', 'src_mri_t'): + # maintain transform to original MRI volume ss['mri_volume_name'] + if key in ss: + ss[key]['trans'][:3] *= scale[:, np.newaxis] + # distances and patch info + if uniform: + if ss['dist'] is not None: + ss['dist'] *= scale[0] + # Sometimes this is read-only due to how it's read + ss['nearest_dist'] = ss['nearest_dist'] * scale + ss['dist_limit'] = ss['dist_limit'] * scale + else: # non-uniform scaling + ss['nn'] /= scale + _normalize_vectors(ss['nn']) + if ss['dist'] is not None: + add_dist = True + dist_limit = float(np.abs(sss[0]['dist_limit'])) + elif ss['nearest'] is not None: + add_dist = True + dist_limit = 0 + + if add_dist: + logger.info("Recomputing distances, this might take a while") + add_source_space_distances(sss, dist_limit, n_jobs) + + write_source_spaces(dst, sss) + + +def _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir): + """Scale an MRI by setting its affine.""" + subjects_dir, subject_from, scale, _ = _scale_params( + subject_to, 
subject_from, scale, subjects_dir)
+
+    if not has_nibabel():
+        warn('Skipping MRI scaling for %s, please install nibabel'
+             % subject_to)
+        return
+
+    import nibabel
+    fname_from = op.join(mri_dirname.format(
+        subjects_dir=subjects_dir, subject=subject_from), mri_fname)
+    fname_to = op.join(mri_dirname.format(
+        subjects_dir=subjects_dir, subject=subject_to), mri_fname)
+    img = nibabel.load(fname_from)
+    zooms = np.array(img.header.get_zooms())
+    zooms[[0, 2, 1]] *= scale
+    img.header.set_zooms(zooms)
+    # Hack to fix nibabel problems, see
+    # https://github.com/nipy/nibabel/issues/619
+    img._affine = img.header.get_affine()  # or could use None
+    nibabel.save(img, fname_to)
+
+
+def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale,
+               subjects_dir):
+    """Scale a transform."""
+    subjects_dir, subject_from, scale, _ = _scale_params(
+        subject_to, subject_from, scale, subjects_dir)
+
+    # The nibabel warning should already be there in MRI step, if applicable,
+    # as we only get here if T1.mgz is present (and thus a scaling was
+    # attempted) so we can silently return here.
+    if not has_nibabel():
+        return
+
+    fname_from = os.path.join(
+        mri_transforms_dirname.format(
+            subjects_dir=subjects_dir, subject=subject_from), xfm_fname)
+    fname_to = op.join(
+        mri_transforms_dirname.format(
+            subjects_dir=subjects_dir, subject=subject_to), xfm_fname)
+    assert op.isfile(fname_from), fname_from
+    assert op.isdir(op.dirname(fname_to)), op.dirname(fname_to)
+    # The "talairach.xfm" file stores the ras_mni transform.
+    #
+    # For "from" subj F, "to" subj T, F->T scaling S, some equivalent vertex
+    # positions F_x and T_x in MRI (Freesurfer RAS) coords, knowing that
+    # we have T_x = S @ F_x, we want to have the same MNI coords computed
+    # for these vertices:
+    #
+    #     T_mri_mni @ T_x = F_mri_mni @ F_x
+    #
+    # We need to find the correct T_ras_mni (talairach.xfm file) that yields
+    # this. So we derive (where ⁻¹ indicates inversion):
+    #
+    #     T_mri_mni @ S @ F_x = F_mri_mni @ F_x
+    #     T_mri_mni @ S = F_mri_mni
+    #     T_ras_mni @ T_mri_ras @ S = F_ras_mni @ F_mri_ras
+    #     T_ras_mni @ T_mri_ras = F_ras_mni @ F_mri_ras @ S⁻¹
+    #     T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
+    #
+
+    # prepare the scale (S) transform
+    scale = np.atleast_1d(scale)
+    scale = np.tile(scale, 3) if len(scale) == 1 else scale
+    S = Transform('mri', 'mri', scaling(*scale))  # F_mri->T_mri
+
+    #
+    # Get the necessary transforms of the "from" subject
+    #
+    xfm, kind = _read_fs_xfm(fname_from)
+    assert kind == 'MNI Transform File', kind
+    _, _, F_mri_ras, _, _ = _read_mri_info(mri_name, units='mm')
+    F_ras_mni = Transform('ras', 'mni_tal', xfm)
+    del xfm
+
+    #
+    # Get the necessary transforms of the "to" subject
+    #
+    mri_name = op.join(mri_dirname.format(
+        subjects_dir=subjects_dir, subject=subject_to), op.basename(mri_name))
+    _, _, T_mri_ras, _, _ = _read_mri_info(mri_name, units='mm')
+    T_ras_mri = invert_transform(T_mri_ras)
+    del mri_name, T_mri_ras
+
+    # Finally we construct as above:
+    #
+    #     T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
+    #
+    # By moving right to left through the equation.
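+    # Reading the nested combine_transforms() calls below inside-out follows
+    # that right-to-left order: T_ras_mri is combined with S⁻¹ first, then
+    # with F_mri_ras, and finally with F_ras_mni.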
+ T_ras_mni = \ + combine_transforms( + combine_transforms( + combine_transforms( + T_ras_mri, invert_transform(S), 'ras', 'mri'), + F_mri_ras, 'ras', 'ras'), + F_ras_mni, 'ras', 'mni_tal') + _write_fs_xfm(fname_to, T_ras_mni['trans'], kind) + + +def _read_surface(filename, *, on_defects): + bem = dict() + if filename is not None and op.exists(filename): + if filename.endswith('.fif'): + bem = read_bem_surfaces( + filename, on_defects=on_defects, verbose=False + )[0] + else: + try: + bem = read_surface(filename, return_dict=True)[2] + bem['rr'] *= 1e-3 + complete_surface_info(bem, copy=False) + except Exception: + raise ValueError( + "Error loading surface from %s (see " + "Terminal for details)." % filename) + return bem + + +@fill_doc +class Coregistration(object): + """Class for MRI<->head coregistration. + + Parameters + ---------- + info : instance of Info | None + The measurement info. + %(subject)s + %(subjects_dir)s + %(fiducials)s + %(on_defects)s + + .. versionadded:: 1.0 + + Attributes + ---------- + fiducials : instance of DigMontage + A montage containing the MRI fiducials. + trans : instance of Transform + MRI<->Head coordinate transformation. + + See Also + -------- + mne.scale_mri + + Notes + ----- + Internal computation quantities parameters are in the following units: + + - rotation are in radians + - translation are in m + - scale are in scale proportion + + If using a scale mode, the :func:`~mne.scale_mri` should be used + to create a surrogate MRI subject with the proper scale factors. + """ + + def __init__(self, info, subject, subjects_dir=None, fiducials='auto', *, + on_defects='raise'): + _validate_type(info, (Info, None), 'info') + self._info = info + self._subject = _check_subject(subject, subject) + self._subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + self._scale_mode = None + self._on_defects = on_defects + + self._rot_trans = None + self._default_parameters = \ + np.array([0., 0., 0., 0., 0., 0., 1., 1., 1.]) + + self._rotation = self._default_parameters[:3] + self._translation = self._default_parameters[3:6] + self._scale = self._default_parameters[6:9] + self._icp_iterations = 20 + self._icp_angle = 0.2 + self._icp_distance = 0.2 + self._icp_scale = 0.2 + self._icp_fid_matches = ('nearest', 'matched') + self._icp_fid_match = self._icp_fid_matches[0] + self._lpa_weight = 1. + self._nasion_weight = 10. + self._rpa_weight = 1. + self._hsp_weight = 1. + self._eeg_weight = 1. + self._hpi_weight = 1. 
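+        # Default weights: the nasion is weighted 10x relative to the other
+        # fiducials, matching the fit_fiducials() defaults; the fit methods
+        # overwrite these when explicit weights are passed.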
+ self._extra_points_filter = None + + self._setup_digs() + self._setup_bem() + + self._fid_filename = None + self._setup_fiducials(fiducials) + self.reset() + + def _setup_digs(self): + if self._info is None: + self._dig_dict = dict( + hpi=np.zeros((1, 3)), + dig_ch_pos_location=np.zeros((1, 3)), + hsp=np.zeros((1, 3)), + rpa=np.zeros((1, 3)), + nasion=np.zeros((1, 3)), + lpa=np.zeros((1, 3)), + ) + else: + self._dig_dict = _get_data_as_dict_from_dig( + dig=self._info['dig'], + exclude_ref_channel=False + ) + # adjustments: + # set weights to 0 for None input + # convert fids to float arrays + for k, w_atr in zip(['nasion', 'lpa', 'rpa', 'hsp', 'hpi'], + ['_nasion_weight', '_lpa_weight', + '_rpa_weight', '_hsp_weight', '_hpi_weight']): + if self._dig_dict[k] is None: + self._dig_dict[k] = np.zeros((0, 3)) + setattr(self, w_atr, 0) + elif k in ['rpa', 'nasion', 'lpa']: + self._dig_dict[k] = np.array([self._dig_dict[k]], float) + + def _setup_bem(self): + # find high-res head model (if possible) + high_res_path = _find_head_bem(self._subject, self._subjects_dir, + high_res=True) + low_res_path = _find_head_bem(self._subject, self._subjects_dir, + high_res=False) + if high_res_path is None and low_res_path is None: + raise RuntimeError("No standard head model was " + f"found for subject {self._subject}") + if high_res_path is not None: + self._bem_high_res = _read_surface( + high_res_path, on_defects=self._on_defects + ) + logger.info(f'Using high resolution head model in {high_res_path}') + else: + self._bem_high_res = _read_surface( + low_res_path, on_defects=self._on_defects + ) + logger.info(f'Using low resolution head model in {low_res_path}') + if low_res_path is None: + # This should be very rare! + warn('No low-resolution head found, decimating high resolution ' + 'mesh (%d vertices): %s' % (len(self._bem_high_res.surf.rr), + high_res_path,)) + # Create one from the high res one, which we know we have + rr, tris = decimate_surface(self._bem_high_res.surf.rr, + self._bem_high_res.surf.tris, + n_triangles=5120) + # directly set the attributes of bem_low_res + self._bem_low_res = complete_surface_info( + dict(rr=rr, tris=tris), copy=False, verbose=False) + else: + self._bem_low_res = _read_surface( + low_res_path, on_defects=self._on_defects + ) + + def _setup_fiducials(self, fids): + _validate_type(fids, (str, dict, list)) + # find fiducials file + fid_accurate = None + if fids == 'auto': + fid_files = _find_fiducials_files(self._subject, + self._subjects_dir) + if len(fid_files) > 0: + # Read fiducials from disk + fid_filename = fid_files[0].format( + subjects_dir=self._subjects_dir, subject=self._subject) + logger.info(f'Using fiducials from: {fid_filename}.') + fids, _ = read_fiducials(fid_filename) + fid_accurate = True + self._fid_filename = fid_filename + else: + fids = 'estimated' + + if fids == 'estimated': + logger.info('Estimating fiducials from fsaverage.') + fid_accurate = False + fids = get_mni_fiducials(self._subject, self._subjects_dir) + + fid_accurate = True if fid_accurate is None else fid_accurate + if isinstance(fids, list): + fid_coords = _fiducial_coords(fids) + else: + assert isinstance(fids, dict) + fid_coords = np.array([fids['lpa'], fids['nasion'], fids['rpa']], + dtype=float) + + self._fid_points = fid_coords + self._fid_accurate = fid_accurate + + # does not seem to happen by itself ... 
so hard code it: + self._reset_fiducials() + + def _reset_fiducials(self): + dig_montage = make_dig_montage( + lpa=self._fid_points[0], + nasion=self._fid_points[1], + rpa=self._fid_points[2], + coord_frame='mri' + ) + self.fiducials = dig_montage + + def _update_params(self, rot=None, tra=None, sca=None, + force_update=False): + if force_update and tra is None: + tra = self._translation + rot_changed = False + if rot is not None: + rot_changed = True + self._last_rotation = self._rotation.copy() + self._rotation = rot + tra_changed = False + if rot_changed or tra is not None: + if tra is None: + tra = self._translation + tra_changed = True + self._last_translation = self._translation.copy() + self._translation = tra + self._head_mri_t = rotation(*self._rotation).T + self._head_mri_t[:3, 3] = \ + -np.dot(self._head_mri_t[:3, :3], tra) + self._transformed_dig_hpi = \ + apply_trans(self._head_mri_t, self._dig_dict['hpi']) + self._transformed_dig_eeg = \ + apply_trans( + self._head_mri_t, self._dig_dict['dig_ch_pos_location']) + self._transformed_dig_extra = \ + apply_trans(self._head_mri_t, + self._filtered_extra_points) + self._transformed_orig_dig_extra = \ + apply_trans(self._head_mri_t, self._dig_dict['hsp']) + self._mri_head_t = rotation(*self._rotation) + self._mri_head_t[:3, 3] = np.array(tra) + if tra_changed or sca is not None: + if sca is None: + sca = self._scale + self._last_scale = self._scale.copy() + self._scale = sca + self._mri_trans = np.eye(4) + self._mri_trans[:, :3] *= sca + self._transformed_high_res_mri_points = \ + apply_trans(self._mri_trans, + self._processed_high_res_mri_points) + self._update_nearest_calc() + + if tra_changed: + self._nearest_transformed_high_res_mri_idx_orig_hsp = \ + self._nearest_calc.query(self._transformed_orig_dig_extra)[1] + self._nearest_transformed_high_res_mri_idx_hpi = \ + self._nearest_calc.query(self._transformed_dig_hpi)[1] + self._nearest_transformed_high_res_mri_idx_eeg = \ + self._nearest_calc.query(self._transformed_dig_eeg)[1] + self._nearest_transformed_high_res_mri_idx_rpa = \ + self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict['rpa']))[1] + self._nearest_transformed_high_res_mri_idx_nasion = \ + self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict['nasion']))[1] + self._nearest_transformed_high_res_mri_idx_lpa = \ + self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict['lpa']))[1] + + def set_scale_mode(self, scale_mode): + """Select how to fit the scale parameters. + + Parameters + ---------- + scale_mode : None | str + The scale mode can be 'uniform', '3-axis' or disabled. + Defaults to None. + + * 'uniform': 1 scale factor is recovered. + * '3-axis': 3 scale factors are recovered. + * None: do not scale the MRI. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._scale_mode = scale_mode + return self + + def set_grow_hair(self, value): + """Compensate for hair on the digitizer head shape. + + Parameters + ---------- + value : float + Move the back of the MRI head outwards by ``value`` (mm). + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._grow_hair = value + self._update_params(force_update=True) + return self + + def set_rotation(self, rot): + """Set the rotation parameter. + + Parameters + ---------- + rot : array, shape (3,) + The rotation parameter (in radians). + + Returns + ------- + self : Coregistration + The modified Coregistration object. 
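+
+        Examples
+        --------
+        An illustrative sketch (``coreg`` is a hypothetical instance; angles
+        are in radians):
+
+        >>> coreg.set_rotation(np.deg2rad([5., 0., 0.]))  # doctest: +SKIP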
+ """ + self._update_params(rot=np.array(rot)) + return self + + def set_translation(self, tra): + """Set the translation parameter. + + Parameters + ---------- + tra : array, shape (3,) + The translation parameter (in m.). + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._update_params(tra=np.array(tra)) + return self + + def set_scale(self, sca): + """Set the scale parameter. + + Parameters + ---------- + sca : array, shape (3,) + The scale parameter. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._update_params(sca=np.array(sca)) + return self + + def _update_nearest_calc(self): + self._nearest_calc = _DistanceQuery( + self._processed_high_res_mri_points * self._scale) + + @property + def _filtered_extra_points(self): + if self._extra_points_filter is None: + return self._dig_dict['hsp'] + else: + return self._dig_dict['hsp'][self._extra_points_filter] + + @property + def _parameters(self): + return np.concatenate((self._rotation, self._translation, self._scale)) + + @property + def _last_parameters(self): + return np.concatenate((self._last_rotation, + self._last_translation, self._last_scale)) + + @property + def _changes(self): + move = np.linalg.norm(self._last_translation - self._translation) * 1e3 + angle = np.rad2deg(_angle_between_quats( + rot_to_quat(rotation(*self._rotation)[:3, :3]), + rot_to_quat(rotation(*self._last_rotation)[:3, :3]))) + percs = 100 * (self._scale - self._last_scale) / self._last_scale + return move, angle, percs + + @property + def _nearest_transformed_high_res_mri_idx_hsp(self): + return self._nearest_calc.query( + apply_trans(self._head_mri_t, self._filtered_extra_points))[1] + + @property + def _has_hsp_data(self): + return (self._has_mri_data and + len(self._nearest_transformed_high_res_mri_idx_hsp) > 0) + + @property + def _has_hpi_data(self): + return (self._has_mri_data and + len(self._nearest_transformed_high_res_mri_idx_hpi) > 0) + + @property + def _has_eeg_data(self): + return (self._has_mri_data and + len(self._nearest_transformed_high_res_mri_idx_eeg) > 0) + + @property + def _has_lpa_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx('lpa')] + assert mri_point['ident'] == FIFF.FIFFV_POINT_LPA + has_mri_data = np.any(mri_point['r']) + has_head_data = np.any(self._dig_dict['lpa']) + return has_mri_data and has_head_data + + @property + def _has_nasion_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx('nasion')] + assert mri_point['ident'] == FIFF.FIFFV_POINT_NASION + has_mri_data = np.any(mri_point['r']) + has_head_data = np.any(self._dig_dict['nasion']) + return has_mri_data and has_head_data + + @property + def _has_rpa_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx('rpa')] + assert mri_point['ident'] == FIFF.FIFFV_POINT_RPA + has_mri_data = np.any(mri_point['r']) + has_head_data = np.any(self._dig_dict['rpa']) + return has_mri_data and has_head_data + + @property + def _processed_high_res_mri_points(self): + return self._get_processed_mri_points('high') + + @property + def _processed_low_res_mri_points(self): + return self._get_processed_mri_points('low') + + def _get_processed_mri_points(self, res): + bem = self._bem_low_res if res == 'low' else self._bem_high_res + points = bem['rr'].copy() + if self._grow_hair: + assert len(bem['nn']) # should be guaranteed by _read_surface + scaled_hair_dist = (1e-3 * self._grow_hair / + np.array(self._scale)) + hair = points[:, 2] > points[:, 1] + 
points[hair] += bem['nn'][hair] * scaled_hair_dist + return points + + @property + def _has_mri_data(self): + return len(self._transformed_high_res_mri_points) > 0 + + @property + def _has_dig_data(self): + return (self._has_mri_data and + len(self._nearest_transformed_high_res_mri_idx_hsp) > 0) + + @property + def _orig_hsp_point_distance(self): + mri_points = self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_orig_hsp] + hsp_points = self._transformed_orig_dig_extra + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + def _log_dig_mri_distance(self, prefix): + errs_nearest = self.compute_dig_mri_distances() + logger.info(f'{prefix} median distance: ' + f'{np.median(errs_nearest * 1000):6.2f} mm') + + @property + def scale(self): + """Get the current scale factor. + + Returns + ------- + scale : ndarray, shape (3,) + The scale factors. + """ + return self._scale.copy() + + @verbose + def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1., + verbose=None): + """Find rotation and translation to fit all 3 fiducials. + + Parameters + ---------- + lpa_weight : float + Relative weight for LPA. The default value is 1. + nasion_weight : float + Relative weight for nasion. The default value is 10. + rpa_weight : float + Relative weight for RPA. The default value is 1. + %(verbose)s + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + logger.info('Aligning using fiducials') + self._log_dig_mri_distance('Start') + n_scale_params = self._n_scale_params + if n_scale_params == 3: + # enforce 1 even for 3-axis here (3 points is not enough) + logger.info("Enforcing 1 scaling parameter for fit " + "with fiducials.") + n_scale_params = 1 + self._lpa_weight = lpa_weight + self._nasion_weight = nasion_weight + self._rpa_weight = rpa_weight + + head_pts = np.vstack((self._dig_dict['lpa'], + self._dig_dict['nasion'], + self._dig_dict['rpa'])) + mri_pts = np.vstack( + (self.fiducials.dig[0]['r'], # LPA + self.fiducials.dig[1]['r'], # Nasion + self.fiducials.dig[2]['r']) # RPA + ) + weights = [lpa_weight, nasion_weight, rpa_weight] + + if n_scale_params == 0: + mri_pts *= self._scale # not done in fit_matched_points + x0 = self._parameters + x0 = x0[:6 + n_scale_params] + est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params', + scale=n_scale_params, weights=weights) + if n_scale_params == 0: + self._update_params(rot=est[:3], tra=est[3:6]) + else: + assert est.size == 7 + est = np.concatenate([est, [est[-1]] * 2]) + assert est.size == 9 + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + self._log_dig_mri_distance('End ') + return self + + def _setup_icp(self, n_scale_params): + head_pts = list() + mri_pts = list() + weights = list() + if self._has_dig_data and self._hsp_weight > 0: # should be true + head_pts.append(self._filtered_extra_points) + mri_pts.append(self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp]) + weights.append(np.full(len(head_pts[-1]), self._hsp_weight)) + for key in ('lpa', 'nasion', 'rpa'): + if getattr(self, f'_has_{key}_data'): + head_pts.append(self._dig_dict[key]) + if self._icp_fid_match == 'matched': + idx = _map_fid_name_to_idx(name=key) + p = self.fiducials.dig[idx]['r'].reshape(1, -1) + mri_pts.append(p) + else: + assert self._icp_fid_match == 'nearest' + mri_pts.append(self._processed_high_res_mri_points[ + getattr( + self, + '_nearest_transformed_high_res_mri_idx_%s' + % (key,))]) + weights.append(np.full(len(mri_pts[-1]),
getattr(self, '_%s_weight' % key))) + if self._has_eeg_data and self._eeg_weight > 0: + head_pts.append(self._dig_dict['dig_ch_pos_location']) + mri_pts.append(self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg]) + weights.append(np.full(len(mri_pts[-1]), self._eeg_weight)) + if self._has_hpi_data and self._hpi_weight > 0: + head_pts.append(self._dig_dict['hpi']) + mri_pts.append(self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi]) + weights.append(np.full(len(mri_pts[-1]), self._hpi_weight)) + head_pts = np.concatenate(head_pts) + mri_pts = np.concatenate(mri_pts) + weights = np.concatenate(weights) + if n_scale_params == 0: + mri_pts *= self._scale # not done in fit_matched_points + return head_pts, mri_pts, weights + + def set_fid_match(self, match): + """Set the strategy for fitting anatomical landmark (fiducial) points. + + Parameters + ---------- + match : 'nearest' | 'matched' + Alignment strategy; ``'nearest'`` aligns anatomical landmarks to + any point on the head surface; ``'matched'`` aligns to the fiducial + points in the MRI. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + _check_option('match', match, self._icp_fid_matches) + self._icp_fid_match = match + return self + + @verbose + def fit_icp(self, n_iterations=20, lpa_weight=1., nasion_weight=10., + rpa_weight=1., hsp_weight=1., eeg_weight=1., hpi_weight=1., + callback=None, verbose=None): + """Find MRI scaling, translation, and rotation to match HSP. + + Parameters + ---------- + n_iterations : int + Maximum number of iterations. + lpa_weight : float + Relative weight for LPA. The default value is 1. + nasion_weight : float + Relative weight for nasion. The default value is 10. + rpa_weight : float + Relative weight for RPA. The default value is 1. + hsp_weight : float + Relative weight for HSP. The default value is 1. + eeg_weight : float + Relative weight for EEG. The default value is 1. + hpi_weight : float + Relative weight for HPI. The default value is 1. + callback : callable | None + A function to call on each iteration. Useful for status message + updates. It will be passed the keyword arguments ``iteration`` + and ``n_iterations``. + %(verbose)s + + Returns + ------- + self : Coregistration + The modified Coregistration object. 
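Taken together, the setters and the two fitting stages documented above support a workflow along these lines (a hedged sketch: ``info`` and ``subjects_dir`` are assumed to come from a loaded recording and a FreeSurfer subjects directory):

from mne.coreg import Coregistration

coreg = Coregistration(info, subject='sample', subjects_dir=subjects_dir)
coreg.set_scale_mode(None)          # rigid-only fit: do not scale the MRI
coreg.fit_fiducials()               # coarse alignment from LPA/nasion/RPA
coreg.fit_icp(n_iterations=20)      # refine against the digitized head shape
trans = coreg.trans                 # head->MRI transform for forward modeling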
+ """ + logger.info('Aligning using ICP') + self._log_dig_mri_distance('Start ') + n_scale_params = self._n_scale_params + self._lpa_weight = lpa_weight + self._nasion_weight = nasion_weight + self._rpa_weight = rpa_weight + self._hsp_weight = hsp_weight + self._eeg_weight = eeg_weight + self._hsp_weight = hpi_weight + + # Initial guess (current state) + est = self._parameters + est = est[:[6, 7, None, 9][n_scale_params]] + + # Do the fits, assigning and evaluating at each step + for iteration in range(n_iterations): + head_pts, mri_pts, weights = self._setup_icp(n_scale_params) + est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params, + x0=est, out='params', weights=weights) + if n_scale_params == 0: + self._update_params(rot=est[:3], tra=est[3:6]) + elif n_scale_params == 1: + est = np.array(list(est) + [est[-1]] * 2) + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + else: + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + angle, move, scale = self._changes + self._log_dig_mri_distance(f' ICP {iteration + 1:2d} ') + if callback is not None: + callback(iteration, n_iterations) + if angle <= self._icp_angle and move <= self._icp_distance and \ + all(scale <= self._icp_scale): + break + self._log_dig_mri_distance('End ') + return self + + @property + def _n_scale_params(self): + if self._scale_mode is None: + n_scale_params = 0 + elif self._scale_mode == 'uniform': + n_scale_params = 1 + else: + n_scale_params = 3 + return n_scale_params + + def omit_head_shape_points(self, distance): + """Exclude head shape points that are far away from the MRI head. + + Parameters + ---------- + distance : float + Exclude all points that are further away from the MRI head than + this distance (in m.). A value of distance <= 0 excludes nothing. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + distance = float(distance) + if distance <= 0: + return + + # find the new filter + mask = self._orig_hsp_point_distance <= distance + n_excluded = np.sum(~mask) + logger.info("Coregistration: Excluding %i head shape points with " + "distance >= %.3f m.", n_excluded, distance) + # set the filter + self._extra_points_filter = mask + self._update_params(force_update=True) + return self + + def compute_dig_mri_distances(self): + """Compute distance between head shape points and MRI skin surface. + + Returns + ------- + dist : array, shape (n_points,) + The distance of the head shape points to the MRI skin surface. + + See Also + -------- + mne.dig_mri_distances + """ + # we don't use `dig_mri_distances` here because it should be much + # faster to use our already-determined nearest points + hsp_points, mri_points, _ = self._setup_icp(0) + hsp_points = apply_trans(self._head_mri_t, hsp_points) + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + @property + def trans(self): + """The head->mri :class:`~mne.transforms.Transform`.""" + return Transform('head', 'mri', self._head_mri_t) + + def reset(self): + """Reset all the parameters affecting the coregistration. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._grow_hair = 0. 
+ self.set_rotation(self._default_parameters[:3]) + self.set_translation(self._default_parameters[3:6]) + self.set_scale(self._default_parameters[6:9]) + self._extra_points_filter = None + self._update_nearest_calc() + return self + + def _get_fiducials_distance(self): + distance = dict() + for key in ('lpa', 'nasion', 'rpa'): + idx = _map_fid_name_to_idx(name=key) + fid = self.fiducials.dig[idx]['r'].reshape(1, -1) + + transformed_mri = apply_trans(self._mri_trans, fid) + transformed_hsp = apply_trans( + self._head_mri_t, self._dig_dict[key]) + distance[key] = np.linalg.norm( + np.ravel(transformed_mri - transformed_hsp)) + return np.array(list(distance.values())) * 1e3 + + def _get_fiducials_distance_str(self): + dists = self._get_fiducials_distance() + return f"Fiducials: {dists[0]:.1f}, {dists[1]:.1f}, {dists[2]:.1f} mm" + + def _get_point_distance(self): + mri_points = list() + hsp_points = list() + if self._hsp_weight > 0 and self._has_hsp_data: + mri_points.append(self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp]) + hsp_points.append(self._transformed_dig_extra) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if self._eeg_weight > 0 and self._has_eeg_data: + mri_points.append(self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg]) + hsp_points.append(self._transformed_dig_eeg) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if self._hpi_weight > 0 and self._has_hpi_data: + mri_points.append(self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi]) + hsp_points.append(self._transformed_dig_hpi) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if all(len(h) == 0 for h in hsp_points): + return None + mri_points = np.concatenate(mri_points) + hsp_points = np.concatenate(hsp_points) + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + def _get_point_distance_str(self): + point_distance = self._get_point_distance() + if point_distance is None: + return "" + dists = 1e3 * point_distance + av_dist = np.mean(dists) + std_dist = np.std(dists) + kinds = [kind for kind, check in + (('HSP', self._hsp_weight > 0 and self._has_hsp_data), + ('EEG', self._eeg_weight > 0 and self._has_eeg_data), + ('HPI', self._hpi_weight > 0 and self._has_hpi_data)) + if check] + kinds = '+'.join(kinds) + return f"{len(dists)} {kinds}: {av_dist:.1f} ± {std_dist:.1f} mm" diff --git a/python/libs/mne/cov.py b/python/libs/mne/cov.py new file mode 100644 index 0000000..78706fa --- /dev/null +++ b/python/libs/mne/cov.py @@ -0,0 +1,2087 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Denis A. 
Engemann +# +# License: BSD-3-Clause + +from copy import deepcopy +import itertools as itt +from math import log + +import numpy as np + +from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS +from .io.write import start_and_end_file +from .io.proj import (make_projector, _proj_equal, activate_proj, + _check_projs, _needs_eeg_average_ref_proj, + _has_eeg_average_ref_proj, _read_proj, _write_proj) +from .io import fiff_open, RawArray + +from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info, + _picks_by_type, _pick_data_channels, _picks_to_idx, + _DATA_CH_TYPES_SPLIT) + +from .io.constants import FIFF +from .io.meas_info import _read_bad_channels, create_info +from .io.tag import find_tag +from .io.tree import dir_tree_find +from .io.write import (start_block, end_block, write_int, write_name_list, + write_double, write_float_matrix, write_string) +from .defaults import _handle_default +from .epochs import Epochs +from .event import make_fixed_length_events +from .evoked import EvokedArray +from .rank import compute_rank +from .utils import (check_fname, logger, verbose, check_version, _time_mask, + warn, copy_function_doc_to_method_doc, _pl, + _undo_scaling_cov, _scaled_array, _validate_type, + _check_option, eigh, fill_doc, _on_missing, + _check_on_missing, _check_fname, _VerboseDep) +from . import viz + +from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet, + empirical_covariance, log_likelihood) + + +def _check_covs_algebra(cov1, cov2): + if cov1.ch_names != cov2.ch_names: + raise ValueError('Both Covariance objects must have the same list of ' + 'channels.') + projs1 = [str(c) for c in cov1['projs']] + projs2 = [str(c) for c in cov2['projs']] + if projs1 != projs2: + raise ValueError('Both Covariance objects must have the same list of ' + 'SSP projections.') + + +def _get_tslice(epochs, tmin, tmax): + """Get the slice.""" + mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq']) + tstart = np.where(mask)[0][0] if tmin is not None else None + tend = np.where(mask)[0][-1] + 1 if tmax is not None else None + tslice = slice(tstart, tend, None) + return tslice + + +@fill_doc +class Covariance(dict, _VerboseDep): + """Noise covariance matrix. + + .. warning:: This class should not be instantiated directly, but + instead should be created using a covariance reading or + computation function. + + Parameters + ---------- + data : array-like + The data. + names : list of str + Channel names. + bads : list of str + Bad channels. + projs : list + Projection vectors. + nfree : int + Degrees of freedom. + eig : array-like | None + Eigenvalues. + eigvec : array-like | None + Eigenvectors. + method : str | None + The method used to compute the covariance. + loglik : float + The log likelihood. + %(verbose)s + + Attributes + ---------- + data : array of shape (n_channels, n_channels) + The covariance. + ch_names : list of str + List of channels' names. + nfree : int + Number of degrees of freedom, i.e. the number of time points used. + dim : int + The number of channels ``n_channels``.
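Because ``nfree`` records the degrees of freedom, the addition operators defined further below can pool two covariances as a weighted average, C = (n1*C1 + n2*C2) / (n1 + n2). A tiny numeric check of that formula with made-up values:

import numpy as np

C1, n1 = np.eye(2) * 2.0, 10   # hypothetical covariance with 10 dof
C2, n2 = np.eye(2) * 4.0, 30   # better-sampled covariance with 30 dof
pooled = (C1 * n1 + C2 * n2) / (n1 + n2)
assert np.allclose(pooled, np.eye(2) * 3.5)  # pulled toward C2, as expected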
+ + See Also + -------- + compute_covariance + compute_raw_covariance + make_ad_hoc_cov + read_cov + """ + + @verbose + def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None, + method=None, loglik=None, *, verbose=None): + """Init of covariance.""" + diag = (data.ndim == 1) + projs = _check_projs(projs) + self.update(data=data, dim=len(data), names=names, bads=bads, + nfree=nfree, eig=eig, eigvec=eigvec, diag=diag, + projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV) + if method is not None: + self['method'] = method + if loglik is not None: + self['loglik'] = loglik + + @property + def data(self): + """Numpy array of Noise covariance matrix.""" + return self['data'] + + @property + def ch_names(self): + """Channel names.""" + return self['names'] + + @property + def nfree(self): + """Number of degrees of freedom.""" + return self['nfree'] + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save covariance matrix in a FIF file. + + Parameters + ---------- + fname : str + Output filename. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + """ + check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz', + '_cov.fif', '_cov.fif.gz')) + fname = _check_fname(fname=fname, overwrite=overwrite) + with start_and_end_file(fname) as fid: + _write_cov(fid, self) + + def copy(self): + """Copy the Covariance object. + + Returns + ------- + cov : instance of Covariance + The copied object. + """ + return deepcopy(self) + + def as_diag(self): + """Set covariance to be processed as being diagonal. + + Returns + ------- + cov : dict + The covariance. + + Notes + ----- + This function allows creation of inverse operators + equivalent to using the old "--diagnoise" mne option. + + This function operates in place. + """ + if self['diag']: + return self + self['diag'] = True + self['data'] = np.diag(self['data']) + self['eig'] = None + self['eigvec'] = None + return self + + def _as_square(self): + # This is a hack but it works because np.diag() behaves nicely + if self['diag']: + self['diag'] = False + self.as_diag() + self['diag'] = False + return self + + def _get_square(self): + if self['diag'] != (self.data.ndim == 1): + raise RuntimeError( + 'Covariance attributes inconsistent, got data with ' + 'dimensionality %d but diag=%s' + % (self.data.ndim, self['diag'])) + return np.diag(self.data) if self['diag'] else self.data.copy() + + def __repr__(self): # noqa: D105 + if self.data.ndim == 2: + s = 'size : %s x %s' % self.data.shape + else: # ndim == 1 + s = 'diagonal : %s' % self.data.size + s += ", n_samples : %s" % self.nfree + s += ", data : %s" % self.data + return "<Covariance | %s>" % s + + def __add__(self, cov): + """Add Covariance taking into account number of degrees of freedom.""" + _check_covs_algebra(self, cov) + this_cov = cov.copy() + this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) + + (self['data'] * self['nfree'])) / + (self['nfree'] + this_cov['nfree'])) + this_cov['nfree'] += self['nfree'] + + this_cov['bads'] = list(set(this_cov['bads']).union(self['bads'])) + + return this_cov + + def __iadd__(self, cov): + """Add Covariance taking into account number of degrees of freedom.""" + _check_covs_algebra(self, cov) + self['data'][:] = (((self['data'] * self['nfree']) + + (cov['data'] * cov['nfree'])) / + (self['nfree'] + cov['nfree'])) + self['nfree'] += cov['nfree'] + + self['bads'] = list(set(self['bads']).union(cov['bads'])) + + return self + + @verbose + @copy_function_doc_to_method_doc(viz.misc.plot_cov) + def plot(self, info, exclude=[],
colorbar=True, proj=False, show_svd=True, + show=True, verbose=None): + return viz.misc.plot_cov(self, info, exclude, colorbar, proj, show_svd, + show, verbose) + + @verbose + def plot_topomap(self, info, ch_type=None, vmin=None, + vmax=None, cmap=None, sensors=True, colorbar=True, + scalings=None, units=None, res=64, + size=1, cbar_fmt="%3.1f", + proj=False, show=True, show_names=False, title=None, + mask=None, mask_params=None, outlines='head', + contours=6, image_interp='bilinear', + axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None, + border=_BORDER_DEFAULT, + noise_cov=None, verbose=None): + """Plot a topomap of the covariance diagonal. + + Parameters + ---------- + %(info_not_none)s + %(ch_type_topomap)s + %(vmin_vmax_topomap)s + %(cmap_topomap)s + %(sensors_topomap)s + %(colorbar_topomap)s + %(scalings_topomap)s + %(units_topomap)s + %(res_topomap)s + %(size_topomap)s + %(cbar_fmt_topomap)s + %(proj_plot)s + %(show)s + %(show_names_topomap)s + %(title_none)s + %(mask_topomap)s + %(mask_params_topomap)s + %(outlines_topomap)s + %(contours_topomap)s + %(image_interp_topomap)s + %(axes_topomap)s + %(extrapolate_topomap)s + %(sphere_topomap_auto)s + %(border_topomap)s + noise_cov : instance of Covariance | None + If not None, whiten the instance with ``noise_cov`` before + plotting. + %(verbose)s + + Returns + ------- + fig : instance of Figure + The matplotlib figure. + + Notes + ----- + .. versionadded:: 0.21 + """ + from .viz.misc import _index_info_cov + info, C, _, _ = _index_info_cov(info, self, exclude=()) + evoked = EvokedArray(np.diag(C)[:, np.newaxis], info) + if noise_cov is not None: + # need to left and right multiply whitener, which for the diagonal + # entries is the same as multiplying twice + evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov) + if units is None: + units = 'AU' + if scalings is None: + scalings = 1. + if units is None: + units = {k: f'({v})²' for k, v in DEFAULTS['units'].items()} + if scalings is None: + scalings = {k: v * v for k, v in DEFAULTS['scalings'].items()} + return evoked.plot_topomap( + times=[0], ch_type=ch_type, vmin=vmin, vmax=vmax, cmap=cmap, + sensors=sensors, colorbar=colorbar, scalings=scalings, + units=units, res=res, size=size, cbar_fmt=cbar_fmt, + proj=proj, show=show, show_names=show_names, title=title, + mask=mask, mask_params=mask_params, outlines=outlines, + contours=contours, image_interp=image_interp, axes=axes, + extrapolate=extrapolate, sphere=sphere, border=border, + time_format='') + + def pick_channels(self, ch_names, ordered=False): + """Pick channels from this covariance matrix. + + Parameters + ---------- + ch_names : list of str + List of channels to keep. All other channels are dropped. + ordered : bool + If True (default False), ensure that the order of the channels + matches the order of ``ch_names``. + + Returns + ------- + cov : instance of Covariance. + The modified covariance matrix. + + Notes + ----- + Operates in-place. + + .. versionadded:: 0.20.0 + """ + return pick_channels_cov(self, ch_names, exclude=[], ordered=ordered, + copy=False) + + +############################################################################### +# IO + +@verbose +def read_cov(fname, verbose=None): + """Read a noise covariance from a FIF file. + + Parameters + ---------- + fname : str + The name of file containing the covariance matrix. It should end with + -cov.fif or -cov.fif.gz. + %(verbose)s + + Returns + ------- + cov : Covariance + The noise covariance matrix. 
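A hedged round-trip sketch for the reader documented above (the file name is assumed; as the body below enforces, covariance file names must end in ``-cov.fif`` or ``-cov.fif.gz``):

import mne

cov = mne.read_cov('sample_audvis-cov.fif')
print(cov.ch_names[:5], cov.data.shape, cov.nfree)
cov.save('sample_audvis_copy-cov.fif', overwrite=True)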
+ + See Also + -------- + write_cov, compute_covariance, compute_raw_covariance + """ + check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz', + '_cov.fif', '_cov.fif.gz')) + fname = _check_fname(fname=fname, must_exist=True, overwrite='read') + f, tree = fiff_open(fname)[:2] + with f as fid: + return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV, + limited=True)) + + +############################################################################### +# Estimate from data + +@verbose +def make_ad_hoc_cov(info, std=None, verbose=None): + """Create an ad hoc noise covariance. + + Parameters + ---------- + %(info_not_none)s + std : dict of float | None + Standard deviation of the diagonal elements. If dict, keys should be + ``'grad'`` for gradiometers, ``'mag'`` for magnetometers and ``'eeg'`` + for EEG channels. If None, default values will be used (see Notes). + %(verbose)s + + Returns + ------- + cov : instance of Covariance + The ad hoc diagonal noise covariance for the M/EEG data channels. + + Notes + ----- + The default noise values are 5 fT/cm, 20 fT, and 0.2 µV for gradiometers, + magnetometers, and EEG channels respectively. + + .. versionadded:: 0.9.0 + """ + picks = pick_types(info, meg=True, eeg=True, exclude=()) + std = _handle_default('noise_std', std) + + data = np.zeros(len(picks)) + for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True), + (std['grad'], std['mag'], std['eeg'])): + these_picks = pick_types(info, meg=meg, eeg=eeg) + data[np.searchsorted(picks, these_picks)] = val * val + ch_names = [info['ch_names'][pick] for pick in picks] + return Covariance(data, ch_names, info['bads'], info['projs'], nfree=0) + + +def _check_n_samples(n_samples, n_chan): + """Check to see if there are enough samples for reliable cov calc.""" + n_samples_min = 10 * (n_chan + 1) // 2 + if n_samples <= 0: + raise ValueError('No samples found to compute the covariance matrix') + if n_samples < n_samples_min: + warn('Too few samples (required : %d got : %d), covariance ' + 'estimate may be unreliable' % (n_samples_min, n_samples)) + + +@verbose +def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, + flat=None, picks=None, method='empirical', + method_params=None, cv=3, scalings=None, n_jobs=1, + return_estimators=False, reject_by_annotation=True, + rank=None, verbose=None): + """Estimate noise covariance matrix from a continuous segment of raw data. + + It is typically useful to estimate a noise covariance from empty room + data or time intervals before starting the stimulation. + + .. note:: To estimate the noise covariance from epoched data, use + :func:`mne.compute_covariance` instead. + + Parameters + ---------- + raw : instance of Raw + Raw data. + tmin : float + Beginning of time interval in seconds. Defaults to 0. + tmax : float | None (default None) + End of time interval in seconds. If None (default), use the end of the + recording. + tstep : float (default 0.2) + Length of data chunks for artifact rejection in seconds. + Can also be None to use a single epoch of (tmax - tmin) + duration. This can use a lot of memory for large ``Raw`` + instances. + reject : dict | None (default None) + Rejection parameters based on peak-to-peak amplitude. + Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. + If reject is None then no rejection is done.
Example:: + + reject = dict(grad=4000e-13, # T / m (gradiometers) + mag=4e-12, # T (magnetometers) + eeg=40e-6, # V (EEG channels) + eog=250e-6 # V (EOG channels) + ) + + flat : dict | None (default None) + Rejection parameters based on flatness of signal. + Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values + are floats that set the minimum acceptable peak-to-peak amplitude. + If flat is None then no rejection is done. + %(picks_good_data_noref)s + method : str | list | None (default 'empirical') + The method used for covariance estimation. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.12 + method_params : dict | None (default None) + Additional parameters to the estimation procedure. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.12 + cv : int | sklearn.model_selection object (default 3) + The cross validation method. Defaults to 3, which will + internally trigger by default :class:`sklearn.model_selection.KFold` + with 3 splits. + + .. versionadded:: 0.12 + scalings : dict | None (default None) + Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``. + These defaults will scale magnetometers and gradiometers + at the same unit. + + .. versionadded:: 0.12 + %(n_jobs)s + + .. versionadded:: 0.12 + return_estimators : bool (default False) + Whether to return all estimators or the best. Only considered if + method equals 'auto' or is a list of str. Defaults to False. + + .. versionadded:: 0.12 + %(reject_by_annotation_epochs)s + + .. versionadded:: 0.14 + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + %(verbose)s + + Returns + ------- + cov : instance of Covariance | list + The computed covariance. If method equals 'auto' or is a list of str + and return_estimators equals True, a list of covariance estimators is + returned (sorted by log-likelihood, from high to low, i.e. from best + to worst). + + See Also + -------- + compute_covariance : Estimate noise covariance matrix from epoched data. + + Notes + ----- + This function will: + + 1. Partition the data into evenly spaced, equal-length epochs. + 2. Load them into memory. + 3. Subtract the mean across all time points and epochs for each channel. + 4. Process the :class:`Epochs` by :func:`compute_covariance`. + + This will produce a slightly different result compared to using + :func:`make_fixed_length_events`, :class:`Epochs`, and + :func:`compute_covariance` directly, since that would (with the recommended + baseline correction) subtract the mean across time *for each epoch* + (instead of across epochs) for each channel. + """ + tmin = 0. if tmin is None else float(tmin) + dt = 1. / raw.info['sfreq'] + tmax = raw.times[-1] + dt if tmax is None else float(tmax) + tstep = tmax - tmin if tstep is None else float(tstep) + tstep_m1 = tstep - dt # inclusive! + events = make_fixed_length_events(raw, 1, tmin, tmax, tstep) + logger.info('Using up to %s segment%s' % (len(events), _pl(events))) + + # don't exclude any bad channels, inverses expect all channels present + if picks is None: + # Need to include all channels e.g. 
if eog rejection is to be used + picks = np.arange(raw.info['nchan']) + pick_mask = np.in1d( + picks, _pick_data_channels(raw.info, with_ref_meg=False)) + else: + pick_mask = slice(None) + picks = _picks_to_idx(raw.info, picks) + epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None, + picks=picks, reject=reject, flat=flat, verbose=False, + preload=False, proj=False, + reject_by_annotation=reject_by_annotation) + if method is None: + method = 'empirical' + if isinstance(method, str) and method == 'empirical': + # potentially *much* more memory efficient to do it the iterative way + picks = picks[pick_mask] + data = 0 + n_samples = 0 + mu = 0 + # Read data in chunks + for raw_segment in epochs: + raw_segment = raw_segment[pick_mask] + mu += raw_segment.sum(axis=1) + data += np.dot(raw_segment, raw_segment.T) + n_samples += raw_segment.shape[1] + _check_n_samples(n_samples, len(picks)) + data -= mu[:, None] * (mu[None, :] / n_samples) + data /= (n_samples - 1.0) + logger.info("Number of samples used : %d" % n_samples) + logger.info('[done]') + ch_names = [raw.info['ch_names'][k] for k in picks] + bads = [b for b in raw.info['bads'] if b in ch_names] + return Covariance(data, ch_names, bads, raw.info['projs'], + nfree=n_samples - 1) + del picks, pick_mask + + # This makes it equivalent to what we used to do (and do above for + # empirical mode), treating all epochs as if they were a single long one + epochs.load_data() + ch_means = epochs._data.mean(axis=0).mean(axis=1) + epochs._data -= ch_means[np.newaxis, :, np.newaxis] + # fake this value so there are no complaints from compute_covariance + epochs.baseline = (None, None) + return compute_covariance(epochs, keep_sample_mean=True, method=method, + method_params=method_params, cv=cv, + scalings=scalings, n_jobs=n_jobs, + return_estimators=return_estimators, + rank=rank) + + +def _check_method_params(method, method_params, keep_sample_mean=True, + name='method', allow_auto=True, rank=None): + """Check that method and method_params are usable.""" + accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf', + 'oas', 'shrunk', 'pca', 'factor_analysis', 'shrinkage') + _method_params = { + 'empirical': {'store_precision': False, 'assume_centered': True}, + 'diagonal_fixed': {'store_precision': False, 'assume_centered': True}, + 'ledoit_wolf': {'store_precision': False, 'assume_centered': True}, + 'oas': {'store_precision': False, 'assume_centered': True}, + 'shrinkage': {'shrinkage': 0.1, 'store_precision': False, + 'assume_centered': True}, + 'shrunk': {'shrinkage': np.logspace(-4, 0, 30), + 'store_precision': False, 'assume_centered': True}, + 'pca': {'iter_n_components': None}, + 'factor_analysis': {'iter_n_components': None} + } + + for ch_type in _DATA_CH_TYPES_SPLIT: + _method_params['diagonal_fixed'][ch_type] = 0.1 + + if isinstance(method_params, dict): + for key, values in method_params.items(): + if key not in _method_params: + raise ValueError('key (%s) must be "%s"' % + (key, '" or "'.join(_method_params))) + + _method_params[key].update(method_params[key]) + shrinkage = method_params.get('shrinkage', {}).get('shrinkage', 0.1) + if not 0 <= shrinkage <= 1: + raise ValueError('shrinkage must be between 0 and 1, got %s' + % (shrinkage,)) + + was_auto = False + if method is None: + method = ['empirical'] + elif method == 'auto' and allow_auto: + was_auto = True + method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis'] + + if not isinstance(method, (list, tuple)): + method = [method] + + if not all(k in 
accepted_methods for k in method): + raise ValueError( + 'Invalid {name} ({method}). Accepted values (individually or ' + 'in a list) are any of "{accepted_methods}" or None.'.format( + name=name, method=method, accepted_methods=accepted_methods)) + if not (isinstance(rank, str) and rank == 'full'): + if was_auto: + method.pop(method.index('factor_analysis')) + for method_ in method: + if method_ in ('pca', 'factor_analysis'): + raise ValueError('%s can so far only be used with rank="full",' + ' got rank=%r' % (method_, rank)) + if not keep_sample_mean: + if len(method) != 1 or 'empirical' not in method: + raise ValueError('`keep_sample_mean=False` is only supported ' + 'with %s="empirical"' % (name,)) + for p, v in _method_params.items(): + if v.get('assume_centered', None) is False: + raise ValueError('`assume_centered` must be True' + ' if `keep_sample_mean` is False') + return method, _method_params + + +@verbose +def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, + projs=None, method='empirical', method_params=None, + cv=3, scalings=None, n_jobs=1, return_estimators=False, + on_mismatch='raise', rank=None, verbose=None): + """Estimate noise covariance matrix from epochs. + + The noise covariance is typically estimated on pre-stimulus periods + when the stimulus onset is defined from events. + + If the covariance is computed for multiple event types (events + with different IDs), the following two options can be used and combined: + + 1. either an Epochs object for each event type is created and + a list of Epochs is passed to this function. + 2. an Epochs object is created for multiple events and passed + to this function. + + .. note:: To estimate the noise covariance from non-epoched raw data, such + as an empty-room recording, use + :func:`mne.compute_raw_covariance` instead. + + Parameters + ---------- + epochs : instance of Epochs, or list of Epochs + The epochs. + keep_sample_mean : bool (default True) + If False, the average response over epochs is computed for + each event type and subtracted during the covariance + computation. This is useful if the evoked response from a + previous stimulus extends into the baseline period of the next. + Note. This option is only implemented for method='empirical'. + tmin : float | None (default None) + Start time for baseline. If None start at first sample. + tmax : float | None (default None) + End time for baseline. If None end at last sample. + projs : list of Projection | None (default None) + List of projectors to use in covariance calculation, or None + to indicate that the projectors from the epochs should be + inherited. If None, then projectors from all epochs must match. + method : str | list | None (default 'empirical') + The method used for covariance estimation. If 'empirical' (default), + the sample covariance will be computed. A list can be passed to + perform estimates using multiple methods. + If 'auto' or a list of methods, the best estimator will be determined + based on log-likelihood and cross-validation on unseen data as + described in :footcite:`EngemannGramfort2015`. Valid methods are + 'empirical', 'diagonal_fixed', 'shrunk', 'oas', 'ledoit_wolf', + 'factor_analysis', 'shrinkage', and 'pca' (see Notes). If ``'auto'``, + it expands to:: + + ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis'] + + ``'factor_analysis'`` is removed when ``rank`` is not 'full'. + The ``'auto'`` mode is not recommended if there are many + segments of data, since computation can take a long time. + + ..
versionadded:: 0.9.0 + method_params : dict | None (default None) + Additional parameters to the estimation procedure. Only considered if + method is not None. Keys must correspond to the value(s) of ``method``. + If None (default), expands to the following (with the addition of + ``{'store_precision': False, 'assume_centered': True}`` for all methods + except ``'factor_analysis'`` and ``'pca'``):: + + {'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...}, + 'shrinkage': {'shrinkage': 0.1}, + 'shrunk': {'shrinkage': np.logspace(-4, 0, 30)}, + 'pca': {'iter_n_components': None}, + 'factor_analysis': {'iter_n_components': None}} + + cv : int | sklearn.model_selection object (default 3) + The cross validation method. Defaults to 3, which will + internally trigger by default :class:`sklearn.model_selection.KFold` + with 3 splits. + scalings : dict | None (default None) + Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``. + These defaults will scale data to roughly the same order of + magnitude. + %(n_jobs)s + return_estimators : bool (default False) + Whether to return all estimators or the best. Only considered if + method equals 'auto' or is a list of str. Defaults to False. + on_mismatch : str + What to do when the MEG<->Head transformations do not match between + epochs. If "raise" (default) an error is raised, if "warn" then a + warning is emitted, if "ignore" then nothing is printed. Having + mismatched transforms can in some cases lead to unexpected or + unstable results in covariance calculation, e.g. when data + have been processed with Maxwell filtering but not transformed + to the same head position. + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + %(verbose)s + + Returns + ------- + cov : instance of Covariance | list + The computed covariance. If method equals 'auto' or is a list of str + and return_estimators equals True, a list of covariance estimators is + returned (sorted by log-likelihood, from high to low, i.e. from best + to worst). + + See Also + -------- + compute_raw_covariance : Estimate noise covariance from raw data, such as + empty-room recordings. + + Notes + ----- + Baseline correction or sufficient high-passing should be used + when creating the :class:`Epochs` to ensure that the data are zero mean, + otherwise the computed covariance matrix will be inaccurate. + + Valid ``method`` strings are: + + * ``'empirical'`` + The empirical or sample covariance (default) + * ``'diagonal_fixed'`` + A diagonal regularization based on channel types as in + :func:`mne.cov.regularize`. + * ``'shrinkage'`` + Fixed shrinkage. + + .. versionadded:: 0.16 + * ``'ledoit_wolf'`` + The Ledoit-Wolf estimator, which uses an + empirical formula for the optimal shrinkage value + :footcite:`LedoitWolf2004`. + * ``'oas'`` + The OAS estimator :footcite:`ChenEtAl2010`, which uses a different + empirical formula for the optimal shrinkage value. + + .. versionadded:: 0.16 + * ``'shrunk'`` + Like 'ledoit_wolf', but with cross-validation + for optimal alpha. + * ``'pca'`` + Probabilistic PCA with low rank :footcite:`TippingBishop1999`. + * ``'factor_analysis'`` + Factor analysis with low rank :footcite:`Barber2012`. + + ``'ledoit_wolf'`` and ``'pca'`` are similar to ``'shrunk'`` and + ``'factor_analysis'``, respectively, except that the latter two use + cross-validation (which is useful when samples are correlated, which + is often the case for M/EEG data). The former two are not included in + the ``'auto'`` mode to avoid redundancy.
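As a usage aside, the automated model selection described above is typically run on pre-stimulus baseline epochs; a hedged sketch (``epochs`` assumed to be an existing Epochs object):

import mne

cov = mne.compute_covariance(epochs, tmax=0., method='auto')
print(cov['method'], cov['loglik'])  # winning estimator and its log-likelihood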
+ + For multiple event types, it is also possible to create a + single :class:`Epochs` object with events obtained using + :func:`mne.merge_events`. However, the resulting covariance matrix + will only be correct if ``keep_sample_mean is True``. + + The covariance can be unstable if the number of samples is small. + In that case it is common to regularize the covariance estimate. + The ``method`` parameter allows regularizing the covariance in an + automated way. It also allows selecting among different alternative + estimation algorithms which themselves achieve regularization. + Details are described in :footcite:`EngemannGramfort2015`. + + For more information on the advanced estimation methods, see + the covariance section of the scikit-learn manual. + + References + ---------- + .. footbibliography:: + """ + # scale to natural unit for best stability with MEG/EEG + scalings = _check_scalings_user(scalings) + method, _method_params = _check_method_params( + method, method_params, keep_sample_mean, rank=rank) + del method_params + + # for multi condition support epochs is required to refer to a list of + # epochs objects + + def _unpack_epochs(epochs): + if len(epochs.event_id) > 1: + epochs = [epochs[k] for k in epochs.event_id] + else: + epochs = [epochs] + return epochs + + if not isinstance(epochs, list): + epochs = _unpack_epochs(epochs) + else: + epochs = sum([_unpack_epochs(epoch) for epoch in epochs], []) + + # check for baseline correction + if any(epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and + keep_sample_mean for epochs_t in epochs): + warn('Epochs are not baseline corrected, covariance ' + 'matrix may be inaccurate') + + orig = epochs[0].info['dev_head_t'] + _check_on_missing(on_mismatch, 'on_mismatch') + for ei, epoch in enumerate(epochs): + epoch.info._check_consistency() + if (orig is None) != (epoch.info['dev_head_t'] is None) or \ + (orig is not None and not + np.allclose(orig['trans'], + epoch.info['dev_head_t']['trans'])): + msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n' + 'and epochs[%s]:\n%s' + % (orig, ei, epoch.info['dev_head_t'])) + _on_missing(on_mismatch, msg, 'on_mismatch') + + bads = epochs[0].info['bads'] + if projs is None: + projs = epochs[0].info['projs'] + # make sure Epochs are compatible + for epochs_t in epochs[1:]: + if epochs_t.proj != epochs[0].proj: + raise ValueError('Epochs must agree on the use of projections') + for proj_a, proj_b in zip(epochs_t.info['projs'], projs): + if not _proj_equal(proj_a, proj_b): + raise ValueError('Epochs must have same projectors') + projs = _check_projs(projs) + ch_names = epochs[0].ch_names + + # make sure Epochs are compatible + for epochs_t in epochs[1:]: + if epochs_t.info['bads'] != bads: + raise ValueError('Epochs must have same bad channels') + if epochs_t.ch_names != ch_names: + raise ValueError('Epochs must have same channel names') + picks_list = _picks_by_type(epochs[0].info) + picks_meeg = np.concatenate([b for _, b in picks_list]) + picks_meeg = np.sort(picks_meeg) + ch_names = [epochs[0].ch_names[k] for k in picks_meeg] + info = epochs[0].info # we will overwrite 'epochs' + + if not keep_sample_mean: + # prepare mean covs + n_epoch_types = len(epochs) + data_mean = [0] * n_epoch_types + n_samples = np.zeros(n_epoch_types, dtype=np.int64) + n_epochs = np.zeros(n_epoch_types, dtype=np.int64) + + for ii, epochs_t in enumerate(epochs): + + tslice = _get_tslice(epochs_t, tmin, tmax) + for e in epochs_t: + e = e[picks_meeg, tslice] + if not keep_sample_mean: + data_mean[ii] += e +
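This accumulation loop, like the 'empirical' fast path in compute_raw_covariance earlier, builds the estimate from running sums rather than loading all data at once. That path relies on the one-pass identity X @ X.T - outer(sum, sum) / n == sum_t (x_t - mean)(x_t - mean).T, which a quick numpy check confirms (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(4, 100))      # channels x samples
mu = X.sum(axis=1)                 # running sum, as in the chunked loop
C = (X @ X.T - np.outer(mu, mu) / X.shape[1]) / (X.shape[1] - 1)
assert np.allclose(C, np.cov(X))   # matches numpy's reference estimator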
n_samples[ii] += e.shape[1] + n_epochs[ii] += 1 + + n_samples_epoch = n_samples // n_epochs + norm_const = np.sum(n_samples_epoch * (n_epochs - 1)) + data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean + in zip(n_epochs, data_mean)] + + info = pick_info(info, picks_meeg) + tslice = _get_tslice(epochs[0], tmin, tmax) + epochs = [ee.get_data(picks=picks_meeg)[..., tslice] for ee in epochs] + picks_meeg = np.arange(len(picks_meeg)) + picks_list = _picks_by_type(info) + + if len(epochs) > 1: + epochs = np.concatenate(epochs, 0) + else: + epochs = epochs[0] + + epochs = np.hstack(epochs) + n_samples_tot = epochs.shape[-1] + _check_n_samples(n_samples_tot, len(picks_meeg)) + + epochs = epochs.T # sklearn | C-order + cov_data = _compute_covariance_auto( + epochs, method=method, method_params=_method_params, info=info, + cv=cv, n_jobs=n_jobs, stop_early=True, picks_list=picks_list, + scalings=scalings, rank=rank) + + if keep_sample_mean is False: + cov = cov_data['empirical']['data'] + # undo scaling + cov *= (n_samples_tot - 1) + # ... apply pre-computed class-wise normalization + for mean_cov in data_mean: + cov -= mean_cov + cov /= norm_const + + covs = list() + for this_method, data in cov_data.items(): + cov = Covariance(data.pop('data'), ch_names, info['bads'], projs, + nfree=n_samples_tot - 1) + + # add extra info + cov.update(method=this_method, **data) + covs.append(cov) + logger.info('Number of samples used : %d' % n_samples_tot) + covs.sort(key=lambda c: c['loglik'], reverse=True) + + if len(covs) > 1: + msg = ['log-likelihood on unseen data (descending order):'] + for c in covs: + msg.append('%s: %0.3f' % (c['method'], c['loglik'])) + logger.info('\n '.join(msg)) + if return_estimators: + out = covs + else: + out = covs[0] + logger.info('selecting best estimator: {}'.format(out['method'])) + else: + out = covs[0] + logger.info('[done]') + + return out + + +def _check_scalings_user(scalings): + if isinstance(scalings, dict): + for k, v in scalings.items(): + _check_option('the keys in `scalings`', k, ['mag', 'grad', 'eeg']) + elif scalings is not None and not isinstance(scalings, np.ndarray): + raise TypeError('scalings must be a dict, ndarray, or None, got %s' + % type(scalings)) + scalings = _handle_default('scalings', scalings) + return scalings + + +def _eigvec_subspace(eig, eigvec, mask): + """Compute the subspace from a subset of eigenvectors.""" + # We do the same thing we do with projectors: + P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask]) + eig, eigvec = eigh(P) + eigvec = eigvec.conj().T + return eig, eigvec + + +def _compute_covariance_auto(data, method, info, method_params, cv, + scalings, n_jobs, stop_early, picks_list, rank): + """Compute covariance auto mode.""" + # rescale to improve numerical stability + orig_rank = rank + rank = compute_rank(RawArray(data.T, info, copy=None, verbose=False), + rank, scalings, info) + with _scaled_array(data.T, picks_list, scalings): + C = np.dot(data.T, data) + _, eigvec, mask = _smart_eigh(C, info, rank, proj_subspace=True, + do_compute_rank=False) + eigvec = eigvec[mask] + data = np.dot(data, eigvec.T) + used = np.where(mask)[0] + sub_picks_list = [(key, np.searchsorted(used, picks)) + for key, picks in picks_list] + sub_info = pick_info(info, used) if len(used) != len(mask) else info + logger.info('Reducing data rank from %s -> %s' + % (len(mask), eigvec.shape[0])) + estimator_cov_info = list() + msg = 'Estimating covariance using %s' + + ok_sklearn = check_version('sklearn') + if not 
ok_sklearn and (len(method) != 1 or method[0] != 'empirical'): + raise ValueError('scikit-learn is not installed, `method` must be ' + '`empirical`, got %s' % (method,)) + + for method_ in method: + data_ = data.copy() + name = method_.__name__ if callable(method_) else method_ + logger.info(msg % name.upper()) + mp = method_params[method_] + _info = {} + + if method_ == 'empirical': + est = EmpiricalCovariance(**mp) + est.fit(data_) + estimator_cov_info.append((est, est.covariance_, _info)) + del est + + elif method_ == 'diagonal_fixed': + est = _RegCovariance(info=sub_info, **mp) + est.fit(data_) + estimator_cov_info.append((est, est.covariance_, _info)) + del est + + elif method_ == 'ledoit_wolf': + from sklearn.covariance import LedoitWolf + shrinkages = [] + lw = LedoitWolf(**mp) + + for ch_type, picks in sub_picks_list: + lw.fit(data_[:, picks]) + shrinkages.append((ch_type, lw.shrinkage_, picks)) + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del lw, sc + + elif method_ == 'oas': + from sklearn.covariance import OAS + shrinkages = [] + oas = OAS(**mp) + + for ch_type, picks in sub_picks_list: + oas.fit(data_[:, picks]) + shrinkages.append((ch_type, oas.shrinkage_, picks)) + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del oas, sc + + elif method_ == 'shrinkage': + sc = _ShrunkCovariance(**mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del sc + + elif method_ == 'shrunk': + from sklearn.model_selection import GridSearchCV + from sklearn.covariance import ShrunkCovariance + shrinkage = mp.pop('shrinkage') + tuned_parameters = [{'shrinkage': shrinkage}] + shrinkages = [] + gs = GridSearchCV(ShrunkCovariance(**mp), + tuned_parameters, cv=cv) + for ch_type, picks in sub_picks_list: + gs.fit(data_[:, picks]) + shrinkages.append((ch_type, gs.best_estimator_.shrinkage, + picks)) + shrinkages = [c[0] for c in zip(shrinkages)] + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del shrinkage, sc + + elif method_ == 'pca': + assert orig_rank == 'full' + pca, _info = _auto_low_rank_model( + data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv, + stop_early=stop_early) + pca.fit(data_) + estimator_cov_info.append((pca, pca.get_covariance(), _info)) + del pca + + elif method_ == 'factor_analysis': + assert orig_rank == 'full' + fa, _info = _auto_low_rank_model( + data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv, + stop_early=stop_early) + fa.fit(data_) + estimator_cov_info.append((fa, fa.get_covariance(), _info)) + del fa + else: + raise ValueError('Oh no! 
Your estimator does not have' + ' a .fit method') + logger.info('Done.') + + if len(method) > 1: + logger.info('Using cross-validation to select the best estimator.') + + out = dict() + for ei, (estimator, cov, runtime_info) in \ + enumerate(estimator_cov_info): + if len(method) > 1: + loglik = _cross_val(data, estimator, cv, n_jobs) + else: + loglik = None + # project back + cov = np.dot(eigvec.T, np.dot(cov, eigvec)) + # undo bias + cov *= data.shape[0] / (data.shape[0] - 1) + # undo scaling + _undo_scaling_cov(cov, picks_list, scalings) + method_ = method[ei] + name = method_.__name__ if callable(method_) else method_ + out[name] = dict(loglik=loglik, data=cov, estimator=estimator) + out[name].update(runtime_info) + + return out + + +def _gaussian_loglik_scorer(est, X, y=None): + """Compute the Gaussian log likelihood of X under the model in est.""" + # compute empirical covariance of the test set + precision = est.get_precision() + n_samples, n_features = X.shape + log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1) + log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision)) + out = np.mean(log_like) + return out + + +def _cross_val(data, est, cv, n_jobs): + """Compute cross validation.""" + from sklearn.model_selection import cross_val_score + return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs, + scoring=_gaussian_loglik_scorer)) + + +def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, + stop_early=True, verbose=None): + """Compute latent variable models.""" + method_params = deepcopy(method_params) + iter_n_components = method_params.pop('iter_n_components') + if iter_n_components is None: + iter_n_components = np.arange(5, data.shape[1], 5) + from sklearn.decomposition import PCA, FactorAnalysis + if mode == 'factor_analysis': + est = FactorAnalysis + else: + assert mode == 'pca' + est = PCA + est = est(**method_params) + est.n_components = 1 + scores = np.empty_like(iter_n_components, dtype=np.float64) + scores.fill(np.nan) + + # make sure we don't empty the thing if it's a generator + max_n = max(list(deepcopy(iter_n_components))) + if max_n > data.shape[1]: + warn('You are trying to estimate %i components on matrix ' + 'with %i features.' % (max_n, data.shape[1])) + + for ii, n in enumerate(iter_n_components): + est.n_components = n + try: # this may fail depending on rank and split + score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs) + except ValueError: + score = np.inf + if np.isinf(score) or score > 0: + logger.info('... infinite values encountered. stopping estimation') + break + logger.info('... rank: %i - loglik: %0.3f' % (n, score)) + if score != -np.inf: + scores[ii] = score + + if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0) and stop_early): + # early stop search when loglik has been going down 3 times + logger.info('early stopping parameter search.') + break + + # happens if rank is too low right from the beginning + if np.isnan(scores).all(): + raise RuntimeError('Oh no! Could not estimate covariance because all ' + 'scores were NaN. Please contact the MNE-Python ' + 'developers.') + + i_score = np.nanargmax(scores) + best = est.n_components = iter_n_components[i_score] + logger.info('...
best model at rank = %i' % best) + runtime_info = {'ranks': np.array(iter_n_components), + 'scores': scores, + 'best': best, + 'cv': cv} + return est, runtime_info + + +############################################################################### +# Sklearn Estimators + + +class _RegCovariance(BaseEstimator): + """Aux class.""" + + def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, + ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, + csd=0.1, dbs=0.1, store_precision=False, + assume_centered=False): + self.info = info + # For sklearn compat, these cannot (easily?) be combined into + # a single dictionary + self.grad = grad + self.mag = mag + self.eeg = eeg + self.seeg = seeg + self.dbs = dbs + self.ecog = ecog + self.hbo = hbo + self.hbr = hbr + self.fnirs_cw_amplitude = fnirs_cw_amplitude + self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude + self.fnirs_fd_phase = fnirs_fd_phase + self.fnirs_od = fnirs_od + self.csd = csd + self.store_precision = store_precision + self.assume_centered = assume_centered + + def fit(self, X): + """Fit covariance model with classical diagonal regularization.""" + self.estimator_ = EmpiricalCovariance( + store_precision=self.store_precision, + assume_centered=self.assume_centered) + + self.covariance_ = self.estimator_.fit(X).covariance_ + self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T) + cov_ = Covariance( + data=self.covariance_, names=self.info['ch_names'], + bads=self.info['bads'], projs=self.info['projs'], + nfree=len(self.covariance_)) + cov_ = regularize( + cov_, self.info, proj=False, exclude='bads', + grad=self.grad, mag=self.mag, eeg=self.eeg, + ecog=self.ecog, seeg=self.seeg, dbs=self.dbs, + hbo=self.hbo, hbr=self.hbr, rank='full') + self.estimator_.covariance_ = self.covariance_ = cov_.data + return self + + def score(self, X_test, y=None): + """Delegate call to modified EmpiricalCovariance instance.""" + return self.estimator_.score(X_test, y=y) + + def get_precision(self): + """Delegate call to modified EmpiricalCovariance instance.""" + return self.estimator_.get_precision() + + +class _ShrunkCovariance(BaseEstimator): + """Aux class.""" + + def __init__(self, store_precision, assume_centered, + shrinkage=0.1): + + self.store_precision = store_precision + self.assume_centered = assume_centered + self.shrinkage = shrinkage + + def fit(self, X): + """Fit covariance model with oracle shrinkage regularization.""" + from sklearn.covariance import shrunk_covariance + self.estimator_ = EmpiricalCovariance( + store_precision=self.store_precision, + assume_centered=self.assume_centered) + + cov = self.estimator_.fit(X).covariance_ + + if not isinstance(self.shrinkage, (list, tuple)): + shrinkage = [('all', self.shrinkage, np.arange(len(cov)))] + else: + shrinkage = self.shrinkage + + zero_cross_cov = np.zeros_like(cov, dtype=bool) + for a, b in itt.combinations(shrinkage, 2): + picks_i, picks_j = a[2], b[2] + ch_ = a[0], b[0] + if 'eeg' in ch_: + zero_cross_cov[np.ix_(picks_i, picks_j)] = True + zero_cross_cov[np.ix_(picks_j, picks_i)] = True + + self.zero_cross_cov_ = zero_cross_cov + + # Apply shrinkage to blocks + for ch_type, c, picks in shrinkage: + sub_cov = cov[np.ix_(picks, picks)] + cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov, + shrinkage=c) + + # Apply shrinkage to cross-cov + for a, b in itt.combinations(shrinkage, 2): + shrinkage_i, shrinkage_j = a[1], b[1] + picks_i, picks_j = a[2], b[2] + c_ij = np.sqrt((1. - shrinkage_i) * (1. 
- shrinkage_j)) + cov[np.ix_(picks_i, picks_j)] *= c_ij + cov[np.ix_(picks_j, picks_i)] *= c_ij + + # Set to zero the necessary cross-cov + if np.any(zero_cross_cov): + cov[zero_cross_cov] = 0.0 + + self.estimator_.covariance_ = self.covariance_ = cov + return self + + def score(self, X_test, y=None): + """Delegate to modified EmpiricalCovariance instance.""" + # compute empirical covariance of the test set + test_cov = empirical_covariance(X_test - self.estimator_.location_, + assume_centered=True) + if np.any(self.zero_cross_cov_): + test_cov[self.zero_cross_cov_] = 0. + res = log_likelihood(test_cov, self.estimator_.get_precision()) + return res + + def get_precision(self): + """Delegate to modified EmpiricalCovariance instance.""" + return self.estimator_.get_precision() + + +############################################################################### +# Writing + +@verbose +def write_cov(fname, cov, *, overwrite=False, verbose=None): + """Write a noise covariance matrix. + + Parameters + ---------- + fname : str + The name of the file. It should end with -cov.fif or -cov.fif.gz. + cov : Covariance + The noise covariance matrix. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + See Also + -------- + read_cov + """ + cov.save(fname, overwrite=overwrite, verbose=verbose) + + +############################################################################### +# Prepare for inverse modeling + +def _unpack_epochs(epochs): + """Aux Function.""" + if len(epochs.event_id) > 1: + epochs = [epochs[k] for k in epochs.event_id] + else: + epochs = [epochs] + + return epochs + + +def _get_ch_whitener(A, pca, ch_type, rank): + """Get whitener params for a set of channels.""" + # whitening operator + eig, eigvec = eigh(A, overwrite_a=True) + eigvec = eigvec.conj().T + mask = np.ones(len(eig), bool) + eig[:-rank] = 0.0 + mask[:-rank] = False + + logger.info(' Setting small %s eigenvalues to zero (%s)' + % (ch_type, 'using PCA' if pca else 'without PCA')) + if pca: # No PCA case. + # This line will reduce the actual number of variables in data + # and leadfield to the true rank. + eigvec = eigvec[:-rank].copy() + return eig, eigvec, mask + + +@verbose +def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, + scalings=None, on_rank_mismatch='ignore', verbose=None): + """Prepare noise covariance matrix. + + Parameters + ---------- + noise_cov : instance of Covariance + The noise covariance to process. + %(info_not_none)s (Used to get channel types and bad channels). + ch_names : list | None + The channel names to be considered. Can be None to use + ``info['ch_names']``. + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + Data will be rescaled before rank estimation to improve accuracy. + If dict, it will override the following dict (default if None):: + + dict(mag=1e12, grad=1e11, eeg=1e5) + %(on_rank_mismatch)s + %(verbose)s + + Returns + ------- + cov : instance of Covariance + A copy of the covariance with the good channels subselected + and parameters updated. 
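A hedged sketch of how the preparation step documented above is typically used before whitening (``cov`` and ``info`` assumed from the earlier sketches):

from mne.cov import prepare_noise_cov

cov_prep = prepare_noise_cov(cov, info, rank=None)
print(cov_prep['eig'][:5])  # eigendecomposition is now populated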
+ """ + # reorder C and info to match ch_names order + noise_cov_idx = list() + missing = list() + ch_names = info['ch_names'] if ch_names is None else ch_names + for c in ch_names: + # this could be try/except ValueError, but it is not the preferred way + if c in noise_cov.ch_names: + noise_cov_idx.append(noise_cov.ch_names.index(c)) + else: + missing.append(c) + if len(missing): + raise RuntimeError('Not all channels present in noise covariance:\n%s' + % missing) + C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)] + info = pick_info(info, pick_channels(info['ch_names'], ch_names)) + projs = info['projs'] + noise_cov['projs'] + noise_cov = Covariance( + data=C, names=ch_names, bads=list(noise_cov['bads']), + projs=deepcopy(noise_cov['projs']), nfree=noise_cov['nfree'], + method=noise_cov.get('method', None), + loglik=noise_cov.get('loglik', None)) + + eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs, + ch_names, on_rank_mismatch=on_rank_mismatch) + noise_cov.update(eig=eig, eigvec=eigvec) + return noise_cov + + +@verbose +def _smart_eigh(C, info, rank, scalings=None, projs=None, + ch_names=None, proj_subspace=False, do_compute_rank=True, + on_rank_mismatch='ignore', verbose=None): + """Compute eigh of C taking into account rank and ch_type scalings.""" + scalings = _handle_default('scalings_cov_rank', scalings) + projs = info['projs'] if projs is None else projs + ch_names = info['ch_names'] if ch_names is None else ch_names + if info['ch_names'] != ch_names: + info = pick_info(info, [info['ch_names'].index(c) for c in ch_names]) + assert info['ch_names'] == ch_names + n_chan = len(ch_names) + + # Create the projection operator + proj, ncomp, _ = make_projector(projs, ch_names) + + if isinstance(C, Covariance): + C = C['data'] + if ncomp > 0: + logger.info(' Created an SSP operator (subspace dimension = %d)' + % ncomp) + C = np.dot(proj, np.dot(C, proj.T)) + + noise_cov = Covariance(C, ch_names, [], projs, 0) + if do_compute_rank: # if necessary + rank = compute_rank( + noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch) + assert C.ndim == 2 and C.shape[0] == C.shape[1] + + # time saving short-circuit + if proj_subspace and sum(rank.values()) == C.shape[0]: + return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool) + + dtype = complex if C.dtype == np.complex_ else float + eig = np.zeros(n_chan, dtype) + eigvec = np.zeros((n_chan, n_chan), dtype) + mask = np.zeros(n_chan, bool) + for ch_type, picks in _picks_by_type(info, meg_combined=True, + ref_meg=False, exclude=[]): + if len(picks) == 0: + continue + this_C = C[np.ix_(picks, picks)] + + if ch_type not in rank and ch_type in ('mag', 'grad'): + this_rank = rank['meg'] # if there is only one or the other + else: + this_rank = rank[ch_type] + + e, ev, m = _get_ch_whitener(this_C, False, ch_type.upper(), this_rank) + if proj_subspace: + # Choose the subspace the same way we do for projections + e, ev = _eigvec_subspace(e, ev, m) + eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m + # XXX : also handle ref for sEEG and ECoG + if ch_type == 'eeg' and _needs_eeg_average_ref_proj(info) and not \ + _has_eeg_average_ref_proj(projs): + warn('No average EEG reference present in info["projs"], ' + 'covariance may be adversely affected. 
Consider recomputing ' + 'covariance with an average EEG reference projector ' + 'added.') + return eig, eigvec, mask + + +@verbose +def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', + proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, + fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1, + rank=None, scalings=None, verbose=None): + """Regularize noise covariance matrix. + + This method works by adding a constant to the diagonal for each + channel type separately. Special care is taken to keep the + rank of the data constant. + + .. note:: This function is kept for reasons of backward-compatibility. + Please consider explicitly using the ``method`` parameter in + :func:`mne.compute_covariance` to directly combine estimation + with regularization in a data-driven fashion. See the `faq + `_ + for more information. + + Parameters + ---------- + cov : Covariance + The noise covariance matrix. + %(info_not_none)s (Used to get channel types and bad channels). + mag : float (default 0.1) + Regularization factor for MEG magnetometers. + grad : float (default 0.1) + Regularization factor for MEG gradiometers. Must be the same as + ``mag`` if data have been processed with SSS. + eeg : float (default 0.1) + Regularization factor for EEG. + exclude : list | 'bads' (default 'bads') + List of channels to mark as bad. If 'bads', bad channels + are extracted from both info['bads'] and cov['bads']. + proj : bool (default True) + Apply projections to keep rank of data. + seeg : float (default 0.1) + Regularization factor for sEEG signals. + ecog : float (default 0.1) + Regularization factor for ECoG signals. + hbo : float (default 0.1) + Regularization factor for HBO signals. + hbr : float (default 0.1) + Regularization factor for HBR signals. + fnirs_cw_amplitude : float (default 0.1) + Regularization factor for fNIRS CW raw signals. + fnirs_fd_ac_amplitude : float (default 0.1) + Regularization factor for fNIRS FD AC raw signals. + fnirs_fd_phase : float (default 0.1) + Regularization factor for fNIRS raw phase signals. + fnirs_od : float (default 0.1) + Regularization factor for fNIRS optical density signals. + csd : float (default 0.1) + Regularization factor for EEG-CSD signals. + dbs : float (default 0.1) + Regularization factor for DBS signals. + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + Data will be rescaled before rank estimation to improve accuracy. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.17 + %(verbose)s + + Returns + ------- + reg_cov : Covariance + The regularized covariance matrix.
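+
+    Examples
+    --------
+    A minimal usage sketch, assuming ``epochs`` is an existing
+    :class:`mne.Epochs` instance::
+
+        >>> import mne  # doctest: +SKIP
+        >>> cov = mne.compute_covariance(epochs)  # doctest: +SKIP
+        >>> reg_cov = regularize(cov, epochs.info, mag=0.1, grad=0.1,
+        ...                      eeg=0.1)  # doctest: +SKIP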
+ + See Also + -------- + mne.compute_covariance + """ # noqa: E501 + from scipy import linalg + cov = cov.copy() + info._check_consistency() + scalings = _handle_default('scalings_cov_rank', scalings) + regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr, + fnirs_cw_amplitude=fnirs_cw_amplitude, + fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, + fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) + + if exclude is None: + raise ValueError('exclude must be a list of strings or "bads"') + + if exclude == 'bads': + exclude = info['bads'] + cov['bads'] + + picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT} + meg_combined = 'auto' if rank != 'full' else False + picks_dict.update(dict(_picks_by_type( + info, meg_combined=meg_combined, exclude=exclude, ref_meg=False))) + if len(picks_dict.get('meg', [])) > 0 and rank != 'full': # combined + if mag != grad: + raise ValueError('On data where magnetometers and gradiometers ' + 'are dependent (e.g., SSSed data), mag (%s) must ' + 'equal grad (%s)' % (mag, grad)) + logger.info('Regularizing MEG channels jointly') + regs['meg'] = mag + else: + regs.update(mag=mag, grad=grad) + if rank != 'full': + rank = compute_rank(cov, rank, scalings, info) + + info_ch_names = info['ch_names'] + ch_names_by_type = dict() + for ch_type, picks_type in picks_dict.items(): + ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type] + + # This actually removes bad channels from the cov, which is not backward + # compatible, so let's leave all channels in + cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude) + ch_names = cov_good.ch_names + + # Now get the indices for each channel type in the cov + idx_cov = {ch_type: [] for ch_type in ch_names_by_type} + for i, ch in enumerate(ch_names): + for ch_type in ch_names_by_type: + if ch in ch_names_by_type[ch_type]: + idx_cov[ch_type].append(i) + break + else: + raise Exception('channel %s is unknown type' % ch) + + C = cov_good['data'] + + assert len(C) == sum(map(len, idx_cov.values())) + + if proj: + projs = info['projs'] + cov_good['projs'] + projs = activate_proj(projs) + + for ch_type in idx_cov: + desc = ch_type.upper() + idx = idx_cov[ch_type] + if len(idx) == 0: + continue + reg = regs[ch_type] + if reg == 0.0: + logger.info(" %s regularization : None" % desc) + continue + logger.info(" %s regularization : %s" % (desc, reg)) + + this_C = C[np.ix_(idx, idx)] + U = np.eye(this_C.shape[0]) + this_ch_names = [ch_names[k] for k in idx] + if rank == 'full': + if proj: + P, ncomp, _ = make_projector(projs, this_ch_names) + if ncomp > 0: + # This adjustment ends up being redundant if rank is None: + U = linalg.svd(P)[0][:, :-ncomp] + logger.info(' Created an SSP operator for %s ' + '(dimension = %d)' % (desc, ncomp)) + else: + this_picks = pick_channels(info['ch_names'], this_ch_names) + this_info = pick_info(info, this_picks) + # Here we could use proj_subspace=True, but this should not matter + # since this is already in a loop over channel types + _, eigvec, mask = _smart_eigh(this_C, this_info, rank) + U = eigvec[mask].T + this_C = np.dot(U.T, np.dot(this_C, U)) + + sigma = np.mean(np.diag(this_C)) + this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace + this_C = np.dot(U, np.dot(this_C, U.T)) + C[np.ix_(idx, idx)] = this_C + + # Put data back in correct locations + idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude) + cov['data'][np.ix_(idx, idx)] = C + + return cov + + +def _regularized_covariance(data, reg=None, 
method_params=None, info=None, + rank=None): + """Compute a regularized covariance from data using sklearn. + + This is a convenience wrapper for mne.decoding functions, which + adopted a slightly different covariance API. + + Returns + ------- + cov : ndarray, shape (n_channels, n_channels) + The covariance matrix. + """ + _validate_type(reg, (str, 'numeric', None)) + if reg is None: + reg = 'empirical' + elif not isinstance(reg, str): + reg = float(reg) + if method_params is not None: + raise ValueError('If reg is a float, method_params must be None ' + '(got %s)' % (type(method_params),)) + method_params = dict(shrinkage=dict( + shrinkage=reg, assume_centered=True, store_precision=False)) + reg = 'shrinkage' + method, method_params = _check_method_params( + reg, method_params, name='reg', allow_auto=False, rank=rank) + # use mag instead of eeg here to avoid the cov EEG projection warning + info = create_info(data.shape[-2], 1000., 'mag') if info is None else info + picks_list = _picks_by_type(info) + scalings = _handle_default('scalings_cov_rank', None) + cov = _compute_covariance_auto( + data.T, method=method, method_params=method_params, + info=info, cv=None, n_jobs=1, stop_early=True, + picks_list=picks_list, scalings=scalings, + rank=rank)[reg]['data'] + return cov + + +@verbose +def compute_whitener(noise_cov, info=None, picks=None, rank=None, + scalings=None, return_rank=False, pca=False, + return_colorer=False, on_rank_mismatch='warn', + verbose=None): + """Compute whitening matrix. + + Parameters + ---------- + noise_cov : Covariance + The noise covariance. + %(info)s Can be None if ``noise_cov`` has already been + prepared with :func:`prepare_noise_cov`. + %(picks_good_data_noref)s + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + The rescaling method to be applied. See documentation of + ``prepare_noise_cov`` for details. + return_rank : bool + If True, return the rank used to compute the whitener. + + .. versionadded:: 0.15 + pca : bool | str + Space to project the data into. Options: + + :data:`python:True` + Whitener will be shape (n_nonzero, n_channels). + ``'white'`` + Whitener will be shape (n_channels, n_channels), potentially rank + deficient, and have the first ``n_channels - n_nonzero`` rows and + columns set to zero. + :data:`python:False` (default) + Whitener will be shape (n_channels, n_channels), potentially rank + deficient, and rotated back to the space of the original data. + + .. versionadded:: 0.18 + return_colorer : bool + If True, return the colorer as well. + %(on_rank_mismatch)s + %(verbose)s + + Returns + ------- + W : ndarray, shape (n_channels, n_channels) or (n_nonzero, n_channels) + The whitening matrix. + ch_names : list + The channel names. + rank : int + Rank reduction of the whitener. Returned only if return_rank is True. + colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero) + The coloring matrix. 
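+
+    Examples
+    --------
+    A minimal usage sketch, assuming ``raw`` is a loaded
+    :class:`mne.io.Raw`::
+
+        >>> import mne  # doctest: +SKIP
+        >>> noise_cov = mne.compute_raw_covariance(raw)  # doctest: +SKIP
+        >>> W, ch_names = compute_whitener(noise_cov, raw.info)  # doctest: +SKIP
+        >>> white = W @ raw.get_data(picks=ch_names)  # doctest: +SKIP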
+ """ # noqa: E501 + _validate_type(pca, (str, bool), 'space') + _valid_pcas = (True, 'white', False) + if pca not in _valid_pcas: + raise ValueError('space must be one of %s, got %s' + % (_valid_pcas, pca)) + if info is None: + if 'eig' not in noise_cov: + raise ValueError('info can only be None if the noise cov has ' + 'already been prepared with prepare_noise_cov') + ch_names = deepcopy(noise_cov['names']) + else: + picks = _picks_to_idx(info, picks, with_ref_meg=False) + ch_names = [info['ch_names'][k] for k in picks] + del picks + noise_cov = prepare_noise_cov( + noise_cov, info, ch_names, rank, scalings, + on_rank_mismatch=on_rank_mismatch) + + n_chan = len(ch_names) + assert n_chan == len(noise_cov['eig']) + + # Omit the zeroes due to projection + eig = noise_cov['eig'].copy() + nzero = (eig > 0) + eig[~nzero] = 0. # get rid of numerical noise (negative) ones + + if noise_cov['eigvec'].dtype.kind == 'c': + dtype = np.complex128 + else: + dtype = np.float64 + W = np.zeros((n_chan, 1), dtype) + W[nzero, 0] = 1.0 / np.sqrt(eig[nzero]) + # Rows of eigvec are the eigenvectors + W = W * noise_cov['eigvec'] # C ** -0.5 + C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5 + n_nzero = nzero.sum() + logger.info(' Created the whitener using a noise covariance matrix ' + 'with rank %d (%d small eigenvalues omitted)' + % (n_nzero, noise_cov['dim'] - n_nzero)) + + # Do the requested projection + if pca is True: + W = W[nzero] + C = C[:, nzero] + elif pca is False: + W = np.dot(noise_cov['eigvec'].conj().T, W) + C = np.dot(C, noise_cov['eigvec']) + + # Triage return + out = W, ch_names + if return_rank: + out += (n_nzero,) + if return_colorer: + out += (C,) + return out + + +@verbose +def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, + scalings=None, verbose=None): + """Whiten evoked data using given noise covariance. + + Parameters + ---------- + evoked : instance of Evoked + The evoked data. + noise_cov : instance of Covariance + The noise covariance. + %(picks_good_data)s + diag : bool (default False) + If True, whiten using only the diagonal of the covariance. + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None (default None) + To achieve reliable rank estimation on multiple sensors, + sensors have to be rescaled. This parameter controls the + rescaling. If dict, it will override the + following default dict (default if None): + + dict(mag=1e12, grad=1e11, eeg=1e5) + %(verbose)s + + Returns + ------- + evoked_white : instance of Evoked + The whitened evoked data. 
+ """ + evoked = evoked.copy() + picks = _picks_to_idx(evoked.info, picks) + + if diag: + noise_cov = noise_cov.as_diag() + + W, _ = compute_whitener(noise_cov, evoked.info, picks=picks, + rank=rank, scalings=scalings) + + evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks]) + return evoked + + +@verbose +def _read_cov(fid, node, cov_kind, limited=False, verbose=None): + """Read a noise covariance matrix.""" + # Find all covariance matrices + from scipy import sparse + covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV) + if len(covs) == 0: + raise ValueError('No covariance matrices found') + + # Is any of the covariance matrices a noise covariance + for p in range(len(covs)): + tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND) + + if tag is not None and int(tag.data) == cov_kind: + this = covs[p] + + # Find all the necessary data + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM) + if tag is None: + raise ValueError('Covariance matrix dimension not found') + dim = int(tag.data) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE) + if tag is None: + nfree = -1 + else: + nfree = int(tag.data) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD) + if tag is None: + method = None + else: + method = tag.data + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE) + if tag is None: + score = None + else: + score = tag.data[0] + + tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES) + if tag is None: + names = [] + else: + names = tag.data.split(':') + if len(names) != dim: + raise ValueError('Number of names does not match ' + 'covariance matrix dimension') + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV) + if tag is None: + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG) + if tag is None: + raise ValueError('No covariance matrix data found') + else: + # Diagonal is stored + data = tag.data + diag = True + logger.info(' %d x %d diagonal covariance (kind = ' + '%d) found.' % (dim, dim, cov_kind)) + + else: + if not sparse.issparse(tag.data): + # Lower diagonal is stored + vals = tag.data + data = np.zeros((dim, dim)) + data[np.tril(np.ones((dim, dim))) > 0] = vals + data = data + data.T + data.flat[::dim + 1] /= 2.0 + diag = False + logger.info(' %d x %d full covariance (kind = %d) ' + 'found.' % (dim, dim, cov_kind)) + else: + diag = False + data = tag.data + logger.info(' %d x %d sparse covariance (kind = %d)' + ' found.' % (dim, dim, cov_kind)) + + # Read the possibly precomputed decomposition + tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES) + tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS) + if tag1 is not None and tag2 is not None: + eig = tag1.data + eigvec = tag2.data + else: + eig = None + eigvec = None + + # Read the projection operator + projs = _read_proj(fid, this) + + # Read the bad channel list + bads = _read_bad_channels(fid, this, None) + + # Put it together + assert dim == len(data) + assert data.ndim == (1 if diag else 2) + cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names, + data=data, projs=projs, bads=bads, nfree=nfree, eig=eig, + eigvec=eigvec) + if score is not None: + cov['loglik'] = score + if method is not None: + cov['method'] = method + if limited: + del cov['kind'], cov['dim'], cov['diag'] + + return cov + + logger.info(' Did not find the desired covariance matrix (kind = %d)' + % cov_kind) + + return None + + +def _write_cov(fid, cov): + """Write a noise covariance matrix.""" + start_block(fid, FIFF.FIFFB_MNE_COV) + + # Dimensions etc. 
+ write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind']) + write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim']) + if cov['nfree'] > 0: + write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree']) + + # Channel names + if cov['names'] is not None and len(cov['names']) > 0: + write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names']) + + # Data + if cov['diag']: + write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data']) + else: + # Store only lower part of covariance matrix + dim = cov['dim'] + mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0 + vals = cov['data'][mask].ravel() + write_double(fid, FIFF.FIFF_MNE_COV, vals) + + # Eigenvalues and vectors if present + if cov['eig'] is not None and cov['eigvec'] is not None: + write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec']) + write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig']) + + # Projection operator + if cov['projs'] is not None and len(cov['projs']) > 0: + _write_proj(fid, cov['projs']) + + # Bad channels + if cov['bads'] is not None and len(cov['bads']) > 0: + start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads']) + end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + + # estimator method + if 'method' in cov: + write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method']) + + # negative log-likelihood score + if 'loglik' in cov: + write_double( + fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik'])) + + # Done! + end_block(fid, FIFF.FIFFB_MNE_COV) + + +@verbose +def _ensure_cov(cov, name='cov', *, verbose=None): + _validate_type(cov, ('path-like', Covariance), name) + logger.info('Noise covariance : %s' % (cov,)) + if not isinstance(cov, Covariance): + cov = read_cov(cov, verbose=False) + return cov diff --git a/python/libs/mne/cuda.py b/python/libs/mne/cuda.py new file mode 100644 index 0000000..76b9c0e --- /dev/null +++ b/python/libs/mne/cuda.py @@ -0,0 +1,361 @@ +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np + +from .utils import (sizeof_fmt, logger, get_config, warn, _explain_exception, + verbose, fill_doc) + + +_cuda_capable = False + + +def get_cuda_memory(kind='available'): + """Get the amount of free memory for CUDA operations. + + Parameters + ---------- + kind : str + Can be "available" or "total". + + Returns + ------- + memory : str + The amount of available or total memory as a human-readable string. + """ + if not _cuda_capable: + warn('CUDA not enabled, returning zero for memory') + mem = 0 + else: + import cupy + mem = cupy.cuda.runtime.memGetInfo()[dict(available=0, total=1)[kind]] + return sizeof_fmt(mem) + + +@verbose +def init_cuda(ignore_config=False, verbose=None): + """Initialize CUDA functionality. + + This function attempts to load the necessary interfaces + (hardware connectivity) to run CUDA-based filtering. This + function should only need to be run once per session. + + If the config var (set via mne.set_config or in ENV) + MNE_USE_CUDA == 'true', this function will be executed when + the first CUDA setup is performed. If this variable is not + set, this function can be manually executed. + + Parameters + ---------- + ignore_config : bool + If True, ignore the config value MNE_USE_CUDA and force init. 
+ %(verbose)s + """ + global _cuda_capable + if _cuda_capable: + return + if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() != + 'true'): + logger.info('CUDA not enabled in config, skipping initialization') + return + # Triage possible errors for informative messaging + _cuda_capable = False + try: + import cupy # noqa + except ImportError: + warn('module cupy not found, CUDA not enabled') + return + device_id = int(get_config('MNE_CUDA_DEVICE', '0')) + try: + # Initialize CUDA + _set_cuda_device(device_id, verbose) + except Exception: + warn('No CUDA device could be initialized, likely a hardware error, ' + 'CUDA not enabled%s' % _explain_exception()) + return + + _cuda_capable = True + # Figure out limit for CUDA FFT calculations + logger.info('Enabling CUDA with %s available memory' % get_cuda_memory()) + + +@verbose +def set_cuda_device(device_id, verbose=None): + """Set the CUDA device temporarily for the current session. + + Parameters + ---------- + device_id : int + Numeric ID of the CUDA-capable device you want MNE-Python to use. + %(verbose)s + """ + if _cuda_capable: + _set_cuda_device(device_id, verbose) + elif get_config('MNE_USE_CUDA', 'false').lower() == 'true': + init_cuda() + _set_cuda_device(device_id, verbose) + else: + warn('Could not set CUDA device because CUDA is not enabled; either ' + 'run mne.cuda.init_cuda() first, or set the MNE_USE_CUDA config ' + 'variable to "true".') + + +@verbose +def _set_cuda_device(device_id, verbose=None): + """Set the CUDA device.""" + import cupy + cupy.cuda.Device(device_id).use() + logger.info('Now using CUDA device {}'.format(device_id)) + + +############################################################################### +# Repeated FFT multiplication + +def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, + kind='FFT FIR filtering'): + """Set up repeated CUDA FFT multiplication with a given filter. + + Parameters + ---------- + n_jobs : int | str + If n_jobs == 'cuda', the function will attempt to set up for CUDA + FFT multiplication. + h : array + The filtering function that will be used repeatedly. + n_fft : int + The number of points in the FFT. + kind : str + The kind to report to the user. + + Returns + ------- + n_jobs : int + Set to 1 if n_jobs == 'cuda' was passed in, otherwise the original + n_jobs is returned unchanged. + cuda_dict : dict + Dictionary with the following keys: + n_fft : int + The number of points in the FFT. + rfft, irfft : callable + The forward and inverse FFT functions (cupy-backed when + CUDA is enabled). + h_fft : ndarray | cupy.ndarray + The filter in the frequency domain (a cupy array when CUDA + is enabled). + + Notes + ----- + This function is designed to be used with _fft_multiply_repeated().
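+
+    Examples
+    --------
+    A sketch of the CPU path that this sets up (the CUDA path swaps in
+    cupy-backed ``rfft``/``irfft`` and a cupy array for ``h_fft``)::
+
+        >>> import numpy as np
+        >>> from scipy.fft import rfft, irfft
+        >>> x, h, n_fft = np.random.randn(1000), np.hanning(65), 1024
+        >>> y = irfft(rfft(x, n_fft) * rfft(h, n_fft), n_fft)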
+ """ + from scipy.fft import rfft, irfft + cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, + h_fft=rfft(h, n=n_fft)) + if n_jobs == 'cuda': + n_jobs = 1 + init_cuda() + if _cuda_capable: + import cupy + try: + # do the IFFT normalization now so we don't have to later + h_fft = cupy.array(cuda_dict['h_fft']) + logger.info('Using CUDA for %s' % kind) + except Exception as exp: + logger.info('CUDA not used, could not instantiate memory ' + '(arrays may be too large: "%s"), falling back to ' + 'n_jobs=1' % str(exp)) + cuda_dict.update(h_fft=h_fft, + rfft=_cuda_upload_rfft, + irfft=_cuda_irfft_get) + else: + logger.info('CUDA not used, CUDA could not be initialized, ' + 'falling back to n_jobs=1') + return n_jobs, cuda_dict + + +def _fft_multiply_repeated(x, cuda_dict): + """Do FFT multiplication by a filter function (possibly using CUDA). + + Parameters + ---------- + h_fft : 1-d array or gpuarray + The filtering array to apply. + x : 1-d array + The array to filter. + n_fft : int + The number of points in the FFT. + cuda_dict : dict + Dictionary constructed using setup_cuda_multiply_repeated(). + + Returns + ------- + x : 1-d array + Filtered version of x. + """ + # do the fourier-domain operations + x_fft = cuda_dict['rfft'](x, cuda_dict['n_fft']) + x_fft *= cuda_dict['h_fft'] + x = cuda_dict['irfft'](x_fft, cuda_dict['n_fft']) + return x + + +############################################################################### +# FFT Resampling + +def _setup_cuda_fft_resample(n_jobs, W, new_len): + """Set up CUDA FFT resampling. + + Parameters + ---------- + n_jobs : int | str + If n_jobs == 'cuda', the function will attempt to set up for CUDA + FFT resampling. + W : array + The filtering function to be used during resampling. + If n_jobs='cuda', this function will be shortened (since CUDA + assumes FFTs of real signals are half the length of the signal) + and turned into a gpuarray. + new_len : int + The size of the array following resampling. + + Returns + ------- + n_jobs : int + Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise + original n_jobs is passed. + cuda_dict : dict + Dictionary with the following CUDA-related variables: + use_cuda : bool + Whether CUDA should be used. + fft_plan : instance of FFTPlan + FFT plan to use in calculating the FFT. + ifft_plan : instance of FFTPlan + FFT plan to use in calculating the IFFT. + x_fft : instance of gpuarray + Empty allocated GPU space for storing the result of the + frequency-domain multiplication. + x : instance of gpuarray + Empty allocated GPU space for the data to resample. + + Notes + ----- + This function is designed to be used with fft_resample(). + """ + from scipy.fft import rfft, irfft + cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft) + rfft_len_x = len(W) // 2 + 1 + # fold the window onto inself (should be symmetric) and truncate + W = W.copy() + W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2. 
+ W = W[:rfft_len_x] + if n_jobs == 'cuda': + n_jobs = 1 + init_cuda() + if _cuda_capable: + try: + import cupy + # do the IFFT normalization now so we don't have to later + W = cupy.array(W) + logger.info('Using CUDA for FFT resampling') + except Exception: + logger.info('CUDA not used, could not instantiate memory ' + '(arrays may be too large), falling back to ' + 'n_jobs=1') + else: + cuda_dict.update(use_cuda=True, + rfft=_cuda_upload_rfft, + irfft=_cuda_irfft_get) + else: + logger.info('CUDA not used, CUDA could not be initialized, ' + 'falling back to n_jobs=1') + cuda_dict['W'] = W + return n_jobs, cuda_dict + + +def _cuda_upload_rfft(x, n, axis=-1): + """Upload and compute rfft.""" + import cupy + return cupy.fft.rfft(cupy.array(x), n=n, axis=axis) + + +def _cuda_irfft_get(x, n, axis=-1): + """Compute irfft and get.""" + import cupy + return cupy.fft.irfft(x, n=n, axis=axis).get() + + +@fill_doc +def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, + pad='reflect_limited'): + """Do FFT resampling with a filter function (possibly using CUDA). + + Parameters + ---------- + x : 1-d array + The array to resample. Will be converted to float64 if necessary. + new_len : int + The size of the output array (before removing padding). + npads : tuple of int + Amount of padding to apply to the start and end of the + signal before resampling. + to_removes : tuple of int + Number of samples to remove after resampling. + cuda_dict : dict + Dictionary constructed using _setup_cuda_fft_resample(). + %(pad)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + + Returns + ------- + x : 1-d array + Resampled version of x. + """ + cuda_dict = dict(use_cuda=False) if cuda_dict is None else cuda_dict + # add some padding at beginning and end to make this work a little cleaner + if x.dtype != np.float64: + x = x.astype(np.float64) + x = _smart_pad(x, npads, pad) + old_len = len(x) + shorter = new_len < old_len + use_len = new_len if shorter else old_len + x_fft = cuda_dict['rfft'](x, None) + if use_len % 2 == 0: + nyq = use_len // 2 + x_fft[nyq:nyq + 1] *= 2 if shorter else 0.5 + x_fft *= cuda_dict['W'] + y = cuda_dict['irfft'](x_fft, new_len) + + # now let's trim it back to the correct size (if there was padding) + if (to_removes > 0).any(): + y = y[to_removes[0]:y.shape[0] - to_removes[1]] + + return y + + +############################################################################### +# Misc + +# this has to go in mne.cuda instead of mne.filter to avoid import errors +def _smart_pad(x, n_pad, pad='reflect_limited'): + """Pad vector x.""" + n_pad = np.asarray(n_pad) + assert n_pad.shape == (2,) + if (n_pad == 0).all(): + return x + elif (n_pad < 0).any(): + raise RuntimeError('n_pad must be non-negative') + if pad == 'reflect_limited': + # need to pad with zeros if len(x) <= npad + l_z_pad = np.zeros(max(n_pad[0] - len(x) + 1, 0), dtype=x.dtype) + r_z_pad = np.zeros(max(n_pad[1] - len(x) + 1, 0), dtype=x.dtype) + return np.concatenate([l_z_pad, 2 * x[0] - x[n_pad[0]:0:-1], x, + 2 * x[-1] - x[-2:-n_pad[1] - 2:-1], r_z_pad]) + else: + return np.pad(x, (tuple(n_pad),), pad) diff --git a/python/libs/mne/data/FreeSurferColorLUT.txt b/python/libs/mne/data/FreeSurferColorLUT.txt new file mode 100644 index 0000000..2b85ef3 --- /dev/null +++ b/python/libs/mne/data/FreeSurferColorLUT.txt @@ -0,0 +1,1397 @@ +#$Id: FreeSurferColorLUT.txt,v 1.70.2.7 2012/08/27 17:20:08 nicks Exp $ + +#No.
Label Name: R G B A + +0 Unknown 0 0 0 0 +1 Left-Cerebral-Exterior 70 130 180 0 +2 Left-Cerebral-White-Matter 245 245 245 0 +3 Left-Cerebral-Cortex 205 62 78 0 +4 Left-Lateral-Ventricle 120 18 134 0 +5 Left-Inf-Lat-Vent 196 58 250 0 +6 Left-Cerebellum-Exterior 0 148 0 0 +7 Left-Cerebellum-White-Matter 220 248 164 0 +8 Left-Cerebellum-Cortex 230 148 34 0 +9 Left-Thalamus 0 118 14 0 +10 Left-Thalamus-Proper 0 118 14 0 +11 Left-Caudate 122 186 220 0 +12 Left-Putamen 236 13 176 0 +13 Left-Pallidum 12 48 255 0 +14 3rd-Ventricle 204 182 142 0 +15 4th-Ventricle 42 204 164 0 +16 Brain-Stem 119 159 176 0 +17 Left-Hippocampus 220 216 20 0 +18 Left-Amygdala 103 255 255 0 +19 Left-Insula 80 196 98 0 +20 Left-Operculum 60 58 210 0 +21 Line-1 60 58 210 0 +22 Line-2 60 58 210 0 +23 Line-3 60 58 210 0 +24 CSF 60 60 60 0 +25 Left-Lesion 255 165 0 0 +26 Left-Accumbens-area 255 165 0 0 +27 Left-Substancia-Nigra 0 255 127 0 +28 Left-VentralDC 165 42 42 0 +29 Left-undetermined 135 206 235 0 +30 Left-vessel 160 32 240 0 +31 Left-choroid-plexus 0 200 200 0 +32 Left-F3orb 100 50 100 0 +33 Left-lOg 135 50 74 0 +34 Left-aOg 122 135 50 0 +35 Left-mOg 51 50 135 0 +36 Left-pOg 74 155 60 0 +37 Left-Stellate 120 62 43 0 +38 Left-Porg 74 155 60 0 +39 Left-Aorg 122 135 50 0 +40 Right-Cerebral-Exterior 70 130 180 0 +41 Right-Cerebral-White-Matter 0 225 0 0 +42 Right-Cerebral-Cortex 205 62 78 0 +43 Right-Lateral-Ventricle 120 18 134 0 +44 Right-Inf-Lat-Vent 196 58 250 0 +45 Right-Cerebellum-Exterior 0 148 0 0 +46 Right-Cerebellum-White-Matter 220 248 164 0 +47 Right-Cerebellum-Cortex 230 148 34 0 +48 Right-Thalamus 0 118 14 0 +49 Right-Thalamus-Proper 0 118 14 0 +50 Right-Caudate 122 186 220 0 +51 Right-Putamen 236 13 176 0 +52 Right-Pallidum 13 48 255 0 +53 Right-Hippocampus 220 216 20 0 +54 Right-Amygdala 103 255 255 0 +55 Right-Insula 80 196 98 0 +56 Right-Operculum 60 58 210 0 +57 Right-Lesion 255 165 0 0 +58 Right-Accumbens-area 255 165 0 0 +59 Right-Substancia-Nigra 0 255 127 0 +60 Right-VentralDC 165 42 42 0 +61 Right-undetermined 135 206 235 0 +62 Right-vessel 160 32 240 0 +63 Right-choroid-plexus 0 200 221 0 +64 Right-F3orb 100 50 100 0 +65 Right-lOg 135 50 74 0 +66 Right-aOg 122 135 50 0 +67 Right-mOg 51 50 135 0 +68 Right-pOg 74 155 60 0 +69 Right-Stellate 120 62 43 0 +70 Right-Porg 74 155 60 0 +71 Right-Aorg 122 135 50 0 +72 5th-Ventricle 120 190 150 0 +73 Left-Interior 122 135 50 0 +74 Right-Interior 122 135 50 0 +# 75/76 removed. 
duplicates of 4/43 +77 WM-hypointensities 200 70 255 0 +78 Left-WM-hypointensities 255 148 10 0 +79 Right-WM-hypointensities 255 148 10 0 +80 non-WM-hypointensities 164 108 226 0 +81 Left-non-WM-hypointensities 164 108 226 0 +82 Right-non-WM-hypointensities 164 108 226 0 +83 Left-F1 255 218 185 0 +84 Right-F1 255 218 185 0 +85 Optic-Chiasm 234 169 30 0 +192 Corpus_Callosum 250 255 50 0 + +86 Left_future_WMSA 200 120 255 0 +87 Right_future_WMSA 200 121 255 0 +88 future_WMSA 200 122 255 0 + + +96 Left-Amygdala-Anterior 205 10 125 0 +97 Right-Amygdala-Anterior 205 10 125 0 +98 Dura 160 32 240 0 + +100 Left-wm-intensity-abnormality 124 140 178 0 +101 Left-caudate-intensity-abnormality 125 140 178 0 +102 Left-putamen-intensity-abnormality 126 140 178 0 +103 Left-accumbens-intensity-abnormality 127 140 178 0 +104 Left-pallidum-intensity-abnormality 124 141 178 0 +105 Left-amygdala-intensity-abnormality 124 142 178 0 +106 Left-hippocampus-intensity-abnormality 124 143 178 0 +107 Left-thalamus-intensity-abnormality 124 144 178 0 +108 Left-VDC-intensity-abnormality 124 140 179 0 +109 Right-wm-intensity-abnormality 124 140 178 0 +110 Right-caudate-intensity-abnormality 125 140 178 0 +111 Right-putamen-intensity-abnormality 126 140 178 0 +112 Right-accumbens-intensity-abnormality 127 140 178 0 +113 Right-pallidum-intensity-abnormality 124 141 178 0 +114 Right-amygdala-intensity-abnormality 124 142 178 0 +115 Right-hippocampus-intensity-abnormality 124 143 178 0 +116 Right-thalamus-intensity-abnormality 124 144 178 0 +117 Right-VDC-intensity-abnormality 124 140 179 0 + +118 Epidermis 255 20 147 0 +119 Conn-Tissue 205 179 139 0 +120 SC-Fat-Muscle 238 238 209 0 +121 Cranium 200 200 200 0 +122 CSF-SA 74 255 74 0 +123 Muscle 238 0 0 0 +124 Ear 0 0 139 0 +125 Adipose 173 255 47 0 +126 Spinal-Cord 133 203 229 0 +127 Soft-Tissue 26 237 57 0 +128 Nerve 34 139 34 0 +129 Bone 30 144 255 0 +130 Air 147 19 173 0 +131 Orbital-Fat 238 59 59 0 +132 Tongue 221 39 200 0 +133 Nasal-Structures 238 174 238 0 +134 Globe 255 0 0 0 +135 Teeth 72 61 139 0 +136 Left-Caudate-Putamen 21 39 132 0 +137 Right-Caudate-Putamen 21 39 132 0 +138 Left-Claustrum 65 135 20 0 +139 Right-Claustrum 65 135 20 0 +140 Cornea 134 4 160 0 +142 Diploe 221 226 68 0 +143 Vitreous-Humor 255 255 254 0 +144 Lens 52 209 226 0 +145 Aqueous-Humor 239 160 223 0 +146 Outer-Table 70 130 180 0 +147 Inner-Table 70 130 181 0 +148 Periosteum 139 121 94 0 +149 Endosteum 224 224 224 0 +150 R-C-S 255 0 0 0 +151 Iris 205 205 0 0 +152 SC-Adipose-Muscle 238 238 209 0 +153 SC-Tissue 139 121 94 0 +154 Orbital-Adipose 238 59 59 0 + +155 Left-IntCapsule-Ant 238 59 59 0 +156 Right-IntCapsule-Ant 238 59 59 0 +157 Left-IntCapsule-Pos 62 10 205 0 +158 Right-IntCapsule-Pos 62 10 205 0 + +# These labels are for babies/children +159 Left-Cerebral-WM-unmyelinated 0 118 14 0 +160 Right-Cerebral-WM-unmyelinated 0 118 14 0 +161 Left-Cerebral-WM-myelinated 220 216 21 0 +162 Right-Cerebral-WM-myelinated 220 216 21 0 +163 Left-Subcortical-Gray-Matter 122 186 220 0 +164 Right-Subcortical-Gray-Matter 122 186 220 0 +165 Skull 255 165 0 0 +166 Posterior-fossa 14 48 255 0 +167 Scalp 166 42 42 0 +168 Hematoma 121 18 134 0 +169 Left-Basal-Ganglia 236 13 127 0 +176 Right-Basal-Ganglia 236 13 126 0 + +# Label names and colors for Brainstem consituents +# No. 
Label Name: R G B A +170 brainstem 119 159 176 0 +171 DCG 119 0 176 0 +172 Vermis 119 100 176 0 +173 Midbrain 119 200 176 0 +174 Pons 119 159 100 0 +175 Medulla 119 159 200 0 + +#176 Right-Basal-Ganglia found in babies/children section above + +180 Left-Cortical-Dysplasia 73 61 139 0 +181 Right-Cortical-Dysplasia 73 62 139 0 + +#192 Corpus_Callosum listed after #85 above +193 Left-hippocampal_fissure 0 196 255 0 +194 Left-CADG-head 255 164 164 0 +195 Left-subiculum 196 196 0 0 +196 Left-fimbria 0 100 255 0 +197 Right-hippocampal_fissure 128 196 164 0 +198 Right-CADG-head 0 126 75 0 +199 Right-subiculum 128 96 64 0 +200 Right-fimbria 0 50 128 0 +201 alveus 255 204 153 0 +202 perforant_pathway 255 128 128 0 +203 parasubiculum 255 255 0 0 +204 presubiculum 64 0 64 0 +205 subiculum 0 0 255 0 +206 CA1 255 0 0 0 +207 CA2 128 128 255 0 +208 CA3 0 128 0 0 +209 CA4 196 160 128 0 +210 GC-ML-DG 32 200 255 0 +211 HATA 128 255 128 0 +212 fimbria 204 153 204 0 +213 lateral_ventricle 121 17 136 0 +214 molecular_layer_HP 128 0 0 0 +215 hippocampal_fissure 128 32 255 0 +216 entorhinal_cortex 255 204 102 0 +217 molecular_layer_subiculum 128 128 128 0 +218 Amygdala 104 255 255 0 +219 Cerebral_White_Matter 0 226 0 0 +220 Cerebral_Cortex 205 63 78 0 +221 Inf_Lat_Vent 197 58 250 0 +222 Perirhinal 33 150 250 0 +223 Cerebral_White_Matter_Edge 226 0 0 0 +224 Background 100 100 100 0 +225 Ectorhinal 197 150 250 0 +226 HP_tail 170 170 255 0 + +250 Fornix 255 0 0 0 +251 CC_Posterior 0 0 64 0 +252 CC_Mid_Posterior 0 0 112 0 +253 CC_Central 0 0 160 0 +254 CC_Mid_Anterior 0 0 208 0 +255 CC_Anterior 0 0 255 0 + +# This is for keeping track of voxel changes +256 Voxel-Unchanged 0 0 0 0 + +# lymph node and vascular labels +331 Aorta 255 0 0 0 +332 Left-Common-IliacA 255 80 0 0 +333 Right-Common-IliacA 255 160 0 0 +334 Left-External-IliacA 255 255 0 0 +335 Right-External-IliacA 0 255 0 0 +336 Left-Internal-IliacA 255 0 160 0 +337 Right-Internal-IliacA 255 0 255 0 +338 Left-Lateral-SacralA 255 50 80 0 +339 Right-Lateral-SacralA 80 255 50 0 +340 Left-ObturatorA 160 255 50 0 +341 Right-ObturatorA 160 200 255 0 +342 Left-Internal-PudendalA 0 255 160 0 +343 Right-Internal-PudendalA 0 0 255 0 +344 Left-UmbilicalA 80 50 255 0 +345 Right-UmbilicalA 160 0 255 0 +346 Left-Inf-RectalA 255 210 0 0 +347 Right-Inf-RectalA 0 160 255 0 +348 Left-Common-IliacV 255 200 80 0 +349 Right-Common-IliacV 255 200 160 0 +350 Left-External-IliacV 255 80 200 0 +351 Right-External-IliacV 255 160 200 0 +352 Left-Internal-IliacV 30 255 80 0 +353 Right-Internal-IliacV 80 200 255 0 +354 Left-ObturatorV 80 255 200 0 +355 Right-ObturatorV 195 255 200 0 +356 Left-Internal-PudendalV 120 200 20 0 +357 Right-Internal-PudendalV 170 10 200 0 +358 Pos-Lymph 20 130 180 0 +359 Neg-Lymph 20 180 130 0 + +400 V1 206 62 78 0 +401 V2 121 18 134 0 +402 BA44 199 58 250 0 +403 BA45 1 148 0 0 +404 BA4a 221 248 164 0 +405 BA4p 231 148 34 0 +406 BA6 1 118 14 0 +407 BA2 120 118 14 0 +408 BA1_old 123 186 221 0 +409 BAun2 238 13 177 0 +410 BA1 123 186 220 0 +411 BA2b 138 13 206 0 +412 BA3a 238 130 176 0 +413 BA3b 218 230 76 0 +414 MT 38 213 176 0 +415 AIPS_AIP_l 1 225 176 0 +416 AIPS_AIP_r 1 225 176 0 +417 AIPS_VIP_l 200 2 100 0 +418 AIPS_VIP_r 200 2 100 0 +419 IPL_PFcm_l 5 200 90 0 +420 IPL_PFcm_r 5 200 90 0 +421 IPL_PF_l 100 5 200 0 +422 IPL_PFm_l 25 255 100 0 +423 IPL_PFm_r 25 255 100 0 +424 IPL_PFop_l 230 7 100 0 +425 IPL_PFop_r 230 7 100 0 +426 IPL_PF_r 100 5 200 0 +427 IPL_PFt_l 150 10 200 0 +428 IPL_PFt_r 150 10 200 0 +429 IPL_PGa_l 175 10 176 0 +430 IPL_PGa_r 175 10 176 0 
+431 IPL_PGp_l 10 100 255 0 +432 IPL_PGp_r 10 100 255 0 +433 Visual_V3d_l 150 45 70 0 +434 Visual_V3d_r 150 45 70 0 +435 Visual_V4_l 45 200 15 0 +436 Visual_V4_r 45 200 15 0 +437 Visual_V5_b 227 45 100 0 +438 Visual_VP_l 227 45 100 0 +439 Visual_VP_r 227 45 100 0 + +# wm lesions +498 wmsa 143 188 143 0 +499 other_wmsa 255 248 220 0 + +# HiRes Hippocampus labeling +500 right_CA2_3 17 85 136 0 +501 right_alveus 119 187 102 0 +502 right_CA1 204 68 34 0 +503 right_fimbria 204 0 255 0 +504 right_presubiculum 221 187 17 0 +505 right_hippocampal_fissure 153 221 238 0 +506 right_CA4_DG 51 17 17 0 +507 right_subiculum 0 119 85 0 +508 right_fornix 20 100 200 0 + +550 left_CA2_3 17 85 137 0 +551 left_alveus 119 187 103 0 +552 left_CA1 204 68 35 0 +553 left_fimbria 204 0 254 0 +554 left_presubiculum 221 187 16 0 +555 left_hippocampal_fissure 153 221 239 0 +556 left_CA4_DG 51 17 18 0 +557 left_subiculum 0 119 86 0 +558 left_fornix 20 100 201 0 + +600 Tumor 254 254 254 0 + + +# Cerebellar parcellation labels from SUIT (matches labels in cma.h) +#No. Label Name: R G B A +601 Cbm_Left_I_IV 70 130 180 0 +602 Cbm_Right_I_IV 245 245 245 0 +603 Cbm_Left_V 205 62 78 0 +604 Cbm_Right_V 120 18 134 0 +605 Cbm_Left_VI 196 58 250 0 +606 Cbm_Vermis_VI 0 148 0 0 +607 Cbm_Right_VI 220 248 164 0 +608 Cbm_Left_CrusI 230 148 34 0 +609 Cbm_Vermis_CrusI 0 118 14 0 +610 Cbm_Right_CrusI 0 118 14 0 +611 Cbm_Left_CrusII 122 186 220 0 +612 Cbm_Vermis_CrusII 236 13 176 0 +613 Cbm_Right_CrusII 12 48 255 0 +614 Cbm_Left_VIIb 204 182 142 0 +615 Cbm_Vermis_VIIb 42 204 164 0 +616 Cbm_Right_VIIb 119 159 176 0 +617 Cbm_Left_VIIIa 220 216 20 0 +618 Cbm_Vermis_VIIIa 103 255 255 0 +619 Cbm_Right_VIIIa 80 196 98 0 +620 Cbm_Left_VIIIb 60 58 210 0 +621 Cbm_Vermis_VIIIb 60 58 210 0 +622 Cbm_Right_VIIIb 60 58 210 0 +623 Cbm_Left_IX 60 58 210 0 +624 Cbm_Vermis_IX 60 60 60 0 +625 Cbm_Right_IX 255 165 0 0 +626 Cbm_Left_X 255 165 0 0 +627 Cbm_Vermis_X 0 255 127 0 +628 Cbm_Right_X 165 42 42 0 + +# Cerebellar lobule parcellations +640 Cbm_Right_I_V_med 204 0 0 0 +641 Cbm_Right_I_V_mid 255 0 0 0 +642 Cbm_Right_VI_med 0 0 255 0 +643 Cbm_Right_VI_mid 30 144 255 0 +644 Cbm_Right_VI_lat 100 212 237 0 +645 Cbm_Right_CrusI_med 218 165 32 0 +646 Cbm_Right_CrusI_mid 255 215 0 0 +647 Cbm_Right_CrusI_lat 255 255 166 0 +648 Cbm_Right_CrusII_med 153 0 204 0 +649 Cbm_Right_CrusII_mid 153 141 209 0 +650 Cbm_Right_CrusII_lat 204 204 255 0 +651 Cbm_Right_7med 31 212 194 0 +652 Cbm_Right_7mid 3 255 237 0 +653 Cbm_Right_7lat 204 255 255 0 +654 Cbm_Right_8med 86 74 147 0 +655 Cbm_Right_8mid 114 114 190 0 +656 Cbm_Right_8lat 184 178 255 0 +657 Cbm_Right_PUNs 126 138 37 0 +658 Cbm_Right_TONs 189 197 117 0 +659 Cbm_Right_FLOs 240 230 140 0 +660 Cbm_Left_I_V_med 204 0 0 0 +661 Cbm_Left_I_V_mid 255 0 0 0 +662 Cbm_Left_VI_med 0 0 255 0 +663 Cbm_Left_VI_mid 30 144 255 0 +664 Cbm_Left_VI_lat 100 212 237 0 +665 Cbm_Left_CrusI_med 218 165 32 0 +666 Cbm_Left_CrusI_mid 255 215 0 0 +667 Cbm_Left_CrusI_lat 255 255 166 0 +668 Cbm_Left_CrusII_med 153 0 204 0 +669 Cbm_Left_CrusII_mid 153 141 209 0 +670 Cbm_Left_CrusII_lat 204 204 255 0 +671 Cbm_Left_7med 31 212 194 0 +672 Cbm_Left_7mid 3 255 237 0 +673 Cbm_Left_7lat 204 255 255 0 +674 Cbm_Left_8med 86 74 147 0 +675 Cbm_Left_8mid 114 114 190 0 +676 Cbm_Left_8lat 184 178 255 0 +677 Cbm_Left_PUNs 126 138 37 0 +678 Cbm_Left_TONs 189 197 117 0 +679 Cbm_Left_FLOs 240 230 140 0 + +701 CSF-FSL-FAST 120 18 134 0 +702 GrayMatter-FSL-FAST 205 62 78 0 +703 WhiteMatter-FSL-FAST 0 225 0 0 + +999 SUSPICIOUS 255 100 100 0 + +# Below is the color table 
for the cortical labels of the seg volume +# created by mri_aparc2aseg in which the aseg cortex label is replaced +# by the labels in the aparc. It also supports wm labels that will +# eventually be created by mri_aparc2aseg. Otherwise, the aseg labels +# do not change from above. The cortical lables are the same as in +# colortable_desikan_killiany.txt, except that left hemisphere has +# 1000 added to the index and the right has 2000 added. The label +# names are also prepended with ctx-lh or ctx-rh. The white matter +# labels are the same as in colortable_desikan_killiany.txt, except +# that left hemisphere has 3000 added to the index and the right has +# 4000 added. The label names are also prepended with wm-lh or wm-rh. +# Centrum semiovale is also labled with 5001 (left) and 5002 (right). +# Even further below are the color tables for aparc.a2005s and aparc.a2009s. + +#No. Label Name: R G B A +1000 ctx-lh-unknown 25 5 25 0 +1001 ctx-lh-bankssts 25 100 40 0 +1002 ctx-lh-caudalanteriorcingulate 125 100 160 0 +1003 ctx-lh-caudalmiddlefrontal 100 25 0 0 +1004 ctx-lh-corpuscallosum 120 70 50 0 +1005 ctx-lh-cuneus 220 20 100 0 +1006 ctx-lh-entorhinal 220 20 10 0 +1007 ctx-lh-fusiform 180 220 140 0 +1008 ctx-lh-inferiorparietal 220 60 220 0 +1009 ctx-lh-inferiortemporal 180 40 120 0 +1010 ctx-lh-isthmuscingulate 140 20 140 0 +1011 ctx-lh-lateraloccipital 20 30 140 0 +1012 ctx-lh-lateralorbitofrontal 35 75 50 0 +1013 ctx-lh-lingual 225 140 140 0 +1014 ctx-lh-medialorbitofrontal 200 35 75 0 +1015 ctx-lh-middletemporal 160 100 50 0 +1016 ctx-lh-parahippocampal 20 220 60 0 +1017 ctx-lh-paracentral 60 220 60 0 +1018 ctx-lh-parsopercularis 220 180 140 0 +1019 ctx-lh-parsorbitalis 20 100 50 0 +1020 ctx-lh-parstriangularis 220 60 20 0 +1021 ctx-lh-pericalcarine 120 100 60 0 +1022 ctx-lh-postcentral 220 20 20 0 +1023 ctx-lh-posteriorcingulate 220 180 220 0 +1024 ctx-lh-precentral 60 20 220 0 +1025 ctx-lh-precuneus 160 140 180 0 +1026 ctx-lh-rostralanteriorcingulate 80 20 140 0 +1027 ctx-lh-rostralmiddlefrontal 75 50 125 0 +1028 ctx-lh-superiorfrontal 20 220 160 0 +1029 ctx-lh-superiorparietal 20 180 140 0 +1030 ctx-lh-superiortemporal 140 220 220 0 +1031 ctx-lh-supramarginal 80 160 20 0 +1032 ctx-lh-frontalpole 100 0 100 0 +1033 ctx-lh-temporalpole 70 70 70 0 +1034 ctx-lh-transversetemporal 150 150 200 0 +1035 ctx-lh-insula 255 192 32 0 + +2000 ctx-rh-unknown 25 5 25 0 +2001 ctx-rh-bankssts 25 100 40 0 +2002 ctx-rh-caudalanteriorcingulate 125 100 160 0 +2003 ctx-rh-caudalmiddlefrontal 100 25 0 0 +2004 ctx-rh-corpuscallosum 120 70 50 0 +2005 ctx-rh-cuneus 220 20 100 0 +2006 ctx-rh-entorhinal 220 20 10 0 +2007 ctx-rh-fusiform 180 220 140 0 +2008 ctx-rh-inferiorparietal 220 60 220 0 +2009 ctx-rh-inferiortemporal 180 40 120 0 +2010 ctx-rh-isthmuscingulate 140 20 140 0 +2011 ctx-rh-lateraloccipital 20 30 140 0 +2012 ctx-rh-lateralorbitofrontal 35 75 50 0 +2013 ctx-rh-lingual 225 140 140 0 +2014 ctx-rh-medialorbitofrontal 200 35 75 0 +2015 ctx-rh-middletemporal 160 100 50 0 +2016 ctx-rh-parahippocampal 20 220 60 0 +2017 ctx-rh-paracentral 60 220 60 0 +2018 ctx-rh-parsopercularis 220 180 140 0 +2019 ctx-rh-parsorbitalis 20 100 50 0 +2020 ctx-rh-parstriangularis 220 60 20 0 +2021 ctx-rh-pericalcarine 120 100 60 0 +2022 ctx-rh-postcentral 220 20 20 0 +2023 ctx-rh-posteriorcingulate 220 180 220 0 +2024 ctx-rh-precentral 60 20 220 0 +2025 ctx-rh-precuneus 160 140 180 0 +2026 ctx-rh-rostralanteriorcingulate 80 20 140 0 +2027 ctx-rh-rostralmiddlefrontal 75 50 125 0 +2028 ctx-rh-superiorfrontal 20 220 160 0 +2029 
ctx-rh-superiorparietal 20 180 140 0 +2030 ctx-rh-superiortemporal 140 220 220 0 +2031 ctx-rh-supramarginal 80 160 20 0 +2032 ctx-rh-frontalpole 100 0 100 0 +2033 ctx-rh-temporalpole 70 70 70 0 +2034 ctx-rh-transversetemporal 150 150 200 0 +2035 ctx-rh-insula 255 192 32 0 + +3000 wm-lh-unknown 230 250 230 0 +3001 wm-lh-bankssts 230 155 215 0 +3002 wm-lh-caudalanteriorcingulate 130 155 95 0 +3003 wm-lh-caudalmiddlefrontal 155 230 255 0 +3004 wm-lh-corpuscallosum 135 185 205 0 +3005 wm-lh-cuneus 35 235 155 0 +3006 wm-lh-entorhinal 35 235 245 0 +3007 wm-lh-fusiform 75 35 115 0 +3008 wm-lh-inferiorparietal 35 195 35 0 +3009 wm-lh-inferiortemporal 75 215 135 0 +3010 wm-lh-isthmuscingulate 115 235 115 0 +3011 wm-lh-lateraloccipital 235 225 115 0 +3012 wm-lh-lateralorbitofrontal 220 180 205 0 +3013 wm-lh-lingual 30 115 115 0 +3014 wm-lh-medialorbitofrontal 55 220 180 0 +3015 wm-lh-middletemporal 95 155 205 0 +3016 wm-lh-parahippocampal 235 35 195 0 +3017 wm-lh-paracentral 195 35 195 0 +3018 wm-lh-parsopercularis 35 75 115 0 +3019 wm-lh-parsorbitalis 235 155 205 0 +3020 wm-lh-parstriangularis 35 195 235 0 +3021 wm-lh-pericalcarine 135 155 195 0 +3022 wm-lh-postcentral 35 235 235 0 +3023 wm-lh-posteriorcingulate 35 75 35 0 +3024 wm-lh-precentral 195 235 35 0 +3025 wm-lh-precuneus 95 115 75 0 +3026 wm-lh-rostralanteriorcingulate 175 235 115 0 +3027 wm-lh-rostralmiddlefrontal 180 205 130 0 +3028 wm-lh-superiorfrontal 235 35 95 0 +3029 wm-lh-superiorparietal 235 75 115 0 +3030 wm-lh-superiortemporal 115 35 35 0 +3031 wm-lh-supramarginal 175 95 235 0 +3032 wm-lh-frontalpole 155 255 155 0 +3033 wm-lh-temporalpole 185 185 185 0 +3034 wm-lh-transversetemporal 105 105 55 0 +3035 wm-lh-insula 254 191 31 0 + +4000 wm-rh-unknown 230 250 230 0 +4001 wm-rh-bankssts 230 155 215 0 +4002 wm-rh-caudalanteriorcingulate 130 155 95 0 +4003 wm-rh-caudalmiddlefrontal 155 230 255 0 +4004 wm-rh-corpuscallosum 135 185 205 0 +4005 wm-rh-cuneus 35 235 155 0 +4006 wm-rh-entorhinal 35 235 245 0 +4007 wm-rh-fusiform 75 35 115 0 +4008 wm-rh-inferiorparietal 35 195 35 0 +4009 wm-rh-inferiortemporal 75 215 135 0 +4010 wm-rh-isthmuscingulate 115 235 115 0 +4011 wm-rh-lateraloccipital 235 225 115 0 +4012 wm-rh-lateralorbitofrontal 220 180 205 0 +4013 wm-rh-lingual 30 115 115 0 +4014 wm-rh-medialorbitofrontal 55 220 180 0 +4015 wm-rh-middletemporal 95 155 205 0 +4016 wm-rh-parahippocampal 235 35 195 0 +4017 wm-rh-paracentral 195 35 195 0 +4018 wm-rh-parsopercularis 35 75 115 0 +4019 wm-rh-parsorbitalis 235 155 205 0 +4020 wm-rh-parstriangularis 35 195 235 0 +4021 wm-rh-pericalcarine 135 155 195 0 +4022 wm-rh-postcentral 35 235 235 0 +4023 wm-rh-posteriorcingulate 35 75 35 0 +4024 wm-rh-precentral 195 235 35 0 +4025 wm-rh-precuneus 95 115 75 0 +4026 wm-rh-rostralanteriorcingulate 175 235 115 0 +4027 wm-rh-rostralmiddlefrontal 180 205 130 0 +4028 wm-rh-superiorfrontal 235 35 95 0 +4029 wm-rh-superiorparietal 235 75 115 0 +4030 wm-rh-superiortemporal 115 35 35 0 +4031 wm-rh-supramarginal 175 95 235 0 +4032 wm-rh-frontalpole 155 255 155 0 +4033 wm-rh-temporalpole 185 185 185 0 +4034 wm-rh-transversetemporal 105 105 55 0 +4035 wm-rh-insula 254 191 31 0 + +# Below is the color table for the cortical labels of the seg volume +# created by mri_aparc2aseg (with --a2005s flag) in which the aseg +# cortex label is replaced by the labels in the aparc.a2005s. The +# cortical labels are the same as in Simple_surface_labels2005.txt, +# except that left hemisphere has 1100 added to the index and the +# right has 2100 added. 
The label names are also prepended with +# ctx-lh or ctx-rh. The aparc.a2009s labels are further below + +#No. Label Name: R G B A +1100 ctx-lh-Unknown 0 0 0 0 +1101 ctx-lh-Corpus_callosum 50 50 50 0 +1102 ctx-lh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +1103 ctx-lh-G_cingulate-Isthmus 60 25 25 0 +1104 ctx-lh-G_cingulate-Main_part 25 60 60 0 + +1200 ctx-lh-G_cingulate-caudal_ACC 25 60 61 0 +1201 ctx-lh-G_cingulate-rostral_ACC 25 90 60 0 +1202 ctx-lh-G_cingulate-posterior 25 120 60 0 + +1205 ctx-lh-S_cingulate-caudal_ACC 25 150 60 0 +1206 ctx-lh-S_cingulate-rostral_ACC 25 180 60 0 +1207 ctx-lh-S_cingulate-posterior 25 210 60 0 + +1210 ctx-lh-S_pericallosal-caudal 25 150 90 0 +1211 ctx-lh-S_pericallosal-rostral 25 180 90 0 +1212 ctx-lh-S_pericallosal-posterior 25 210 90 0 + +1105 ctx-lh-G_cuneus 180 20 20 0 +1106 ctx-lh-G_frontal_inf-Opercular_part 220 20 100 0 +1107 ctx-lh-G_frontal_inf-Orbital_part 140 60 60 0 +1108 ctx-lh-G_frontal_inf-Triangular_part 180 220 140 0 +1109 ctx-lh-G_frontal_middle 140 100 180 0 +1110 ctx-lh-G_frontal_superior 180 20 140 0 +1111 ctx-lh-G_frontomarginal 140 20 140 0 +1112 ctx-lh-G_insular_long 21 10 10 0 +1113 ctx-lh-G_insular_short 225 140 140 0 +1114 ctx-lh-G_and_S_occipital_inferior 23 60 180 0 +1115 ctx-lh-G_occipital_middle 180 60 180 0 +1116 ctx-lh-G_occipital_superior 20 220 60 0 +1117 ctx-lh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +1118 ctx-lh-G_occipit-temp_med-Lingual_part 220 180 140 0 +1119 ctx-lh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +1120 ctx-lh-G_orbital 220 60 20 0 +1121 ctx-lh-G_paracentral 60 100 60 0 +1122 ctx-lh-G_parietal_inferior-Angular_part 20 60 220 0 +1123 ctx-lh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +1124 ctx-lh-G_parietal_superior 220 180 220 0 +1125 ctx-lh-G_postcentral 20 180 140 0 +1126 ctx-lh-G_precentral 60 140 180 0 +1127 ctx-lh-G_precuneus 25 20 140 0 +1128 ctx-lh-G_rectus 20 60 100 0 +1129 ctx-lh-G_subcallosal 60 220 20 0 +1130 ctx-lh-G_subcentral 60 20 220 0 +1131 ctx-lh-G_temporal_inferior 220 220 100 0 +1132 ctx-lh-G_temporal_middle 180 60 60 0 +1133 ctx-lh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +1134 ctx-lh-G_temp_sup-Lateral_aspect 220 60 220 0 +1135 ctx-lh-G_temp_sup-Planum_polare 65 220 60 0 +1136 ctx-lh-G_temp_sup-Planum_tempolare 25 140 20 0 +1137 ctx-lh-G_and_S_transverse_frontopolar 13 0 250 0 +1138 ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +1139 ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +1140 ctx-lh-Lat_Fissure-post_sgt 61 60 100 0 +1141 ctx-lh-Medial_wall 25 25 25 0 +1142 ctx-lh-Pole_occipital 140 20 60 0 +1143 ctx-lh-Pole_temporal 220 180 20 0 +1144 ctx-lh-S_calcarine 63 180 180 0 +1145 ctx-lh-S_central 221 20 10 0 +1146 ctx-lh-S_central_insula 21 220 20 0 +1147 ctx-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +1148 ctx-lh-S_cingulate-Marginalis_part 221 20 100 0 +1149 ctx-lh-S_circular_insula_anterior 221 60 140 0 +1150 ctx-lh-S_circular_insula_inferior 221 20 220 0 +1151 ctx-lh-S_circular_insula_superior 61 220 220 0 +1152 ctx-lh-S_collateral_transverse_ant 100 200 200 0 +1153 ctx-lh-S_collateral_transverse_post 10 200 200 0 +1154 ctx-lh-S_frontal_inferior 221 220 20 0 +1155 ctx-lh-S_frontal_middle 141 20 100 0 +1156 ctx-lh-S_frontal_superior 61 220 100 0 +1157 ctx-lh-S_frontomarginal 21 220 60 0 +1158 ctx-lh-S_intermedius_primus-Jensen 141 60 20 0 +1159 ctx-lh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +1160 ctx-lh-S_occipital_anterior 61 20 180 0 +1161 ctx-lh-S_occipital_middle_and_Lunatus 101 60 220 0 +1162 
ctx-lh-S_occipital_superior_and_transversalis 21 20 140 0 +1163 ctx-lh-S_occipito-temporal_lateral 221 140 20 0 +1164 ctx-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +1165 ctx-lh-S_orbital-H_shapped 101 20 20 0 +1166 ctx-lh-S_orbital_lateral 221 100 20 0 +1167 ctx-lh-S_orbital_medial-Or_olfactory 181 200 20 0 +1168 ctx-lh-S_paracentral 21 180 140 0 +1169 ctx-lh-S_parieto_occipital 101 100 180 0 +1170 ctx-lh-S_pericallosal 181 220 20 0 +1171 ctx-lh-S_postcentral 21 140 200 0 +1172 ctx-lh-S_precentral-Inferior-part 21 20 240 0 +1173 ctx-lh-S_precentral-Superior-part 21 20 200 0 +1174 ctx-lh-S_subcentral_ant 61 180 60 0 +1175 ctx-lh-S_subcentral_post 61 180 250 0 +1176 ctx-lh-S_suborbital 21 20 60 0 +1177 ctx-lh-S_subparietal 101 60 60 0 +1178 ctx-lh-S_supracingulate 21 220 220 0 +1179 ctx-lh-S_temporal_inferior 21 180 180 0 +1180 ctx-lh-S_temporal_superior 223 220 60 0 +1181 ctx-lh-S_temporal_transverse 221 60 60 0 + +2100 ctx-rh-Unknown 0 0 0 0 +2101 ctx-rh-Corpus_callosum 50 50 50 0 +2102 ctx-rh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +2103 ctx-rh-G_cingulate-Isthmus 60 25 25 0 +2104 ctx-rh-G_cingulate-Main_part 25 60 60 0 + +2105 ctx-rh-G_cuneus 180 20 20 0 +2106 ctx-rh-G_frontal_inf-Opercular_part 220 20 100 0 +2107 ctx-rh-G_frontal_inf-Orbital_part 140 60 60 0 +2108 ctx-rh-G_frontal_inf-Triangular_part 180 220 140 0 +2109 ctx-rh-G_frontal_middle 140 100 180 0 +2110 ctx-rh-G_frontal_superior 180 20 140 0 +2111 ctx-rh-G_frontomarginal 140 20 140 0 +2112 ctx-rh-G_insular_long 21 10 10 0 +2113 ctx-rh-G_insular_short 225 140 140 0 +2114 ctx-rh-G_and_S_occipital_inferior 23 60 180 0 +2115 ctx-rh-G_occipital_middle 180 60 180 0 +2116 ctx-rh-G_occipital_superior 20 220 60 0 +2117 ctx-rh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +2118 ctx-rh-G_occipit-temp_med-Lingual_part 220 180 140 0 +2119 ctx-rh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +2120 ctx-rh-G_orbital 220 60 20 0 +2121 ctx-rh-G_paracentral 60 100 60 0 +2122 ctx-rh-G_parietal_inferior-Angular_part 20 60 220 0 +2123 ctx-rh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +2124 ctx-rh-G_parietal_superior 220 180 220 0 +2125 ctx-rh-G_postcentral 20 180 140 0 +2126 ctx-rh-G_precentral 60 140 180 0 +2127 ctx-rh-G_precuneus 25 20 140 0 +2128 ctx-rh-G_rectus 20 60 100 0 +2129 ctx-rh-G_subcallosal 60 220 20 0 +2130 ctx-rh-G_subcentral 60 20 220 0 +2131 ctx-rh-G_temporal_inferior 220 220 100 0 +2132 ctx-rh-G_temporal_middle 180 60 60 0 +2133 ctx-rh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +2134 ctx-rh-G_temp_sup-Lateral_aspect 220 60 220 0 +2135 ctx-rh-G_temp_sup-Planum_polare 65 220 60 0 +2136 ctx-rh-G_temp_sup-Planum_tempolare 25 140 20 0 +2137 ctx-rh-G_and_S_transverse_frontopolar 13 0 250 0 +2138 ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +2139 ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +2140 ctx-rh-Lat_Fissure-post_sgt 61 60 100 0 +2141 ctx-rh-Medial_wall 25 25 25 0 +2142 ctx-rh-Pole_occipital 140 20 60 0 +2143 ctx-rh-Pole_temporal 220 180 20 0 +2144 ctx-rh-S_calcarine 63 180 180 0 +2145 ctx-rh-S_central 221 20 10 0 +2146 ctx-rh-S_central_insula 21 220 20 0 +2147 ctx-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +2148 ctx-rh-S_cingulate-Marginalis_part 221 20 100 0 +2149 ctx-rh-S_circular_insula_anterior 221 60 140 0 +2150 ctx-rh-S_circular_insula_inferior 221 20 220 0 +2151 ctx-rh-S_circular_insula_superior 61 220 220 0 +2152 ctx-rh-S_collateral_transverse_ant 100 200 200 0 +2153 ctx-rh-S_collateral_transverse_post 10 200 200 0 +2154 ctx-rh-S_frontal_inferior 221 220 
20 0 +2155 ctx-rh-S_frontal_middle 141 20 100 0 +2156 ctx-rh-S_frontal_superior 61 220 100 0 +2157 ctx-rh-S_frontomarginal 21 220 60 0 +2158 ctx-rh-S_intermedius_primus-Jensen 141 60 20 0 +2159 ctx-rh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +2160 ctx-rh-S_occipital_anterior 61 20 180 0 +2161 ctx-rh-S_occipital_middle_and_Lunatus 101 60 220 0 +2162 ctx-rh-S_occipital_superior_and_transversalis 21 20 140 0 +2163 ctx-rh-S_occipito-temporal_lateral 221 140 20 0 +2164 ctx-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +2165 ctx-rh-S_orbital-H_shapped 101 20 20 0 +2166 ctx-rh-S_orbital_lateral 221 100 20 0 +2167 ctx-rh-S_orbital_medial-Or_olfactory 181 200 20 0 +2168 ctx-rh-S_paracentral 21 180 140 0 +2169 ctx-rh-S_parieto_occipital 101 100 180 0 +2170 ctx-rh-S_pericallosal 181 220 20 0 +2171 ctx-rh-S_postcentral 21 140 200 0 +2172 ctx-rh-S_precentral-Inferior-part 21 20 240 0 +2173 ctx-rh-S_precentral-Superior-part 21 20 200 0 +2174 ctx-rh-S_subcentral_ant 61 180 60 0 +2175 ctx-rh-S_subcentral_post 61 180 250 0 +2176 ctx-rh-S_suborbital 21 20 60 0 +2177 ctx-rh-S_subparietal 101 60 60 0 +2178 ctx-rh-S_supracingulate 21 220 220 0 +2179 ctx-rh-S_temporal_inferior 21 180 180 0 +2180 ctx-rh-S_temporal_superior 223 220 60 0 +2181 ctx-rh-S_temporal_transverse 221 60 60 0 + + +2200 ctx-rh-G_cingulate-caudal_ACC 25 60 61 0 +2201 ctx-rh-G_cingulate-rostral_ACC 25 90 60 0 +2202 ctx-rh-G_cingulate-posterior 25 120 60 0 + +2205 ctx-rh-S_cingulate-caudal_ACC 25 150 60 0 +2206 ctx-rh-S_cingulate-rostral_ACC 25 180 60 0 +2207 ctx-rh-S_cingulate-posterior 25 210 60 0 + +2210 ctx-rh-S_pericallosal-caudal 25 150 90 0 +2211 ctx-rh-S_pericallosal-rostral 25 180 90 0 +2212 ctx-rh-S_pericallosal-posterior 25 210 90 0 + +3100 wm-lh-Unknown 0 0 0 0 +3101 wm-lh-Corpus_callosum 50 50 50 0 +3102 wm-lh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +3103 wm-lh-G_cingulate-Isthmus 60 25 25 0 +3104 wm-lh-G_cingulate-Main_part 25 60 60 0 +3105 wm-lh-G_cuneus 180 20 20 0 +3106 wm-lh-G_frontal_inf-Opercular_part 220 20 100 0 +3107 wm-lh-G_frontal_inf-Orbital_part 140 60 60 0 +3108 wm-lh-G_frontal_inf-Triangular_part 180 220 140 0 +3109 wm-lh-G_frontal_middle 140 100 180 0 +3110 wm-lh-G_frontal_superior 180 20 140 0 +3111 wm-lh-G_frontomarginal 140 20 140 0 +3112 wm-lh-G_insular_long 21 10 10 0 +3113 wm-lh-G_insular_short 225 140 140 0 +3114 wm-lh-G_and_S_occipital_inferior 23 60 180 0 +3115 wm-lh-G_occipital_middle 180 60 180 0 +3116 wm-lh-G_occipital_superior 20 220 60 0 +3117 wm-lh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +3118 wm-lh-G_occipit-temp_med-Lingual_part 220 180 140 0 +3119 wm-lh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +3120 wm-lh-G_orbital 220 60 20 0 +3121 wm-lh-G_paracentral 60 100 60 0 +3122 wm-lh-G_parietal_inferior-Angular_part 20 60 220 0 +3123 wm-lh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +3124 wm-lh-G_parietal_superior 220 180 220 0 +3125 wm-lh-G_postcentral 20 180 140 0 +3126 wm-lh-G_precentral 60 140 180 0 +3127 wm-lh-G_precuneus 25 20 140 0 +3128 wm-lh-G_rectus 20 60 100 0 +3129 wm-lh-G_subcallosal 60 220 20 0 +3130 wm-lh-G_subcentral 60 20 220 0 +3131 wm-lh-G_temporal_inferior 220 220 100 0 +3132 wm-lh-G_temporal_middle 180 60 60 0 +3133 wm-lh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +3134 wm-lh-G_temp_sup-Lateral_aspect 220 60 220 0 +3135 wm-lh-G_temp_sup-Planum_polare 65 220 60 0 +3136 wm-lh-G_temp_sup-Planum_tempolare 25 140 20 0 +3137 wm-lh-G_and_S_transverse_frontopolar 13 0 250 0 +3138 wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +3139 
wm-lh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +3140 wm-lh-Lat_Fissure-post_sgt 61 60 100 0 +3141 wm-lh-Medial_wall 25 25 25 0 +3142 wm-lh-Pole_occipital 140 20 60 0 +3143 wm-lh-Pole_temporal 220 180 20 0 +3144 wm-lh-S_calcarine 63 180 180 0 +3145 wm-lh-S_central 221 20 10 0 +3146 wm-lh-S_central_insula 21 220 20 0 +3147 wm-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +3148 wm-lh-S_cingulate-Marginalis_part 221 20 100 0 +3149 wm-lh-S_circular_insula_anterior 221 60 140 0 +3150 wm-lh-S_circular_insula_inferior 221 20 220 0 +3151 wm-lh-S_circular_insula_superior 61 220 220 0 +3152 wm-lh-S_collateral_transverse_ant 100 200 200 0 +3153 wm-lh-S_collateral_transverse_post 10 200 200 0 +3154 wm-lh-S_frontal_inferior 221 220 20 0 +3155 wm-lh-S_frontal_middle 141 20 100 0 +3156 wm-lh-S_frontal_superior 61 220 100 0 +3157 wm-lh-S_frontomarginal 21 220 60 0 +3158 wm-lh-S_intermedius_primus-Jensen 141 60 20 0 +3159 wm-lh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +3160 wm-lh-S_occipital_anterior 61 20 180 0 +3161 wm-lh-S_occipital_middle_and_Lunatus 101 60 220 0 +3162 wm-lh-S_occipital_superior_and_transversalis 21 20 140 0 +3163 wm-lh-S_occipito-temporal_lateral 221 140 20 0 +3164 wm-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +3165 wm-lh-S_orbital-H_shapped 101 20 20 0 +3166 wm-lh-S_orbital_lateral 221 100 20 0 +3167 wm-lh-S_orbital_medial-Or_olfactory 181 200 20 0 +3168 wm-lh-S_paracentral 21 180 140 0 +3169 wm-lh-S_parieto_occipital 101 100 180 0 +3170 wm-lh-S_pericallosal 181 220 20 0 +3171 wm-lh-S_postcentral 21 140 200 0 +3172 wm-lh-S_precentral-Inferior-part 21 20 240 0 +3173 wm-lh-S_precentral-Superior-part 21 20 200 0 +3174 wm-lh-S_subcentral_ant 61 180 60 0 +3175 wm-lh-S_subcentral_post 61 180 250 0 +3176 wm-lh-S_suborbital 21 20 60 0 +3177 wm-lh-S_subparietal 101 60 60 0 +3178 wm-lh-S_supracingulate 21 220 220 0 +3179 wm-lh-S_temporal_inferior 21 180 180 0 +3180 wm-lh-S_temporal_superior 223 220 60 0 +3181 wm-lh-S_temporal_transverse 221 60 60 0 + +4100 wm-rh-Unknown 0 0 0 0 +4101 wm-rh-Corpus_callosum 50 50 50 0 +4102 wm-rh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +4103 wm-rh-G_cingulate-Isthmus 60 25 25 0 +4104 wm-rh-G_cingulate-Main_part 25 60 60 0 +4105 wm-rh-G_cuneus 180 20 20 0 +4106 wm-rh-G_frontal_inf-Opercular_part 220 20 100 0 +4107 wm-rh-G_frontal_inf-Orbital_part 140 60 60 0 +4108 wm-rh-G_frontal_inf-Triangular_part 180 220 140 0 +4109 wm-rh-G_frontal_middle 140 100 180 0 +4110 wm-rh-G_frontal_superior 180 20 140 0 +4111 wm-rh-G_frontomarginal 140 20 140 0 +4112 wm-rh-G_insular_long 21 10 10 0 +4113 wm-rh-G_insular_short 225 140 140 0 +4114 wm-rh-G_and_S_occipital_inferior 23 60 180 0 +4115 wm-rh-G_occipital_middle 180 60 180 0 +4116 wm-rh-G_occipital_superior 20 220 60 0 +4117 wm-rh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +4118 wm-rh-G_occipit-temp_med-Lingual_part 220 180 140 0 +4119 wm-rh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +4120 wm-rh-G_orbital 220 60 20 0 +4121 wm-rh-G_paracentral 60 100 60 0 +4122 wm-rh-G_parietal_inferior-Angular_part 20 60 220 0 +4123 wm-rh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +4124 wm-rh-G_parietal_superior 220 180 220 0 +4125 wm-rh-G_postcentral 20 180 140 0 +4126 wm-rh-G_precentral 60 140 180 0 +4127 wm-rh-G_precuneus 25 20 140 0 +4128 wm-rh-G_rectus 20 60 100 0 +4129 wm-rh-G_subcallosal 60 220 20 0 +4130 wm-rh-G_subcentral 60 20 220 0 +4131 wm-rh-G_temporal_inferior 220 220 100 0 +4132 wm-rh-G_temporal_middle 180 60 60 0 +4133 wm-rh-G_temp_sup-G_temp_transv_and_interm_S 60 60 
220 0 +4134 wm-rh-G_temp_sup-Lateral_aspect 220 60 220 0 +4135 wm-rh-G_temp_sup-Planum_polare 65 220 60 0 +4136 wm-rh-G_temp_sup-Planum_tempolare 25 140 20 0 +4137 wm-rh-G_and_S_transverse_frontopolar 13 0 250 0 +4138 wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +4139 wm-rh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +4140 wm-rh-Lat_Fissure-post_sgt 61 60 100 0 +4141 wm-rh-Medial_wall 25 25 25 0 +4142 wm-rh-Pole_occipital 140 20 60 0 +4143 wm-rh-Pole_temporal 220 180 20 0 +4144 wm-rh-S_calcarine 63 180 180 0 +4145 wm-rh-S_central 221 20 10 0 +4146 wm-rh-S_central_insula 21 220 20 0 +4147 wm-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +4148 wm-rh-S_cingulate-Marginalis_part 221 20 100 0 +4149 wm-rh-S_circular_insula_anterior 221 60 140 0 +4150 wm-rh-S_circular_insula_inferior 221 20 220 0 +4151 wm-rh-S_circular_insula_superior 61 220 220 0 +4152 wm-rh-S_collateral_transverse_ant 100 200 200 0 +4153 wm-rh-S_collateral_transverse_post 10 200 200 0 +4154 wm-rh-S_frontal_inferior 221 220 20 0 +4155 wm-rh-S_frontal_middle 141 20 100 0 +4156 wm-rh-S_frontal_superior 61 220 100 0 +4157 wm-rh-S_frontomarginal 21 220 60 0 +4158 wm-rh-S_intermedius_primus-Jensen 141 60 20 0 +4159 wm-rh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +4160 wm-rh-S_occipital_anterior 61 20 180 0 +4161 wm-rh-S_occipital_middle_and_Lunatus 101 60 220 0 +4162 wm-rh-S_occipital_superior_and_transversalis 21 20 140 0 +4163 wm-rh-S_occipito-temporal_lateral 221 140 20 0 +4164 wm-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +4165 wm-rh-S_orbital-H_shapped 101 20 20 0 +4166 wm-rh-S_orbital_lateral 221 100 20 0 +4167 wm-rh-S_orbital_medial-Or_olfactory 181 200 20 0 +4168 wm-rh-S_paracentral 21 180 140 0 +4169 wm-rh-S_parieto_occipital 101 100 180 0 +4170 wm-rh-S_pericallosal 181 220 20 0 +4171 wm-rh-S_postcentral 21 140 200 0 +4172 wm-rh-S_precentral-Inferior-part 21 20 240 0 +4173 wm-rh-S_precentral-Superior-part 21 20 200 0 +4174 wm-rh-S_subcentral_ant 61 180 60 0 +4175 wm-rh-S_subcentral_post 61 180 250 0 +4176 wm-rh-S_suborbital 21 20 60 0 +4177 wm-rh-S_subparietal 101 60 60 0 +4178 wm-rh-S_supracingulate 21 220 220 0 +4179 wm-rh-S_temporal_inferior 21 180 180 0 +4180 wm-rh-S_temporal_superior 223 220 60 0 +4181 wm-rh-S_temporal_transverse 221 60 60 0 + +5001 Left-UnsegmentedWhiteMatter 20 30 40 0 +5002 Right-UnsegmentedWhiteMatter 20 30 40 0 + +# Below is the color table for white-matter pathways produced by dmri_paths + +#No. 
Label Name: R G B A +# +5100 fmajor 204 102 102 0 +5101 fminor 204 102 102 0 +# +5102 lh.atr 255 255 102 0 +5103 lh.cab 153 204 0 0 +5104 lh.ccg 0 153 153 0 +5105 lh.cst 204 153 255 0 +5106 lh.ilf 255 153 51 0 +5107 lh.slfp 204 204 204 0 +5108 lh.slft 153 255 255 0 +5109 lh.unc 102 153 255 0 +# +5110 rh.atr 255 255 102 0 +5111 rh.cab 153 204 0 0 +5112 rh.ccg 0 153 153 0 +5113 rh.cst 204 153 255 0 +5114 rh.ilf 255 153 51 0 +5115 rh.slfp 204 204 204 0 +5116 rh.slft 153 255 255 0 +5117 rh.unc 102 153 255 0 + +# These are the same tracula labels as above in human-readable form +5200 CC-ForcepsMajor 204 102 102 0 +5201 CC-ForcepsMinor 204 102 102 0 +5202 LAntThalRadiation 255 255 102 0 +5203 LCingulumAngBundle 153 204 0 0 +5204 LCingulumCingGyrus 0 153 153 0 +5205 LCorticospinalTract 204 153 255 0 +5206 LInfLongFas 255 153 51 0 +5207 LSupLongFasParietal 204 204 204 0 +5208 LSupLongFasTemporal 153 255 255 0 +5209 LUncinateFas 102 153 255 0 +5210 RAntThalRadiation 255 255 102 0 +5211 RCingulumAngBundle 153 204 0 0 +5212 RCingulumCingGyrus 0 153 153 0 +5213 RCorticospinalTract 204 153 255 0 +5214 RInfLongFas 255 153 51 0 +5215 RSupLongFasParietal 204 204 204 0 +5216 RSupLongFasTemporal 153 255 255 0 +5217 RUncinateFas 102 153 255 0 + +######################################## + +6000 CST-orig 0 255 0 0 +6001 CST-hammer 255 255 0 0 +6002 CST-CVS 0 255 255 0 +6003 CST-flirt 0 0 255 0 + +6010 Left-SLF1 236 16 231 0 +6020 Right-SLF1 237 18 232 0 + +6030 Left-SLF3 236 13 227 0 +6040 Right-SLF3 236 17 228 0 + +6050 Left-CST 1 255 1 0 +6060 Right-CST 2 255 1 0 + +6070 Left-SLF2 236 14 230 0 +6080 Right-SLF2 237 14 230 0 + +#No. Label Name: R G B A + +7001 Lateral-nucleus 72 132 181 0 +7002 Basolateral-nucleus 243 243 243 0 +7003 Basal-nucleus 207 63 79 0 +7004 Centromedial-nucleus 121 20 135 0 +7005 Central-nucleus 197 60 248 0 +7006 Medial-nucleus 2 149 2 0 +7007 Cortical-nucleus 221 249 166 0 +7008 Accessory-Basal-nucleus 232 146 35 0 +7009 Corticoamygdaloid-transitio 20 60 120 0 +7010 Anterior-amygdaloid-area-AAA 250 250 0 0 +7011 Fusion-amygdala-HP-FAH 122 187 222 0 +7012 Hippocampal-amygdala-transition-HATA 237 12 177 0 +7013 Endopiriform-nucleus 10 49 255 0 +7014 Lateral-nucleus-olfactory-tract 205 184 144 0 +7015 Paralaminar-nucleus 45 205 165 0 +7016 Intercalated-nucleus 117 160 175 0 +7017 Prepiriform-cortex 221 217 21 0 +7018 Periamygdaloid-cortex 20 60 120 0 +7019 Envelope-Amygdala 141 21 100 0 +7020 Extranuclear-Amydala 225 140 141 0 + +7100 Brainstem-inferior-colliculus 42 201 168 0 +7101 Brainstem-cochlear-nucleus 168 104 162 0 + +8001 Thalamus-Anterior 74 130 181 0 +8002 Thalamus-Ventral-anterior 242 241 240 0 +8003 Thalamus-Lateral-dorsal 206 65 78 0 +8004 Thalamus-Lateral-posterior 120 21 133 0 +8005 Thalamus-Ventral-lateral 195 61 246 0 +8006 Thalamus-Ventral-posterior-medial 3 147 6 0 +8007 Thalamus-Ventral-posterior-lateral 220 251 163 0 +8008 Thalamus-intralaminar 232 146 33 0 +8009 Thalamus-centromedian 4 114 14 0 +8010 Thalamus-mediodorsal 121 184 220 0 +8011 Thalamus-medial 235 11 175 0 +8012 Thalamus-pulvinar 12 46 250 0 +8013 Thalamus-lateral-geniculate 203 182 143 0 +8014 Thalamus-medial-geniculate 42 204 167 0 + +# +# Labels for thalamus parcellation using probabilistic tractography. See: +# Functional--Anatomical Validation and Individual Variation of Diffusion +# Tractography-based Segmentation of the Human Thalamus; Cerebral Cortex +# January 2005;15:31--39, doi:10.1093/cercor/bhh105, Advance Access +# publication July 6, 2004 +# +
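Every section of this vendored FreeSurfer-style color lookup table uses the same fixed row layout named by the "#No. Label Name: R G B A" headers: an integer index, a label name, then four integer color components. The aparc.a2009s comment block further below also explains that its indices are derived by adding 11100 (ctx_lh_) or 12100 (ctx_rh_) to the base surface-label indices. A minimal reader sketch for that layout follows; read_color_lut and the on-disk path are illustrative assumptions, not part of this diff or of MNE's API:

    def read_color_lut(path):
        # Hypothetical helper: parse '<index> <name> <R> <G> <B> <A>' rows
        # from a FreeSurfer-style color LUT, skipping blanks and '#' comments.
        lut = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                fields = line.split()
                idx, name = int(fields[0]), fields[1]
                rgba = tuple(int(v) for v in fields[2:6])
                lut[idx] = (name, rgba)
        return lut

    # e.g. lut[11101] -> ('ctx_lh_G_and_S_frontomargin', (23, 220, 60, 0)),
    # i.e. left-hemisphere a2009s label 1 offset by 11100.

+#No.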
Label Name: R G B A +9000 ctx-lh-prefrontal 30 5 30 0 +9001 ctx-lh-primary-motor 30 100 45 0 +9002 ctx-lh-premotor 130 100 165 0 +9003 ctx-lh-temporal 105 25 5 0 +9004 ctx-lh-posterior-parietal 125 70 55 0 +9005 ctx-lh-prim-sec-somatosensory 225 20 105 0 +9006 ctx-lh-occipital 225 20 15 0 + +9500 ctx-rh-prefrontal 30 55 30 0 +9501 ctx-rh-primary-motor 30 150 45 0 +9502 ctx-rh-premotor 130 150 165 0 +9503 ctx-rh-temporal 105 75 5 0 +9504 ctx-rh-posterior-parietal 125 120 55 0 +9505 ctx-rh-prim-sec-somatosensory 225 70 105 0 +9506 ctx-rh-occipital 225 70 15 0 + +# Below is the color table for the cortical labels of the seg volume +# created by mri_aparc2aseg (with --a2009s flag) in which the aseg +# cortex label is replaced by the labels in the aparc.a2009s. The +# cortical labels are the same as in Simple_surface_labels2009.txt, +# except that left hemisphere has 11100 added to the index and the +# right has 12100 added. The label names are also prepended with +# ctx_lh_, ctx_rh_, wm_lh_ and wm_rh_ (note usage of _ instead of - +# to differentiate from a2005s labels). + +#No. Label Name: R G B A +11100 ctx_lh_Unknown 0 0 0 0 +11101 ctx_lh_G_and_S_frontomargin 23 220 60 0 +11102 ctx_lh_G_and_S_occipital_inf 23 60 180 0 +11103 ctx_lh_G_and_S_paracentral 63 100 60 0 +11104 ctx_lh_G_and_S_subcentral 63 20 220 0 +11105 ctx_lh_G_and_S_transv_frontopol 13 0 250 0 +11106 ctx_lh_G_and_S_cingul-Ant 26 60 0 0 +11107 ctx_lh_G_and_S_cingul-Mid-Ant 26 60 75 0 +11108 ctx_lh_G_and_S_cingul-Mid-Post 26 60 150 0 +11109 ctx_lh_G_cingul-Post-dorsal 25 60 250 0 +11110 ctx_lh_G_cingul-Post-ventral 60 25 25 0 +11111 ctx_lh_G_cuneus 180 20 20 0 +11112 ctx_lh_G_front_inf-Opercular 220 20 100 0 +11113 ctx_lh_G_front_inf-Orbital 140 60 60 0 +11114 ctx_lh_G_front_inf-Triangul 180 220 140 0 +11115 ctx_lh_G_front_middle 140 100 180 0 +11116 ctx_lh_G_front_sup 180 20 140 0 +11117 ctx_lh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +11118 ctx_lh_G_insular_short 225 140 140 0 +11119 ctx_lh_G_occipital_middle 180 60 180 0 +11120 ctx_lh_G_occipital_sup 20 220 60 0 +11121 ctx_lh_G_oc-temp_lat-fusifor 60 20 140 0 +11122 ctx_lh_G_oc-temp_med-Lingual 220 180 140 0 +11123 ctx_lh_G_oc-temp_med-Parahip 65 100 20 0 +11124 ctx_lh_G_orbital 220 60 20 0 +11125 ctx_lh_G_pariet_inf-Angular 20 60 220 0 +11126 ctx_lh_G_pariet_inf-Supramar 100 100 60 0 +11127 ctx_lh_G_parietal_sup 220 180 220 0 +11128 ctx_lh_G_postcentral 20 180 140 0 +11129 ctx_lh_G_precentral 60 140 180 0 +11130 ctx_lh_G_precuneus 25 20 140 0 +11131 ctx_lh_G_rectus 20 60 100 0 +11132 ctx_lh_G_subcallosal 60 220 20 0 +11133 ctx_lh_G_temp_sup-G_T_transv 60 60 220 0 +11134 ctx_lh_G_temp_sup-Lateral 220 60 220 0 +11135 ctx_lh_G_temp_sup-Plan_polar 65 220 60 0 +11136 ctx_lh_G_temp_sup-Plan_tempo 25 140 20 0 +11137 ctx_lh_G_temporal_inf 220 220 100 0 +11138 ctx_lh_G_temporal_middle 180 60 60 0 +11139 ctx_lh_Lat_Fis-ant-Horizont 61 20 220 0 +11140 ctx_lh_Lat_Fis-ant-Vertical 61 20 60 0 +11141 ctx_lh_Lat_Fis-post 61 60 100 0 +11142 ctx_lh_Medial_wall 25 25 25 0 +11143 ctx_lh_Pole_occipital 140 20 60 0 +11144 ctx_lh_Pole_temporal 220 180 20 0 +11145 ctx_lh_S_calcarine 63 180 180 0 +11146 ctx_lh_S_central 221 20 10 0 +11147 ctx_lh_S_cingul-Marginalis 221 20 100 0 +11148 ctx_lh_S_circular_insula_ant 221 60 140 0 +11149 ctx_lh_S_circular_insula_inf 221 20 220 0 +11150 ctx_lh_S_circular_insula_sup 61 220 220 0 +11151 ctx_lh_S_collat_transv_ant 100 200 200 0 +11152 ctx_lh_S_collat_transv_post 10 200 200 0 +11153 ctx_lh_S_front_inf 221 220 20 0 +11154 ctx_lh_S_front_middle 141 20 100 0 +11155 
ctx_lh_S_front_sup 61 220 100 0 +11156 ctx_lh_S_interm_prim-Jensen 141 60 20 0 +11157 ctx_lh_S_intrapariet_and_P_trans 143 20 220 0 +11158 ctx_lh_S_oc_middle_and_Lunatus 101 60 220 0 +11159 ctx_lh_S_oc_sup_and_transversal 21 20 140 0 +11160 ctx_lh_S_occipital_ant 61 20 180 0 +11161 ctx_lh_S_oc-temp_lat 221 140 20 0 +11162 ctx_lh_S_oc-temp_med_and_Lingual 141 100 220 0 +11163 ctx_lh_S_orbital_lateral 221 100 20 0 +11164 ctx_lh_S_orbital_med-olfact 181 200 20 0 +11165 ctx_lh_S_orbital-H_Shaped 101 20 20 0 +11166 ctx_lh_S_parieto_occipital 101 100 180 0 +11167 ctx_lh_S_pericallosal 181 220 20 0 +11168 ctx_lh_S_postcentral 21 140 200 0 +11169 ctx_lh_S_precentral-inf-part 21 20 240 0 +11170 ctx_lh_S_precentral-sup-part 21 20 200 0 +11171 ctx_lh_S_suborbital 21 20 60 0 +11172 ctx_lh_S_subparietal 101 60 60 0 +11173 ctx_lh_S_temporal_inf 21 180 180 0 +11174 ctx_lh_S_temporal_sup 223 220 60 0 +11175 ctx_lh_S_temporal_transverse 221 60 60 0 + +12100 ctx_rh_Unknown 0 0 0 0 +12101 ctx_rh_G_and_S_frontomargin 23 220 60 0 +12102 ctx_rh_G_and_S_occipital_inf 23 60 180 0 +12103 ctx_rh_G_and_S_paracentral 63 100 60 0 +12104 ctx_rh_G_and_S_subcentral 63 20 220 0 +12105 ctx_rh_G_and_S_transv_frontopol 13 0 250 0 +12106 ctx_rh_G_and_S_cingul-Ant 26 60 0 0 +12107 ctx_rh_G_and_S_cingul-Mid-Ant 26 60 75 0 +12108 ctx_rh_G_and_S_cingul-Mid-Post 26 60 150 0 +12109 ctx_rh_G_cingul-Post-dorsal 25 60 250 0 +12110 ctx_rh_G_cingul-Post-ventral 60 25 25 0 +12111 ctx_rh_G_cuneus 180 20 20 0 +12112 ctx_rh_G_front_inf-Opercular 220 20 100 0 +12113 ctx_rh_G_front_inf-Orbital 140 60 60 0 +12114 ctx_rh_G_front_inf-Triangul 180 220 140 0 +12115 ctx_rh_G_front_middle 140 100 180 0 +12116 ctx_rh_G_front_sup 180 20 140 0 +12117 ctx_rh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +12118 ctx_rh_G_insular_short 225 140 140 0 +12119 ctx_rh_G_occipital_middle 180 60 180 0 +12120 ctx_rh_G_occipital_sup 20 220 60 0 +12121 ctx_rh_G_oc-temp_lat-fusifor 60 20 140 0 +12122 ctx_rh_G_oc-temp_med-Lingual 220 180 140 0 +12123 ctx_rh_G_oc-temp_med-Parahip 65 100 20 0 +12124 ctx_rh_G_orbital 220 60 20 0 +12125 ctx_rh_G_pariet_inf-Angular 20 60 220 0 +12126 ctx_rh_G_pariet_inf-Supramar 100 100 60 0 +12127 ctx_rh_G_parietal_sup 220 180 220 0 +12128 ctx_rh_G_postcentral 20 180 140 0 +12129 ctx_rh_G_precentral 60 140 180 0 +12130 ctx_rh_G_precuneus 25 20 140 0 +12131 ctx_rh_G_rectus 20 60 100 0 +12132 ctx_rh_G_subcallosal 60 220 20 0 +12133 ctx_rh_G_temp_sup-G_T_transv 60 60 220 0 +12134 ctx_rh_G_temp_sup-Lateral 220 60 220 0 +12135 ctx_rh_G_temp_sup-Plan_polar 65 220 60 0 +12136 ctx_rh_G_temp_sup-Plan_tempo 25 140 20 0 +12137 ctx_rh_G_temporal_inf 220 220 100 0 +12138 ctx_rh_G_temporal_middle 180 60 60 0 +12139 ctx_rh_Lat_Fis-ant-Horizont 61 20 220 0 +12140 ctx_rh_Lat_Fis-ant-Vertical 61 20 60 0 +12141 ctx_rh_Lat_Fis-post 61 60 100 0 +12142 ctx_rh_Medial_wall 25 25 25 0 +12143 ctx_rh_Pole_occipital 140 20 60 0 +12144 ctx_rh_Pole_temporal 220 180 20 0 +12145 ctx_rh_S_calcarine 63 180 180 0 +12146 ctx_rh_S_central 221 20 10 0 +12147 ctx_rh_S_cingul-Marginalis 221 20 100 0 +12148 ctx_rh_S_circular_insula_ant 221 60 140 0 +12149 ctx_rh_S_circular_insula_inf 221 20 220 0 +12150 ctx_rh_S_circular_insula_sup 61 220 220 0 +12151 ctx_rh_S_collat_transv_ant 100 200 200 0 +12152 ctx_rh_S_collat_transv_post 10 200 200 0 +12153 ctx_rh_S_front_inf 221 220 20 0 +12154 ctx_rh_S_front_middle 141 20 100 0 +12155 ctx_rh_S_front_sup 61 220 100 0 +12156 ctx_rh_S_interm_prim-Jensen 141 60 20 0 +12157 ctx_rh_S_intrapariet_and_P_trans 143 20 220 0 +12158 
ctx_rh_S_oc_middle_and_Lunatus 101 60 220 0 +12159 ctx_rh_S_oc_sup_and_transversal 21 20 140 0 +12160 ctx_rh_S_occipital_ant 61 20 180 0 +12161 ctx_rh_S_oc-temp_lat 221 140 20 0 +12162 ctx_rh_S_oc-temp_med_and_Lingual 141 100 220 0 +12163 ctx_rh_S_orbital_lateral 221 100 20 0 +12164 ctx_rh_S_orbital_med-olfact 181 200 20 0 +12165 ctx_rh_S_orbital-H_Shaped 101 20 20 0 +12166 ctx_rh_S_parieto_occipital 101 100 180 0 +12167 ctx_rh_S_pericallosal 181 220 20 0 +12168 ctx_rh_S_postcentral 21 140 200 0 +12169 ctx_rh_S_precentral-inf-part 21 20 240 0 +12170 ctx_rh_S_precentral-sup-part 21 20 200 0 +12171 ctx_rh_S_suborbital 21 20 60 0 +12172 ctx_rh_S_subparietal 101 60 60 0 +12173 ctx_rh_S_temporal_inf 21 180 180 0 +12174 ctx_rh_S_temporal_sup 223 220 60 0 +12175 ctx_rh_S_temporal_transverse 221 60 60 0 + +#No. Label Name: R G B A +13100 wm_lh_Unknown 0 0 0 0 +13101 wm_lh_G_and_S_frontomargin 23 220 60 0 +13102 wm_lh_G_and_S_occipital_inf 23 60 180 0 +13103 wm_lh_G_and_S_paracentral 63 100 60 0 +13104 wm_lh_G_and_S_subcentral 63 20 220 0 +13105 wm_lh_G_and_S_transv_frontopol 13 0 250 0 +13106 wm_lh_G_and_S_cingul-Ant 26 60 0 0 +13107 wm_lh_G_and_S_cingul-Mid-Ant 26 60 75 0 +13108 wm_lh_G_and_S_cingul-Mid-Post 26 60 150 0 +13109 wm_lh_G_cingul-Post-dorsal 25 60 250 0 +13110 wm_lh_G_cingul-Post-ventral 60 25 25 0 +13111 wm_lh_G_cuneus 180 20 20 0 +13112 wm_lh_G_front_inf-Opercular 220 20 100 0 +13113 wm_lh_G_front_inf-Orbital 140 60 60 0 +13114 wm_lh_G_front_inf-Triangul 180 220 140 0 +13115 wm_lh_G_front_middle 140 100 180 0 +13116 wm_lh_G_front_sup 180 20 140 0 +13117 wm_lh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +13118 wm_lh_G_insular_short 225 140 140 0 +13119 wm_lh_G_occipital_middle 180 60 180 0 +13120 wm_lh_G_occipital_sup 20 220 60 0 +13121 wm_lh_G_oc-temp_lat-fusifor 60 20 140 0 +13122 wm_lh_G_oc-temp_med-Lingual 220 180 140 0 +13123 wm_lh_G_oc-temp_med-Parahip 65 100 20 0 +13124 wm_lh_G_orbital 220 60 20 0 +13125 wm_lh_G_pariet_inf-Angular 20 60 220 0 +13126 wm_lh_G_pariet_inf-Supramar 100 100 60 0 +13127 wm_lh_G_parietal_sup 220 180 220 0 +13128 wm_lh_G_postcentral 20 180 140 0 +13129 wm_lh_G_precentral 60 140 180 0 +13130 wm_lh_G_precuneus 25 20 140 0 +13131 wm_lh_G_rectus 20 60 100 0 +13132 wm_lh_G_subcallosal 60 220 20 0 +13133 wm_lh_G_temp_sup-G_T_transv 60 60 220 0 +13134 wm_lh_G_temp_sup-Lateral 220 60 220 0 +13135 wm_lh_G_temp_sup-Plan_polar 65 220 60 0 +13136 wm_lh_G_temp_sup-Plan_tempo 25 140 20 0 +13137 wm_lh_G_temporal_inf 220 220 100 0 +13138 wm_lh_G_temporal_middle 180 60 60 0 +13139 wm_lh_Lat_Fis-ant-Horizont 61 20 220 0 +13140 wm_lh_Lat_Fis-ant-Vertical 61 20 60 0 +13141 wm_lh_Lat_Fis-post 61 60 100 0 +13142 wm_lh_Medial_wall 25 25 25 0 +13143 wm_lh_Pole_occipital 140 20 60 0 +13144 wm_lh_Pole_temporal 220 180 20 0 +13145 wm_lh_S_calcarine 63 180 180 0 +13146 wm_lh_S_central 221 20 10 0 +13147 wm_lh_S_cingul-Marginalis 221 20 100 0 +13148 wm_lh_S_circular_insula_ant 221 60 140 0 +13149 wm_lh_S_circular_insula_inf 221 20 220 0 +13150 wm_lh_S_circular_insula_sup 61 220 220 0 +13151 wm_lh_S_collat_transv_ant 100 200 200 0 +13152 wm_lh_S_collat_transv_post 10 200 200 0 +13153 wm_lh_S_front_inf 221 220 20 0 +13154 wm_lh_S_front_middle 141 20 100 0 +13155 wm_lh_S_front_sup 61 220 100 0 +13156 wm_lh_S_interm_prim-Jensen 141 60 20 0 +13157 wm_lh_S_intrapariet_and_P_trans 143 20 220 0 +13158 wm_lh_S_oc_middle_and_Lunatus 101 60 220 0 +13159 wm_lh_S_oc_sup_and_transversal 21 20 140 0 +13160 wm_lh_S_occipital_ant 61 20 180 0 +13161 wm_lh_S_oc-temp_lat 221 140 20 0 +13162 
wm_lh_S_oc-temp_med_and_Lingual 141 100 220 0 +13163 wm_lh_S_orbital_lateral 221 100 20 0 +13164 wm_lh_S_orbital_med-olfact 181 200 20 0 +13165 wm_lh_S_orbital-H_Shaped 101 20 20 0 +13166 wm_lh_S_parieto_occipital 101 100 180 0 +13167 wm_lh_S_pericallosal 181 220 20 0 +13168 wm_lh_S_postcentral 21 140 200 0 +13169 wm_lh_S_precentral-inf-part 21 20 240 0 +13170 wm_lh_S_precentral-sup-part 21 20 200 0 +13171 wm_lh_S_suborbital 21 20 60 0 +13172 wm_lh_S_subparietal 101 60 60 0 +13173 wm_lh_S_temporal_inf 21 180 180 0 +13174 wm_lh_S_temporal_sup 223 220 60 0 +13175 wm_lh_S_temporal_transverse 221 60 60 0 + +14100 wm_rh_Unknown 0 0 0 0 +14101 wm_rh_G_and_S_frontomargin 23 220 60 0 +14102 wm_rh_G_and_S_occipital_inf 23 60 180 0 +14103 wm_rh_G_and_S_paracentral 63 100 60 0 +14104 wm_rh_G_and_S_subcentral 63 20 220 0 +14105 wm_rh_G_and_S_transv_frontopol 13 0 250 0 +14106 wm_rh_G_and_S_cingul-Ant 26 60 0 0 +14107 wm_rh_G_and_S_cingul-Mid-Ant 26 60 75 0 +14108 wm_rh_G_and_S_cingul-Mid-Post 26 60 150 0 +14109 wm_rh_G_cingul-Post-dorsal 25 60 250 0 +14110 wm_rh_G_cingul-Post-ventral 60 25 25 0 +14111 wm_rh_G_cuneus 180 20 20 0 +14112 wm_rh_G_front_inf-Opercular 220 20 100 0 +14113 wm_rh_G_front_inf-Orbital 140 60 60 0 +14114 wm_rh_G_front_inf-Triangul 180 220 140 0 +14115 wm_rh_G_front_middle 140 100 180 0 +14116 wm_rh_G_front_sup 180 20 140 0 +14117 wm_rh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +14118 wm_rh_G_insular_short 225 140 140 0 +14119 wm_rh_G_occipital_middle 180 60 180 0 +14120 wm_rh_G_occipital_sup 20 220 60 0 +14121 wm_rh_G_oc-temp_lat-fusifor 60 20 140 0 +14122 wm_rh_G_oc-temp_med-Lingual 220 180 140 0 +14123 wm_rh_G_oc-temp_med-Parahip 65 100 20 0 +14124 wm_rh_G_orbital 220 60 20 0 +14125 wm_rh_G_pariet_inf-Angular 20 60 220 0 +14126 wm_rh_G_pariet_inf-Supramar 100 100 60 0 +14127 wm_rh_G_parietal_sup 220 180 220 0 +14128 wm_rh_G_postcentral 20 180 140 0 +14129 wm_rh_G_precentral 60 140 180 0 +14130 wm_rh_G_precuneus 25 20 140 0 +14131 wm_rh_G_rectus 20 60 100 0 +14132 wm_rh_G_subcallosal 60 220 20 0 +14133 wm_rh_G_temp_sup-G_T_transv 60 60 220 0 +14134 wm_rh_G_temp_sup-Lateral 220 60 220 0 +14135 wm_rh_G_temp_sup-Plan_polar 65 220 60 0 +14136 wm_rh_G_temp_sup-Plan_tempo 25 140 20 0 +14137 wm_rh_G_temporal_inf 220 220 100 0 +14138 wm_rh_G_temporal_middle 180 60 60 0 +14139 wm_rh_Lat_Fis-ant-Horizont 61 20 220 0 +14140 wm_rh_Lat_Fis-ant-Vertical 61 20 60 0 +14141 wm_rh_Lat_Fis-post 61 60 100 0 +14142 wm_rh_Medial_wall 25 25 25 0 +14143 wm_rh_Pole_occipital 140 20 60 0 +14144 wm_rh_Pole_temporal 220 180 20 0 +14145 wm_rh_S_calcarine 63 180 180 0 +14146 wm_rh_S_central 221 20 10 0 +14147 wm_rh_S_cingul-Marginalis 221 20 100 0 +14148 wm_rh_S_circular_insula_ant 221 60 140 0 +14149 wm_rh_S_circular_insula_inf 221 20 220 0 +14150 wm_rh_S_circular_insula_sup 61 220 220 0 +14151 wm_rh_S_collat_transv_ant 100 200 200 0 +14152 wm_rh_S_collat_transv_post 10 200 200 0 +14153 wm_rh_S_front_inf 221 220 20 0 +14154 wm_rh_S_front_middle 141 20 100 0 +14155 wm_rh_S_front_sup 61 220 100 0 +14156 wm_rh_S_interm_prim-Jensen 141 60 20 0 +14157 wm_rh_S_intrapariet_and_P_trans 143 20 220 0 +14158 wm_rh_S_oc_middle_and_Lunatus 101 60 220 0 +14159 wm_rh_S_oc_sup_and_transversal 21 20 140 0 +14160 wm_rh_S_occipital_ant 61 20 180 0 +14161 wm_rh_S_oc-temp_lat 221 140 20 0 +14162 wm_rh_S_oc-temp_med_and_Lingual 141 100 220 0 +14163 wm_rh_S_orbital_lateral 221 100 20 0 +14164 wm_rh_S_orbital_med-olfact 181 200 20 0 +14165 wm_rh_S_orbital-H_Shaped 101 20 20 0 +14166 wm_rh_S_parieto_occipital 101 100 180 0 +14167 
wm_rh_S_pericallosal 181 220 20 0 +14168 wm_rh_S_postcentral 21 140 200 0 +14169 wm_rh_S_precentral-inf-part 21 20 240 0 +14170 wm_rh_S_precentral-sup-part 21 20 200 0 +14171 wm_rh_S_suborbital 21 20 60 0 +14172 wm_rh_S_subparietal 101 60 60 0 +14173 wm_rh_S_temporal_inf 21 180 180 0 +14174 wm_rh_S_temporal_sup 223 220 60 0 +14175 wm_rh_S_temporal_transverse 221 60 60 0 + diff --git a/python/libs/mne/data/__init__.py b/python/libs/mne/data/__init__.py new file mode 100644 index 0000000..6f92b46 --- /dev/null +++ b/python/libs/mne/data/__init__.py @@ -0,0 +1 @@ +"""MNE-Python data.""" diff --git a/python/libs/mne/data/coil_def.dat b/python/libs/mne/data/coil_def.dat new file mode 100644 index 0000000..e3f7ebc --- /dev/null +++ b/python/libs/mne/data/coil_def.dat @@ -0,0 +1,776 @@ +# +# MEG coil definition file +# +# Copyright 2005 - 2019 +# +# Matti Hamalainen +# Athinoula A. Martinos Center for Biomedical Imaging +# Charlestown, MA, USA +# +# +# "" +# +# struct class id accuracy num_points size baseline description +# format '%d %d %d %d %e %e %s' +# +# +# +# struct w x y z nx ny nz +# format '%f %e %e %e %e %e %e' +# +# .... +# +# +# +# 1 magnetometer +# 2 axial gradiometer +# 3 planar gradiometer +# 4 axial second-order gradiometer +# +# 0 point approximation +# 1 normal +# 2 accurate +# +# Produced with: +# +# mne_list_coil_def version 1.14 compiled at May 15 2021 07:58:54 +# +3 2 0 2 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 61.7284 8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-61.7284 -8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 2 1 2 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 61.7284 8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-61.7284 -8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 2 2 8 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 15.1057 1.111e-02 7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 5.440e-03 7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 5.440e-03 -7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 1.111e-02 -7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -1.111e-02 7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -5.440e-03 7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -5.440e-03 -7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -1.111e-02 -7.680e-03 0.000e+00 0.000 0.000 1.000 +1 2000 0 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 2000 1 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 2000 2 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3012 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +3 3012 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3012 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 
14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3013 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +3 3013 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3013 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3014 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3014 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3014 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3015 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3015 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3015 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 
0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +1 3022 0 1 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3022 1 4 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 0.2500 -6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 +1 3022 2 16 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 0.0625 -9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 +1 3023 0 1 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3023 1 4 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 0.2500 -6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 +1 3023 2 16 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 0.0625 -9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 +1 3024 0 1 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3024 1 4 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" + 0.2500 -5.250e-03 -5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -5.250e-03 5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 5.250e-03 -5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 5.250e-03 5.250e-03 3.000e-04 0.000 0.000 1.000
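Every record in this vendored coil_def.dat follows the two-part layout documented in the header comments at the top of the file: one description line matching format '%d %d %d %d %e %e %s' (class, id, accuracy, num_points, size, baseline, quoted description), followed by num_points integration-point lines matching '%f %e %e %e %e %e %e' (weight w, position x y z, unit normal nx ny nz). A sketch of a reader for exactly that documented layout follows; read_coil_defs is an illustrative name and not MNE's internal parser:

    import shlex

    def read_coil_defs(path):
        # Hypothetical reader for the coil_def.dat layout described above.
        with open(path) as f:
            rows = [ln for ln in f if ln.strip() and not ln.startswith('#')]
        coils, i = [], 0
        while i < len(rows):
            # Description row; shlex keeps the quoted description as one token.
            head = shlex.split(rows[i])
            coil = {'class': int(head[0]), 'id': int(head[1]),
                    'accuracy': int(head[2]), 'num_points': int(head[3]),
                    'size': float(head[4]), 'baseline': float(head[5]),
                    'description': head[6], 'points': []}
            for j in range(coil['num_points']):
                w, x, y, z, nx, ny, nz = (float(v) for v in rows[i + 1 + j].split())
                # Each integration point: weight, position (m), unit normal.
                coil['points'].append((w, (x, y, z), (nx, ny, nz)))
            coils.append(coil)
            i += 1 + coil['num_points']
        return coils

Per the header's accuracy codes, the same coil id appears up to three times (0 = point approximation, 1 = normal, 2 = accurate), each with its own integration-point expansion, as in the T3 record that continues below.

+1 3024 2 16 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00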
mm" + 0.0625 -7.875e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 +1 3025 0 1 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3025 1 4 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 0.2500 -7.000e-03 -7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -7.000e-03 7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 7.000e-03 -7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 7.000e-03 7.000e-03 3.000e-04 0.000 0.000 1.000 +1 3025 2 16 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 0.0625 -1.050e-02 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 1.050e-02 3.000e-04 0.000 0.000 1.000 +1 4001 0 1 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 4001 1 4 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 0.2500 5.750e-03 5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.750e-03 5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.750e-03 -5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.750e-03 -5.750e-03 0.000e+00 0.000 0.000 1.000 +1 4001 2 7 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 9.390e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -9.390e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.695e-03 8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.695e-03 -8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.695e-03 8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.695e-03 -8.132e-03 0.000e+00 0.000 0.000 1.000 +2 4002 0 2 1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 4002 1 8 
1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 +2 4002 2 14 1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 +1 4003 0 1 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 4003 1 4 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 0.2500 7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 +1 4003 2 4 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 0.2500 7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 +2 4004 0 2 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 1.350e-01 0.000 0.000 1.000 +2 4004 1 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 0.2500 2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 +2 4004 2 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 0.2500 2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 2.000e-02 -2.000e-02 
1.350e-01 0.000 0.000 1.000 +2 4005 0 2 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 1.0000 6.750e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 -6.750e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 +2 4005 1 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 0.2500 8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 +2 4005 2 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 0.2500 8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 +2 5001 0 2 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 5001 1 8 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 +2 5001 2 14 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 +1 5002 0 1 1.600e-02 0.000e+00 "CTF reference magnetometer size = 16.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 5002 1 4 1.600e-02 0.000e+00 "CTF reference magnetometer size = 16.00 mm" + 0.2500 4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 +1 5002 2 4 1.600e-02 0.000e+00 "CTF reference 
magnetometer size = 16.00 mm" + 0.2500 4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 +2 5003 0 2 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 7.860e-02 0.000 0.000 1.000 +2 5003 1 8 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 0.2500 8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 +2 5003 2 8 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 0.2500 8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 +2 5004 0 2 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 1.0000 3.930e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 -3.930e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 +2 5004 1 8 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 0.2500 4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 +2 5004 2 8 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 0.2500 4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 +2 6001 0 2 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 6001 1 8 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 0.2500 3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.875e-03 3.875e-03 5.000e-02 0.000 0.000 
1.000 + -0.2500 -3.875e-03 3.875e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -3.875e-03 -3.875e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 3.875e-03 -3.875e-03 5.000e-02 0.000 0.000 1.000 +2 6001 2 14 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 6.328e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -6.328e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.164e-03 5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.164e-03 -5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.164e-03 5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.164e-03 -5.480e-03 5.000e-02 0.000 0.000 1.000 +1 6002 0 1 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 6002 1 4 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 0.2500 3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 +1 6002 2 7 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 +2 7001 0 2 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 7001 1 2 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 7001 2 8 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 0.2500 1.500e-03 1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -1.500e-03 1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -1.500e-03 -1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 1.500e-03 -1.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 1.500e-03 1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -1.500e-03 1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -1.500e-03 -1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 1.500e-03 -1.500e-03 5.000e-02 0.000 0.000 1.000 +1 7002 0 1 1.000e-02 0.000e+00 "BabyMEG system magnetometer size = 10.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7002 1 4 1.000e-02 0.000e+00 "BabyMEG system magnetometer size = 10.00 mm" + 0.2500 2.500e-03 2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.500e-03 2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.500e-03 -2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 2.500e-03 -2.500e-03 0.000e+00 0.000 0.000 1.000 +1 7002 2 7 1.000e-02 0.000e+00 "BabyMEG system magnetometer size = 10.00 mm" + 
0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 2.041e-03 3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 2.041e-03 -3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -2.041e-03 3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -2.041e-03 -3.536e-03 0.000e+00 0.000 0.000 1.000 +1 7003 0 1 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7003 1 4 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 +1 7003 2 7 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 +1 7004 0 1 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7004 1 4 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 +1 7004 2 7 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 +2 9001 0 2 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 9001 1 8 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 5.000e-03 5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.000e-03 5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.000e-03 -5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 5.000e-03 -5.000e-03 5.000e-02 0.000 0.000 1.000 +2 9001 2 14 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 
-7.071e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 8.165e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -8.165e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 4.082e-03 7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 4.082e-03 -7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.082e-03 7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.082e-03 -7.071e-03 5.000e-02 0.000 0.000 1.000 +2 7501 0 2 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.740e-02 0.000 0.000 1.000 +2 7501 1 8 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 5.740e-02 0.000 0.000 1.000 +2 7501 2 14 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.067e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.067e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.033e-03 5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.033e-03 -5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.033e-03 5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.033e-03 -5.254e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 6.067e-03 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 -6.067e-03 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 3.033e-03 5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 3.033e-03 -5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 -3.033e-03 5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 -3.033e-03 -5.254e-03 5.740e-02 0.000 0.000 1.000 +1 7502 0 1 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7502 1 4 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 0.2500 3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 +1 7502 2 4 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 0.2500 3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 +2 7503 0 2 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 3.000e-02 0.000 0.000 1.000 +2 7503 1 8 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 
3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 +2 7503 2 8 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 +2 9101 0 2 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 9101 1 8 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 0.2500 5.125e-03 5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.125e-03 5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.125e-03 -5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.125e-03 -5.125e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 5.125e-03 5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.125e-03 5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.125e-03 -5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 5.125e-03 -5.125e-03 5.000e-02 0.000 0.000 1.000 +2 9101 2 14 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.369e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.369e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.185e-03 7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.185e-03 -7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.185e-03 7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.185e-03 -7.248e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 8.369e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -8.369e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 4.185e-03 7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 4.185e-03 -7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.185e-03 7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.185e-03 -7.248e-03 5.000e-02 0.000 0.000 1.000 +2 9102 0 2 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 4.700e-02 0.000 0.000 1.000 +2 9102 1 16 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 0.1250 4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 
-4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 +2 9102 2 16 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 0.1250 4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 +1 8001 0 1 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8001 1 6 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8001 2 24 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 0.0417 -1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 +1 8002 0 1 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8002 1 6 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 
2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8002 2 24 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 0.0417 -1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 +1 8101 0 1 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8101 1 6 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8101 2 24 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 0.0417 -1.250e-03 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 
0.0417 1.250e-03 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 5.000e-04 -5.000e-04 0.000 0.000 1.000 +1 8201 0 1 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8201 1 9 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 0.1111 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 +1 8201 2 9 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 0.1111 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 diff --git a/python/libs/mne/data/coil_def_Elekta.dat b/python/libs/mne/data/coil_def_Elekta.dat new file mode 100644 index 0000000..4bca22d --- /dev/null +++ b/python/libs/mne/data/coil_def_Elekta.dat @@ -0,0 +1,70 @@ +# +# MEG coil definition file for Maxwell Filtering +# +# These coil definitions make use of integration points according to the last +# formula in section 25.4.62 in the "Handbook of Mathematical Functions: +# With Formulas, Graphs, and Mathematical Tables" edited by Abramowitz and Stegun. +# +# These coil definitions were used by Samu Taulu in the Spherical Space +# Separation work, which was subsequently used by Elekta in Maxfilter. The only +# difference is that the local z-coordinate was set to zero in Taulu's original +# formulation. The small z-coordinate offset (0.0003 m) is due to a manufacturing bug. +# +# Issues left to be sorted out: +# 1) Discrepancy between gradiometer base size. 16.69 in Elekta, 16.80 in MNE +# +# <class> <id> <accuracy> <np> <size> <baseline> "<description>" +# +# struct class id accuracy num_points size baseline description +# format '%d %d %d %d %e %e %s' +# +# <w_1> <x_1/m> <y_1/m> <z_1/m> <nx_1> <ny_1> <nz_1> +# +# struct w x y z nx ny nz +# format '%f %e %e %e %e %e %e' +# +# ....
+# +# <w_np> <x_np/m> <y_np/m> <z_np/m> <nx_np> <ny_np> <nz_np> +# <class> +# 1 magnetometer +# 2 axial gradiometer +# 3 planar gradiometer +# 4 axial second-order gradiometer +# <accuracy> +# 0 point approximation +# 1 normal +# 2 accurate +# +# +1 2000 2 1 0.000e+00 0.000e+00 "Point magnetometer, z-normal" + 1.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +3 3012 2 8 2.639e-02 1.669e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.69 mm" +1.4979029359e+01 1.0800000000e-02 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 5.8900000000e-03 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 5.8900000000e-03 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 1.0800000000e-02 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -1.0800000000e-02 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -5.8900000000e-03 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -5.8900000000e-03 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -1.0800000000e-02 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1 3022 2 9 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" +7.7160493800e-02 -9.9922970000e-03 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 9.9922970000e-03 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 -9.9922970000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.9753086420e-01 0.0000000000e+00 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 9.9922970000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 -9.9922970000e-03 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 9.9922970000e-03 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1 3024 2 9 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" +7.7160493800e-02 -8.1332650000e-03 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 8.1332650000e-03 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 -8.1332650000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.9753086420e-01 0.0000000000e+00 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 8.1332650000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 -8.1332650000e-03 -8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00
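Note: the coil definition files vendored here follow the layout documented in the coil_def_Elekta.dat header just above: one header line per coil (format '%d %d %d %d %e %e %s': class, id, accuracy, num_points, size, baseline, "description"), followed by num_points integration-point lines (format '%f %e %e %e %e %e %e': w, x, y, z, nx, ny, nz), with '#' marking comments. A minimal parsing sketch under those assumptions (read_coil_defs is a hypothetical name; MNE ships its own internal reader, which the bundled code uses):

import shlex

def read_coil_defs(fname):
    # Keep only data lines; comments start with '#'.
    with open(fname) as fid:
        lines = [ln for ln in fid
                 if ln.strip() and not ln.lstrip().startswith('#')]
    coils, i = [], 0
    while i < len(lines):
        # Coil header: %d %d %d %d %e %e %s
        # (shlex keeps the quoted description together as one field).
        f = shlex.split(lines[i])
        coil = {'class': int(f[0]), 'id': int(f[1]), 'accuracy': int(f[2]),
                'num_points': int(f[3]), 'size': float(f[4]),
                'baseline': float(f[5]), 'description': f[6]}
        # Integration points: %f %e %e %e %e %e %e
        pts = [[float(v) for v in lines[i + 1 + k].split()]
               for k in range(coil['num_points'])]
        coil['w'] = [p[0] for p in pts]         # integration weights
        coil['rmag'] = [p[1:4] for p in pts]    # point positions (m)
        coil['cosmag'] = [p[4:7] for p in pts]  # point normal vectors
        coils.append(coil)
        i += 1 + coil['num_points']
    return coils

For example, the accuracy-2 definition "1 8201 2 9 ..." above parses into nine integration points, each with weight 0.1111 (1/9).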
-8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 8.1332650000e-03 -8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 diff --git a/python/libs/mne/data/eegbci_checksums.txt b/python/libs/mne/data/eegbci_checksums.txt new file mode 100644 index 0000000..1702429 --- /dev/null +++ b/python/libs/mne/data/eegbci_checksums.txt @@ -0,0 +1,3058 @@ +S008/S008R06.edf.event 149997f77af08c9d6ad150aad5198f91c6c964c07e4d639baa770eac01012cfc +S008/S008R05.edf.event 51f07832e9b1d3d8c667f73dde4aa38f9d3e45cf2a4c2baf8e47ea328c860420 +S008/S008R02.edf dcd82e2a2477c52ca4a3dc784d9c04a55f935f2ed9ff10cfe0ec880d56c60edc +S008/S008R09.edf.event 0db4656c1041f6626ac6fd54117fb1e02890492bb86525e197e9ed116a0fe6c7 +S008/S008R03.edf ab3ea90d829e1e2e10bb6e551e828e66ac6262ec7bc24e2e17db6e1e350088fe +S008/S008R08.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S008/S008R13.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S008/S008R03.edf.event 8d9c5bb3c83f5f447b4f8159b1454d55332838299c0e4a8e2dc62f413b08cea6 +S008/S008R05.edf 4a1005e7d877efe17e6ab6849665304f21aae138ef72759e5e2e0b96e444e447 +S008/S008R07.edf 9e05bf83ad067538667ab853081165b854b3d13ee334c4185ee5f40aa6b76a7e +S008/S008R12.edf ddced4ba4dc801313554039823fa1826d0dd52648f87a5ee5ada8e9cdd0678c8 +S008/S008R07.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S008/S008R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S008/S008R14.edf 19c943fb32f7749b7e37d8765f84a3bbf76c4ac7ea48ff29fa074322ebcad885 +S008/S008R12.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S008/S008R09.edf a13ed84381df5a71e3e0049a8cdc33f8f04f5f4e3f3af8881ade8b5c57064713 +S008/S008R11.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S008/S008R01.edf 678e47541d9903c300ba7811554ad1f8bfbe2bff086407cb4ff489d2d0e507bc +S008/S008R14.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S008/S008R08.edf 534c25b65a4fa68afe29a5c0272a686ac474e638c86521b177660d888401f374 +S008/S008R10.edf 6a7934c18466078caf899f724cf13b665d98e41fac9d978d9521f89021e0377c +S008/S008R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S008/S008R11.edf ef072b260a1d92c45d5d43a2a38c2affc8db37bc2da3df2d0962c44cdc449131 +S008/S008R04.edf 034a26131e1425e6374a459e5887b1f831f7bfdb101a3658d2cd07620cf2c06b +S008/S008R13.edf 9707099fbc3c9bb8cd9655c9ac491b5c6eb0383ea2a17972602f0e5c68ee6741 +S008/S008R10.edf.event f338197f5dd0ca078ea8eee22145e57e694f7dce6a2bbd55f5f05346ce3b3f17 +S008/S008R04.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S008/S008R06.edf c2afadf0b5cbc8764825fbe26ae358df677c46ce44c6e8622e4fa3d47d6abb14 +S024/S024R06.edf f4553ab40c7b8334a61f1f880d9cad635af87210fb59de6ab5b84ec79af0c296 +S024/S024R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S024/S024R11.edf.event 6e13e4f708f4b769dde500a1db747206aa87951c3914cdd436fb7f07e9aa5052 +S024/S024R12.edf f6287534159e552ed194cd1e3d44719e5ac162691779fd1a6fa3ae535cc036df +S024/S024R14.edf 774fb0c7d2bd28ab25364a1761d042725081a1ccb77f7dbd80d6b32be18252bd +S024/S024R09.edf 660928b6113112c7aaca4e2ed77188fcb66a12b75577f670202eb070b6fb351c +S024/S024R13.edf bf02f4f15115f6cb760f5f0f37437b930e648a85a43840b0382a83c6aa0e7144 +S024/S024R13.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S024/S024R11.edf 
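Note: eegbci_checksums.txt, added above, is a plain two-column manifest: a file path relative to the dataset root, then the SHA-256 digest of that file. A minimal verification sketch, assuming the dataset sits under a local root directory (the function name and paths are illustrative):

import hashlib
from pathlib import Path

def verify_checksums(manifest, root):
    # Yield (relative_path, matches) for every entry in the manifest.
    root = Path(root).expanduser()
    for line in Path(manifest).read_text().splitlines():
        if not line.strip():
            continue
        rel, expected = line.split()
        digest = hashlib.sha256((root / rel).read_bytes()).hexdigest()
        yield rel, digest == expected

# Example: collect any files that fail verification.
# bad = [p for p, ok in verify_checksums('eegbci_checksums.txt', '~/eegbci') if not ok]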
9ff78f7743d2b47590d5ae93f97d1afc5d266d41d086f6706cdc87ec32de31d4 +S024/S024R03.edf add2fbc9ed9bac885c6c192f7dc4cbfee1872da48da7423e2b06955bb200e0da +S024/S024R04.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S024/S024R01.edf abd9b141d8e2853e30a126a891929bf99f22661e27f571f02d5f6a415b40841a +S024/S024R07.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S024/S024R05.edf f0de0cc7eb55154e2378911413c3a2ce353a2b4e68a9d298b2778ac4c0c7e587 +S024/S024R03.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S024/S024R08.edf 863899a80ad93a9ed8871be3bb775f8e74bf777dbf7a2c3da50e1433ceb3b50c +S024/S024R04.edf fe32b65b90079a307b05c42602aa66b3c79e09726ce9457e1d95242b102c6ba1 +S024/S024R02.edf 23634177bf16de6e243a1c7f0902043ccef5befca66b0504950cd45e08cc6e6f +S024/S024R12.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S024/S024R06.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S024/S024R10.edf ef34765f73a46e40ee2c59acc9a15d308765da781da11ce01197ee6e21f41c0f +S024/S024R09.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S024/S024R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S024/S024R07.edf cbf04619f8a980286067dd7fb943a3c5f1bce9928e27fdf3910ac1715d50f420 +S024/S024R05.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S024/S024R14.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S024/S024R08.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S024/S024R10.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S076/S076R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S076/S076R09.edf.event 0acfeb483b2cb94304c9eb67c0f7c62d40e5219d810874aa1100d47751846b50 +S076/S076R13.edf 5a1a2283844cf3b493f92ffcbe31f2f3f74f09ecef18181edf644d10bcabd041 +S076/S076R06.edf 77739b23f5c793eddec3f39523c08c0dd1794cbee1886bfe34fc32c94700f557 +S076/S076R09.edf 074de623e475352ea6278d66895b15699bbd7213b561f7ec23791ab3a86e3629 +S076/S076R13.edf.event 26ab1ab393fc9047a9dcc0795bfb1c27444f58002c53a85545ba90395b1b54a2 +S076/S076R10.edf 64735a5159983af2f17f6405284ce59abb176211178e9ffa6bf2142bebd1055a +S076/S076R05.edf.event da9621a04c94f97b7529e748da5c8ea934cf17ff04b8b25167bbefb402b05574 +S076/S076R05.edf 9ad75a6b4e666c5f1db7afadf553b3d6f7f0eb46cbfe9980ef83f2ca4c351bca +S076/S076R03.edf.event 816b9e94c71aa492a890b3ee1209e4e3978a2bc1a055bb8d5b29d2aa438e7519 +S076/S076R07.edf.event 25f3656128a87915adc8cadfaac6dcf8a8c1c6f327617318054f010301d6b6c6 +S076/S076R08.edf.event df69a0d4526ab4c42f8d35b328874aafaeda087bb95ee7310d4f3654498f5746 +S076/S076R02.edf 092bbc018ef8bbfe21ff2ebf10e9587c00a1836a922c391e632b829e38aef05a +S076/S076R14.edf.event 246045b94ade6decbe7fab2d5ffd7ca93aada9d955f2d3ad771d86993cebd407 +S076/S076R10.edf.event f902df8b382a804cc4355668d708d0c1fc4103aa929df7db535c913a6352a463 +S076/S076R08.edf b7177ac5c852f96bc54093baafb7fcf0036cabd1263b8dfc2ab594105df3ead8 +S076/S076R11.edf.event 3cb2bc9973bfc45d63eacff0d4dbc31c925dfbadc12a78a18ff82731d7cdf19e +S076/S076R01.edf 86659701591d9e676b453556fd006a7501530d9f328050f2860060150b9ee75b +S076/S076R04.edf 48efdb5438f403e40a95262b12bcfe0147d581610c57ae33ed71c639068a04e8 +S076/S076R11.edf 2a5e9b3db7c0a2d6e030a25131a20e62bb5c4995cf9c001baf605fd0b7f8f254 +S076/S076R12.edf.event 879d902507387beb62ba2852ca4d85abb3e0bf9ee2913b3a553338a93590fe26 +S076/S076R01.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S076/S076R04.edf.event 994f60d74757d686b44b6a76e2d8e5946d5c8bec058f00a2264bae2e66ef7f62 +S076/S076R12.edf 6e9353b6b73166048fe7477a7f76b25a768d56bb0490188f5c8186506e96c47a +S076/S076R07.edf f45b4bb5193806594236d25ce2e6d3664d560aba813b92259e688f791a8d3552 +S076/S076R03.edf 413e4c44629550309789e4d0bcb33b6c5d9018731b497fc5189634e077636860 +S076/S076R14.edf 51114d952fde1e67627aa2abe33ffbdaf29653097cdc81c559f16d08226c0aad +S076/S076R06.edf.event 4e2eb91af71f5afce0e8a62b4a4ca68badcc99f5cad95fefc14d81d3b8b942ba +S102/S102R09.edf 507e52473e0378f094eb7622edb4ebc669772dcaf28961055790d17adfe64ea2 +S102/S102R01.edf 8eb9f4a85bd7854b545d00504f59d00ae4ba301011bcf3b562fb8d09ecb37d1d +S102/S102R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S102/S102R04.edf a66ad16f27f36c7fd126e15d47eae464e52c906eb5777c9188f392d0db97169e +S102/S102R07.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S102/S102R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S102/S102R09.edf.event 2d2b8dff802b702fe4eb03f1ac14632c8e7ac45acf9ff5566d798e576f1458d7 +S102/S102R06.edf 2275d9b0af1689c652d0846747eef993479998941fe7dbd3fc1206e39922b67d +S102/S102R04.edf.event 9fd975ed76b006ff20105ebf0fd1ae6dd127f008e06a75cad4484483eaad568d +S102/S102R05.edf.event f89b8b851132f6a167fb816989ff4c56e00ee3cea0b5a0531d0b31636ad86634 +S102/S102R12.edf bdfca4e013a18c8f43c424fd3acf604c231b1405e12c86dc7dd902b427d235a0 +S102/S102R07.edf f25c51950d872a5bfd169cb79263fda5e7c49d4c14d9cdfede7698b242099bde +S102/S102R10.edf 0dd5cabe19f4a7be3e907a69b752c41d459da617fe1b4110ed4bf8400a912666 +S102/S102R11.edf 8debe5134c1bec5e986ce2c025cd133bf1de8486837c363910b79349ebd6e889 +S102/S102R02.edf dedc3e5421cfd02d5505738c92bfe77ad7afc5478cc5c63f1ee07598a66af255 +S102/S102R14.edf 9c983d89d2df07f4477a73b17c5226166f859498f1f859fa64ca92ea3b144427 +S102/S102R14.edf.event 28f75c7ca5b9a2497e2ac9802a60d98795fd30929d3a9999fdab4252b01f28a3 +S102/S102R11.edf.event 82111548c0e7fdffe9a81852fe0ca1917a0898ce34e4d5af5bec8826a50ae766 +S102/S102R08.edf.event 94aaf9017009cf1af17ca3bf9b16aa30c750bd60048f18af249b106afdde2c63 +S102/S102R03.edf f4e0d946b27185e80ed8326d2b0985a72e8c1b25aba86496ba3c91797fd8a6b3 +S102/S102R10.edf.event 761750058b75b97c1177655c9399a4de55d2f053dd7397c6d95c96c7718ded98 +S102/S102R12.edf.event 8eec54f234ee4835606a193713d02283c66ac6177147ea2d90c6cb07814fb12f +S102/S102R13.edf.event d107d48c57429e3818a39fec732023db2d15060d55e82d27b578baa3875b6025 +S102/S102R05.edf 7d0b0c5d517978af5f1ff0386f14eb81af3f9d3402986bbb02e7afd47a7c98b6 +S102/S102R08.edf 6155ab874f65352641325fa1a308cca891820821572a28cae81a87740a046883 +S102/S102R06.edf.event 2de1f3543fab2d52b1e97e2b52f84bee5719ac28f3e613357cc69f70ce2f1e7b +S102/S102R03.edf.event 6c457793161b0b2dafe7d78bf4d750a570530145b98ce0bb007627422152b0f2 +S102/S102R13.edf 9ba36fa3b8377e35c9e666cfa48715e3f2b07513739f7dbb841c45d541673cef +S105/S105R14.edf cd55636e49e04b863df13ab0aa09bc5294eef100f8bc055cbc528f316c415aa6 +S105/S105R10.edf.event 353c44c45eb89b709590af77f11106666cfa3680e5470b61c607b80304933399 +S105/S105R03.edf b04097ea13899d9139a25b8a109d83b78f45aebdd7145610d1abb74ab1302f4a +S105/S105R11.edf.event d9d89addd8fca4d057ce27c16b349184b9dc3b13193561b7c99ffa9414e86138 +S105/S105R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S105/S105R06.edf.event 61280c9a206447732db06a6480d23654d272250513af280790c631d46fd6de1f +S105/S105R08.edf.event 
537ad705e53a339bd1d130f3331df882a0416fb7e95c4f565d283142dcd120f4 +S105/S105R04.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S105/S105R14.edf.event 3fef219961dd488cd267ad63abdc2bd5db054783d67c836968d1266e4a9f8560 +S105/S105R11.edf 1275ff3237fa4bfc006b5e296d6a77a2d65c9b1045451b1485f2837d5ba189d3 +S105/S105R10.edf 3367bdaef3d66d056ec64c553ecb2fecf7fb19677b0d310e4fa6c7501052b7c5 +S105/S105R05.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S105/S105R07.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S105/S105R02.edf 8816d8070f58a9ddee89d1d6f9923c9df3d73707a0a204953a128b6c357d237c +S105/S105R09.edf.event 9787b103ae02f348ebe837cf8e545e3e630609a320868cfa229f0f82a0734ddd +S105/S105R09.edf f3b966181f93016d52127e1bcc049bf5c4e866bc0540fc7697f369618f78741a +S105/S105R01.edf 712c423a028c67f2fa328bb427cf140fbbeca2fee90bec6c17fb54ed01ffec85 +S105/S105R04.edf 218e3e8ba2bc171a3c6dd5891e6cd604c8fec4f32fe2d4c274732ef880264180 +S105/S105R03.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S105/S105R13.edf.event e4c9fdcbbe3469b81dd48d30396ee921a23d45fb900a0dd3b7eb4ceaf04936a6 +S105/S105R12.edf.event bf636aa7d9551082dd6cb4265143a22c283dfa2e23b0fee221d83a0b0a57742c +S105/S105R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S105/S105R13.edf 0942c77de3e2167d8f6d2ef7cc5b62fbfc9248ea92985c0c484726a0b893bb51 +S105/S105R07.edf dd475e1a162d6665cca22cfe293d45874b84a017007ca845300fb205ace24424 +S105/S105R05.edf a50dfac654a6f0e0d730cd2475e086f78122aac22d288792c44756e646ccfed1 +S105/S105R12.edf 926fe3afca0077495884cbd67a09761d0a7cbb956c37dc7c140165798b510891 +S105/S105R06.edf f80b5f1ae34d833fc370e17a8e369609240a43f2dcad050eed9b393a32cc4475 +S105/S105R08.edf 52bca66d6e03db2c45e6cf560f54d13abe113ad3fca5396ac4c6fda7796e36c1 +S090/S090R11.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S090/S090R04.edf.event e3fab15583152a967f41c771dfbae2769f9b7aa683947093588c32e8521f7150 +S090/S090R07.edf 4dcb675b5b73c8d79b7e780e310ef91e5060c5965b6aa7e0bdeeed00862e2621 +S090/S090R08.edf.event 4c374f58a91c1ff71894cacb50bfb798d20cbef67ee65a1ce8a2d5826349e390 +S090/S090R05.edf 0f8919f65642a8abae2baa57f5c4550ebfc3c5320eb97d1c9da11b4fe082a926 +S090/S090R06.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S090/S090R12.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S090/S090R10.edf 53184bb4b5b4785bb80819ee6779f6684b5b17b7bdc0be4bfa1113c2c723adb1 +S090/S090R09.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S090/S090R04.edf 693e109389bc37367ab66ff558171a2fdbcf74bdcace569303e7da8c0f9f8a58 +S090/S090R03.edf aa2696005ede093289712cbff6c1d9ca7fb72c3f127e8e29593b87c160a2c277 +S090/S090R07.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S090/S090R01.edf ef8e84de758cebfad15ee86d49bd134578ac5ff24154ece98e9e853c91356e34 +S090/S090R12.edf 8cc1a80be7f14683b645e7443854c6cd938e31049be92344adc58a9e4c586554 +S090/S090R14.edf.event b9997b11f88beac0859d8b671d263d82a6687a271c7dccc0c874b0cb51d57af4 +S090/S090R14.edf 2b900c0724f97e41416ffabc40ff8de4bc2e3333f808169bd2e5c9b4be5c9f76 +S090/S090R05.edf.event 5bb35bd49434a9630e941b5646d6d89f7907531ef3e44464334b78943d4b0237 +S090/S090R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S090/S090R06.edf 07c41b448f75aa807e63018fe370a017e00f6d82b8d23be59ed8969e4091492a +S090/S090R13.edf.event 
c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S090/S090R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S090/S090R03.edf.event a6b508d78111c06b1a185c7a70db08a6126b40c12b7f09d110a17945d19cfdbf +S090/S090R09.edf 6c3614bce81b0e38d7b41c9bb18c77899f1d71be7fa8d135c1f81b210b71d950 +S090/S090R08.edf 9585057959bbaae8a4b6ef81546a88a3ae1d6e95bbc7dfb1dd6a846d46af0637 +S090/S090R10.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S090/S090R11.edf d2bf6493d1968c5e0ad3c90f0a5b807b87e321ba5e516d2184db3c18a54443bb +S090/S090R13.edf 4aab298b8aeb892039c948102e8dec14d8824c876dd27332d147f2da11e64a97 +S090/S090R02.edf 19877620e881df6cf52aa25b6383dd55d9097e4425b16b9451f99d68a67c51f7 +S039/S039R03.edf 966a6c64ac3e79dcd455917fdb1b298b812bd8164b77089f560ab6fe0a93d941 +S039/S039R10.edf 4e1fe26c20c3990f2a197c6f4090613de90eab0699c4606daa9bfca9eaefdd77 +S039/S039R08.edf 4f5f011e61932f4f5998692bd6324c70b2286191dca137e340afbd4f9b7b93ae +S039/S039R11.edf 46dc22efd58d1179f521a3827b62d5ab4081fc333bb65025e940aeccf34e684a +S039/S039R12.edf 19078a959cce7c535e92c6fdc557dbd01c162549784a409af79929d9c4f20e16 +S039/S039R11.edf.event 1f21e6a28cc1b59fdc3667207c5b1029902f6d0018bb978cecbb5905b868cb10 +S039/S039R14.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S039/S039R09.edf 1d86d0dd505aa15e6e819feb0d9a36f43f783f25861514aaf703b8f93ed774b1 +S039/S039R09.edf.event e1b29e90b20be6fe74d2526e8f695d3e9dfe8bdd7f08c801c24f523ac253462a +S039/S039R05.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S039/S039R05.edf 127aa579b8333abc584461ad7efba5ba8b8b9da701fe111677a812dc5d73d622 +S039/S039R03.edf.event b7ad55e8ce55c5743b2d7e417e77c8d7efdeba7b6cbbddb02fe11361879eb9a4 +S039/S039R02.edf e54936d509c572066383ae4250e28f41e9990bad464c26eaa10e248667464483 +S039/S039R04.edf 57302f5f9cd1714693e52b7785a839666da1560ee88b00779fe7f18f56899324 +S039/S039R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S039/S039R10.edf.event a50926e15d8027e4167d22e5479fe3751780003aa245b90b2eecb0b4f474fde2 +S039/S039R13.edf 9bef82ea3f69eff3f3c9f47779593a278a10c9ea379b35d5741b063a4a22c66f +S039/S039R12.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S039/S039R08.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S039/S039R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S039/S039R14.edf 29a2f92b3a9ddad0d55ff5440eea81b69fcf6b21e62e33c3dbfdb42fc1398190 +S039/S039R04.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S039/S039R06.edf.event 8fb228960cd397f6100e75cca49e1045159bf62e6184dd5267ea608be1d540b6 +S039/S039R07.edf c0c5867355e92bfcd0da79ac30584aa8a97558aaa9900af35f43f5acdbc82378 +S039/S039R06.edf 8512ad15c7d2f497fd1d48753b10f81d6dfde4a1c27434499cf1f50e4e066b32 +S039/S039R01.edf b1c5ca3e506c3dc0d7dc855f3908b5d4ac3b1839e9fac8e7dd316eaaa50f953e +S039/S039R13.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S039/S039R07.edf.event e1b29e90b20be6fe74d2526e8f695d3e9dfe8bdd7f08c801c24f523ac253462a +S098/S098R12.edf 7523f3be8c00b0ffaf151c4e5f4b3407a88d499b65aeb8b37e6c37bdbed5bb01 +S098/S098R14.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S098/S098R06.edf d07ce4a4f2d6eab3e8f0b3b45285302c9551a6562cb31a247f160d5d702e6b73 +S098/S098R13.edf.event e129d841c2e51546d5ad32cb6c2a8303e4c0bfcd90dc9e3e821c0c40a9a049bc +S098/S098R07.edf 
9e6287b11b514ababb4185c98955086fbc53547c2758e2876e23c63bbd7ff766 +S098/S098R09.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S098/S098R12.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S098/S098R10.edf e7f29d50b356234f59f36e3f2e239864e40696bafa7f856f295900064ed8519a +S098/S098R07.edf.event b0c7884218a114ab4fc2b8cb09b2c8f1bd0ddbaf69aac65191618c0a230f65d3 +S098/S098R04.edf 9bae31bf3add196cc4588f279b3a372112331b5b385eec4490f09a23c29a9468 +S098/S098R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S098/S098R05.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S098/S098R05.edf 0e27c55698a030660f39462f0f948da1b1637b4381c3babb432b3963483b8044 +S098/S098R13.edf 95d6338578f27881bf3cc14b09ee3137a5e5be8e7ce598e5c7f562bc1715d93c +S098/S098R08.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S098/S098R11.edf 55c06e652ae9e6862e19c9e6e0709add991fda1d314df27982d3dfc13ea93a72 +S098/S098R09.edf d0c14bf014bb521244332d7044a8e01763266175d676323a82a92a0164632430 +S098/S098R01.edf e908b862df484c6ffa822ccbb5658894cd3459fadd6c9d02b26e30bb9c392552 +S098/S098R08.edf e357cf8d5236aa3b63e212bccd663e8f07d8b98952d3ae301eca861a63736c19 +S098/S098R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S098/S098R06.edf.event e8abea1fb06a5ad73e91ea5c97aa84247e1c165ee2bc724e9b9c7ae6f6c017cc +S098/S098R02.edf 2d710c6eac0d9bf911512a5cd3ef10f168592af7b2d4f76aa7378c77a9b83bb5 +S098/S098R14.edf cd8fe3074a59818a00e8749cd0a3b9ada624955b56acf7cc81bc20824a7370bd +S098/S098R03.edf 5f37a275e82a5911f1f467fee237bd97dac7496277db7025864bbeb0e8e52b80 +S098/S098R10.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S098/S098R03.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S098/S098R04.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S098/S098R11.edf.event a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S020/S020R05.edf b78e35ca2354a77d8d44af4b50205f09bbf866d8f295d5047cca111f60eae213 +S020/S020R14.edf.event bd6dfaccdd7deb73743f500e0a1fa9d0ff333e94d59c6d153e2e9d2dc8a4795f +S020/S020R09.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S020/S020R08.edf 05314b692cd4c103071ed616e36f7dec96116f2b5251e0912a35d65782ae9205 +S020/S020R12.edf 63ef2b1452fe4a93a84c501f10fc35a234d87eece20f0b563b373229b9b8ae7d +S020/S020R11.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S020/S020R06.edf 6e127745b2b1031bb274f72063b954a79b584163e17c964aa8e859c878498145 +S020/S020R04.edf 3ff2fd30d246987573897d429b8a6c51cca3f1ea4cfcf8815c49e1f6f6512013 +S020/S020R12.edf.event 611df9f780acf887245656c2987fb77e486d2bc016936d00eea0a55d2f5c3028 +S020/S020R02.edf 0fce787b1715bb3799b110bbdf72198fbb5d46af9b25d1c0f55014f245afdfb7 +S020/S020R14.edf 1d9a26f8c38886fa4d700deb86100e56df958b2507875f45adbac7c98de15ed8 +S020/S020R10.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S020/S020R10.edf aef44481a6fe1a8e732450ef594b3167bca2fbd01bb23a29d7ec79a670f0a658 +S020/S020R09.edf 92c11a238de78b5f58402760ca9e157391ca8501705dfd4a4a172fd7e2a7bf05 +S020/S020R13.edf 8fe70e65b1e8f3a61b748d12a01a3d3844071b32bc591a8996b9a4fe1d824b66 +S020/S020R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S020/S020R05.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S020/S020R07.edf 
7e1038d12397731e36039b6ba26504383d9f700d4b2e0252421d072d41cb3ce3 +S020/S020R13.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S020/S020R03.edf.event 6ad812d50b44ed49ee87e09d1cf28b68a99855b6d266be8b9017d8056de057b4 +S020/S020R07.edf.event 151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S020/S020R04.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S020/S020R06.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S020/S020R08.edf.event 146180adc42ba38b30989a7a30f8dc33c397aeed2db797d266b9ddc607fbddec +S020/S020R11.edf 80014e2cfda81a19647e9f2a16f66082693a591b78a57aee5d220b3ef2a0241f +S020/S020R03.edf 4660edd9962577ceac6e35db1a5c0756fbdfa954a77f479dc8aa3e3b3819f453 +S020/S020R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S020/S020R01.edf da48b7d4ed805218e12c70c775b47a3a111670901e06e7de843916c579278693 +S042/S042R14.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S042/S042R12.edf e327ec08604c1a835bd0082c79c2b4f44ec22c1e0b39d12ca7be6c80d1acc8d9 +S042/S042R10.edf a414c5017fa91f1f099174fa99bd13b3391b68fa1473cb0b6642debf264365f1 +S042/S042R13.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S042/S042R05.edf 423d7daa4453176ed054da2ac90d2951c45c48f4324a49cde4846160af810e52 +S042/S042R05.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S042/S042R11.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S042/S042R09.edf 621acaf37bc58d96395bf0a6d8c242e2784768822aa8515251a3cfa149ca5fcd +S042/S042R03.edf 27158284aabae182fcc060a945e8070bf0e9a40bbe737f43a1c4baba5988d34c +S042/S042R11.edf b9eb76af792f6ea5992359b14ad57ece1d5ac3b3d233cc46ebfc6afd07bfd697 +S042/S042R06.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S042/S042R08.edf b277ee780d7ed612958ca36ea7db2be9797001d5afcaf1dc20cf554dd774d18f +S042/S042R07.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S042/S042R06.edf d4bcaf4ed157911a326c61770386a2d9c95dd8e60d8b3d035efc8266dea1292b +S042/S042R03.edf.event 39c9864c57efec906759ab97dba0ab26a900fa25ad8fc3c48b0d97ea83c3a893 +S042/S042R08.edf.event e16d907d8f296edaf98d1ab54138ee16bae85a4bd81d90a487ccfece5b611fd7 +S042/S042R04.edf 278f745b79a507e87c4a4fe97542416b7723ee66e6960a459d43995486cf60bf +S042/S042R10.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S042/S042R02.edf 34bdf9da7d2a4845db721022179717d6ab5b9e7acb47e2c8f96be20a4c6fc30f +S042/S042R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S042/S042R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S042/S042R09.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S042/S042R07.edf bacd80ce7c09dd4442384e04babdb34259e79f708c2b093a613b4478f305c2e9 +S042/S042R13.edf 676232fb801e81ade568c6ea9c5e2f98f262283168c92358a0251c50c6fc0565 +S042/S042R14.edf c5292099a382f2a0279cfc29f3a86322e8b607f5329f2295524faa528b7f23fb +S042/S042R04.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S042/S042R12.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S042/S042R01.edf 34f1adbcb0155953eb68874d9a23cc1a8d7ce57a507bc6ceea9189b410d38439 +S031/S031R11.edf 77ae7fd0a38e2bd23eaab19f8c612d144cece3d581fc0ce6740b9eec4e8dfc49 +S031/S031R06.edf ac5b92e882911bc3d4c5d1410a9baca5282536cbe1d183f6333b8fbf9ce8ab54 +S031/S031R10.edf.event 
57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S031/S031R03.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S031/S031R06.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S031/S031R05.edf 92e138d4764bc8709af2170722b28d816655ff80287f418346b2886387f25a67 +S031/S031R10.edf 0967dba784038babf1459f8afb6a1b6ae57e51e591a8d84896d5c81736fd4a18 +S031/S031R07.edf ec5602c9eca8e29c1aa51f225504e97189589a18848d1ad968320dcdbf834daf +S031/S031R11.edf.event a931510ec25c8e6b0352576ca7f98b414a922451f0a6ee6aab03d3409a677c66 +S031/S031R14.edf bf60fcc7c476f9e7b0b23710f0c62edd6ed3364698db305fd76925f052c27f5a +S031/S031R01.edf 55d1e8e8145b8eb6778468b55020662275d152d34f33e18c2893b70605175fa3 +S031/S031R04.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S031/S031R13.edf.event 2da7955eedd5ec220793d3b4b3224c26256f1427c213729179eecd79fd4321c2 +S031/S031R05.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S031/S031R02.edf e1f18d1810f486b34075e9b2ba9f028e303b5f548c2477d1340ed4ce0f578bf6 +S031/S031R12.edf 6bfe914504368ce54822eac53598d9d53b251b39b17e9a7090dd64619fcee982 +S031/S031R14.edf.event 5b8eb2e2184e2816cb3f7104149139af83c25f381a47b42e81c9c6aa1723fb5a +S031/S031R07.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S031/S031R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S031/S031R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S031/S031R04.edf 514a2b800313549f00e927f8f322eac9a381b715b8728b835f0986fa4f7d5096 +S031/S031R08.edf b4cc77950c0f364cceb1ab55559a40c3ffe7a5b2fd5146a8ffa0243a35897e6a +S031/S031R08.edf.event 927b59b2e19d89df8169362057a329c5b70fd65d1d2d0a77e56546927bc02281 +S031/S031R09.edf 3217f543f6f1db0ef3b8e7b268e87e0e1dd69bfc7fdbd33e0ad8ba295341161e +S031/S031R03.edf f9d75722d2b3412a9b9da296dd28185d54f6b5471082f7952c2cfa51212f6845 +S031/S031R12.edf.event f1d83aaf535b3be7098ef9960def69c712f4dadb4f334e40434e155e9088b299 +S031/S031R09.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S031/S031R13.edf f0fefd12e9a037ba9d46750508f834c99d1331f75024be889bf05c826e55b2c6 +RECORDS 02d1de9c00511d3b7548a6b59bdf209c819db025518ddcc52b21fd3b2b9de4a8 +S004/S004R05.edf 03d587f2b60f6fc1cb8105ea06219b560345e354f4e6623cb1840cea5d0e7138 +S004/S004R12.edf c44b7be0464d86be4d460ec66432869b7b3e8dcaa2067af02d6e772abf5c11de +S004/S004R10.edf 8f8034251a5bc4bf8dd8ecc1869da6ffd9e61bb9d9f8a4ae0df9003d72d40e9f +S004/S004R12.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S004/S004R14.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S004/S004R09.edf 2b5b61af7ebad040f57ab761e7b2bfb83bd7488801bfcf35805399c076b5e1d3 +S004/S004R10.edf.event 082acf669cebc89d5f5f4bf29769c743f3b1e14290b8f0f34415e285db553b3a +S004/S004R09.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S004/S004R07.edf.event 83ec130ac6a664e0d88923e1496dc0806008967b51e6158521a6beb0515b2eb8 +S004/S004R05.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S004/S004R11.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S004/S004R13.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S004/S004R06.edf 2d9afd10462b0dc93c07e605ce6dd49ddf42c856c843a18a8236cbf08c9af7fa +S004/S004R03.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S004/S004R07.edf 
eb00bf6a816a6ded6f93e0b96d2a1c2ae8f6d13a59e39d475bf94d57fb842643 +S004/S004R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S004/S004R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S004/S004R08.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S004/S004R14.edf 06a3c88276f1db76214f8b1068878add45b22836327fe6b95d0231d415cce752 +S004/S004R04.edf c9ddf0294961f0b877192a2479802b9c6c88403682a17b5ad679fd6485aa6f59 +S004/S004R04.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S004/S004R02.edf 32134f3ce7056fd6487855552636b8f93ea2d67631ef1da6f6474eda6040162f +S004/S004R13.edf 136e362b79e95407025f6ff390631e301e75c996538435ca5e8fee80b4c08682 +S004/S004R06.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S004/S004R01.edf 23e9645f492488d2376ac3a606d177043efc6e5fb00281c2d6648e4b06a6ce62 +S004/S004R11.edf a8b96b32782329f12accdc222331545707be4cd497f97f158de4c06e87235f8c +S004/S004R08.edf 1ab5909b183413283a7ea76e35311a68a4688558f0044b9faab3987291cbdb92 +S004/S004R03.edf b24e3ae9d302935be06c0905d1e925ad68a94b4a5b20dc659cbc63bb8481d503 +S002/S002R11.edf 694bd9fbee1305dbc212ea4eecb8930750f5e08f8cc8ea45e2b94c92ac5f5a7d +S002/S002R14.edf 21e20c72ae3f52cc95f6fd6d4b5b958e28fc85bc0d3886f494de97a82c2aa24d +S002/S002R05.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S002/S002R07.edf.event 350042ca5c37cad9cabe8c469aead2bd074d996b13c69cafba8d24cd8fd825fb +S002/S002R05.edf 16ad84f17851599da5e199106b9b29086cc5793d78197a46b36efd49602b35e1 +S002/S002R11.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S002/S002R04.edf fcd37831378c411d50c223d97ffbc00949be2271d093f1d8e56bbe7c02bd1539 +S002/S002R09.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S002/S002R06.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S002/S002R04.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S002/S002R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S002/S002R13.edf 6c018440a9b52980fa40e4b9b9baeeaec7c94fc6d1c0b88c7fb32f1dae3a5a45 +S002/S002R03.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S002/S002R06.edf 08b07a8495a51ddca66a91fcc1275651f2d3e6b0a7a56711f06769b4ecbb8d53 +S002/S002R07.edf cdba64ad60574903248aed651d393c148df3c611eebdc9694717a04e2e2deef3 +S002/S002R13.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S002/S002R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S002/S002R12.edf a3d166b23375942a5ccf352924f7766f0ca9cfd1bac7951175e710d978f5239f +S002/S002R14.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S002/S002R02.edf 666a9299341ece77522df050b1ddd128179a540548d82642ba975a65b4f6d84e +S002/S002R08.edf 02d64941f6bcd1635bc7dd187a9553331b73933e9771f4e7c59249dfc5632c5a +S002/S002R10.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S002/S002R03.edf cbabe29620b19978454bc429f59976f6ee8f32f6392e4fcdf7e463981248072c +S002/S002R12.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S002/S002R10.edf 817961f28a7acfebc45c664f1a9e40dcf4a8e1e1e51dc089062d7e3e2cef44e9 +S002/S002R01.edf d542689b31c977838f20b1a2062865b98ecf10a7e9702f56f101000d47f2ec68 +S002/S002R09.edf bc439584841a5b637596485e1979c89d872a90f2270a363a039a31587954ec4b +S002/S002R08.edf.event 
d45b05b6c6897898bdd688c12fad776011d39250bd1bd8de7e7745d939b60e3e +S093/S093R08.edf.event e129d841c2e51546d5ad32cb6c2a8303e4c0bfcd90dc9e3e821c0c40a9a049bc +S093/S093R13.edf e9e7a6f1676cce8250555d109df575bf434049a2be4919faff324defe427fb54 +S093/S093R06.edf fe0a178f7fcfcb2f60d9981e7d5c57caf1cffbac74b73e248b1f4ddcda8cd07f +S093/S093R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S093/S093R06.edf.event 98da35a4768842863f8f05d63e959a36f12126a205e0370d8519299fc06e56ec +S093/S093R09.edf.event 3593f38db6b9b0b72284e6ea58a9169bb2459a37f75643fd634363b665a636d9 +S093/S093R05.edf 08c4b631fb815382a0ca5a3b76309308d589317888cc78d9254113cc71b9806c +S093/S093R01.edf 2d1bca83825b37f2186af5dbdb0c4d7c48d0a971903761a84c385d7dfeb3f54c +S093/S093R05.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S093/S093R02.edf 35ccae312fc9b7a87d3b485bd8cd0721c579d722a78e5e0802d2ba65f07a2e66 +S093/S093R07.edf.event 818acb17a86d3ee6425a512fab58b363d01b6ccc783417cdad466d1ecddc506f +S093/S093R11.edf d312b4f5b5faa38dbc90b0ce863a4dd8181f4a32fb535487fc049b2cc9a523a7 +S093/S093R10.edf 6f0f8189513bf06f9a0d7e694ad12b7efe948195f4b077a57706936b79dc6e6a +S093/S093R14.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S093/S093R07.edf 1c493c8dc14e1e1e01c02e0cbe585d1798be82e1503dfdcfe7066db93387157f +S093/S093R12.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S093/S093R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S093/S093R03.edf 20c0e4a51b3eac6047fe26e1f3aef29b4f74caf5e335db090f682f1f7610afb4 +S093/S093R13.edf.event 2d33f0e6820d6c85c1b096ad02de0f117c60e1202edb58aad42f3ec835f3162b +S093/S093R04.edf.event 2d33f0e6820d6c85c1b096ad02de0f117c60e1202edb58aad42f3ec835f3162b +S093/S093R10.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S093/S093R14.edf d061f438e28cebbadad78943f0d70cefc79b82b20ffc8d9421239ed5cf912aa5 +S093/S093R09.edf 5cc82efff1b3669dcd61697c00596eb850a7117b55817100ff100849888b5fb6 +S093/S093R12.edf 3fea038b8ee2551bfa67f17a6dd668a30e70817996102b7b30dd7f8d7451bd47 +S093/S093R04.edf cab05a2b51874a488cb968bb6c76afb883a7057e384c0af1b7a766e34f4a1de2 +S093/S093R08.edf 54d9a7c0997e29abaa6ffec4a48e32555fff894d83e778c5e974452665835d4d +S093/S093R03.edf.event 6adfbec29ec794c0e3c78a211cdc8485ffa3b00bd6e1c3dab9a7fe1bab88aa88 +S093/S093R11.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S006/S006R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S006/S006R09.edf cb3af5f176ea1f5be8e493d80f62eb374adf0a92259beabf254a6b423711cd64 +S006/S006R02.edf f114ee8be3445ff48c78422d4cc7b6b68ab83d79f06cb47634b1710796aeda4f +S006/S006R05.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S006/S006R01.edf bfe32b925fdd469c90ce6913f7409cf85e4492b40d36c77b83511592d6e4c647 +S006/S006R14.edf 4fb5946ed7a29268af9b1770db80d4c6a3dbeacab4be573b6da9c095b8a5e68b +S006/S006R11.edf ce5ef60e07d2b1db8492739786eec2bcc6bb9e85b05fa0c9f2851e3a0f3e9907 +S006/S006R06.edf fc5e1cc21df8b1de6a63fe1a063db27d14ac882d6105057c9d4f924365730a4d +S006/S006R09.edf.event 825d019e301e14c4cc2a396252dd43ba79dd75489c263fe4af3e18e46c5dab2d +S006/S006R07.edf 56b8c705b9c1406546ce90bee6373172f43ec8bb5437aa9772dc2512405e6a17 +S006/S006R13.edf d4ebe044bd1f71c2a653bd8ada69b343b79fe18d9002a24ba96917533ecabe87 +S006/S006R13.edf.event 537ad705e53a339bd1d130f3331df882a0416fb7e95c4f565d283142dcd120f4 +S006/S006R08.edf.event 
88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S006/S006R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S006/S006R12.edf 296b5cb14549098dc55ee047cc9ff63faccf480e432b94797ff73cc7209b7353 +S006/S006R06.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S006/S006R03.edf 1bedb6634f28af44942c14901aa82f420858bc367fd5830482b4e8cf7a28bbf1 +S006/S006R12.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S006/S006R14.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S006/S006R11.edf.event 69bc42ecdda8587b1f3dcaeb49f434eb51439670272d896577812c1a8c0b14aa +S006/S006R04.edf 4bd532bb728e1f2ed2afc0a5c162830a07465b6a4a58e105d1316c6ff1921f3b +S006/S006R10.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S006/S006R04.edf.event c6fd76cba5a912b92a280d7d54b9158146ba5522f81d9192baecc014a6c9149c +S006/S006R08.edf 006e55ec16590f5f5b9230ac53a2f8fd0660960fa7b4b512ff61f8f7e2f8714e +S006/S006R10.edf d4a1d432d4c703decce7f7ce0a6d7c05a030fd19f562fe2be52d4c581ca24365 +S006/S006R03.edf.event f500286a76884018ad149ef34cc8871332593723b30ad3d79f8f39236a5cc25f +S006/S006R07.edf.event 190f359cc14939d921985886ad1c9081e5e2059b38ae9d130845e8dad044d790 +S006/S006R05.edf 2b793afa3378f56fb546da8265f3efad685d1201b26bc35ea5b0f660a15d87aa +S096/S096R12.edf a55f541160ec1f29086e8ecc7e54ee60fd5d0ec9edbd40313c30bd4c7695c81d +S096/S096R14.edf ad6c2bb5130d2c4379d08069f45be5bd6e16e2cf52d3d46c480a211ad4fc0c0c +S096/S096R02.edf 2a8c6c0c88ba142d17c19c902dce4befd1a2cc497bd43e112c14bfeee792ee18 +S096/S096R11.edf f4893d08eeb086de8eb889dd3d36ec3fe8ffbc56c93ade7a08ebf1af6c731877 +S096/S096R07.edf 1d4575fea624049f78bd4139006308fb88cea1da31e51690f0aef14c5063cb06 +S096/S096R04.edf c39a9977ef6b916ec4183e678a0eedb12695ce9084e2a43fade39a6038cc907f +S096/S096R13.edf ea7994757aa12872aac2fa8bc6aee5b48fae69f74d69e9c060c3f95bed4fb98a +S096/S096R04.edf.event 3d40baf6066941639ef493e2ee821cfa6bd1a236dff48c0659131c0e88fef481 +S096/S096R03.edf d84c57263849e5980aaa0ce9c4b40590ffa912cc45ddb9231de7f6336b8af234 +S096/S096R12.edf.event 37cc966752b10d3890cb16ee78927420f7facf9e9edaf86af928442f1f9df3b4 +S096/S096R14.edf.event 94d1d7007146c80e4f45e873986a770b4b807a34cfeca23f6a7d9054865154c1 +S096/S096R08.edf 6dfcfa5182fc32ca2aa8bf74722f43cd992b2bce64cb7fc15a4ba13a9533fe10 +S096/S096R03.edf.event cfdc7957f5c28cd1f437cdb649750034f1340084be6ccc484dad902fff9127ed +S096/S096R05.edf.event 1d29aa2dd90032d41f7c1c6386db9b2b26b7c29b87234d56e63f65c958acaa3c +S096/S096R09.edf 77c04af955fa1af9dcacfe325d01bb99da92f48d30a041f68a6f8e9ae3e2358b +S096/S096R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S096/S096R05.edf fa817e1acdefa929755a9e0e5b8dfcf85ebd08369ab5d9be92287ecaf172827b +S096/S096R09.edf.event 37784e11407ce31ee513275216bd40c96a4580f080500f2331a58f58a3ca826b +S096/S096R11.edf.event 2e361d8b420d6db7c1d4372f2f615a9f9290ab6956917406863d12a1fdec8f5f +S096/S096R06.edf f69902e69dc0894d9bbb2aa64a154571cd60d867c885578a35bb312ef6f9b831 +S096/S096R10.edf b5e8594fb191adf76d6f29a1607cde6435b25616b0fab99bc18f24adedb17dc3 +S096/S096R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S096/S096R01.edf 005e6ffa9938042c262035797bcbab224dd6039236872416dbe0ec42cac91e49 +S096/S096R06.edf.event 14a453fc6552b2a6d7b55329ca3533853a89a83368101bee6b522e4a67d37283 +S096/S096R10.edf.event f345df3e4e6ca2a51df43688b6036dc5af9a89117b2005dd4a45a7e3886d6bee +S096/S096R08.edf.event 
7bc37a046920a5e8c90ae816bf99bac5ca23a1a0ad7d98c1d3c2c5d698871986 +S096/S096R07.edf.event 3c11674692ca50df8414eed1ec2d7308aec7f8ee62785db4f654ee632e18d29d +S096/S096R13.edf.event 405867bf01702626e40e74e21dcc164b7e87d59de89a4e6fd2c4ce561f2e6c1f +S088/S088R12.edf 930e7f40798227e75d28c35e9d79b6c58c78d742daa1daad95f967f633ee02b3 +S088/S088R12.edf.event ccb7718e9ad0d8edd7de8d12553d98a89bcb191c436a6067b3e688a579d4abc9 +S088/S088R09.edf 06f8ec3746f7fe6f2fd6e80465bc8d94471cedd91bbc7f9ac0f1b8dcc00fe65c +S088/S088R05.edf cecedd6c8a8f4320a038a2b32034acbe54bdfbc9f2ec8ec5e04f00eae4e15ece +S088/S088R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S088/S088R04.edf ce6b97a3e00de1b844694fc889ec0ac568fb5c0129130d8e701761fdf0e2711c +S088/S088R08.edf cc57c5b3d363247feacb04b4ef37a30b930ef7a202449874f3773a24af5332f2 +S088/S088R04.edf.event 01f74ce2c10cdf9ed7fe03895dd7f42bb5a5c5b9b847f0f0d5143ef0f1d2211b +S088/S088R13.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S088/S088R06.edf e4bb1819e8c496f228e80d60335964c5ab8e4e1fd1084bc1238ad573aecf748c +S088/S088R05.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S088/S088R07.edf e7eed7f6d17a3d7e11ef52de0237ba2d93508990406cfc5fba40c91347b366b6 +S088/S088R13.edf 291d09d8e7c444f9dfa93db24cf69f7c64f6db817c6c7a717871d98edd7f8549 +S088/S088R11.edf a7fa83aedd07dacc07408fa53026d4626aa9680968946930a6f369bfdada8778 +S088/S088R10.edf 1c49b9ba7cc30f59443c4292aeaf85dfe81f37e09e4e24db3cf66c7e45a01d3b +S088/S088R03.edf 95c2618922b253bb9faf4a565dc64bbd3b213347764aaf470661b5ea3755c241 +S088/S088R01.edf d7ec3aea4cb49618ec70a1c223c578afac19338e4142807ce9128e78aea49618 +S088/S088R06.edf.event 01f74ce2c10cdf9ed7fe03895dd7f42bb5a5c5b9b847f0f0d5143ef0f1d2211b +S088/S088R08.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R02.edf 52a17441943433ca8965ce7a21262166ab3ac6e8609e5bed3c39e2ca12cc049c +S088/S088R14.edf.event e88110dc871719db682717f3a256188ebe916d76e5a1a09b0cea908778ae4424 +S088/S088R07.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S088/S088R14.edf 8a912f35e39b002fe61206bb8661bd783a7b7ba23338f00b8e14ffa746938d0e +S088/S088R09.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R11.edf.event 82e7ace9f055649957ab04324ffce9101db68bcfcceac1df6786e304f9de8669 +S088/S088R10.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S088/S088R03.edf.event a27a31471bf1b8bc6ff9b144eb110e9eb0711b24514aaf47ed1cd94395c10b7e +S029/S029R14.edf de0767217b8a403b9670e09ae75d301bfca1e31c4c08cef8678091a038ed9fc2 +S029/S029R03.edf 8e65ac9c88f3dc61adafbdc6809c1bcb9a46bc9ec911239fc8fd7726416e2b5b +S029/S029R07.edf.event 5207e825d15ed81ce802718b5134141adfbb78f1e5f1ca62f6083c6fc9cdefb9 +S029/S029R05.edf a6b3b806a7e4f79ad706ea4e65aeb1109b37730a87919d46460aa8682433eaf4 +S029/S029R09.edf d3acd3558084062eab5a28643879f51b03b8b6377e1cb31d8d45a93517342262 +S029/S029R11.edf 7c6f7cce142ba49222a0f4dc65f0094a7f1a0b8690ff043b834f043cc4286408 +S029/S029R04.edf 4674f7e44919af804dfd99a38004f6b9ae62b21eacd61ca389df55aa18a00f01 +S029/S029R13.edf.event 5d2f63f6f7525c2a049a2a72e3cce5df4e2f537e88220d558ea3f39afc316bb5 +S029/S029R12.edf.event 8d1471f501a31820d94d776160244282b4f4d8c8faec4fcedc3041360481c5c0 +S029/S029R01.edf 48641d0db6e810c96273d8c26d52510132ae503badf74471558b7c12306265b2 +S029/S029R08.edf.event 
e79dd10c21416b18d7df92203956af2ed5ee19efd50147912adc49744cc31c00 +S029/S029R08.edf a77d299e99458745c7010d023698208f6b0d56cd5f2a74628688d08a600df781 +S029/S029R06.edf.event 32d42684dec82d19ed241af99bffa1b231a1379487eeab2ca6ecf4e960d72495 +S029/S029R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S029/S029R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S029/S029R04.edf.event 8968db92630d7873955e9e9f9858bc44efed375ca81e0e18966b9e0e060b589c +S029/S029R03.edf.event 064ef0b07a868c81ecd78eaf44c28915f919810c3799f26c5fa5040d9de0a71f +S029/S029R11.edf.event 321c4a867784fc95b428c11912af5a17c303ae8df6cd8f6e4124a31ccbfea8e9 +S029/S029R05.edf.event 9ae43452d01b3a55cbcf82776804cae07420bf106c0e5d7010ce1d987637c83e +S029/S029R13.edf 5c1e9299ec2f5c58455b4f9c42ca729dd5013c898ffcd4b33e9114b7d030509c +S029/S029R09.edf.event 6ca3ff62b8d0191c28d6529edb07636e822a980b4ba1b463fd26b1ad8a09c5d8 +S029/S029R10.edf 86340bf35f707ee4194c4440a5bbdc1938b885f2ae0bb425cd3f37a82d352133 +S029/S029R10.edf.event cb58aa4b6d937e71ea9ca4f60e2e4c78b06334d7bd11091e5a5927fa712064b1 +S029/S029R12.edf 9d01ce9be2230b87a47050e0fcdbbfde4ff10144b9466fd286b618e59a81473a +S029/S029R06.edf 0ca802caa9950b5277730fad8b5e8afb0363b0df99dfebe2a7c8a0229e6d2760 +S029/S029R07.edf b99f383631ea61de2fa1b36d28c490a72009bf5bb192294fc1ebaaea254b070c +S029/S029R02.edf 7eff9783f20f4d3521ae877f26d7b2c87c609a920e16d12e5479072c2cb65671 +S029/S029R14.edf.event 5a42bfc69bfc027aed4436c7be8edf69672b70d1ad5a4354e11a92ebe527fe53 +S047/S047R12.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S047/S047R04.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S047/S047R07.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S047/S047R06.edf.event 1a93c91ef79032df8664905730df14e57174a6aab494378ea976c6845e662faa +S047/S047R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S047/S047R10.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S047/S047R11.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S047/S047R10.edf 65fa985cd59fb7c1d5e2cebea93e23608c59ebce4e4e7f69e4cc23542290dda8 +S047/S047R09.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S047/S047R13.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S047/S047R01.edf e9e5061be6fea706fdbf03bbc9d0d8de48476d036553a06ff0117c98b60303c2 +S047/S047R03.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S047/S047R08.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S047/S047R12.edf d457e75b37b82724af1b7024194e5c4858b31402b01f8b2691bc9d4bd6ea112c +S047/S047R11.edf 15b68b4f9959a4239152f3e33f0e9f526cc8e8a5d20195f0910724c3f24170dc +S047/S047R06.edf 49d91dcc440c3579b89a557d2aadf39c3fd80f1ae219e91c6d4fc8c36c880562 +S047/S047R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S047/S047R04.edf 67a0ab9dbf9e0164fbe16036ec578b89056fdfe8efdb1eb4aa441502a23b5a5a +S047/S047R14.edf 225c6e69e9f83c33c7529bc91bbd5a446df75589abb2c9d67b34f6b819561720 +S047/S047R07.edf 19b9ec59b71b23bc0228076fd0250cc3f942a14c39bf8abf3ea756e3727b81d0 +S047/S047R02.edf bf7069f688dd19df47c0f9f9b0fcdd6c01fba48a170d5c0dae8551abec383388 +S047/S047R05.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S047/S047R14.edf.event 5b8eb2e2184e2816cb3f7104149139af83c25f381a47b42e81c9c6aa1723fb5a +S047/S047R13.edf 
4c8e71039e5368b6e6a9acb68b81a4f3bc3c9bb5911d925f0064eceec6b92a9a +S047/S047R05.edf 04dd463e35a6e7099508e49eb2a92dc4cd9bd2b893f194bf332a705232d2b8e6 +S047/S047R08.edf 4de761cc5ffcb89ae9a1d4e206a0397742e7a6e34e458bdf8e50883a3f7f6328 +S047/S047R03.edf 9f43c92bd5cbbc1eaea5b1decf8e66b8629874ed3b81055551b7a59574dfe28e +S047/S047R09.edf 988d7b5518596909926ef6038fd4d6dea34310e874e3b681331bd7a5c8de2f86 +S070/S070R07.edf.event ed0acb0c9635a838cea852cecffa253bbee6e084a3404ea9ac9900188570c878 +S070/S070R06.edf 94aaeafdd41b40b187a9517590531be5559cd86d21bb729bf797d32d4de584d1 +S070/S070R04.edf ac0f01d6df57bd65645c2c6a5b6685d38b11727bf41927fdca90ef7e2d601482 +S070/S070R01.edf 5dbb88fd5e39ce0aae257321765c2b9fd67ee9248993c55fdcccf79605442f23 +S070/S070R02.edf 26e7f3e389e357fe2070157a1d4fa0f504cb8e9cefbfcbb2f1cc5997e71a0e7c +S070/S070R10.edf a0235c84275a4066a7905b0b3322d604213c00c803d59ed9b55b912d9cdd89b6 +S070/S070R08.edf 8c3737b379124d86fe35ccbe37164773c97926d6c7731007dbf54722848e685f +S070/S070R07.edf cd366575453680fb6b0af26f93bd220b17c5da1d66ef4a594a7c489c0f1d6907 +S070/S070R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S070/S070R04.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S070/S070R08.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S070/S070R05.edf d28e20d96170e384b92c71cd7a1ea6393e7f28f779eaea3c7fb7142f5d90dc83 +S070/S070R09.edf.event 72f39ff0119f687cb1efa8861280ba991bf463a2eeac59557c3bb3ea57e09aab +S070/S070R12.edf.event 951ec5064433ae4ed0e83f00905ef394c5250b33e1122f41b10ea8917d13afd7 +S070/S070R05.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S070/S070R14.edf 3661f01ba04678982e2efdd3823c1ac1f9b3bc254b1fbc7982b28c5a7ddeeb4d +S070/S070R11.edf.event 190f359cc14939d921985886ad1c9081e5e2059b38ae9d130845e8dad044d790 +S070/S070R13.edf f724ddf4f4037b8e7beef57d55a76e7641fb9ec8bf98bd0a50ff61e2d9433129 +S070/S070R09.edf 7c69a40d9f5aa95bbe259e4cfd8a2664934a1719cc66a6289fae7d1fddf2aa2f +S070/S070R10.edf.event 8aa5136ec7d40284d8eb37993d435226d73bc8621186eae0d173bf0e65165054 +S070/S070R13.edf.event 1f21e6a28cc1b59fdc3667207c5b1029902f6d0018bb978cecbb5905b868cb10 +S070/S070R03.edf.event 5bca2030ba3ea66c594c376bfc5a701d3f25c2f85ea7d72b8abb2b2bbc5644ba +S070/S070R14.edf.event b50d31cc4a2ec520a336774ea70761d08ebeef4930f053a00ed66803060bddef +S070/S070R06.edf.event fb76d885a6c380c62ce2da054f5ec78c9f0178752694a36e4b78183ec8850830 +S070/S070R11.edf d54f1b715e23bb3a48a6f904ae2eb50d109447fe4c483eadffe7e965339c34de +S070/S070R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S070/S070R12.edf 9519f622bb5de5cd8f24eb6a765888a78a28bd17446fc9a41ec13ba84b75a0db +S070/S070R03.edf 9b04b49209c33999d70119b985f0ed5ea5dbe3fd4363dde555e742872ed7f5c5 +S072/S072R08.edf.event 7234785f889c4885f0b35192d9919da14670dae4f01dc951f9099ca59d5c7bd4 +S072/S072R06.edf 1b615047f9d42fd7e0da4f649348ebf2b1d929c6b81852898189dfbaeac47901 +S072/S072R05.edf 135ea7898b7c7454535a4be865cb286b6196f20f2dbbd4ae7fc8a7919e86d44d +S072/S072R04.edf 854bc325b4913798858d5ad099c953c6a018cbbb73a1dbf465882ea2f965d5f9 +S072/S072R08.edf 94bfc11c4720ca4a52dd683f74dba027c47a3e00a634ec15c816f7ff3801ba8f +S072/S072R10.edf.event daacde82696d6cf6075cf81c698fad407304dd3c18071f700a24850cf39d5427 +S072/S072R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S072/S072R09.edf 5d1653c82bbea3fa795dfd7f97e535bdbf17587bde003ba45cc5ea4101f965cb +S072/S072R12.edf.event 
10b6367f469a7259d23674e3f8fc88a24dbdb41460385095daa9a795a3b47010 +S072/S072R12.edf 8d4b7877af0a9f04c281ce539c65085ab41d21cdb97b28282f55ab26f5dd6afc +S072/S072R10.edf 29700aafb0ceb72983447355ff62479f7ac371db4fbee7c35e2b9427412f647a +S072/S072R11.edf.event 2c7eac05570280238d6e75f11c28223cf3c6d9c9bbc4a888e05c2b99df0d8b15 +S072/S072R13.edf.event 90ddccb5a9607fc09dfb6e835c770a03f7ef1f0e65eef2306953bf4894302136 +S072/S072R14.edf 45cf02bea1f4db899aec41ab64a7eae44b4d6e2fc2fbac0109400e84706a6602 +S072/S072R09.edf.event 357bcc1ef517dab5a2c819b99716cb28db0a13240c332e1c37196b27021825a9 +S072/S072R07.edf ad44b64c22a62b119afba8ca57a9dd1e632da8e422abd2dbfe92b9088df96250 +S072/S072R03.edf 65e1b35a0098d9eb6a64f618ff8b106662dc14a6624a392e7c5bcbf675a843fa +S072/S072R07.edf.event 880bc36e6f5d82ca838cf6169f989c0e959e8c2d5970a205282cc1e8b8a7c068 +S072/S072R13.edf 84a5a50facd102fbebd09f7dff4ec4459beeaa0ac2c79d16a165de0aa37fd215 +S072/S072R04.edf.event ebbd9bc0cd84bebd02e71f43989cfd362624adbb968d778a6e759a70c66956a0 +S072/S072R06.edf.event 352673445bcc538539b47f676cd9701d31de8178217f7cf903d3b878f64b294b +S072/S072R14.edf.event b4317c81c19626393ab603e338ee0d39f9b6b23f4bcd2dd8ada827ee5a14093a +S072/S072R03.edf.event bb4d81b1643558abae8dda4829a909d44c1d41b95f317fd8470886f9c19a27da +S072/S072R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S072/S072R02.edf 4d9044d0e5c9c0d71becc31e5f5c1eb5739303905c4523566bf63de7497ff5a4 +S072/S072R01.edf 7742f6c605fd73b04ca4ddb7dbc1db4a11cae8497107d3720cae64bcf1fa32a3 +S072/S072R05.edf.event 76db7f6afb0264b534d087d90c2136b0f5e75f69d744c33be087337e64d4a360 +S072/S072R11.edf 3a2d6e7ba690f2ca53a716cbb6eccbb6bd47d1ad1021a819a60c1e9f68333b45 +S017/S017R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S017/S017R10.edf 1cb81c247ff4d726beca43e0bd37d6ff1aacf0d0c63068c6d389af327a86484b +S017/S017R09.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S017/S017R12.edf.event 3ea2fd67495fde74ca6a29f7b4f660c7de2dc5720c3a99a762e365ca4d09ab34 +S017/S017R11.edf.event f1d83aaf535b3be7098ef9960def69c712f4dadb4f334e40434e155e9088b299 +S017/S017R10.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S017/S017R13.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S017/S017R05.edf 76f7e14a4d6a3fe0914722ba2ffdfd8e67f3b878b530872e1d6c1d6fd0840217 +S017/S017R08.edf 902e30ad95dd3786fa3d3e0532743edf049850536cf773974fb9fbe3f4475da5 +S017/S017R13.edf 11045212225733576e7ca2a908205f0c9780e25d3db13ba9c7b3de8f990aa076 +S017/S017R01.edf 051e38a4601ef46f4e5be488f06e3a6c0cf53263d6166075dd94cbddec38624c +S017/S017R03.edf 19911c4a9a693eaed4addcee0ffd78589dc3fe35724079081079ad29e56c911b +S017/S017R09.edf ecb7b472281ac86ff5ca02240d785c0fe94e910f4983c3beaba7998caaadff46 +S017/S017R14.edf f95cb81454f050fc58c4d2bcb7698526e5a6cef17b157e601ec1d4abb96eb3ea +S017/S017R06.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S017/S017R06.edf 0e32de926757e1a493c3df6d85955c30e0b45b0caa7f1ddac2a4cb483257891e +S017/S017R04.edf 9d13376473bae5b868c07fefde7d4ec465573b393a4831b0142a26812e5b6bf7 +S017/S017R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S017/S017R05.edf.event b0c7884218a114ab4fc2b8cb09b2c8f1bd0ddbaf69aac65191618c0a230f65d3 +S017/S017R04.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S017/S017R08.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S017/S017R07.edf 
850cbc48702c9a180b6fc2408762554fb21d1ad9a6e57fcc33963055de46b517 +S017/S017R02.edf b0a760d49747671b62f7ec39cc147cdf4a3f1a6a7e2f898cddc4f5a0c8b73c5b +S017/S017R07.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S017/S017R14.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S017/S017R11.edf 864a47610ad9fe92a8dfe0b667ef81aff31f65ce988f608642572c4a7d7ee859 +S017/S017R03.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S017/S017R12.edf ffb8ee6f01d5b12337052ee1eb9e1caccade52937b6999d34925e90634d5b82e +S103/S103R09.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S103/S103R06.edf 1f716bfca1542c8a270fa5ec298ac2bc5d2fb47590d2d9ef1209c3ba2cde95bc +S103/S103R10.edf ecc2b6bf5823c8aee52aeb1093804a42139993110ad49cf584a78e87b6c63c93 +S103/S103R13.edf 5ddb5493a032511dc890488bc246ed22260b40d55633b8d26a2005e989100e08 +S103/S103R04.edf.event 8a76f461c0b920a909de21383cb5135f496cf1aa992529755f784ebf12db55c1 +S103/S103R09.edf 4ad22209206ad0dac641f659b404ef1895c6446ea11a82f387d0442fd92cd76a +S103/S103R08.edf 9a594869f6cfff74f5d448abd2268870fa98c57381bdfb3eeb7bf7372daabf2d +S103/S103R03.edf.event f8a465b9ebddc2704252299afc352d87e33c523fa8f80ca82a96fe0b268727cd +S103/S103R12.edf.event a4198dd583f6157a8688c48cddf8a62d6829703a2a057ea11e115214c9cd151a +S103/S103R07.edf.event 0e11701069314a60a51b64f11d88913bbbeadf584e9322f073cea02c7d91cbeb +S103/S103R13.edf.event 8496271f8f27f048601fbb18304e61a0950c33536381baa6e6d211de6443cc67 +S103/S103R01.edf 48dbfa8c73308311e3f7c7137b25cbf4a060775c1a460fb8496c50723b8fb231 +S103/S103R11.edf 0d6426287419f30577f49cd759d77dfb2720c84007f5942b0bc9e0f12367323f +S103/S103R02.edf 62217247b15b987831cddf456d3e45904d06d470926e6df2e4ae8e4c23b1b08e +S103/S103R05.edf.event 5ac7e9c852d23361d66baa9d38cfacc10651dcd9582bc391cc58f08094c06941 +S103/S103R05.edf 65a86688d81296f935a4825fd74a189587f02300b66654ba0a837d294c94ca70 +S103/S103R10.edf.event 14bdd1b94d8b8ccdce55e581601a4a304c1444e030aee15596722a70be24c5c4 +S103/S103R14.edf 0b97a59662e0f0cd76ee688f4080ccf71a93bb5db06d0dadc82c970c4aa62f92 +S103/S103R11.edf.event 9688994b1285c6e0afb366716a7cfc380bd4340c8701275dc9340f0713deee95 +S103/S103R03.edf a4d0c0209751833078eeb3d953e161d0fcaa02c4af5e93e93094d2add8106b65 +S103/S103R12.edf 96ea0eb56fb4470b1be09516ccd0561b857748abed9c6afd5302b24c32430aca +S103/S103R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S103/S103R14.edf.event b77225fdf7eecf463ffe5af19336030526137aef78111785b6605cdf4c15c95b +S103/S103R08.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S103/S103R07.edf e40a112d21a463232a84e08c88496fdec8e7cf2e12fde7654c891ded54640cc8 +S103/S103R04.edf a4c707cbd94c08bb37718f9c7d03c7259389e4e43e9418f3887de35acfb1125f +S103/S103R06.edf.event 43583e190556326d49c6693ecacd19aa4b29002fcaba93157b7803fca2f71842 +S103/S103R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R08.edf.event dbd0435f98476653a27f53b54a6757c6e4596f6d9318a14067e4dd50bb37a888 +S061/S061R03.edf 27558de88191bea04c87f3f079f778217fcd016362f350e8a2c0eac6a182dc67 +S061/S061R13.edf 194e5626af80b437a223c8f28bb908647d7cef050cde4aee91224dbe3e26bee7 +S061/S061R02.edf e6c090a982e016f10248e16123afd230f8725c5a8bfd56774cb659e94efc3783 +S061/S061R13.edf.event 405867bf01702626e40e74e21dcc164b7e87d59de89a4e6fd2c4ce561f2e6c1f +S061/S061R11.edf 20a555d403226f3ab61a341b768a32f4230d0613b7b7a2edb616bbff67e9f39f +S061/S061R01.edf 
b0728cf10eb1885b0d5cbb9fadc43dc1625dcd892b043f05f7a55a375ad1a3eb +S061/S061R08.edf 9cd27a3563ba90dd3ca373b0da2be78b64c7e4858cbc9954cf4f503f39809aac +S061/S061R12.edf e16007dff642ce2bd6275e806525a0d66d0829f39b6b7e7a8fe18cb3fcea498a +S061/S061R14.edf 5a6711078c8d3114c4708f012b4ad5a9f60f615ea9fd1e9874f348e5ee4d6926 +S061/S061R10.edf 9502b545c2e86b5403dcfcb98c32f7738c6918d9e21c893b61ee0f5f12a0773a +S061/S061R03.edf.event 7fbc239687b3b3100e651edd3c5804afc9493f15f29a15a7c7c523f9f42b5b1b +S061/S061R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R05.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S061/S061R11.edf.event 109d77619d968e04ce3d80665c461d8412a594700916e31d523f31604e109b24 +S061/S061R06.edf.event e697ba3ef839244357e1c111d7dfa9afb60310a0bd8b7935ea5a426a1711194c +S061/S061R05.edf a559920a213923175fd8d90bbeb55840ddbc0851e4cdf661476b19f1c5d8fa31 +S061/S061R07.edf b7a91ef5bdde5cdc5255ad9d8482d8b6f44d89a402d67041e05441dcdf37fd71 +S061/S061R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R07.edf.event 3a98d78e3ff8d947a073981e739716fc7bd37c89605bf37bf0addaa16779530a +S061/S061R14.edf.event c6742adf7ce83d034f6e3cecede733919ea0d2fe7854dc4a86c12cf32c5c5aa2 +S061/S061R04.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S061/S061R12.edf.event 4b81a244be0ef71bc8b32a5a151ca3746b282903e6c7a28b644879e8bae159b9 +S061/S061R09.edf 93ab6309318f1c2f9866e4b158badd8d1eca684ce936eb8d8d8b5ab80ecf8b33 +S061/S061R09.edf.event 82f0f5ea19a20ed8cc8994d29456053772eff3af83ffacc51907fa043cc0ef48 +S061/S061R10.edf.event 22a6b841d94ebe84eeaabd93fb3e0f00da65ddf3bc8de6d5a79394e3a1394567 +S061/S061R04.edf 788c7f7d524dc1e483e49e81f36920a65c464ae54dece6c96ce48b7d168a22ae +S061/S061R06.edf 24dbe6c1d0465c3bd447413a5b28920ea0dd241c05762fef4f19b5bae3a7cc78 +S057/S057R07.edf b46f328990c5607512f28a229769746f0322d8b50e9bc76eb2499abccd0aae27 +S057/S057R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S057/S057R10.edf c7c581aa6f0b7d21e348034fc4f22c3ce66ca81533a3b30252c6b8be10f0d5e8 +S057/S057R06.edf.event d704e59168ad57b1fa2751e18c013c84b7169c08d874e72c7388b74c93006559 +S057/S057R09.edf b6e2cfa63bf3ee3815fb384bbab18aff1c34254e30c1b0d0d4778f5bbec2e40e +S057/S057R07.edf.event 018a676bc733cbc27dffcbbf3767758e526d1ae08a5dbd0785d28e267132a3aa +S057/S057R12.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S057/S057R13.edf.event 0513fd04977ef5a66b77e72c59699e4e54ff57c226456d0796b1c58c38fb4d59 +S057/S057R14.edf.event cef39c5c7fed8cace25154d7385aa5c8666f28db51ba7c7224ce1650f9388915 +S057/S057R13.edf fdbbaa81ee07af8968795bd8fcdd52ea0a2b74ff686ab03666db07757e0b63d1 +S057/S057R12.edf a893824591d10cd67a04b1273ed436bff4522e14c1d6b6745f2e75348267d7b1 +S057/S057R05.edf 3c6aa834c303ebe0e89e012a0cb7b541c020dc40d028d54dba51ba16c3537998 +S057/S057R01.edf 0b5b499f95a86f7c04d63cef9489f04a81a335eac0c80ec8897eace504c0205a +S057/S057R08.edf.event e96a6caea4f9685ebdd4b9c4f79dee271ab2ca8fa30cd6fcc49d330a6a0c770c +S057/S057R04.edf 53d8515a722509864bd45b148d02730770f540ef56f44cb1e161fdec6978a0a3 +S057/S057R02.edf e38e0435c54470b41f2c2fde3685d58dddaa127e989474eb30955a5d14009969 +S057/S057R03.edf.event 211c6aa0a079d9cba1e8d336802ff8e126829175da37deb01fb3234c2fd81d9d +S057/S057R11.edf.event 6a0785ec7d7cd8c80087db9605dd320790daf7db8cdc6c9fbe68fad0470ed604 +S057/S057R11.edf f0195299b3289d582c4aa4fe027fb937a1c2339def7afabccd065887e8039220 +S057/S057R09.edf.event 
dea4ebd6eb695d78f9821d3bfa3d4a325dfc5d7ff6f514c6a3f205dfee689e4a +S057/S057R10.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S057/S057R03.edf c14f828bbab8801708392702b0af3536aa23ce706a18b212ff0599a566375876 +S057/S057R14.edf 0994c41b6b5b114b14d85e8af5c4a4cd9d54cee8dacfb780ec922d8af2c59b25 +S057/S057R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S057/S057R08.edf bb2842570aa4a89021f710397914cd810dac010cac428b75173adcc2fbf30a28 +S057/S057R04.edf.event 837f0145b9da4dcc73e14962769b9c68f3eebad462eebe9d8796bc8b099af925 +S057/S057R05.edf.event 1442094c7e4c8cf80c2b917010d3a5835f895b48a64150371ef6f1f36d5eb9dc +S057/S057R06.edf 4f03601e2259a409efad3ade660609c937960ef4dfa564c70deab294f5700c9b +S083/S083R07.edf.event 1db920b489aacf8fb0632d9e919efb2b41c903f8721a96cda16479fdb668ec36 +S083/S083R03.edf 05e2dd9755eba090bd9445c850a7d7d3ea85f1945f36b84787ee434f25ba9c80 +S083/S083R09.edf 0e3ec56ae46f1497d6b427940d6648257beccbf16c779903eb8f51ba66de16ea +S083/S083R10.edf 8772357764f7bb97617443ab303c47d851e0a1f0ba30c9f485507aaa0e0b48fd +S083/S083R12.edf.event a79f747a01fa0ee3d769d3c8e6d6c47bfd1e464df2bb7748eb057537007ded6c +S083/S083R05.edf eed3d080081210253834894b06568f427a996c40e3e11ee28a7acd7133684050 +S083/S083R05.edf.event c81d77b099878d1d392e93aa7a18a46b936b690bad605aa84a652b2bd9cbff1d +S083/S083R13.edf 02a1be3f1868f8f70c83bf073259c01185cbd2762bf96f488d0d140843ddc5e9 +S083/S083R14.edf.event 15eac883e797e576d72c57d60ca80b477563711c2f4f8dd16cadc5a529d40f03 +S083/S083R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S083/S083R04.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S083/S083R10.edf.event 3d40baf6066941639ef493e2ee821cfa6bd1a236dff48c0659131c0e88fef481 +S083/S083R07.edf 30da924b540a596f91e0964cc993821f29f3ec4ddea82aea78fd60706c6ac32f +S083/S083R11.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S083/S083R08.edf.event a8e304ecabaf8d9f1d4997f8bb05c9613cb7743d7dc73f6af7fb82cea10f7792 +S083/S083R01.edf 5f6e11cd2acd6244310fefb52a9754242e5fd67e9d1e9fbc148c40ba6c660815 +S083/S083R08.edf cb1c0b233d3a0e618c4ef16f80cc5c2cfd400162aed06847c6e24b1d6cd2add6 +S083/S083R11.edf 2fbf5cfee173730d5a816355c31442044ed73fb20683f376ed90115786f0f21b +S083/S083R13.edf.event 8e32d162c32c431dc9eedfd3b87e45cd3f4466027dc3ebde41c1840612c6f52a +S083/S083R09.edf.event cf74c26a450e66b1953f1cafaa4b5e1beed2c2e16627f1466e08f1d0e4ca0653 +S083/S083R06.edf.event e8e41113aee5fb4f165be2a6037f6a4368faa2c5f90a63ec9b60f4466a2ba6f7 +S083/S083R06.edf 4a715e03ec3c310eb02d9bf03b2e16f65cf3a892a033956fbd60210c202fe751 +S083/S083R03.edf.event 7bc37a046920a5e8c90ae816bf99bac5ca23a1a0ad7d98c1d3c2c5d698871986 +S083/S083R12.edf 7fa028930f6c461c054e1d22e6026f954688b99817232e88f13892c4d7e2dd91 +S083/S083R04.edf 8e28c29d9d6203a35237b30878a1678b677e762561ee6f55854661284d9b3a21 +S083/S083R14.edf b94a229fa28185fc61737b30c0df46a3b40712d6f16f10bbfa369789b9010cd7 +S083/S083R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S083/S083R02.edf 27108699388b469a3d7c75e75d9156b168715f9cfec764c11ea45d8b188dab1d +S019/S019R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S019/S019R11.edf 0bbc66e7c1fbf22e6e8f43aadc075508d94ad0567809f0257d7b65d2b7c9ac8f +S019/S019R10.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S019/S019R09.edf 9640606d791c5eeb183b385ae2da0c187cce28b7491378fe9624c777cf063b47 +S019/S019R12.edf 
a3c4942a316f2cc431aaa358288b3c8f55fffa7b832ebca26705f68f007983ea +S019/S019R14.edf f1c4268a5d230b83417494012193d3511dd8d4de5930a9ebbb03ad2e26a75dd2 +S019/S019R08.edf ae16abeb147d185bf9bd6e1ceff37d95f73fa2b3d03561225a35447d72cc8020 +S019/S019R07.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S019/S019R04.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S019/S019R05.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S019/S019R03.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S019/S019R08.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S019/S019R02.edf 0e82a0cc44fd65c15b9cfaac7143a9915c1712d6286feb81396911e7d4c06925 +S019/S019R11.edf.event e4d125d65ed410f2e37eb6c7f7075c585662cb4e5931bd8436cbd1b59c474f77 +S019/S019R09.edf.event 0c671e4e4ff7b21e4f75cb8796305c57d6ee3fc48e74337e26c3b9f5d49408ee +S019/S019R06.edf 1b0ad7d85228488fe95410bcb88108d41cd8bc2f9b078cf8c0641ed1e8656f42 +S019/S019R04.edf 97eda5477ba1fa369a3718b332a0d5ce6a71e37b5153d6472d65b430103a3d06 +S019/S019R12.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S019/S019R10.edf 809a866319f3dca5bc549d4ed8bb36a891ba270d4aee23950cb5e85c2819fdbe +S019/S019R07.edf 0a97151bc1c64abd2d943c3de22f64d769cdef62ebc5ba93a502b50f82a26f2f +S019/S019R13.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S019/S019R14.edf.event a190f5e706b1ba961aeaf8f8f44e40328c3825ee96b16903b64992c144c93a60 +S019/S019R13.edf 661f5140802887726cee2dd2f53657bc61025610686ba187796a67c58c691be9 +S019/S019R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S019/S019R05.edf ca4f301dbdf4a3a6d2a92e747938e249a0b14636ced35d1049c98165090fb233 +S019/S019R01.edf 2c542b11398dfe863ffdfa3e94aa8ee3fe6414b5ca384c7a242b018dd9e9df12 +S019/S019R06.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S019/S019R03.edf 27c074fa068c8e6c8b38479d83f99ae810ca06f2d60ed76339f30c3da111f385 +64_channel_sharbrough.pdf 258b24e77052a735ab66ac99692fc7b815f1c79b6e3adb9c71111ecbb1636cd7 +S027/S027R06.edf ca8bce6ff67b1815190481926ed96c73dbf0dbde4a2cab4f1545be725d04d685 +S027/S027R03.edf.event 57e9107b34629563ac9d22f509b6f40e40ebedb8afaca03c2199613287fc06ad +S027/S027R08.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S027/S027R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S027/S027R14.edf 3035250ffd4e397cf1475df74263c66af7e2e9e04bad3319de9f92ebde448745 +S027/S027R07.edf ed97a6b4e01e5d5590da6bda8ab79197ed47a998932d391c2c950518aa8c17b7 +S027/S027R11.edf 9f361a90bac8599e7ff0132bd7524b6e12472d6bbd858ec2b75c70b6c4c70bc9 +S027/S027R09.edf 08e6788c429964dd22318fbbf5375a48047ee8e5b2b7a4fcab369ef3ba7019fa +S027/S027R07.edf.event 74fe6694983b37f44ede4efdbd00b6344db3ee7dfce3f27d06f5ec67ac6fc65e +S027/S027R13.edf 62d5ccdfb702cb9050a3243d3baea6466cf591c47c3544128daaadf33f736809 +S027/S027R05.edf.event 60706e2b67a15786207bcb87a2548eb3631153f455476ae03ec52b135003a858 +S027/S027R12.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S027/S027R09.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S027/S027R12.edf d782cd20eb82862f4bb6f74f4fbd507ff2610480f581ccbc6ab885a60be94ac6 +S027/S027R08.edf bb50d3febb694639ff3d90cedae79d6f9531f4d6824919ebcc843254acc8cd96 +S027/S027R04.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S027/S027R01.edf 
16ae12914a9c301b652d64f1d6337e6e31edbc4f64f9c222c3670dd7157ecf21 +S027/S027R05.edf 7fdc9ad49431d5f713f883578426385d776b50787a08bff7e0a3a853952b4d62 +S027/S027R13.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S027/S027R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S027/S027R14.edf.event 91a58fdc6ac826e1cc60d1f6d7a8f3c395a374bee6c3fedb7e90d360410f8f97 +S027/S027R06.edf.event 37c09b028d07b5b1954199394e59b7c78fd1325c5fada30ce1411ada2513eb23 +S027/S027R10.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S027/S027R03.edf 59493f105a7531b67e6cc52c1ace3c2fc248800077015c4dcb417b7c6ca6f3fb +S027/S027R10.edf 9fab75ae533d35878ef06a2e8ec6c623fcad251511a8c2dc81df8c332603b707 +S027/S027R02.edf c0ef16b97e2fff2692e400825728ca3c102b359634ac34a08fd2307f14b05d96 +S027/S027R11.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S027/S027R04.edf 4089acd89834f5aae1ea194c2d42b469b3dff95700554dbb4f0da68e91d94d48 +S040/S040R01.edf 45423b0cfc992bee8cede102265968d424d3731a6a7f2c8fb4a714f4ba3dfe61 +S040/S040R12.edf.event 27db1e140fad9d7a3cbfb7c99bf32f74d60de45fbaac18d08e05970e1ff5b49f +S040/S040R11.edf 693e9ccc55339199be31a7bc2099a26509855447ba1dd94cc8a32760ae9d67bf +S040/S040R11.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S040/S040R14.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S040/S040R07.edf a9dbe8be9c65ff1bfea01eb7a87c68a8fcc59b0bf3a5e78d8b3361687c0ff0d0 +S040/S040R14.edf e439a81cf9c84fb35c9764ba5e760806fa638bb99fe0c222f9fca85e948cb72c +S040/S040R03.edf 0ad4d13fe0bdf89731358eb2ef45bfd1e3d6ccb5704ec601a5b548a18395beb5 +S040/S040R05.edf d3b0fff7acb44c5c07900ab3c3b86a63c188a86ad4c0506bafaf3303930c1dd1 +S040/S040R02.edf 3f9cb083e7c2677528a1f4d23243a062c3ea3498fde899803ae752fd29f9c2db +S040/S040R05.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S040/S040R08.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S040/S040R07.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S040/S040R04.edf 7e57eaba05275c3238d5fc60b1e696ffab146d578449abe50113f8b0dc388c12 +S040/S040R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S040/S040R09.edf 6b58a26d5f7914661d5f82c9e4e83fd1453af9b9b9452350d82ce5332f01d0d3 +S040/S040R12.edf 4039c3d1a84f9cf689592549332130bfcd37e6739d8e978046f1427af22b10d5 +S040/S040R04.edf.event ee9a506277766c8d64377864252176694f8cbdaac82a329d5cce5e9c11e2e529 +S040/S040R10.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S040/S040R10.edf 5d6089202962ef26fa68fec40221916256c07e0fb7017fed0dd63ad9a9fe3fe6 +S040/S040R08.edf 941d55ce92107bf7cab012df55566d5dedb0f7b0f13a408dd0a4feeae2d16da8 +S040/S040R13.edf.event ed0acb0c9635a838cea852cecffa253bbee6e084a3404ea9ac9900188570c878 +S040/S040R13.edf 6bfe46c52dd085d36216c28f1dca46071de404bd33d630687ced49d948d1aa52 +S040/S040R06.edf 82f87f29942e399a8d0f996ee8132b8f0028f159826bf31416e1b98a5676017f +S040/S040R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S040/S040R06.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S040/S040R09.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S040/S040R03.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S043/S043R14.edf 60940854f70ceaf6fd2438544df485283deb3e6dbb2037b48a8f981e9d8e0fc8 +S043/S043R07.edf.event 
8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S043/S043R03.edf.event e1c00064d3aa1fb0636aaf0dedd65aa66d02a8afcf3dd70b9a5fa4df4d4ebd47 +S043/S043R10.edf 050765b97761f0943cfd1f95fdb707a2b7cf52b26b8a2899a140bcaf502d3a20 +S043/S043R06.edf d58bdfae3036681d8f041ebd230537fe396023f8c42ed6f54003627b50c0dcd6 +S043/S043R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S043/S043R01.edf 49151f1f618da69d7de8bea138e21c0d1f7f9833bdc8b23a9a98c95dbf0120db +S043/S043R04.edf e841bfd7c22f200ed1999fd56466b9c13b05c5bfaf0c1ee30a2685827a137b85 +S043/S043R05.edf 649d2daf74d6bb33e922dea594322c90b35c8bbd9bcb75963fc170892ee4fdf6 +S043/S043R05.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S043/S043R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S043/S043R04.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S043/S043R13.edf f51789ff14d721b809f205eefdca14e6bd57d6d63202a7624e19c1d6793e16aa +S043/S043R12.edf.event 08cefdab90cc2c4f915e80340ff4030bb291ede00b87eaa742fbe6a306942d05 +S043/S043R13.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S043/S043R10.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S043/S043R02.edf 51f6d82c5ce4c53a4bc5477d3fa8decd78cc6e3656209b8324df9be0030f27b2 +S043/S043R07.edf e554299d7a61ac63e3c193c9cf968b0c921b20f644673417c767ade5f254e391 +S043/S043R09.edf 32f3a9cdad8dc71123ba983b7b30905dce563cf330d061374d69187d696d23f0 +S043/S043R09.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S043/S043R14.edf.event 8b26d9be3cec072b0ba8e7e1b1aa9f46dbf8f50992131d413440192fc40ccc5f +S043/S043R06.edf.event a788bd6a825d960534053c6559cb4f24cb7ee7a3164017d99c1dbc55f3b22215 +S043/S043R11.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S043/S043R03.edf 3bb31e415a5cc1b889c80b16473bd18ee5fb6cc8b03cbc2e57255a55e5564047 +S043/S043R12.edf 514e5dd961671fbc4e1e23f852ac6e82320be1ef8b65383602c368f6de946909 +S043/S043R08.edf 9359ced76df3828782fe5ab696efdef57b54e87be5363f2f3f07f8d4b3ac0adb +S043/S043R08.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S043/S043R11.edf 6a70abdb1e96f21d02777643cd36113b950e99ffcd114ea4fbba8889f1114285 +S048/S048R07.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S048/S048R12.edf.event 50f12f26efafddcd994732349e4117055595d324e4dcff8fa56160baad5d5533 +S048/S048R13.edf cca277275339fcc530dd047960b75284bfc0c7af32ab695162c79189ee46abb3 +S048/S048R08.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S048/S048R11.edf.event 00eb5e22d01c3eaeb1626660a99e0c6fbf2f4423eed750224cc549752b7b6761 +S048/S048R11.edf d369c128d89f0d8a8e151eb2e335ff858836fad04c432c51db041807992d9844 +S048/S048R02.edf dba6cfa10d8bf65a0ecf681c3d1f7112bfc96e50eaa9c2782a52dc90fb81ef00 +S048/S048R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S048/S048R10.edf.event 20d3d791a72acbcdd619cb968567a104858a5889afc628a6fae4776c90c0780d +S048/S048R04.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S048/S048R01.edf c614f7a42eee90e494006d410917f21c7cc14e04f6cf86e0728437c636286171 +S048/S048R12.edf 9dbd643ce614d3b1c9e07190375fb04222e065c057d468f36b1141572e1490b2 +S048/S048R10.edf cb8c1bb4774306aacc0e3211444f9470dd0b89d73f5422b4b5c63450b495063d +S048/S048R05.edf ec79c9a93f997629556f37c47f408941813181d4bb8f02a191f0fe3bdf958afc +S048/S048R13.edf.event 
a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S048/S048R09.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S048/S048R05.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S048/S048R14.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S048/S048R04.edf 0916751e1b738588b93540043b78ea96f033b83ed87141a7cde6351409020d1c +S048/S048R03.edf 9c7ba6a229783276d59f9893fcf26a56d498bd4f231bbbdf1530315782b3775e +S048/S048R03.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S048/S048R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S048/S048R06.edf.event d6641564c0a8724abac8f919ad99064213b0fafe2c25f0d37d1cc67d83bda19c +S048/S048R07.edf 4b01ca005dd50ba06f1efe2bea065d30bff04204f5af10f18539747f7025e93a +S048/S048R09.edf 9d0481b4338cbc3d3070e53a9198e5d8d889fb692334ed32f8453d480dc0a3d8 +S048/S048R08.edf 5f519c4a10d3e18c4be0eb840be7be1022471594c46c451c3614712896feb669 +S048/S048R14.edf 234b83adaeb3c24523dc12a3c74a5271887cbc85ebd770095644dd373c27329b +S048/S048R06.edf 195f272f2e56fdd9e6b3151982c59bc5bdac42cbcf26cf75846872378ec317d9 +S028/S028R13.edf 780092ab1c7826d7d1fad8329cf8fcbb39c2b6a078314d018f64abc24b5fe47f +S028/S028R09.edf 27fce7d1a64a75ac16d6717161703a402cfaefe4756d27635df2635620e0cf47 +S028/S028R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S028/S028R08.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S028/S028R12.edf ce460f08c9ece9756e66c7e413e565d106676c865bd9a4fe6aa19097ff693bf7 +S028/S028R04.edf deee0e2e89467831f8d6d6ad4b142bbe5ffcf79a1e4dc993e1a52b5face5604a +S028/S028R07.edf c4059eefa097feea4b52bdf3aaa2684b2011c6f207668836ae461cf3ad4ef2d3 +S028/S028R11.edf.event 029131148bb6c782573739be6ec79dc67ade89f0b101169912d9c8201871bcd0 +S028/S028R10.edf d6eca0c406b75892a351c65d108546d27ed2d51fd31c0967427fa6115f6097a1 +S028/S028R05.edf.event c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S028/S028R07.edf.event a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S028/S028R03.edf.event 7ee25d4d164dab7d909181d9ed3898053c69dd75d8affe3f277cb9ceac75184a +S028/S028R12.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S028/S028R01.edf 7aefa2fd7d92490654beb7deb417dd5e04c9d7c924716c16a48ee3d73ad6c3ce +S028/S028R09.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S028/S028R10.edf.event 149997f77af08c9d6ad150aad5198f91c6c964c07e4d639baa770eac01012cfc +S028/S028R06.edf 04ad2d3a29a0f76a5f8840940bf87fbdcf6acb5d95c677e5bd69b2d259053d3d +S028/S028R03.edf 2137c12dbba064b5181dc83d7d385047ba7f33874679be51bc17bef2a566bdd6 +S028/S028R06.edf.event c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S028/S028R11.edf 15e3a1a2c0f28b5856901f6ee4338cd65f98c3b3870b29ee7b5967d2fdc17e55 +S028/S028R04.edf.event c117ba4c66b5467903fddc4ed77a580e09639381683c611dd1f02cb0d311a4b5 +S028/S028R02.edf 3663ccafe904bbf31e929a7999a03e8c387aa645b5a90c8d11b0a8bf751f3d7d +S028/S028R14.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S028/S028R14.edf 3a2732e2965c9d8a56c1244bba7640b688177b6d2a13e7da4e3d934d25e00db9 +S028/S028R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S028/S028R08.edf f4faed4d95f419e54b95e5232155fa77af1a1a0a739bdc12b3bdfc6edb5f0a79 +S028/S028R05.edf e784563aaecbb0a085924511ecda4ac320a564f9e0dae2e70ba044dede5f8928 +S028/S028R13.edf.event 
611df9f780acf887245656c2987fb77e486d2bc016936d00eea0a55d2f5c3028 +S045/S045R07.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S045/S045R12.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S045/S045R03.edf 14e948d21adce6478e71ef21dd9dea8bc3f1555518cf8927b2c33ac6feef4a13 +S045/S045R13.edf.event 91a58fdc6ac826e1cc60d1f6d7a8f3c395a374bee6c3fedb7e90d360410f8f97 +S045/S045R09.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S045/S045R05.edf.event d229cfdfcd562a5cfb40ea306452dab12d3ca82a70a465289b4c69c299fb0258 +S045/S045R06.edf 0a2348d5e52f55836b1154606d89d530f0fc914885cc334ec430c304f7a0627a +S045/S045R14.edf.event bd6dfaccdd7deb73743f500e0a1fa9d0ff333e94d59c6d153e2e9d2dc8a4795f +S045/S045R14.edf b7cb236b51fc8acf948277e703c2bfb04fe5515e3036a7a1d1c8df5a5333a837 +S045/S045R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S045/S045R08.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S045/S045R03.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S045/S045R07.edf c37669371b627ee9f63de41f8f276867f8defeff59489873eab0217a852ddf85 +S045/S045R02.edf 91c60dc17ea3b0c8a636b727f5d2caa9d84d6813bbc2a63b40a80d6186e8b75a +S045/S045R04.edf.event d45b05b6c6897898bdd688c12fad776011d39250bd1bd8de7e7745d939b60e3e +S045/S045R13.edf 5eebb9e7c81bc73766bd32d167005ac7dcb256155052e2bb9ab750f104c6efe2 +S045/S045R04.edf 655d2602c087d8f9efa32722bb32aceed76cab3c72570516df12e41458a25482 +S045/S045R10.edf 309184404b1ef8d649d7e8c6177b48a75fb4eaa655eb49d96a797f92b79d3b67 +S045/S045R10.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S045/S045R05.edf 3bc2887a902ebfa42449cd0d7bb5488d2f6fe6615072b2059639d119d62d76b8 +S045/S045R01.edf 99ed18a81e6d86970dcef27b3bf1ad6d4adf01477b4560aa5e198673de2992bf +S045/S045R12.edf 171f3487729fbbea555c923c74b8ce1e0f408aab10205cc95d2f498ba3220b2b +S045/S045R09.edf 2c5c79713d5127bf59e824029a0e5cba9a46badeb0ae94b0cd729d8dd1bfa26c +S045/S045R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S045/S045R11.edf 1517b91e4166540a416bd233b4ed77be080f740de5c25df6c52f60d61916b68e +S045/S045R08.edf 8e3ac9f836029bfdca587290743f5b7a190f2470f55fcdccbecf9eb7bb3aa40a +S045/S045R11.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S045/S045R06.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S109/S109R02.edf 006fc0cfb18c2084a453578484b2252b6c1dbe5608d73e20717a39b557a1d429 +S109/S109R03.edf e2adb0517ec78e1ea79e6dbc7aba14f16b56243b8dafe630ad57deec29b79d34 +S109/S109R09.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S109/S109R05.edf 033bd271a8054992c658ccf79575e61d3e25d820188cca4f3bd563f899f05bf4 +S109/S109R06.edf.event c777015bef40a19f68f8ed8c37572f501bc00d9f061933697ce2f238c9ad3f9b +S109/S109R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S109/S109R01.edf 4547299bf4645e4afb08006d03b419e1273d4a3e853b8aa73d569e5a96dce8d0 +S109/S109R04.edf c267b8aa26c70924fc9cb9fc8596b351a83894490bab95325f738d68829aab3e +S109/S109R10.edf.event 85ffa4ceaf93483bfdb010c4404fa88a39a260f5371d86a551e206b015abd33d +S109/S109R12.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S109/S109R07.edf 2dde459f8e061be6d581880e6fdcd6cfc1109ff7151729d82097fa8ad66ca50b +S109/S109R04.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S109/S109R12.edf 
b9f0f477c70ce519595b74c887af790c78c77a7f67279fabc7fba2989ee59782 +S109/S109R05.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S109/S109R10.edf b147611b45da6fd50f0df50f78be9b4782f85a1d5b43edfc0d428ec519bf5012 +S109/S109R07.edf.event 4c374f58a91c1ff71894cacb50bfb798d20cbef67ee65a1ce8a2d5826349e390 +S109/S109R08.edf 879f8c8cda430e95bcda881f9cb1ede5e0693c9b83849d659e60082dd3e8706d +S109/S109R14.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S109/S109R13.edf.event e4c9fdcbbe3469b81dd48d30396ee921a23d45fb900a0dd3b7eb4ceaf04936a6 +S109/S109R06.edf 5a685c86cf1e3d85e6c7aa58d7d9eabf048d0e1192951777f30f97dfb675bf3e +S109/S109R09.edf 07fce275dae8835e1c4458ff66dd9e87805593e4169faeb025b60c55fbc637a7 +S109/S109R03.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S109/S109R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S109/S109R08.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S109/S109R11.edf 7b5859aece6cd7b8d8a732ba372645d6885af1bf29da24a0a27a366dcc92021b +S109/S109R11.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S109/S109R14.edf 40ff8875daf98e4614854d1bb6bdf97b85b386b9caddc0dc944414fec0d049fe +S109/S109R13.edf aa94b64cd18a1b106471367ca9f2b3a16ca9c5669e637b8328d620df60a96a0c +S068/S068R11.edf 9993b459a0a3667c0b2810476632a71f9c6601f12516ce94c3e127fc8685687d +S068/S068R14.edf d5cba484c5cb43dc0bf4cdf4cb0ff098803b9d4b1f3fd8dd6c3f79006b58ba9d +S068/S068R14.edf.event 2eb503fa48ef7e13b77098d860c716de2d0eb55a3a0580117eb34aedd472a728 +S068/S068R08.edf d49374089836e11880347909fb5a358d6834303153e074f230be6caf0c66aa16 +S068/S068R10.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S068/S068R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S068/S068R07.edf.event 819461ff12613675eb0a26eec47ae680594adb9c6696c5e02f8e4b0d3731d56b +S068/S068R09.edf 171dbab1442c48f774cdab95e154d52e2dc9b8c3a20b01933ccaee264871dbbe +S068/S068R09.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S068/S068R01.edf 7f528d62c8d4b342ff2f958ec6571d425f7d3926f0832ece121fa162efccab1c +S068/S068R04.edf.event 1b02f94c97b31c7b11129bd2f0db6eb71ffb209c8dc379625185f7a3726d3aff +S068/S068R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S068/S068R12.edf.event 3a8202a5a33eb8ffe91e329a5cb7a69f0647af677bc441cdc0bd0e11b3631c64 +S068/S068R06.edf 81a71170f71fd313c373612f474c6bc051481d5a831cba3f37e7b06c1d9935f4 +S068/S068R04.edf 7009d509684ab0b13b0253178ebf2b715a7ced2dee2b409969f088c3094f272f +S068/S068R13.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S068/S068R11.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S068/S068R06.edf.event 2da7955eedd5ec220793d3b4b3224c26256f1427c213729179eecd79fd4321c2 +S068/S068R13.edf bea2097a6ee72e37f7039b38cbd57af051dba76797ee83629d4c333c66a3d934 +S068/S068R05.edf 9f5307cda4c4be652743ce6fca4275d2614ec81c42968c1f9c46a22fdcd8a376 +S068/S068R05.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S068/S068R08.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S068/S068R07.edf bc4403c8c2186589008953fac143b4556e05b294fff29bae1de2151f9a7ad36b +S068/S068R02.edf b8207c54c2f123032ba589c1d69ebe39503cbd4f539e96935da842fdf9dcfd4d +S068/S068R03.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S068/S068R03.edf 
2ce7f50663087ed5f90aa5a75583994c11b86e6556c2395d8cc327cd04599f4a +S068/S068R12.edf 966c9fe51563d5dd048482211d01eb1005130ebc14b5bc6ac89097b96ba9a5ab +S068/S068R10.edf 12250b352069cdef00d68d872a5c110a1098e5e7be6a77c0658f95ebea52a6a4 +S085/S085R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S085/S085R08.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S085/S085R07.edf 67e6d9307b088dee4ab676419451931e4215372843e55c99f55629375fa1d61d +S085/S085R11.edf d2dbe9fbdf9e0a050f08811c18b25bc69a4a87abbca6c819ef96515b72147fe1 +S085/S085R07.edf.event f686071dd83e0610c6463f51dda1c01de1abacf998f19f9b7d3910c06492fc30 +S085/S085R13.edf 253e4d192802423c11f3d9ddb6ffa35c41a5fec990c71f04fe79d86f362836be +S085/S085R14.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S085/S085R03.edf 3723d3c37f3fc3b8ca7246de882e8ad2edea370284e4878f8a706d6bef0e2f8b +S085/S085R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S085/S085R12.edf bd78d9b98a9ba10ac7ba51cc213970da2f2c7a7f3238714b362017306d53983f +S085/S085R04.edf.event 74fe6694983b37f44ede4efdbd00b6344db3ee7dfce3f27d06f5ec67ac6fc65e +S085/S085R03.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S085/S085R09.edf b2f2fc7f0bb5a83d8876b40ff654d495a9882a4c046f4e7c9317a524e55a55c3 +S085/S085R08.edf 7da6a14c5b2722ddfa3962cb0b5580e3a5207c77154a6bac05c73d7f75e4409e +S085/S085R04.edf da0fbebf6dd7074cb3b71769f2a32259249b360e8ff03d832e99a02ae7a55090 +S085/S085R11.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S085/S085R01.edf 378774575493af619ad20aa110604dc98be465894a7f82ce8ef815e3e58b51ec +S085/S085R10.edf 9d9fb1173e754a9b1ed6cd430c2f4b5418e52ae89878ad51c7f79d7a10244d60 +S085/S085R09.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S085/S085R13.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S085/S085R05.edf 0caeea47c39fc638c03524b356f2fe749fff9ffad8fb3248c476c47c15766ca7 +S085/S085R12.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S085/S085R10.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S085/S085R06.edf b5a691952339b9159eeda0ff49d33a1a05869254398761e4da70a5aca445e1a3 +S085/S085R02.edf a89c675ea2e6e0211aaea83cc2fe0d154bb1984c1ed836868f283f0f61b6c943 +S085/S085R14.edf 77a596bc1306b966accaf331af8501c3cc6dbdf66b0213993a11b189afc01a9f +S085/S085R06.edf.event 7925ab0749163820c00af5a617adba24f1ce1c711886e12def08eb72594e10cc +S085/S085R05.edf.event 2fa0b9392dbafefb266a1c269022274e4f4638513c85370c47ce925b54b7d5d6 +S050/S050R04.edf.event cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S050/S050R01.edf 8407057efedd6e133b5e7b13caba82c90654d2697288d35b27b7fc33aeb4f522 +S050/S050R14.edf 4a11efcc5be7792041af8429b25cf7344e4c9c0adfa75d86bb06bbf8e870ff01 +S050/S050R08.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S050/S050R05.edf 59950256e03b968a6686c8d102efa843d7ed824e458597084be1a9a535a7d6d8 +S050/S050R10.edf 72174e570bfa31ece2dae5dad0ac8c242044784755bb9d6481a194a4a2b7e311 +S050/S050R06.edf 4b0d2ba3215359fe1b2a963c8ba0c94e0b10c08f33a0eb1c1da0e3bcb8c2617e +S050/S050R09.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S050/S050R11.edf.event 08d1f10d75e3c7f02ed234fdfe64948be5fa14e26fb27441a377ebf76532dc50 +S050/S050R06.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S050/S050R04.edf 
d2068d794befef727cb907835cf04cbc357e16ef90766ca22917dd4b4649df85 +S050/S050R07.edf.event 08d1f10d75e3c7f02ed234fdfe64948be5fa14e26fb27441a377ebf76532dc50 +S050/S050R02.edf e4d2e4444e5dd830417f8729a549daba168d49e2f2db6467f44aa75ab9554474 +S050/S050R09.edf 15c17c5266b4c46e49e454b455dd4fc9cba0cc2ed670b64f47440c5e96e814d4 +S050/S050R03.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S050/S050R07.edf 0b0435ff445199521d9d11589e69bb0940f7c4878f6546dcb7d65d0dc52fdfea +S050/S050R05.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S050/S050R08.edf 211a70a072988cc85fbab6e28b0e0550c18d22225dda82a9471a980447eafcf9 +S050/S050R13.edf 2a586af9a1e31ef0bce58d53c92c55c0c1ad670b301711c669daafa2fc452dfd +S050/S050R14.edf.event 1f581da8e4856c63e25c541381ff8a370d89525260484c85de24ca995a24a984 +S050/S050R13.edf.event 9787b103ae02f348ebe837cf8e545e3e630609a320868cfa229f0f82a0734ddd +S050/S050R03.edf 114ffd39341cdf2375d4762190ee4d8b893c0468e02f4c8a413b183432fd44b5 +S050/S050R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S050/S050R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S050/S050R12.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S050/S050R12.edf ae1fa0a96ddbcf2f2e55f1df26fb90c4bfe439bf0bacb89c03579c0308ea1180 +S050/S050R11.edf 16d0f476a75c5ce8c0dd1e99bbfc8ac1cd0ad72804b9dd76e6803ccc6b4f3b1c +S050/S050R10.edf.event 7ee25d4d164dab7d909181d9ed3898053c69dd75d8affe3f277cb9ceac75184a +S049/S049R07.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S049/S049R14.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S049/S049R10.edf.event 6ad812d50b44ed49ee87e09d1cf28b68a99855b6d266be8b9017d8056de057b4 +S049/S049R09.edf.event 6558be2dc3366ecd25198b4561d2d6a49bf257ea06005bb7804aa7c5f9bb7c8b +S049/S049R03.edf faff4a3b9f549956039d5fc56f6a3a69666a722767b52672d10057c3af3bd32c +S049/S049R13.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S049/S049R06.edf 0d6e568a62a15ada1889fbced43069e0c2d5ca1dbb5fede5484a954af40bd079 +S049/S049R08.edf.event 9a154a517e2cf402786cffa7d164d0656aa7a8bb30af51266fc6403fcd9d3d00 +S049/S049R02.edf 1a717c22907a799d191aa1e34b4702d5429359f0d10419ae64aa3baf6d4a9241 +S049/S049R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S049/S049R11.edf b90f50b60a229cc4426527b09711ce720932165fa23d427cee871f13e3b372bd +S049/S049R13.edf dd9504e0120ce5876437f1fbd0ffc150bb6e713550ac818fa7f1b165571b0b47 +S049/S049R07.edf 7a290ed431af3fc57e9e8dd41e383ec8a734fc59240eeced59872f32ca117723 +S049/S049R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S049/S049R01.edf bdfcb91b7c914a621617d59cd6f132c9f3229f64f22084e57904232e84c2eaf9 +S049/S049R11.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S049/S049R14.edf 22d3b7e7e6efeae214e237437447f3928214f5d174413f03a113613bb8840771 +S049/S049R05.edf b6eeb2d843cdaacd5316dbe64a15a8332397ce454a657e887fa9ffeb9c335984 +S049/S049R12.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S049/S049R03.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S049/S049R08.edf 449bc4afe04a06d018e6a4385144253bdbea91334af3be4f59165883be2c46a5 +S049/S049R10.edf 901196f4f5fd3d9cfec28a04d581bb0e91b43bd6c1815655f112755f7536efed +S049/S049R04.edf.event cd91d83b7ed2080d3d803a7381c00951232c99bd8107647556298a104a0ce8c0 +S049/S049R04.edf 
[Added checksum manifest, collapsed: one "+<path> <sha256>" line per file, covering what appears to be the PhysioNet EEG Motor Movement/Imagery recordings. The entries in this span list per-subject runs R01 through R14, each as an .edf recording with a matching .edf.event file, for subjects S003, S005, S007, S009, S013, S025, S026, S030, S032, S034, S037, S038, S049, S053, S055, S058, S060, S063, S064, S065, S069, S073, S077, S078, S084, S089, S094, S097, S099, S100, S104 and S108, plus the ANNOTATORS file and the 64_channel_sharbrough.png and 64_channel_sharbrough-old.png electrode-montage images.]
07260f0bc56394b88fc506823779ebaf9e0b6ab6286608b010900155a4d206dc +S005/S005R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S005/S005R10.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S005/S005R12.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S005/S005R11.edf a4d4111c206de3c094d23c9fbaf2b3fece74eff7a68a11073f29dad9800653d4 +S005/S005R03.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S005/S005R02.edf bdb498c613cd048a889899282e8f5491c23a76731f3c2be11800362b7bbcb836 +S005/S005R05.edf.event 8b26d9be3cec072b0ba8e7e1b1aa9f46dbf8f50992131d413440192fc40ccc5f +S005/S005R07.edf 630fbaf27a7a962a4a8bd61396b6a1c4f56b44e85fa5e9400016ca19085ad1f9 +S005/S005R10.edf a5721f5a8229799b6323e26f5f2fa149c121ff87d4ba73b30fe9280602aee140 +S005/S005R04.edf.event f1d7c85c7c4b298aa795662274613ada05d95ec81ad8b21ce4c8ddca5a11ecdc +S005/S005R03.edf 3bf2695b66661a274bb099dd093c8f2eb4ede8a7068ec4bfd0a1fe9f4bfc0e18 +S005/S005R09.edf.event c843292c2d927d69501ccd581dd6688cd971a162df8d02a7eb20e6a3c1aa2d83 +S005/S005R12.edf dfaf07c8ecc583b07363485596258e66a75fb33169496c62918c6dd0803814ce +S005/S005R14.edf 77d5b9b1f03074e96c0e42234f57363272fa90501661cbe038bd387f3ce922e0 +S015/S015R09.edf.event 20d3d791a72acbcdd619cb968567a104858a5889afc628a6fae4776c90c0780d +S015/S015R08.edf 70e4dcfa5bb501007b4a502fb1ac6975128fb316de67eb825f8eee00cba9377f +S015/S015R11.edf 5403967a07422bcce09d1d96dd78d0eef6f29344d8e0d388975b483b93d47341 +S015/S015R05.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S015/S015R12.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S015/S015R03.edf.event 2eb503fa48ef7e13b77098d860c716de2d0eb55a3a0580117eb34aedd472a728 +S015/S015R13.edf.event a4381c73837f1ba112959670ca4c474004b1d78b82daefb4ea220692bfe23c8f +S015/S015R05.edf 9119fdc1ff6ac9f67eb34d14803c8d1279a1d4c013a77f05b99abe2e334dd55a +S015/S015R08.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S015/S015R06.edf 705c63ec04c0f9096cc6b4f32eec0bcea9a0bfa8d01aed7494b67a487e014afe +S015/S015R14.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S015/S015R04.edf.event 4ca96a0e24908b831c877816b86e3dbd1eee397d9d9851bd1e890d67134c57a0 +S015/S015R12.edf a5e56adba75340447849c46eb049b39799ee98d68133357741c6980f8f60c54a +S015/S015R02.edf a4aae371744dcfe5febf36d2605ede4d704524f9971f5501c4316a077d91562a +S015/S015R14.edf 3f57b464e13807f30aacbfc2f6780bc02bf8faecea883725683177416503c875 +S015/S015R03.edf 9de915e373fc372b372cbe9c59692c7c961dc9d2ff12cbffcdc99190a1bac99d +S015/S015R07.edf 914f9c987bf66fdbc3183531bbc5ca97ba8afaabd8864811c438d8ddb38dfc3a +S015/S015R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S015/S015R13.edf e154f143697251a1b3401fa5792fe47c7d9891a2cf63edd1cc67234752b428e3 +S015/S015R06.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S015/S015R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S015/S015R10.edf 67233630176aa19faa4133355be20476cc8f571d9327943b9d35a73ecb96f519 +S015/S015R09.edf 628b83cdf791ab95545606d1b879edd13449da7e143627f931a35680de2b312c +S015/S015R10.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S015/S015R01.edf 79564c46036cd6c3d1371811f2cc426c9ac14d09d7da3dcf935a65d1a633c9c6 +S015/S015R11.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S015/S015R04.edf 
1721d4a066e0c28221877d0f9e7da2402bdd924a532f5f19261c429def061273 +S015/S015R07.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S081/S081R04.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S081/S081R14.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S081/S081R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S081/S081R10.edf a8cf1995bc0c09a2a50bf889759ab4c1076735bf6a6144bd73a9ff3e845d885a +S081/S081R06.edf 726507264648754d688db166ebec9836b7be70025eebcbb8ede1ca9e47f74e53 +S081/S081R03.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S081/S081R12.edf 48e0a4562e2355a3737b412fc54259c7174269f5e6012448fed56253854950a8 +S081/S081R09.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S081/S081R11.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S081/S081R09.edf 2662fbeb19e4154447cdce6f73b767e972cc5abddc9c7c19b1a65acd24166847 +S081/S081R10.edf.event 7925ab0749163820c00af5a617adba24f1ce1c711886e12def08eb72594e10cc +S081/S081R08.edf 7faa1f739438962a64487e2132aabdc5c9528430a5f8e374f4b5a9106cc70fa6 +S081/S081R01.edf e5aa41fe4d8447c05d0e3efb752f1312210db47de6a670da2c7c57b3cdb1bad8 +S081/S081R05.edf 9f5bc4ef1c7fcfee0211319ca64aa04ff7a3b2c01c0c5dc460eaa3d17d3b24ff +S081/S081R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S081/S081R08.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S081/S081R12.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S081/S081R05.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S081/S081R07.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S081/S081R02.edf feca30506294768d8b87d959f757ea1c95e17dd5749627a6d7cb4488b314e230 +S081/S081R06.edf.event 931266ffca879dea17f50ad227bae49a0d891b282f30fa3ee4b13ab8623dc5eb +S081/S081R07.edf 3eebdead7b378306bf45d32035f06493de11b6e0f6e7d127ea1f6281371efce7 +S081/S081R13.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S081/S081R13.edf 163ad2dab963d61eeb59a57e4df45b33c7183bde1209396ccdf3bb74a9c2008a +S081/S081R04.edf abb5db7c9c42a09fe10644c0a071d8511b1a31b0014258e0b076cd9195b0ed2f +S081/S081R14.edf 1ff930fd50f5b1710395ee4e4680ba050f01c776b3d6605609cf2446793159fd +S081/S081R11.edf 98120ee5954f8e394f6405ec853f66cdf656c24c456b6fc004b66e2117135c98 +S081/S081R03.edf c493f4eca660b386bcac76cec638928f5b98cc88eea16ff97b67993b6405ea5c +S018/S018R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S018/S018R10.edf.event 3cbb6086e0e7d8748f07bafa5905b530a6f1a43c1ebb1a06c6f846a82247c451 +S018/S018R08.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S018/S018R10.edf de1322a24dc7d95c29d64d91488cb1cf3ea30aa5953c7fbc80b1fa42d4ab44f2 +S018/S018R12.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S018/S018R06.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S018/S018R06.edf c63f23d980060ac721c705567e65603808dffd0af32391c6e2528072bdaf3cec +S018/S018R03.edf 6538d4a1fe7df53296897dc6efb2274e78429346086c6383db9c58af16c46251 +S018/S018R09.edf 75c9cd943edd18b43cc73f8b3702e3c7c2fe58dd35119b147041d4ba40e358cd +S018/S018R13.edf f7886c46a154ad73254422b68b56d81e0b986274ab7d1db177de4ec656c88292 +S018/S018R07.edf 1bfa4c8da5c4f30a71797cb27f2c77cc02c66133d3cb83430918cb2950e3d7d1 +S018/S018R04.edf.event 
cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S018/S018R07.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S018/S018R05.edf 86d9fbc0881a182960ba9486a456f2f91710e94cf2ad755247dd37d352b40e19 +S018/S018R11.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S018/S018R12.edf 50b4e74957541c9f1f2161ec3665b58b247727b50ed9284f1d173b9b77d1feef +S018/S018R13.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S018/S018R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S018/S018R03.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S018/S018R02.edf 61c88fcc56859ba2299afb7b1f72a288062826ca7ececf6898224dc9b52c7a70 +S018/S018R01.edf 664012489cbc59928d2bbd665e4f3d557421e9ecec7a8ff6f46faa64ea9e7028 +S018/S018R08.edf b417c65a63c4b70f7eedc81ad11f8b5c067be5143d343a250d9397050ec5e27e +S018/S018R09.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S018/S018R04.edf 0b726bfd4ce56c9fa7417f84f52759c40eb7cd326f06a25374034c199f5fd8bf +S018/S018R11.edf a08b1554878eedfad123f7a86e034acd22cd9052992e5967b693e13c94e14fff +S018/S018R14.edf 33e7d70d1b51cb549dbe2785e85414e3c82b039c4dbfb71be40c2bcd48136dcc +S018/S018R14.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S018/S018R05.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S056/S056R02.edf 061c293fe7d9d36359e77b59c26693809fdf02a4629fee46e9c857b98aa9ad47 +S056/S056R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S056/S056R01.edf 29f8a7a25c02b872dbbf0cb52475ed55b45fd3b5a90e2fc5025691ff389b04b3 +S056/S056R04.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S056/S056R10.edf 3d4f919505b19d346f4d81a8af960119113de7d69ca04effd0d6756ba7e926bd +S056/S056R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S056/S056R07.edf f404688039c70c11a61caeceff3c9e02c214d83d3a404a78947c1dbdc0e4d41b +S056/S056R09.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S056/S056R05.edf f0a6de4bfe5925d98cf3c0bfcef21a9e61cfb8165c3caf40fb6228c8a507a29a +S056/S056R06.edf 6ce8cd28af985d32ab874784e311f06ae0c2fbf0b9ef4817bc0c561bc4abbd0b +S056/S056R12.edf c47457c3baec40f4b7205cd1fdd878ab5855cfe0d32fe00655639101efddd69c +S056/S056R11.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S056/S056R13.edf d04270980331cd9888307b60cb0241f83cd4f4f6a2f840d0350523275776a3bd +S056/S056R06.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S056/S056R14.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S056/S056R10.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S056/S056R07.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S056/S056R03.edf 5714d4361308d21f12c5fd37fda0f2513fc0a2d4a1ef7520b5f20ee302847705 +S056/S056R09.edf 9df278a8cd6331dbbbaa7c79aaeaa70411906fc23b8fea9f3e44a1fa85cdf62b +S056/S056R12.edf.event 440457b385c6d53d2340acbe5512027de7800fb1514858b055b126c10e58b404 +S056/S056R14.edf 281285940d65a262d8544c4ce035bd24720fc5bac685d5e557654aa0470afe2a +S056/S056R05.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S056/S056R08.edf 0fa0554aafcc78c38f2e7d65f5941c528d26741194a1eeea8801225c44799db8 +S056/S056R13.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S056/S056R08.edf.event 
feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S056/S056R04.edf a618b25fc7570d8fe836949761a53e9a9a61cad59e99ac53e09bccca78155c14 +S056/S056R11.edf d68892e7f775eee5630a37cef9b0357671546f0d05272cb2b5c272cf1c533765 +S056/S056R03.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S095/S095R08.edf e3a48b8a9582fc7286a014902b73cdfec56c3cb1b4376822b5275178ef7d363d +S095/S095R14.edf e38842403578f1438e2946e72257fa3ba5d8d422cc0303f35b50d3f2471c8aa5 +S095/S095R05.edf 0bf1b8fce6528eea18ab08b0d479ae5ecff4befb1d18f707b0637251197f000d +S095/S095R14.edf.event 60a42ff3c5acecb10ff34635fb3d1ffa125153f262355d4bdd58ab9b9345bcae +S095/S095R12.edf.event 9ff78ae1debf975d7ce67d0ae65ebc35a183ca2e2a87fec3f877fda244e34487 +S095/S095R09.edf 98d149a2f0832ef787ea016d2e215ef57bbe944ce4c7768e3bfc8e4d9a8eb888 +S095/S095R09.edf.event 8f01ffccfcd90b3dc7f8bcf530f915ede4d3a7359039c3d68ac2b897a331031d +S095/S095R12.edf ec1661503c6628bda69ba9aff83004bd9528e282707e83f243dc906bcd64caec +S095/S095R01.edf dfc6a2fa8305ddc93e46704abac01c6c9138bb8cd69fd86ea6b29868940d0466 +S095/S095R07.edf 9dbd5a02ae1fecdb713909528ab6803c5a4fbe027dde1da453b71ed3420a3315 +S095/S095R10.edf.event cae710b11a021b7affcf41c5a3937e4c6bed000e6bdc1690cb997e03ef6db7af +S095/S095R06.edf.event 7309915dfa710afbbadf32d03ef14d92ea4f3831f5edb707fd8ba858d5360684 +S095/S095R10.edf d3456f0afc83bf51ea717fdc7f379df64555397b3cd52951b8f4e0ba8bf3b220 +S095/S095R06.edf 8f6847f949aba7a3aa2b5b2394e847a1ef5473f4dfe426792c7c8e75c13afe1b +S095/S095R13.edf 96ed237df16addebaec9ac6d19ba880e0818866c1e7be4bd00d6d69c7bea608e +S095/S095R13.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S095/S095R07.edf.event 6ae700f98a136e3a38418f1e7ad1db6ec534a13167c4f31e5d88b04564d47fc2 +S095/S095R11.edf.event 178339a2095d7eaf759407927ba3657ec8e96b153a8e085f8a593674f3697b42 +S095/S095R02.edf 1b71278872c1d079892ce8593c714b5dfe0d8a8f17c0133db8ad0bb4eff7cadf +S095/S095R11.edf 677157a867afe196b3c9eed6e156da65323055506f3bee130034ea7381ccf14a +S095/S095R05.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S095/S095R03.edf fc0385bf90bb072c71c20602106599dfc212cd92c7675697d857db811e3ec2bc +S095/S095R04.edf d6e647c36b24df0524c32d896328b876a079ae75be41aa665c5fb0871ace1430 +S095/S095R03.edf.event cef39c5c7fed8cace25154d7385aa5c8666f28db51ba7c7224ce1650f9388915 +S095/S095R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S095/S095R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S095/S095R04.edf.event 8d0bdc46ca76b86f6ed45d1ca23522221fb5a9e165c0de588616c0b9c11a6077 +S095/S095R08.edf.event c6742adf7ce83d034f6e3cecede733919ea0d2fe7854dc4a86c12cf32c5c5aa2 +S091/S091R11.edf 1fdc4fa050d6249b5daa14099225882eae6c48a99b7993016b536b34ba51fd2e +S091/S091R03.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S091/S091R01.edf 98db88036017237d6b096d1307927fcbb21aedd5412a55ec641912a9a8dc1cf4 +S091/S091R06.edf b7d61422c998c20a1321c1dc8a1c4f8f33e47b31f8021f4f396ee3fce010f672 +S091/S091R05.edf 7425b2930d298ad1040c2e1ad27d85413fda9de0aa4a09482007be2ef5736cef +S091/S091R11.edf.event fbeefca3ec1354b1a7a1326279d804c2c564c4b25f2475ccda7b8e94ba4cd68d +S091/S091R14.edf 1182c440b3bbee1cec62695b426e6d6021e86f2b40902c9579e2385bd9c6d53d +S091/S091R07.edf 1f4f23a9a08c83685a165f25386bfc5aefd90ba5031337bf0775d790f7fd8e9f +S091/S091R08.edf d38c2dff0d43f699c7c2bde4387d52745f5073a827c2d41200e0aef9371015af +S091/S091R09.edf 
df600737e8cd8e20fabd7d2e1cab7f1814e555483a254bbdce0c8c5da1857152 +S091/S091R12.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S091/S091R05.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S091/S091R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S091/S091R12.edf 3c17be3adc32ebdf36b4addb89fb78b0b0d0aed352497795aedd8f17cea30b4e +S091/S091R08.edf.event 082acf669cebc89d5f5f4bf29769c743f3b1e14290b8f0f34415e285db553b3a +S091/S091R09.edf.event 1cfd45342f8ef617862da12ca0e446e681027d6001054fe41ac2728751d3b2d1 +S091/S091R14.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S091/S091R06.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S091/S091R02.edf a7a4ae35661c7ca877f353762e1717f6441dea1fa345fc8e4908381197cd25aa +S091/S091R13.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S091/S091R10.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S091/S091R04.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S091/S091R07.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S091/S091R03.edf 0bed1c3dded92a145b61d90c8df087b5502f72b54e5f027855297253944a1f82 +S091/S091R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S091/S091R13.edf 0dce3749db17c971b2d67c7aa1f0cf6db9745714d596f28808fb4595caa8855d +S091/S091R04.edf 285b7884315a91e2d565aa74f208b1c13a9386af477cc7c6dc8253efcd5d1f25 +S091/S091R10.edf 1acf6fe5f95741ceb0db5a4bb741bdd8dd45e99530dd2e159c11ace48707a703 +wfdbcal 76c15f6af371fd682ba78553d974a8c2d5f12c2f84f0bbc3f98d02f9757d426e +S036/S036R07.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S036/S036R10.edf 7a675ef450988c75998d3ddca96f3773e611ba79d6aee5c09d51dc63e9a1a246 +S036/S036R14.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S036/S036R11.edf 3cb485b341bacf8ade8226b2d2bc25ae42ff3d5dddfe7b500c558f3d6febd6b4 +S036/S036R05.edf f43f1de6a8790fd42aeda8cc107dcbae9577598842da1a7f46f2b6020c6d713d +S036/S036R01.edf dbdda796236e1262d7dd1758d7b4c80762b1865047b8bd2b28d59d273fc4f789 +S036/S036R04.edf c4646d8128095e80fbebfd03dd6c77ebb3491fad49f9824c74f3e4861d9a9d04 +S036/S036R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S036/S036R09.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S036/S036R14.edf 84c2ffe0f52ec16ab7b188b2713173b635a3a8beeecf575d77b118b795e6738d +S036/S036R08.edf.event 5bb35bd49434a9630e941b5646d6d89f7907531ef3e44464334b78943d4b0237 +S036/S036R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S036/S036R06.edf.event a4381c73837f1ba112959670ca4c474004b1d78b82daefb4ea220692bfe23c8f +S036/S036R11.edf.event d229cfdfcd562a5cfb40ea306452dab12d3ca82a70a465289b4c69c299fb0258 +S036/S036R05.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S036/S036R04.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S036/S036R10.edf.event a71674e1fd86a57270a24706f4e05755887534f04ecc35f98e56f000312402a9 +S036/S036R09.edf c0db65c2fbaf526b62270a476cacc1200f437060fdebcc2528bc85891575e090 +S036/S036R06.edf 116ffdcab798c901b6fb13763417dba611275d4f22c405248f84144cebd33b43 +S036/S036R12.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S036/S036R02.edf 0dcc07f8b1362772a26db20e0e8ce9426c463973741bf6ef0079f897771ded0c +S036/S036R12.edf 
ce79efbd62a81e00a6b7e98bbb80e2d8c0c4bf5c23ab1c1a036884044d5978df +S036/S036R03.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S036/S036R13.edf 6b2c188626213b2dbb06c07d9906be0f982fda71c9149821532cffa3855fd782 +S036/S036R08.edf 60ab89e8d0ecba687a40908643caaa4c7f4bf6ef92abb8d5c3c31201315eab2b +S036/S036R13.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S036/S036R07.edf 1b978c2ef1ed07eb355b270c32d59e9170e16280d6247578ec806886da798d0f +S036/S036R03.edf e9ec8c8a0fba4c7bf9de6db237bb6bd1ac4a3d5d6d273f9e37a0253ff4d431a7 +S106/S106R12.edf 1ea2b9ed0d8f02557b21bee5b70514b641e68321b3363cc3325b942832005a99 +S106/S106R08.edf e75bafd408f19d7c04d86479a414c0cc29be39150f20b9e676dfb40843f34792 +S106/S106R10.edf 42a957d437ba3b040aba7f84da7d0d30989268f5dffd9e9824436ac99eead107 +S106/S106R11.edf b3819655e3af2c063091ee1df642fef4205268cbe1692d64f91a37393a8e6e56 +S106/S106R07.edf 4cf953b4b5eff90f64667556cf5bf5c7185bb9c013cc8f00a0222fbab852faa8 +S106/S106R04.edf.event b1fa69a82433c5887997fcbcc3cd7d906b4b49e77e25114de9d45087c9b126dd +S106/S106R13.edf 7137ab5a7fb1fa9ae84d4172a80338ab124824d219b52725490854192d5a8596 +S106/S106R09.edf.event 4b81a244be0ef71bc8b32a5a151ca3746b282903e6c7a28b644879e8bae159b9 +S106/S106R03.edf.event e318e6bd044ead383499ec22af114e14b3a59def9653470287d411ce26ee7335 +S106/S106R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S106/S106R11.edf.event 6ae700f98a136e3a38418f1e7ad1db6ec534a13167c4f31e5d88b04564d47fc2 +S106/S106R07.edf.event 6004c1ac954f2a17e948f3273c849a0e322cb8bd539c483c363378dc50ae2099 +S106/S106R06.edf.event ee1750c58b0ffdf35fc0b7841091977647634e837de14a0b9f891ff04dd2f5d9 +S106/S106R12.edf.event ccca38b5a6381c8bccf9729b6a2e1d1cef2d3880619313ab656bd58ab7a0df0d +S106/S106R09.edf b7b4983a788dcd3e801873d215a4d9279ad10166da96e0f47fdf5844ad58c8b9 +S106/S106R06.edf d36e4c2a47438a8c4bd92caae50c73825e7927b4011f44bef010a43982a99022 +S106/S106R05.edf 85fb5d6ca6f88320f8245627bf418b098cdfeb00bcb2f909dc68c7d813aef14b +S106/S106R10.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S106/S106R05.edf.event e580516304935711e290073cc13e75cf23b577826c9814c6b0676828f9976222 +S106/S106R01.edf 605ac5d4ea9ca2170fa10ff85119297a77a7ccb7054b107294a903b911788c37 +S106/S106R08.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S106/S106R14.edf.event ac9692706f0387f2683b9a9fd24b4d8ed371ffd2c916c2e3049ee3f63a0c5c9d +S106/S106R04.edf 7d7599596aacb4635049041a26635e1e14b6b96437a3304701a2a3d7f7cc8c05 +S106/S106R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S106/S106R14.edf 27e090ed4774109e03a7b986ebf7f8f617eef959906987a8f98e4599e285b653 +S106/S106R02.edf fec905ea2aa23a83ee57ec8cfa2dc7babdb169a3ddc3d71623f350ff4a54b340 +S106/S106R13.edf.event 59a87fb5bdf07fa6133a8304b27cefbaef11d6edf13e912fe47b9c6752721e60 +S106/S106R03.edf 603e54496f1e7f21a673e23dc91e304320f7d49d31139369f1232892c4c2ea89 +S079/S079R06.edf.event 15eac883e797e576d72c57d60ca80b477563711c2f4f8dd16cadc5a529d40f03 +S079/S079R10.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S079/S079R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S079/S079R03.edf.event 220ce5d60602a369af8dd5bc9c722c30b74d0950101a84f6dcc0ab8dcb0c2025 +S079/S079R05.edf a66c18316867709634bcdbfea16f81c3612758d26f0bd252c088242e56281845 +S079/S079R01.edf 5dbffbbbeb71b073533cd280510875199ac7b00e5c6225f95c1b88dcf2fbed1a +S079/S079R11.edf.event 
8c5f1f6af23dab21ca4abe1e798a979ad9ab71892604c84a00379c39bd269aed +S079/S079R02.edf 1a43e3f707804eaeec05bda7513e811ada8701131b93650de93131e2e1befb88 +S079/S079R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S079/S079R04.edf.event b1fa69a82433c5887997fcbcc3cd7d906b4b49e77e25114de9d45087c9b126dd +S079/S079R08.edf.event a6cacf979bad39813ad2028620db2781eea6e16e029b8bf69686928d2958fede +S079/S079R14.edf.event 86e7714efe0f24263f2567d7a273935782e2b0153d9e1db329e96ab5cb995a8e +S079/S079R14.edf f3f3119d4f31164076e5d6668b931ddf1730961a7a9f7e198106ed0232adc217 +S079/S079R13.edf.event cfccf092791e4f541cc50e2a8c3317df4e3eaf04f099301c7f827e7dffd9c354 +S079/S079R06.edf 1d7a8f7c0bb7f17ddb08cfac1bd77bbe79acd1973cd920dd34fcfdb684cdd6a6 +S079/S079R05.edf.event 4b408c3796f7bf8dcd27259a0ae6508128a2f1069560a5cff7d3eba4508a6768 +S079/S079R03.edf 72bc07225a4c4cf79335f39e6dc62690bd4bce2789064e53d61af13029ae1416 +S079/S079R09.edf 9e224ee24274de2211dbc80327460af713fd708ca65242eae9119d860299b914 +S079/S079R12.edf 0350def02e230173edb8d69c7e66fb22155d00530fb7ce9e945921e0ab6abf95 +S079/S079R13.edf bf4a8cc1b524063cbcf3e4a7dffc1085be8c02a44e9f8fc5568b63a9d6d45c4e +S079/S079R07.edf.event 86f35381c7eb24cc6094567ca1b734e53a81bd8184a6364c79b7f00b9a5ece02 +S079/S079R04.edf 5538f0fbeb647ea7577e448856433edd3c3884516ef6bc5bf10d601cc62824ac +S079/S079R07.edf 74184070e26635730190095b4e81558916e897b97a0b09326facfbf3412ac6c9 +S079/S079R09.edf.event 4720e082121115ddaf3c9fdf14c4fdd2b2cd4b97a259d705f45872624ca24718 +S079/S079R11.edf 446fea2a6a8885b3230d3fea22cfa4aaaf2ea3f8ff43a413ddbb4e532b41aa70 +S079/S079R12.edf.event ee1750c58b0ffdf35fc0b7841091977647634e837de14a0b9f891ff04dd2f5d9 +S079/S079R10.edf 51ab713a7c33dd19f805f91cc03c62525d0d4c4b746cedaa3a26f2ea67f49471 +S079/S079R08.edf e65556abef6b586d84ebbb2e0a42caeb493f58af9c3d62ee4d41dbf43ceff973 +S023/S023R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S023/S023R03.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S023/S023R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S023/S023R10.edf 402074b10f0e8689441f60a53e8439a80c43ba456f0cfe245a84d1fc27c3fe1f +S023/S023R11.edf e5be72fc22fb3420b4a4936c87ec9c003fe377c6c5f7bd6fa39242496a0ba2d7 +S023/S023R07.edf 10d59b860cead78e224b2f6633a9cf54a5b735804157f5c8f820e7c560e05f42 +S023/S023R09.edf a787f36a42c2c1b1aaca4c71bfc26b1e0ce60fa90089763e86e4de36a6f476d2 +S023/S023R01.edf 3dc49252e5969854b3c0b9126058e3a9f28efcddd61a6f570702ee3043fec0fb +S023/S023R08.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S023/S023R05.edf 37405690b9dcb16205a9877bce43f81dc50efaa85422a29493e7a521a8e997d0 +S023/S023R04.edf 16ebfe43b2510115dbf1b15a00a429cfbd6b3a1443c2bed08e6755282e06f3c2 +S023/S023R13.edf.event f686071dd83e0610c6463f51dda1c01de1abacf998f19f9b7d3910c06492fc30 +S023/S023R04.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S023/S023R11.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S023/S023R03.edf a2f2a6ed190a2381db55e49223fda8526bd0a603bc339f862046604352cceefb +S023/S023R12.edf 366faf95b5e6142e8c6cd9b55da2005533b199983ef0599d73d1535179e7ca5b +S023/S023R12.edf.event 7c83be0506163b52a89548c7af6d4893472403c2bab3a455c7d8c92bf3550ad8 +S023/S023R13.edf b316cc43e36336e6b535f0c8c065db474d057de54b42a95471b2d5ee342860fb +S023/S023R07.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S023/S023R09.edf.event 
af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S023/S023R05.edf.event 33a34db8c83e4bcf4288f2413aa75b7d7c03a7bccfa172b157568ba29ee42d1a +S023/S023R14.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S023/S023R08.edf 70f7e8d4af5500e2114eea7db8c50d7c4b822fbd7c92c8907018e3aa094f4ffc +S023/S023R06.edf 2b903a9f5bd0ce741321e887274e5114d81a4138fd61a7baabacd83b630b4dd6 +S023/S023R06.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S023/S023R02.edf e75a898d302ec8aae307d84835922dae6c298607e9a09a95fbaa9d90452831ec +S023/S023R10.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S023/S023R14.edf 6d8ac157a1c9559c29aaefdead7748e0853a3eacc54e7b855b97e17653a4c53c +S052/S052R13.edf 1997e7d693fa8a6f2f1f344ba3b57e1ff1ef2aeb330b44afb3151b05ac5dcd69 +S052/S052R02.edf 2ca319811ed895cb132183e3fdcbae6e493bad2759c806833a234c0e7d65fb7e +S052/S052R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S052/S052R06.edf b8e9e0add922295c01a91cc2b88c540dbf8c3076f77d97ffe726fa952442db03 +S052/S052R05.edf 8b90b20f8191b826f42192575c888496f7eeaafdb8e58aa5d3e8e523d48fe677 +S052/S052R10.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S052/S052R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S052/S052R04.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S052/S052R03.edf a81f9ff3a2af1126e4a1c65491ad03805e04c6895f74bb7c4edcbb630f144eaa +S052/S052R12.edf bd07d4c56f8d31a0fef2a31926a98c11104d21e9b127f84b02743cacc9e6eb40 +S052/S052R10.edf 4d8eb059c2fe2823ffde762b986df2ac941d7ad761b8006fff1719fd863dac22 +S052/S052R12.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S052/S052R11.edf f9b1baab5e1efe90df754dbc91f79d6dfcc145947ea1518272caa713f9da2f25 +S052/S052R08.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S052/S052R03.edf.event 98da35a4768842863f8f05d63e959a36f12126a205e0370d8519299fc06e56ec +S052/S052R07.edf 04cf399950b747941a2f78f7de5fbe7bc38c57064fa1b70603ac3879c44a4ad6 +S052/S052R01.edf 8346bfe5bae53f3a82438211538a6482d4e1c885b5b70953b862a4d30fc0bc26 +S052/S052R07.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S052/S052R13.edf.event 78e5b5619faf1f99b53251a9e743644a731ec9cc5b6e60533a3695f33fca35fb +S052/S052R09.edf fe58ed0bef28cb86f78bfa82dcdc94020a0281b05749b2c7828b580dd8eaab39 +S052/S052R05.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S052/S052R04.edf a519ee3e292f1d8317b69f8bf39e3c6ace26dd5084baf5824f8954b42fc2c8c4 +S052/S052R06.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S052/S052R08.edf b9583eccd78acef6f7bbb65318f6b1d3fec1ca88ab6e470cd5f97722635379e8 +S052/S052R14.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S052/S052R11.edf.event 13968dcd0074afe70c79ea250f7148e28870c5b0140104f078a501fd3b51ed69 +S052/S052R09.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S052/S052R14.edf 55a5eec629950b7d895830a3643d3692d47e8509426b2660aaad8798b905a038 +S107/S107R11.edf 8af14cd3527fa03c48ac9093a61310dcc562af7366867b65f4fbd1d75d53d520 +S107/S107R03.edf.event 272aa3698bab2b05544eceafbf5b26dafc58d0eb50a508a47c26572576853ebd +S107/S107R05.edf.event 5d8061cd48a1d74cf68aa1abca145987d71fda1c779f4e067d7120a42e8532ce +S107/S107R11.edf.event a7ffbae85e6a87d36d8a1790de88fe9163b4487f124a5e2cbad1abd1527eb341 +S107/S107R12.edf 
1f61ae883dcb0973b8aae174b588eb352e3d55cd32006cd3a083bf019ece19a9 +S107/S107R14.edf 1a9669549bca2d45e36cd14ad90df230987fadb9eba18af366054390599025f9 +S107/S107R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S107/S107R07.edf 1e0c4ce1cc9b901c3478bffa5bfd97010755ab3c61a598e193fe44364de61dd0 +S107/S107R10.edf 447972429a3a7ee6e6bbabd66b2dc3182b2ee8afa60b8dc354dd9b90837cd269 +S107/S107R12.edf.event f94fa47bd56896d1b35682e2e14d8d1fdb308a058c4a48b7360c7276bb8cd922 +S107/S107R03.edf 90826bec34861d168306758314750e1ff80a09684b25f2b15f393840e8558b8c +S107/S107R10.edf.event 018a676bc733cbc27dffcbbf3767758e526d1ae08a5dbd0785d28e267132a3aa +S107/S107R09.edf.event e318e6bd044ead383499ec22af114e14b3a59def9653470287d411ce26ee7335 +S107/S107R13.edf.event 6f21f5809578073ed2c858eefa6348294beb3ca2936579fcd1c562953dfd18ba +S107/S107R04.edf.event c10d573a5a983f2f3315ff36b74aac2bb21199e85afc6827d4bd4acf2a3c46e9 +S107/S107R09.edf 42b7bf6ddeef1eceb30b2e166d401edd2638d0db97b3c3c7561415b83cfc1313 +S107/S107R14.edf.event 1442094c7e4c8cf80c2b917010d3a5835f895b48a64150371ef6f1f36d5eb9dc +S107/S107R13.edf 83524393a1922213c548d24a014cac7ac9db87425a33cc2bab8612409fae5c52 +S107/S107R08.edf.event 53726fe905bea0abe513d5e84fd629ed577ff03739fbd56c6a12198e7cca5cc0 +S107/S107R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S107/S107R04.edf 7a4eb02e295694a920cc1db2b57bce8024b131dbbd1960310f4c5ac78d614371 +S107/S107R06.edf f164b842f5743e05ae7c67e4a190527b6f53cd7c4d6b8e7e34ee8c4f494c137e +S107/S107R06.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S107/S107R01.edf 08f3946647a37c1a9dff23a2821c23d5fc1421eaed08bd5b1e14291bf9166363 +S107/S107R02.edf ee27e5b4641392d705cb7ebf76cc42355edd23aabff64aa34495e5cbe9436d79 +S107/S107R05.edf 11a0cdbc2881cdc6bb1c8df54f8468eb0ef77dda94df5c1dad3de09fedccca6d +S107/S107R08.edf 0561cb71fa613df859d632d315cd694a686a0d871ee6661d645bc7e9355a6f0b +S107/S107R07.edf.event 6f21f5809578073ed2c858eefa6348294beb3ca2936579fcd1c562953dfd18ba +S041/S041R08.edf.event e1ea33ab1840970a45f9494f5d70cd8cabbc689566eca11d6942253b2395c368 +S041/S041R09.edf 43b6031d29f744456ec1b586735168e6cf08633d1f34d7353528e61059752f97 +S041/S041R06.edf.event df69a0d4526ab4c42f8d35b328874aafaeda087bb95ee7310d4f3654498f5746 +S041/S041R11.edf 29dadfc9c0a7046ad6fca3ff3123a8de9825e6ea46407d0ea0397f0f1243371b +S041/S041R05.edf e3b18470da33e7377931d2f8932bae6b2abdf12bbc5a4715f59b9d331d9ad5af +S041/S041R05.edf.event f8969447e196cd3b85aca233f8197d3642b3a82ffb49458332044d86c5df05d7 +S041/S041R09.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S041/S041R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S041/S041R14.edf dd94f1e4d2b83ccf6b984922886bcbdb9a2923ae8ea0756699114c50526399e8 +S041/S041R12.edf.event f148c5d5feebd73d0187005d90c8f91681f338bbb40cb34a407743227184144d +S041/S041R10.edf.event f148c5d5feebd73d0187005d90c8f91681f338bbb40cb34a407743227184144d +S041/S041R08.edf daba914c30b37217f9ed61f4f79157f9e23372ed083260c47edcbda4359c97f1 +S041/S041R02.edf 3c67750762b3ccdc5d76f81e13bd10b996a7d7f6b7a3de4f16985a21484d08d0 +S041/S041R07.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S041/S041R01.edf 5fec76892825c885cb231c16424e497fa7b0bc5d255f15117c2f896da85a27bb +S041/S041R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S041/S041R10.edf 4c75369bddd2fc10cbf7777bbb1c7c289547ab6f10bf05ebe4a9bbd37d9fedc4 +S041/S041R04.edf.event 
4583d527bb9a02f3b0e943a0fc2ce2201582f8b83cdbc0f712b57fac2679359c +S041/S041R12.edf 99f13ffe9ca9d9a3d680360b91555e73f190d91948c80ab8415b901b42350bc0 +S041/S041R06.edf 171ccfca39f773142019df61be511b23536720e385ad4e38e92c4c9a24a8870e +S041/S041R11.edf.event 9fd975ed76b006ff20105ebf0fd1ae6dd127f008e06a75cad4484483eaad568d +S041/S041R14.edf.event 1e4f0638e7419908f6581021a41fbbb2b368f8e087db11c71f96241ae6ccb506 +S041/S041R13.edf ec6f193a78c29024ae8e67e69b942ee30171cb36b99d82ef96d776f2fb1b7e4a +S041/S041R13.edf.event 5eab54e7aecee45049c9cf3e99655056d386829d66b77ed83e678bfb70ef0bfe +S041/S041R03.edf.event 89a83f0e623d00fceb1de134620c85fbe897d37c7fbb2b8d4c97de688059a2b0 +S041/S041R07.edf 980f0e64820ab06e8c913511d92c799b2ae1abe1e48bacf9b9f29b12fd2c93b6 +S041/S041R03.edf 568218fd5956a8c3c8e7cdac30a1e8ea0bd5371fa7ebb96dc9b17a120ad566a8 +S041/S041R04.edf d814dd4d8d3d1e3853051e48cd3ac5e4761c3ed33a509d3c06ec946b9394c534 +S101/S101R10.edf e90eeb58bf74f072e1a7382befc414f5f4c6a88cfd0c46e67b59bb59c09e404e +S101/S101R09.edf 265ae1a34c6bf36478bf3646aa3bced1df8d073090656f2d3b15993ba88e7532 +S101/S101R03.edf 6bcc5fc3330b946a93fc50f2d4ffae97612a78c6f592a443b46b9cba7a06df6c +S101/S101R06.edf 1a4573d0f58b9431883c95b5f4d7695d352632f7d0d40698f4cc82081561e23f +S101/S101R13.edf 13779bb09ca42f247f6fc8dffe19f02a70afb3f5baa49f902e2706fdb9671c49 +S101/S101R14.edf.event f804d1f19243344c8d8b505a1f229c8626e7f81cf4397b94650b0e6959d27437 +S101/S101R03.edf.event 1d29aa2dd90032d41f7c1c6386db9b2b26b7c29b87234d56e63f65c958acaa3c +S101/S101R01.edf 111b902998ad14ea66860661023c2e3132c8da06961afe0de02f71ddc4ee383c +S101/S101R05.edf.event 6110dd47d25df5fec4b10a72fb28e306cc9addec318d4703b53d4b40b49f1930 +S101/S101R12.edf 89538acbaec07991b51b64bb1dd2b7e3927a4436f02028410de95d06e85f683c +S101/S101R07.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S101/S101R13.edf.event 273c4fa451c5781d94b0c8204068736b90665f96073084c33770108605bd302e +S101/S101R10.edf.event 0513fd04977ef5a66b77e72c59699e4e54ff57c226456d0796b1c58c38fb4d59 +S101/S101R08.edf.event a09f7e11a8c8dae371fa8ecbb2a00172679cc16c6776199906f532a7130b3e33 +S101/S101R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S101/S101R04.edf.event 41090322343b92e918aeb527b6dbeed57fa35198578161fe2d243fbc66dcf876 +S101/S101R14.edf ea43cff7ba5813903ce1695cbdac51c5ffecac98e09c974e3e63645dec45f9d1 +S101/S101R12.edf.event 3e0305869314baf38d1ecf15532e8069bab3bec83bfc99e199b9f5c49899ec56 +S101/S101R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S101/S101R06.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S101/S101R11.edf.event 007c6ac586d5de80642bcf571fae31808a22c5f6b8fff35fdd8f99f1b334f97b +S101/S101R02.edf 0b999b572a22c300088a4d0f7288a9adc7d09c554a8216b1231124a73468dfee +S101/S101R04.edf 6618bca949e60277dc7a6a23e9fcc5e1d582165550fa31b391a132b2b30b2ace +S101/S101R05.edf f09ad4dba652c6cca39cd9f6d2e030990a564413b30ae57ccfb68b770f111d4a +S101/S101R11.edf 0cac663bde79ffa8674e322b8d7590621568306cbd411618c4370843b3eadb6b +S101/S101R07.edf 4159deabe1ee6e896e8b3604c4260f227c93c43284c99d5c39db63fd3928cac0 +S101/S101R09.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S101/S101R08.edf 6e3be34d8b83eac9aca9438049e9de0a8026d6d861e1a0cde5a2df3b4c225331 +S067/S067R03.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S067/S067R02.edf 814eb9ee675c39178536aec7223f88869ce3dc0a02592fc422995bf8c9a8f192 +S067/S067R05.edf 
3fbb87ec6c2b0e60be7dab51c5f5dc4078a89d116b292740bc49904ea2c3bcdc +S067/S067R13.edf.event 69bc42ecdda8587b1f3dcaeb49f434eb51439670272d896577812c1a8c0b14aa +S067/S067R09.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S067/S067R12.edf 88607f444d64af64f30578bfd1019ff837a0139ad7985235a6401b4edcc1f388 +S067/S067R07.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S067/S067R10.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S067/S067R04.edf dbdadbd4a88eb8fda3fd90835ac8a0257101c3b58dd08fc272892d707a560fbf +S067/S067R08.edf.event a730605f9838282a7ec09530538d4f4dc6f2f5cf73c8f2e85944d5e8297c441e +S067/S067R11.edf.event 2fa0b9392dbafefb266a1c269022274e4f4638513c85370c47ce925b54b7d5d6 +S067/S067R14.edf 2b5dd5b0a424ebb04c1d89b0f97532cfda4c4af1bc1c4c1c441b9ba05e3f0d15 +S067/S067R01.edf 7a869d87fe690e94b63ae466a2f8a888d8baf3a028c1be35b6d7fc2b80cb5eb9 +S067/S067R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S067/S067R06.edf 88c56ace75081e57c1b66f781831e4c82403aecd472e2883ea2cd696acdde5f7 +S067/S067R08.edf 838faf306afb01abe27febd1a0992f9ee46bf522073aa94bd625eef578f98480 +S067/S067R11.edf 41d47be97d322c350d4869bcc5fe702162f15f5e2ff8640aa87836904208bba2 +S067/S067R07.edf 5a0c99dc1266dfb64219b872d2ce8aa6536e37f3cccadb0a330794e467120477 +S067/S067R09.edf 06fe2f2805cb210ee9120ef76d997788545a71b52d18c53e58fb2550603bff8a +S067/S067R14.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S067/S067R13.edf 267c23b4048f8214cad4b3903515bdc36f4aeec9f61e74c3be3d0f791df12b63 +S067/S067R06.edf.event 0c671e4e4ff7b21e4f75cb8796305c57d6ee3fc48e74337e26c3b9f5d49408ee +S067/S067R05.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S067/S067R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S067/S067R12.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S067/S067R10.edf 6cdfae4ec7f0ff4b034296a7bc15c92cd6eb6db0536bf21dbb83520b604cc9a9 +S067/S067R03.edf b4b57a88b18afa8e2d40a0d05c53f5868879664d1c6363d4ecc0b38c9af8815b +S067/S067R04.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S001/S001R09.edf.event e438579df1a078f089a8a64cd82e2e11cb94abbd724605c4705658e9cc2458fd +S001/S001R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S001/S001R12.edf.event 482ba145b4cf10d7d6dc91bf708cc14230ec2a86592562a3f6c187be5c9f3e9b +S001/S001R08.edf 358fb5189220725141968ae285fbe9e3f36210b834ffba71d940af308e3aca68 +S001/S001R05.edf 8828f4b97931100af136cebf27838248b82f69cf359f7e6c3146a1970a8fb8d9 +S001/S001R03.edf.event ba934fef5794a278b361c657b30c7a254a5c2d10754a37f484373d779d37bf66 +S001/S001R10.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S001/S001R08.edf.event ccd5cf82de626903ece23e339121ff7919b3c3788cf647b575cf08dbb0198ebd +S001/S001R06.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S001/S001R05.edf.event 53f4af70d71eafab6e0d5fabc2c8e8b73a48b4f21df8be00e2543cc640c12549 +S001/S001R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S001/S001R14.edf.event 7e61f9359aba654d84b2ebabe218ba21c6b3f8c33a13e5b6c9c71df1e91cd112 +S001/S001R13.edf.event 1513629f6e8e700394421b828869afdbd4e2ccdf8bdbed2127f75a42b2db3ff4 +S001/S001R01.edf 4743b736131a7e147c150e8b37711029b6cda5e356c4b3e8261a03cdcaaf8b0c +S001/S001R10.edf 20de1c7746c2349d16bda5e9f1b0ac7b7ad1581102a2e30dd2ac422696f62fb1 +S001/S001R11.edf 
d5296b9232b0ad88b7022155cbcde618df44d4b0db046ce3bec54f8f8644207a +S001/S001R03.edf 3427c8d01bff1380bc9ab9f27a35ece2af5dfadf3e291bbc05eb66e4dadbfe2e +S001/S001R07.edf 6320a941815eb7a0bc632e32c07c88b6e2281a0e2f177e8f49e2d0a16231145c +S001/S001R04.edf.event c81d77b099878d1d392e93aa7a18a46b936b690bad605aa84a652b2bd9cbff1d +S001/S001R11.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S001/S001R04.edf 3d161f88e1c00632585287d2ce584c2bc0f08862438eb255ea8723e00fac693d +S001/S001R06.edf 5369364f2c4e81ca141679d6dd2ba6ece61c7eb53d7fae31241b308876e1b6b3 +S001/S001R13.edf dde646236a13d846ca68ee71440f1fd38d818bc50a2f4804a29bcf0f773ad167 +S001/S001R12.edf 2b281c9b687b4c4176e83251d74743721f2d6ebd76656a972a3b9c44d9d88cd5 +S001/S001R14.edf 2110c48e3106898e3dbca47e39b330637afd3d3b8bc2da3ba1e44f4ac1118137 +S001/S001R02.edf 31a95e0a880e6c3d89960d9d62c144f24cc4e9f5d7e93c7f864ef61cd49e847e +S001/S001R07.edf.event 92b3e8a6b67a6846154b1244f9044558257134f17b25840c7f71206ad195584e +S001/S001R09.edf 1b642457807be572c31e8bb56a936cbdb554507d66e25e242c9bd1b0c557c53c +S016/S016R07.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S016/S016R14.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S016/S016R10.edf dd359a54e0811d4a08e82284cc251e1ce193f365079578f18d006cf7f1e3919c +S016/S016R14.edf 80dafefa87cba873be315c21c9bc805c9f3c57b50cb9d0efa9a5b8b71037b1c7 +S016/S016R03.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S016/S016R01.edf a0396eaa7765e822a9520e78321044f7296888442e6258f3e33c550228d84576 +S016/S016R08.edf c7d59291f3d037e25c672da1bf0c202f63b7913a0f72401cb73f27deddb5ac24 +S016/S016R09.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S016/S016R06.edf 8ff4a1d19384bffce8524194507bdb42946fa06da83bdb75518c9a23f9677876 +S016/S016R06.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S016/S016R10.edf.event 9a16113810c1d4f4c6d4bb0e9fa5ea774628a0b8f3e1764e93d71da831cb206e +S016/S016R13.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S016/S016R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S016/S016R04.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S016/S016R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S016/S016R08.edf.event 820bcb0b8aa75c06572fb3677af8b965e06ca92c0ff5f4eabd0d347c7b141680 +S016/S016R03.edf 976bcc77f002affd1833ecf5adbe73eca86d1d1b9e2ccaa0d8712095498cea4a +S016/S016R02.edf 129dccd14aab4f3c5761f1e301a9f75c9c0e408333fa4736771834dd0eab5d4a +S016/S016R05.edf 67d864417c5739713aa2defaf1dc9a88add841049adb829f61fd4c344c61f089 +S016/S016R05.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S016/S016R11.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S016/S016R04.edf 501fdb3cc2fb504d2948e7ce665c81e828612cf1af80a0b964b34a13803a4d6b +S016/S016R12.edf.event a7a73578d2f76bccb3e355d9d1c30ebbb1c7c5f4f6abd7b7289b88c138f56b17 +S016/S016R13.edf fd037b68ef03566fa39b6318b4b768cd0255f9e255b137cc3214a1d1175e952f +S016/S016R09.edf c1e5591ed4ea70ebe7fc300b52614affec8df35279aac42bce0ba79f4ccf6d1b +S016/S016R11.edf 067acc3474bca1ec4e4e59bf5c0e4d8402a9067fe4f1231d5e50808b64b8781f +S016/S016R07.edf 4fcbf7f4b5bae4290078934484ba3bfb2052b3cde700c9e2d0c1fb0654368f55 +S016/S016R12.edf f4e5942da16ca0992db7884a42d0b2a16bad648fc4bbd483f2954a93ca48a931 +S051/S051R05.edf 
64036f696368f79d9caf39094a99727e919d33e6e1573236b6fa7f8dfaf15b61 +S051/S051R08.edf.event f15f8724ee430d797697caab42986f25c432c24a06e9476b217deb6aed7450e2 +S051/S051R02.edf e4d456647129cdfb07b277b2c9a2c761d2e6fe92ff273bd6a15456aa5118b4c9 +S051/S051R11.edf 783e75586ce3cf5c00121b3b419215cb20dddd9c8394c22fc02a82becd411b1b +S051/S051R09.edf.event cf1d9fc8033ee0c3ff02b28ba22ccb2f38d4189988404e0aa8611090eca3aba6 +S051/S051R04.edf f1560c8e23d36805035cbd36496630af7fe2a16d7676b91df7ec8f2d039b2cd8 +S051/S051R10.edf 2f1d5ca3ba1b7a9b3d5193db62a36586b29905a2ea42633219befab290cf552a +S051/S051R03.edf a7e916278599c36b9a3a58060b44a3dd0b56c12b546a86918fdcd6711f4201e1 +S051/S051R14.edf.event ad5857b78d92955c6aca208cf731246cfd8288233693e3f4a523f0703f49f73f +S051/S051R12.edf a89b20cfa7435430bd00d59a884e1c9afbd5190636c63ad9d65de5917922d986 +S051/S051R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S051/S051R06.edf.event 478b8ccd4857cac2290deccf56d25d39325800222eebfce18860bdaad7d56c20 +S051/S051R07.edf cd6fbae8dee9aefa9fbbb8fa30cc10d532bd7dcfc021e466759a42e08416a71a +S051/S051R08.edf 548006fe57af6638963b64e36efff127e1581c9f27ac932c7d4f318fcd0008ee +S051/S051R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S051/S051R13.edf f68f9dba7c2141b4fba3d861c23ae7b5e46393a3293fb3f8087ba2efc4c1165f +S051/S051R10.edf.event 23eb3f298cf113a6057c8c3099fb82c94065a1da3246d3720329fb470a09b5d7 +S051/S051R05.edf.event 906c14054fb3c295684bcbdada9fce2c7ee5c3189024361cee275c892fdcd324 +S051/S051R09.edf 501c0bb8594da4fcd1bde7bc864fdf8fd91f8c5e86a269c847b8155e2b979697 +S051/S051R14.edf 1a97ab81c5f3e4ab30c8fc11f6509b81975aa726b214a65f7f17829e25b91b10 +S051/S051R03.edf.event 93831ffc4037fe5777b156102f8c0af39014f5cb1afa9207470c2bf4bb5af867 +S051/S051R06.edf 40860d44894450c46d5dc9fe2afaf7bac8415cc5345532b7d1c121cb6bdd4cd2 +S051/S051R13.edf.event 8e39b81d7164017e1d67672dbc17ff18d31922b3f6365e9e1961814c475b2210 +S051/S051R04.edf.event 910fb8c1ca895e78ccb614a06814156c6ae67b42893086f2694aaacc81a199cb +S051/S051R11.edf.event 80df586db524a77e67f11eb275b00d505b0ba9212ee984d6f721958f2b100b4b +S051/S051R01.edf 47fed7512d9d7d87c2f85deae9f5c8e80c9734b44352ade28655e8a27ba0ecca +S051/S051R07.edf.event 1dc2bc1afb05cb56cadc05fbdf2f7ee0aa7f1bfa84e10e335f6618bbb761763e +S051/S051R12.edf.event c0e03fc052196f399444ee8e817cb9226c53c8b1d27137831cbcd0d283821560 +S066/S066R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S066/S066R04.edf c92f6dbe450f79af5e76a61188c012ce7140899ef23a4adc5ea74ffcd1c36fec +S066/S066R07.edf b35cc97a9bdda9f56f4a85a19d13ca7f3abf1b1dbeb93139a1d26b788643d95f +S066/S066R13.edf f455678ce77fb04fb42c6f4dc40e0abcb0b009f2b2bc9dfe59250d08a0d8ca34 +S066/S066R14.edf 788e35ffccd4ad43f92c6a42465aba7b781109aec6e6a6fbc4851b96e68e929f +S066/S066R09.edf.event 257122cacc7c40238756b11f4e144c0736d7ac0f933a02a368163a2db8e83122 +S066/S066R09.edf 831a2876dfc28239666c53d7aa18f1add6beaa33032fd96f5854a37bd8d30ea7 +S066/S066R12.edf.event ea1eeabac130c6b0553b7253953303c231f74a93e902a501d7882ce6c58f2f2d +S066/S066R01.edf 6511edb550c0b24d34f0fdd8ce50bd61202130524099c1e087aa92e258bdacb9 +S066/S066R03.edf.event a4955229baef6ae4fbf6af78608901c020d3c47fee4b7280c8c617c752a1865e +S066/S066R02.edf 6ac66d3775bf65c163f944648a5a608248071ae401b45acfa7adca1fe98bd23e +S066/S066R08.edf 60bf15a0629829bc2e0c615639c0f288e9b1e963a086293ee95da8a4d70f3b24 +S066/S066R10.edf.event aa88e9dc85f46564702f7964b37b2058d00e5e0b93d498e32bee49aaf8e7c745 +S066/S066R04.edf.event 
8a76f461c0b920a909de21383cb5135f496cf1aa992529755f784ebf12db55c1 +S066/S066R13.edf.event 834d050efe55560305b0e19f49629260d26f65d715e477894ee2b099b0f5b1fd +S066/S066R06.edf 545722e46baeb21596eacd15e46e6236a925daf0283c23804950973900a4c22a +S066/S066R14.edf.event 8c03a670d4980d94f17b5f418cb3ae868421643e376bcfc41d654e67cab2a9f6 +S066/S066R11.edf b06a5d3fc3538be7edacf428a6c2080c011c3241996714172a456528daa2b9eb +S066/S066R07.edf.event 2e361d8b420d6db7c1d4372f2f615a9f9290ab6956917406863d12a1fdec8f5f +S066/S066R05.edf de03c1802e5f55b5de86e87dea506fe7c45c643fdfbcd6f5ffd95e7af8c8b463 +S066/S066R03.edf a760e737deaa26ac36f6c330bdcae1ed775eb6d2fc702a729eab4c0d0c7d1b93 +S066/S066R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S066/S066R10.edf 9705bb7c458b9220de25f6ecea44c046c25558c62bc975e7a3acf2b2354bf54c +S066/S066R06.edf.event 3cc4e538745fefccad07dbe59abc1ac855e1b6121abf89d9f5027ac9337e7c8f +S066/S066R08.edf.event b6e7436289258e26595ceb80330f103bbdd26d9f045dd5fe5e5bb46498a69180 +S066/S066R11.edf.event c8694215a3c53fb8dbf9397e9d5cb12ad4bf06f22ed39533939d10b8ebcacab2 +S066/S066R12.edf a3e8b2021df910995ad9792dfc2ec025ce1b858cba8946cab2bcab9899b51e2a +S066/S066R05.edf.event b1551573cd91101be666afa2abcfdb421f144c3fa966b0a62b6251bba942a18e +S044/S044R07.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S044/S044R09.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S044/S044R10.edf 0d7bf36388fd847667152db2d79177868cd25981779cd66edbccdbb5646d7c58 +S044/S044R06.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S044/S044R08.edf 5f85b54550e4f648712fbd56a6edda9df6d93f0169b057dcd79ffc0a20224988 +S044/S044R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S044/S044R05.edf 85675c31570d00c43ffaa607609f7e699add2f044f0051aac9ee0b1dd88f256c +S044/S044R09.edf 9dd68765db03a4e42839ff923d246b88e53ba314489c553c6c9c0740bed073b2 +S044/S044R10.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S044/S044R01.edf ca0199eeca73ce7b30843e2a06baacca5cfcdc4750cd01a1e237af6feec5dccf +S044/S044R13.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S044/S044R13.edf 64219a96357fe674f8926e8042f9fd2f8210dc8e653f038a1f26671cc2c8bd0b +S044/S044R14.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S044/S044R08.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S044/S044R05.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S044/S044R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S044/S044R04.edf 5da0315dd8b4e33769cbaf69b91e88845cd460f406fc0c72b793bc1f6b2229d2 +S044/S044R02.edf 52dd4f7c19b57e0f61aef7db45993dd60678657045b7541e390defb5602724f4 +S044/S044R04.edf.event 51f07832e9b1d3d8c667f73dde4aa38f9d3e45cf2a4c2baf8e47ea328c860420 +S044/S044R12.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S044/S044R03.edf df43d9106188f5a38d0f518a5809dc454d5c6d66dcc6b3bfa3edc42041edced8 +S044/S044R12.edf 84d09ee270206ab1f93595cb00d28b51af2cee24f4dc44c23a38464db833d463 +S044/S044R06.edf d16e19dd5d5ecb9b871c11ca624ad8471c3088a3500a65314ec04f3056551e44 +S044/S044R11.edf.event 39b46b55fa02f8503f1d8726fb4de22eb131c815725552e89309f9db71c825ea +S044/S044R14.edf 717ec3207574cd4152200d6f57d531bcb9c699b4f321d34754d055407bc0a542 +S044/S044R07.edf 5d0b03dbf8b1973dcd0a77ee284ecc4cf6fa13d7113ec6b3d0f7c1cf6090ca03 +S044/S044R11.edf 
9c104974be9015b57253abae61eb0c3e391314cfae8fbc94bae03c1ce0cac4b6 +S044/S044R03.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S092/S092R14.edf 99b147c5ea0bca55544ed7163d3747174a5a06a6707d057100d124062f1af173 +S092/S092R03.edf 7131a3198f80a3a770ee2396e8792f644fe9ee520edb632edcec921fc9a57c69 +S092/S092R06.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R04.edf 73ca4043e4c8d38722fc6482a0e231a8505ef6b760cb26bbbb8206f481e683c1 +S092/S092R07.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R11.edf d6952ffae0a847e93296a6a5b66c9ef66d5e685b08fceb730e84fb36793e806a +S092/S092R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S092/S092R02.edf aabf2489637c7c71e664cb697665bfdb134d5d4776b1ed95cce47bb7c02efada +S092/S092R09.edf 9ad876cd642aaa1e8607da4e838331b5254526c21940423742f224bcbcd820cf +S092/S092R06.edf 8d8c4a918a1e4081bde09019c0449531dec960908dadca7ad25ce9beb87d900a +S092/S092R10.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S092/S092R07.edf 0067ca60ff80f5f27bd234bdc23414e0b99b0adf5cc8a989aa2c34afe6de2204 +S092/S092R12.edf.event ccb7718e9ad0d8edd7de8d12553d98a89bcb191c436a6067b3e688a579d4abc9 +S092/S092R08.edf 38c8c9e7261c6882b4fc0db19c4ccf905e5249611c6fe9d3c34ed7da098c6481 +S092/S092R09.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R05.edf 777b5e451f9dda96881d311ef5f6c9b913a738aaee0c1ac613347dcc6936e3b4 +S092/S092R14.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S092/S092R03.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R01.edf ca68794592fe3ce3f0217fda4729b2ff16cc83a3bb447302236fb0d83052edfb +S092/S092R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S092/S092R12.edf 5e84bc4b49bd530383b8c28c9f0296d7847012e49b276e916278cea2d9b4ff9d +S092/S092R04.edf.event 82e7ace9f055649957ab04324ffce9101db68bcfcceac1df6786e304f9de8669 +S092/S092R10.edf fb3c55539a8144ffcb8318461c5f8c8967ae27e84a56e8aebcfd6fc0a21df933 +S092/S092R08.edf.event a27a31471bf1b8bc6ff9b144eb110e9eb0711b24514aaf47ed1cd94395c10b7e +S092/S092R11.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R05.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R13.edf be01e0d6d7fc971cb579f030389178b37159e6bacf90dfb41422d48bb5ad02c1 +S092/S092R13.edf.event e88110dc871719db682717f3a256188ebe916d76e5a1a09b0cea908778ae4424 +S035/S035R08.edf ef619d2dc6be970660c2a7ecbff9d084633c3fb023627604283b9606ea7fb668 +S035/S035R12.edf.event 96801f1d89a25da6122f3b69f7160e12f563f1b8380dce64f2d4b18d00121dc5 +S035/S035R12.edf 2358c7e90652d3c687fbcda65ae4ad4a0faca39dbd736c06ea1d8e20173671dd +S035/S035R14.edf 4192067f79a42c47fc73338bca078246f2325f88f5627893efb1a1b5550bcffb +S035/S035R06.edf.event a07306d470013adf2a41bea413a8bac37a03938f3358cb519e480a3b753330d9 +S035/S035R09.edf.event 4b408c3796f7bf8dcd27259a0ae6508128a2f1069560a5cff7d3eba4508a6768 +S035/S035R14.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S035/S035R07.edf d27bdac7d879d570fd9ba141bdb8cc1e9b94a1628b4a0f430e5e1964dbd90941 +S035/S035R03.edf 030e8222c98a22dac34b41a08b60208dd1b314ff3b805d0a7e102b455cd279fa +S035/S035R13.edf.event 14a453fc6552b2a6d7b55329ca3533853a89a83368101bee6b522e4a67d37283 +S035/S035R05.edf 09e053c055aaa3fd01b024aea74f20957498f72215850510184cb3484cfec3fc +S035/S035R10.edf 
dd4df070d8b01f68173410835b3d932f4f9a8779191c0518c0933c38bbadcc94 +S035/S035R04.edf a8f13980108a70cc3d410ab231588bf76928e51ef9234650be443ae9cb4bfab3 +S035/S035R06.edf 5324d829c96aca9fe7a0662090dff6b541693e00b782fb3bd07136896a3ddb9f +S035/S035R08.edf.event 944e95e6448926cfca766f748af6a71320282fbbea4cc8f227c8d8f4401f5bc4 +S035/S035R03.edf.event 969cbf2f95e5e05eee52395b936c0acb835c1d39af20b3327802965209d3513a +S035/S035R05.edf.event dbd0435f98476653a27f53b54a6757c6e4596f6d9318a14067e4dd50bb37a888 +S035/S035R04.edf.event dea4ebd6eb695d78f9821d3bfa3d4a325dfc5d7ff6f514c6a3f205dfee689e4a +S035/S035R11.edf 1959e6d5a8130d0f13be346a63880029c949526ff68285713d790107389f00c2 +S035/S035R02.edf 877f3cbaee32e10af7a724be0d709a3a9dc4121a4068b7e4ae418300830990c9 +S035/S035R13.edf cbb0c1b7d31129ac29c52e20ed1b01eb9322e8c72591da0bffc3ce51135cd9e9 +S035/S035R09.edf 5f930fdb0975acb9c936448d93c9e454cf3ee73ef3dae7eecc54d1a92662058c +S035/S035R07.edf.event 273c4fa451c5781d94b0c8204068736b90665f96073084c33770108605bd302e +S035/S035R01.edf c3f65f266df7240a795efc209424b75cd86a30823f63795b51a62deb095f4700 +S035/S035R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S035/S035R11.edf.event e9223514a9ad07601113bf25a3c8e05f3728fcbb5a15e7cce3b90c71df93b940 +S035/S035R10.edf.event 21ef6252cae53bae58d9207d35feda75f1e4a419273f78801e618eccfbbfee77 +S035/S035R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R12.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S046/S046R14.edf 1583e16bc35be30f73901d64a976852fdf89fbf11cefb7b71473875497a9110e +S046/S046R11.edf.event 271170f0da25b9bc7523c8eccfc0cb14ca8d86085b0706c07a42357eb742bdb5 +S046/S046R14.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S046/S046R11.edf fd4300ba279b330343ee16ed3e53e09b84123f9df78ddb15726c5dc77d747d42 +S046/S046R01.edf 49b18f6779e3fdb549c80551379d76ab5fa7f9faa429e171bf4cd38f121ae5d7 +S046/S046R06.edf.event 2a8cf80126be415d578e94fa0061ffee1677829d53c6df1ba1aaede8c845f31e +S046/S046R10.edf.event 7eb3d99d1a46e50d5ec14297be174789c9a514ecb48636ff9ba19d90bf2ed9c3 +S046/S046R08.edf 30e3df4eb6380358330a30b9d9906286a110c46a5eae2a991e83ced4a2abe3b4 +S046/S046R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R02.edf 73f41c42ba32cc7acb2d2312bd33308378310e3a4be74490589b95e91fb64d45 +S046/S046R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R12.edf 3238e8feb6b1faeb6130cc813b0c147eff986e9332cbffe7b0dd9c5a87975c1b +S046/S046R04.edf ed0b219b3b7f3a340b504a801044b8a6c9c33095a59a3dfab535c6a06abd0670 +S046/S046R07.edf 6e18290e6aa7e774e61d3d5f1903b3c59680b56a9afa3807d381d267fc8f8d9a +S046/S046R08.edf.event c8e92376627dec774f2017745ed32f94ddef2c19ff70ce24fa9679133dc0e7d7 +S046/S046R07.edf.event 13687e378e5618583bff478cd8ad8d7c8c39f230597cfb455142d8199d52bd8a +S046/S046R05.edf c7887aeddeedc3aa7a7439bfb778fd8f8028eca8f53375cc99c3b5aefe2f9bf3 +S046/S046R09.edf.event 58db360bc4c16d775b7e2c4797c2215d9f1405c25061a2237ec95b2ac264f964 +S046/S046R03.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S046/S046R09.edf 49c058fd06ce3f5fb70d637aed9953ca168d6cb2ca0ac698a8061101ba2c3054 +S046/S046R06.edf a7e2bb4b01b80d2a367d92755a106f908a09716da557f527876d6feef610482b +S046/S046R13.edf.event a6cacf979bad39813ad2028620db2781eea6e16e029b8bf69686928d2958fede +S046/S046R03.edf 23321c49c441693d32b7b85b5e77840dc94a90f2cd66c7df24d5d3e6fd01345b +S046/S046R13.edf 
d17bee540747f86c9a72c8e8ed054d2a85c419cca8c9daa77e6cbe0e312d8c3a +S046/S046R04.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S046/S046R05.edf.event f642bffc792f9e20fa7a76eaa7a0776154e7b8e8be7138c7683483a28a2ccc74 +S046/S046R10.edf 884de1d68843591d3e82f99a1b9c88e1bd9293ce0dede387c14a66446c6f74bb +S071/S071R11.edf.event c65a4f3a39476047a743af02cee3029310ece7de2dce5aba866f5d620c729555 +S071/S071R12.edf 54ab787be535020ba05284d323656d201776261b78e2d9f903a8b4b573dc6be2 +S071/S071R09.edf b5f606ac18c89c1a9ca45e2541d18dcb0fb0cb337bba89cb91dc36c45c5f45a4 +S071/S071R10.edf 363b6fcf3c4c82b8a7c7e4af8a4a68e4861004c3838f15c7e80b34cf2649ea10 +S071/S071R08.edf 0fe7edfcff87102410fde01bbb9a4770b20ac6359f3bd77dbbc3551cd2ce35cc +S071/S071R04.edf.event 4c61f07321b475d0f07c3a93b262cc94a5bcc7e8c59d05610786071ce45cc544 +S071/S071R08.edf.event f73e764c2a5687f81917e5c4eaa8b964ecb99e4c58ff33aa7e02e5fe1f655a98 +S071/S071R12.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S071/S071R09.edf.event 14bdd1b94d8b8ccdce55e581601a4a304c1444e030aee15596722a70be24c5c4 +S071/S071R05.edf 751711bc497af9317c44ba30c77fdfa1333a743541b8197f2d7a341536cbe941 +S071/S071R06.edf e3ccdfd753bde27a09107acfd381fe97d3efb253826d211373559051bae81199 +S071/S071R14.edf.event e9223514a9ad07601113bf25a3c8e05f3728fcbb5a15e7cce3b90c71df93b940 +S071/S071R05.edf.event 061a2814f7c0e21b94dd0e714f51ae50ebe28a4f57d1c4761b554a77f13694a3 +S071/S071R14.edf 6e682c11303810dbddbc47aea87f7e5f9c2d800e1cc28147daf7215488251c5f +S071/S071R03.edf.event 178339a2095d7eaf759407927ba3657ec8e96b153a8e085f8a593674f3697b42 +S071/S071R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S071/S071R02.edf d2ba66c8ec637b4fc6304b0562cbce0874ed0faea3464a72221840390717d7d6 +S071/S071R04.edf 746cf8cb98ec73c251059675f4c5b956c9e8db11bd0de7b7785950e53ebb4647 +S071/S071R03.edf 2a196695568ea8b1f977ddcc95e4e8211d55c087e04d539ec64f65537d438e58 +S071/S071R13.edf.event a3a021aa3014366fda2210569cdbdc257724ba6d0d11b1e6c924103e837c7294 +S071/S071R10.edf.event 837f0145b9da4dcc73e14962769b9c68f3eebad462eebe9d8796bc8b099af925 +S071/S071R01.edf 55ee9a28e1496f5b730ccbeaf859063b2dcc9f11039905e6fbe04fd7fbeb38d2 +S071/S071R06.edf.event 582fbf3e4f5bbd51cc3d858954988e7800ce943626d6f081f659cdd9b863fc0a +S071/S071R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S071/S071R13.edf e47c2ff64522b900eb7c950722b3cf31740747543a512b65b11a233282c8938b +S071/S071R07.edf.event e697ba3ef839244357e1c111d7dfa9afb60310a0bd8b7935ea5a426a1711194c +S071/S071R11.edf 6c798e4af0e540fcf6f026ed4f10498ebc2da3075b3d30a564822228d67af808 +S071/S071R07.edf 0e734c92705376f7e26989101d0d6bc4adff69d8eca4a8f98a17b50700cfef27 +S086/S086R08.edf.event d192ea57ff85ecd8427faf400415c2002cb41aa1189199e0c6ed62a7ecd048c1 +S086/S086R09.edf 907868e30923356a1d675361fecd1a60cd4db3e2fa52d308a5da2a10f2079331 +S086/S086R11.edf 54c35f32beb832e3549fd73264eeb3092601b2ba29b2b11c9c7be42a70795e31 +S086/S086R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S086/S086R08.edf 221dca79488392aa00b3462fcfa6a21b185e5814cf6534ca35b24780aa1da799 +S086/S086R05.edf 46d4fc4f3986d58f220d2c5db58475cdaf6c2960eab7c8e811d2206bf576130f +S086/S086R14.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S086/S086R04.edf 606f9e327b219cbd980f4cb5d83497052a07d927415d1769b039bf0ed7eda693 +S086/S086R10.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S086/S086R11.edf.event 
9b428c26e9e449e90bf57c3b9f5322d9c5bc5f4a65f709a1ccc3c22566292dbe +S086/S086R04.edf.event 272aa3698bab2b05544eceafbf5b26dafc58d0eb50a508a47c26572576853ebd +S086/S086R06.edf.event f4b63d6f50dd4c8695c739b04c7adbcdae610db216040f9c2b732bd361dc9121 +S086/S086R09.edf.event 7771fdd0442286733558fade28a9d92e116695d1dfda6da3cf3487c4467925a6 +S086/S086R03.edf b9d675c4eea24f641af5ca3bc5eb08edd0b8441100c6729a4b3652bf0997447c +S086/S086R12.edf.event 9b428c26e9e449e90bf57c3b9f5322d9c5bc5f4a65f709a1ccc3c22566292dbe +S086/S086R10.edf 2ebdf1e349101b21c3e9e9c5a24f8b6546bc7c30691c6f7cb599087554a5fc69 +S086/S086R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S086/S086R13.edf.event 54016a6260b12c8b99943951ccdc7d5425efbf9ab503ef1abeb93deafb262790 +S086/S086R07.edf 012d5e9c8d6e50fed9e25a748a638168b18fd18153a2ef69a4e1b8cf5836250b +S086/S086R02.edf 4a9237d42c10627cb84d023b5b7056000f3575ee82c10918e4cee3960f27e0e8 +S086/S086R03.edf.event 600983ef19711fe2016d742d1857fcdfa4f0ddc7c5b8c88773db0019f92315d1 +S086/S086R06.edf 370494eb367603a1bb5e16ae5a41c0c85f359c2c0d727bed2a2658ed3dd2b612 +S086/S086R14.edf 712b7758f35dbec80272ac5671e567eb1a61a898a8a05c77a47286955c8e6074 +S086/S086R01.edf 32f1b040a3f8bbc6f3393a48b81ed911095fb0cd78859a129e918f675b3c4167 +S086/S086R12.edf 966b7d92a7e7d3981d56dbf0e159fbfb3fbaf2d955e3573e384918b24431284d +S086/S086R13.edf 6fa20a1e6197c689ec1e4092bb26e75de63e5b2f755b434652eeb11a8b49bb11 +S086/S086R05.edf.event c2ef16690608bdb9ca84fdc930f584191923370f4568e2e42e5808557fef6d8e +S086/S086R07.edf.event fa68bd707dddb5ac8734d2427c1812d242ef23f1aa95b9b002fb67ce4bf47ea9 +S012/S012R08.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S012/S012R10.edf f70b4292ae24574f7f4835ae89e7d5fc6350c26d0d8da86b347ea0bf4956a17a +S012/S012R14.edf e1b65406fcdf9107d1cb5ec813b7dac9a074a212172d1f79fc403304125900ff +S012/S012R13.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S012/S012R12.edf 2b76c97a6cbc894a85a4f54385997b6aa07d3e9ec040ec1adf61a310b26f5caa +S012/S012R04.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S012/S012R05.edf.event bed28cb624951271916f88bc556ff204ccc63699ebc4523ed8043baa9724625a +S012/S012R09.edf 4255d6b2a406f8a8e9cded98032950cd00d97d8d40a558ea1eb34993b3a9c7c0 +S012/S012R02.edf b4f7e8b1b083835899b7b6cbb58c582fd8290311135b3e0148c5b6ffee4f6b1a +S012/S012R03.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S012/S012R11.edf fd7bda82802bb91b4e04848ce1a8a9a3e5bee822a811d0bedcf812ea4acd273e +S012/S012R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S012/S012R10.edf.event 151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S012/S012R07.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S012/S012R13.edf 892c9975b683852675a14d64c24730756c189133c01aa974eeeb41a7291e3d09 +S012/S012R08.edf 5d7d13ad211f615a21db64c3a4dbc8ff4ef10dc3f777b8e6441d8942e8d40336 +S012/S012R06.edf fe617707aa63e902238e4de12cd8ec22c55822d05d09f60bccab9d08ac53055a +S012/S012R09.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S012/S012R04.edf 974d6c7558c8d8ee48449bf9a1f40cda596febb6365e69e8e65c2d952644017e +S012/S012R14.edf.event 37b92549c1fe5d740cc394805738f2d228f8fb948bb4c3aa4817c4dd4b04b9e5 +S012/S012R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S012/S012R11.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S012/S012R12.edf.event 
151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S012/S012R07.edf 47e446122ad05c46d8c0e605c84641ab8a93ba2b8647f0eeceb86ee26ee9d9ab +S012/S012R05.edf e216fb3bfd30a0b9e8c94e1a99e2be1eacdca620872e8d152c129f30517bfd25 +S012/S012R01.edf ad02ce8943d45f07bcf6f69714a61005e570f3fd26f0e8e1bac38c7ad80fcc73 +S012/S012R03.edf d2af7bd93c37e741a77f8f0f611abe3b15baf1a3db65eced1bbd1011f3b193aa +S012/S012R06.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S080/S080R03.edf 021c4dbe3656eeb9e95ee4ad422fd0d0acaca6ef06ab3607e373ac1bde810a6f +S080/S080R11.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S080/S080R04.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S080/S080R14.edf.event 9a154a517e2cf402786cffa7d164d0656aa7a8bb30af51266fc6403fcd9d3d00 +S080/S080R11.edf 3f143f67b084e6b46bd62ceea2fb3de4b68445b319355239694dd037ae85264f +S080/S080R13.edf 85a42c5f160296eb8c67c85ccc341c70bdee43def24421b69f88aaa8200a317f +S080/S080R04.edf 39a8fcd0b3e2e7f7e33c54e867036d5bf43350e6525278d42c39cf6e3f642796 +S080/S080R08.edf b0f8d997cf95924f44e7a6fe44e525d22f66c4760587cd7589d4430d210c2d0b +S080/S080R07.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S080/S080R14.edf 3fe341ba3fc26f7646e874235bd651eee3b490546c07a4d06421694d357c6a91 +S080/S080R13.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S080/S080R12.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S080/S080R02.edf d4bca47e46e37452771dd39b66a98cffaff0bf9235a8c03efbe3c8edac3d3978 +S080/S080R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S080/S080R09.edf 1b3751f1ae69e24ceaaa4eba361b97df0cefef1d2896c36817502585196d37b6 +S080/S080R07.edf 42d1be730e0f57bd2f6974856ada684ded850b1d11e7a58754209892f2e1dc5f +S080/S080R01.edf e02ada53d55ff4abf99928b1a5da1773023170f345d2c223a3a73b0362683d62 +S080/S080R03.edf.event 39c9864c57efec906759ab97dba0ab26a900fa25ad8fc3c48b0d97ea83c3a893 +S080/S080R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S080/S080R12.edf a0b99813248b36ff650076e0fe38f205e84a6f26e18bce0a43c5510a4cd933f7 +S080/S080R05.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S080/S080R09.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S080/S080R08.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S080/S080R10.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S080/S080R06.edf aa562fdab20e7bb423562f3e5d61c0b821e3912bb29353fbd1032df7d78ac34e +S080/S080R05.edf d97fb389b6a1c7e6d1a6c5e6f41e91d791f963bc798e793b0c494dc452bb809b +S080/S080R10.edf 8171d40610e3f67b030e5e8f99b39e2751e559398acce78a339962bb43fc1801 +S080/S080R06.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S054/S054R03.edf 342edbf15c5e37d6b788767bff954a2c0c8b4fcde334d733ba67cbe2dfdf3077 +S054/S054R11.edf 0801cbd50fb6bf1a9afee61b5b424a1f0025025f6fd3d8e580c2b5a76bb641f1 +S054/S054R04.edf c2caf79d559efa0dbade6c67290c2840a75d9b9266b242286aab64ea048facfc +S054/S054R05.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S054/S054R14.edf.event 8121b5c61470f12aea6d6cad9727a5b33c741c9f15d24ee389ddc52970df76af +S054/S054R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S054/S054R08.edf 8fccdcfa00d100148b350b772557a39b8252ceec048da5e0da93e1273cdc8c88 +S054/S054R10.edf 
17828d840d9f134ad5129aa66f8842e267a5c5945126dde28fe4c621d0b61f87 +S054/S054R11.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S054/S054R06.edf b2ec60edb8b8c1a6cea288d328a13b5ebedc3a6d3e9df4ca97f9b2d9de0f5fab +S054/S054R14.edf 6c3ea5618b7e8ac1289f042ff45f55cf017f2e76e0512e7fbda0d2c33c3c5e3f +S054/S054R04.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S054/S054R12.edf 2e1807664cfff1512a8a9ef5ff105e3a19a6c7ebf30dcc6a321244a46f943cf8 +S054/S054R06.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S054/S054R12.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S054/S054R13.edf b7f29036b47a2daa493f819cb3419a348d8281d39e116a06e89342da6c54e4c3 +S054/S054R10.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S054/S054R09.edf ef124916d0134c53ed3dff058aa6ffbfa1275932751b98b977c55c4fc174a2b3 +S054/S054R09.edf.event 8cd8c690df55a4167dd5e136be1c424a77655375599a9d3350d5460cabab539d +S054/S054R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S054/S054R03.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S054/S054R02.edf 55442382a62972894ca7ffcb353b8aa4048043d805da35fff567ad37b47a85e8 +S054/S054R08.edf.event 7c83be0506163b52a89548c7af6d4893472403c2bab3a455c7d8c92bf3550ad8 +S054/S054R13.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S054/S054R07.edf 38ab1a1b03ec75552208467a02ad50c8b29be99e587b31b1f0b21281683861c1 +S054/S054R07.edf.event 68922fc617d2a7f851f538c226fc5491b6f57526417f33c57ada180e97746c59 +S054/S054R05.edf 499cfc020d673829f77de992d6014a5946968fe26a39adbd9efcae257ac580c1 +S054/S054R01.edf 329625cd03103c4162f6de3c409f3664c68481979263f56e50b8dae272e27f7f +S033/S033R13.edf.event 815500806b68247c0c383804bb4774dc90e7f96d61e9e2b246fc5d33fcb5aafa +S033/S033R03.edf 7b062b51f8af082de4aea5b514db3ec97db2dad908166f4781566218458443c4 +S033/S033R08.edf 0ee2a5a00463bbc650852de790943bc5af90547bc325bbc65be19e9b16eaf0a1 +S033/S033R04.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S033/S033R14.edf eb372e2cb331dbc63087439c5dd18da0610f4309e68c2297949c897c6a0d6a98 +S033/S033R03.edf.event c843292c2d927d69501ccd581dd6688cd971a162df8d02a7eb20e6a3c1aa2d83 +S033/S033R10.edf 3e4a4eea41ce75b89fd2027c06115b30e6ff36ba14906dc6915441faecd4869f +S033/S033R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S033/S033R13.edf 64ef4744b1b276a462b60627dfef69b157b3fe908634eadc17ca157b083a987a +S033/S033R11.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S033/S033R04.edf d4520b8cafbbf03ce8d2159bb727f0f6032c1e9533bbc275f1fa4ddcb9cf135d +S033/S033R06.edf 772144edd7d06ab31cc649a6c460e43125e9eb13801064276529899f4b51cf01 +S033/S033R07.edf 6aeeed7165812bdf3bc7380539f53f0b0712a74392a8f37f38c4747c96260db1 +S033/S033R12.edf a2968d5f651e72700096a652c3dd2fd34f51e9c381bef25c329dad3a3e2b319f +S033/S033R09.edf e0f43954e8632d987be2a52f116febffc93f9d1dc62311d4f3831249427cec9c +S033/S033R10.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S033/S033R06.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S033/S033R05.edf.event 83f18e3a845e17346dc10fd176b61c6f306078227a59bb51610834234d1454aa +S033/S033R08.edf.event 440457b385c6d53d2340acbe5512027de7800fb1514858b055b126c10e58b404 +S033/S033R01.edf 5801c97826b681eddd76641707c479385742b0fa35e5fae0935a81fd139fc646 +S033/S033R01.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S033/S033R12.edf.event 57e9107b34629563ac9d22f509b6f40e40ebedb8afaca03c2199613287fc06ad +S033/S033R07.edf.event 8d9c5bb3c83f5f447b4f8159b1454d55332838299c0e4a8e2dc62f413b08cea6 +S033/S033R11.edf e70abe84015480fac548205192edf6e472ba392086504118425bf80777b80984 +S033/S033R05.edf 962ba5f5aa3fbd2440ba34ef22a9d3852ae01f9f7cd93475cdb13e00782ebbd8 +S033/S033R02.edf 9a4872573c4ba67267f49d8c2d286776792fdbfe76dbd10c696950cdb34f6e90 +S033/S033R09.edf.event cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S033/S033R14.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S062/S062R07.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S062/S062R12.edf f95f21cde281182535aac5a41be11bcd12c1939a485535bf551e6da3f3ffe6f3 +S062/S062R03.edf.event 350042ca5c37cad9cabe8c469aead2bd074d996b13c69cafba8d24cd8fd825fb +S062/S062R14.edf.event f500286a76884018ad149ef34cc8871332593723b30ad3d79f8f39236a5cc25f +S062/S062R09.edf a614ede1529dc1cc60dd7ac3af1a4ae2214050ddc6c378eb5e2f8a59edb8ae68 +S062/S062R06.edf bb609b4819f7e10634350762ccb52d9b04df6746baa95b0c73b9d4f36c857a15 +S062/S062R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S062/S062R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S062/S062R05.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S062/S062R11.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S062/S062R06.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S062/S062R12.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S062/S062R01.edf 3a2d4ed688479495bf9312be25dcd3ffbf393ba786f185700b611e9b95dbd4ef +S062/S062R03.edf 94b594c87e91c1e1a932bbda2022760641d3cab621132f8cbcdada1e919f5cec +S062/S062R14.edf 40d5eeb86b99c22b7fefc5a08d638d9be9bb5b8240b661451a71574b2c6d21ef +S062/S062R07.edf ba192adc3503f736ef9df7d07796f5b15b0413c3a5e05d5e7d3523970474ee11 +S062/S062R02.edf 5ce5b78bebf8a5e7cb5ed88064f0ef25f3c7cea58ffbf791453d5aa2fbdad831 +S062/S062R13.edf feb14f0a8de34b68017e66be8b19f702d3872e7102f63f6dd8fb17b1e4de4a6e +S062/S062R05.edf 2257a462ca24563b2f157d4f6618f5313933ddf7fc66986ffeac5df2de258794 +S062/S062R08.edf.event 8fb228960cd397f6100e75cca49e1045159bf62e6184dd5267ea608be1d540b6 +S062/S062R13.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S062/S062R10.edf.event 815500806b68247c0c383804bb4774dc90e7f96d61e9e2b246fc5d33fcb5aafa +S062/S062R09.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S062/S062R04.edf a054c47a0c59c1a375061a59144160ada314be8ad42ad7bb3971c490572c0324 +S062/S062R04.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S062/S062R08.edf 0653afdbaee66604e8f9f9161e5cfbb24fda28d9dfbd48b8a343a73a544c5e91 +S062/S062R10.edf 1d1f19e928275c373dd8611a5fba6af15fca64768101ff35b443e47cc7df8b6f +S062/S062R11.edf 1b48ec4778eb975463f126ca0d2efd093978b900f6ea0275ee32ff953d6a4339 +S021/S021R02.edf cbb27fbbb61a98a9e7edd1f0aefdc5a94d964eea0ca16fe3b752aba6b38ec399 +S021/S021R05.edf.event 99a46eb7d9a4fa08a856556abfd134cec7b55f86ee94c1eedc9b0b5214911db9 +S021/S021R08.edf.event 8bbb904a2259011a3e81d620c25b30347e4e7839d6d80c3b648c53545b89f24d +S021/S021R12.edf.event f8a465b9ebddc2704252299afc352d87e33c523fa8f80ca82a96fe0b268727cd +S021/S021R03.edf.event 0a0620d55b56f9b27d4ae912d41ca29efc65875b00d662d149ff508493487b74 +S021/S021R10.edf.event 
24945e04ff21b52347d9ac969d09c4af21811576baba1bfaf35d81960e94008f +S021/S021R01.edf 983781fb39c3a747051a04f23484164de485b97ba43a7fd63fd8fe3d7f6f6f3c +S021/S021R03.edf 3404a07e58a4054c33bb554133d7988bc19fa5f2dff5ed74e1669862c44869a4 +S021/S021R04.edf.event 4df88ea43362416385ab85f64107559b8c8cae7e4f9feb2b48eb9ae5bbeea3b2 +S021/S021R10.edf 6e5ea8c53bba188385f86ae619dfe3282679deba4e651ca46ee1c91c06b2c9b1 +S021/S021R07.edf 26e5c7f56d59c4e0188f7479c8af38ee973d789f26d38b80a8e421d62b542a4d +S021/S021R13.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S021/S021R06.edf 6b27c8a6782a213bc37bd0ef13c156699a8087107b6bb8893f24c526cba684c2 +S021/S021R09.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S021/S021R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S021/S021R04.edf 9481518d6a0b086d782ba17e78fe12575500ce3c7f25071f29adc5eae093c863 +S021/S021R05.edf a03d4e4f066acaac766cb935d3a40eb11886254d82e3a38a6b06714f81df9c98 +S021/S021R11.edf.event f804d1f19243344c8d8b505a1f229c8626e7f81cf4397b94650b0e6959d27437 +S021/S021R14.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S021/S021R14.edf 76c2f56b68c08ee48dd151faf5d10d432421ecb33a938ca9b440adeed546015d +S021/S021R07.edf.event 574e54bb07934f5ad1177a8834c912e24b6537ad2eb83ff2f84b43af7024211c +S021/S021R13.edf fde2f1b885036d11a81f403bfef81cda7aad4ffd9b81cf9ff9d725cd988a38ee +S021/S021R11.edf 7b4fc08ae9e199aef5e077ae14a912c790a6d2a0fbf8565aa2c80690800aa911 +S021/S021R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S021/S021R09.edf f5b4c7deeb1c581e455c6d5d91e2d5a403233f373c0ea36f34ce3087f92cb3a3 +S021/S021R08.edf 8abd01e9b2a9c563469beac4d632727d9b341438b93aea76258ea180de2ac6e5 +S021/S021R06.edf.event 60e820216cd891875b6597a2a69eefb270be91108dd94e6c6d72fc69bff372f6 +S021/S021R12.edf 326023193bc901b74d1fcd5e7a88f064643a41842fef121fa031d9f628cbf3ea +S022/S022R04.edf.event 1513629f6e8e700394421b828869afdbd4e2ccdf8bdbed2127f75a42b2db3ff4 +S022/S022R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S022/S022R03.edf 2c7f904749e97f1acf45a98079c515ff40f15234d0463917d5f99dfd4763797e +S022/S022R03.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S022/S022R01.edf 496fc1ca74a265ea0f888b9334ac8b16168ab201dfec3eb7d932e68a5348edc5 +S022/S022R14.edf.event ac2e8337cc4fe1a692a1f25efffa62d821b6b3bb37188c793591c0d07fbac0c0 +S022/S022R06.edf 0133a61ecfdfbd3dbb8c0d678076d9f67836b6fa90c34e35369cce7d4854914a +S022/S022R05.edf e76a89c457b5deaef2fcf393d755af60c4b91fdf804ab5c97a0e461d5421f17d +S022/S022R13.edf.event e8da7819eee7d2c77d74fa9e6aec49532a6b299f30b13b3938ad7ce0357cfb02 +S022/S022R07.edf b2b28d8b0db957a13721997f5a4ccb6ebcc7bf770c7acf0fa710a08f13f1421f +S022/S022R13.edf f032660b52f6da0047f057da1a95bb03412014abc61ff1ebdbaa526e668e63ec +S022/S022R10.edf 4162c067a5d3b1bacd9614234baa0af9a75c269926de688c50227c39567ea49e +S022/S022R06.edf.event 58db360bc4c16d775b7e2c4797c2215d9f1405c25061a2237ec95b2ac264f964 +S022/S022R12.edf 46f0835897374fdc2dbc634bc4c16a61cd03a4ca25506cf923b922629b07674d +S022/S022R08.edf.event 007c6ac586d5de80642bcf571fae31808a22c5f6b8fff35fdd8f99f1b334f97b +S022/S022R08.edf 8f890909113d536152b987caff0d70c73468b0332dca478783e665598697367a +S022/S022R09.edf 735e4a3caffbbc78077baf7f0bf0d23ec707c3926e3857995b90acdc04eb8593 +S022/S022R09.edf.event aa719b9aea445a02c8c5c6a6de32bcb12238842c8d4ae16493f623bf0d226c1d +S022/S022R02.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S022/S022R05.edf.event 71b0870f3a5490c969240bb5653ec1eaf656e40942ab93a71deb32dbff919601 +S022/S022R07.edf.event 22a6b841d94ebe84eeaabd93fb3e0f00da65ddf3bc8de6d5a79394e3a1394567 +S022/S022R02.edf 1e302df13c28a0cf311c54b205a42155198f2c580e2d3d4e00f6ac66dd5436e1 +S022/S022R12.edf.event 93367a70cc359570029f435dd94fb546895eb7fa6c629c6678544071cd4bdc2c +S022/S022R11.edf 514f1cc57f2a93c418ed81fd3ecbb3a1cc4e4c9a3360587dcd2badb019b443b0 +S022/S022R10.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S022/S022R11.edf.event f94fa47bd56896d1b35682e2e14d8d1fdb308a058c4a48b7360c7276bb8cd922 +S022/S022R14.edf 115ee66ec70617c149079edb9a5e823b1b218a885f5a80b56f754ad745a632b0 +S022/S022R04.edf d2f48578f30b4edea2e138e74854a09e36887f8833a3e0f48cb2346ead82b201 +S011/S011R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S011/S011R04.edf 072b75496dd935be460bc8ac9c3bf2d1fdcdb62f32ffb552955319abcb24cfe9 +S011/S011R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S011/S011R07.edf ee5542027039bf28eb7fbb95826235f618e32c74b28857ad51df388256e8fda2 +S011/S011R08.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S011/S011R04.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S011/S011R03.edf 369bc902a59bf9a9d480c2bad2a4b165b83716c7cba5760c2ce0e6500f120443 +S011/S011R10.edf.event fd77510c7ae392a42ed020605a40066a43ea3dcf80082d17cfd95141d71549ae +S011/S011R10.edf f3941502e7692576978f24c14c837b07e760fb94cdc65f2358790375d5537d87 +S011/S011R02.edf 3d64573c6da6183ec5b8d230e38609d4d7f365a2aecbcc8a310c94d13c986f7e +S011/S011R12.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S011/S011R09.edf 8e0a6cb4cfcf916fb73ca5d93839ef79131a6d76874b8307081eb03b6947c23a +S011/S011R12.edf 9034d085bc01c1c738d80a48fdc43059f88c13d587b5de1c59d4a85ecd194ccb +S011/S011R14.edf ba4007cd1ab12e9f87fb8a0d158b55e9b236cf29190db21709482290a05adbb9 +S011/S011R14.edf.event 0db4656c1041f6626ac6fd54117fb1e02890492bb86525e197e9ed116a0fe6c7 +S011/S011R05.edf e02118b61831ef02eb7268afa90ebceb136543fee310dbea1279c21bf842c285 +S011/S011R06.edf 05e960ac5207e35af7dc1fe492b78c7f473340aa88a6639a8e9d711d27c80270 +S011/S011R03.edf.event e1c00064d3aa1fb0636aaf0dedd65aa66d02a8afcf3dd70b9a5fa4df4d4ebd47 +S011/S011R11.edf 4a6c375686a70b4d0d09c789f309d8e41b76bc660d4a20f2f136fedaac2ff1a4 +S011/S011R13.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S011/S011R01.edf 374dfa06c6df18e3bd324f4c8aefa462bacb6bf9dcbfe8d58ae6e84ed2bbc443 +S011/S011R13.edf a1a02f3cddd77bdf2eef2778bb2fe7dd76030d378d1b84850593ab3dfcb8ea3f +S011/S011R08.edf 70b05e696e9faff6566cabda58b9144e1cd8ca3b16e04178309b22ef7bb612b7 +S011/S011R11.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S011/S011R06.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S011/S011R09.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S011/S011R07.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S011/S011R05.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S082/S082R07.edf 2e76d2510e31a569d934e5e0c79d80ad5d5b717210edfef603da9ed469a0ebe0 +S082/S082R04.edf b3e4c74b4cbbf1aaec1af0c3613fc9d2a27d6d96ac62c7907551a87fa9bf0bbd +S082/S082R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S082/S082R10.edf.event 
732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S082/S082R06.edf 1fc2e82fe3d7cac5dc6d1b36f7e427f82dd881d8795e7c1ffaaad4309df741f5 +S082/S082R02.edf 5a60133110089cd88b358a949f2e1d584fa14f90ff4c4016554f4e3a827aed5c +S082/S082R13.edf.event c117ba4c66b5467903fddc4ed77a580e09639381683c611dd1f02cb0d311a4b5 +S082/S082R05.edf 9e318d00b9ab3f34936e0f1a63e36996213b48e72b6cef4e22f3835c58411853 +S082/S082R12.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S082/S082R12.edf a389f918554ba206ee4330deaafe1e21750fb44317401628841ee6e2e9b83a8a +S082/S082R03.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S082/S082R04.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S082/S082R05.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S082/S082R11.edf 7b7a5f1608517ddeed1dc7e4e349c346f90ef9d1aaabddce3dde87b65efe2120 +S082/S082R01.edf c36d3ce061cef168595ebbf3995891c60fcefa82dc1534f6dbc4fc6312d09ebe +S082/S082R11.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S082/S082R06.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S082/S082R03.edf 1df043bd8c67a867ffafc493d40cc183b9c15f46b4a45609b27b386a6b056280 +S082/S082R13.edf 4d97f79b10606441620868d7fab83054e1a3961fc2e056f6082f75f636be17a5 +S082/S082R10.edf c7fc55b18d9be4cdc9faa3b16cfb2814139a7d3c8264e9cdb2b6a5ded49298d2 +S082/S082R14.edf 09c317d8cc56eb914443ad400d1167b60c10ab73766894ddf2934223e30e4aee +S082/S082R07.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S082/S082R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S082/S082R09.edf ba6d90669cf26f7346353afa0fd1a496ae333c8f72a5b4d7b35d0da5607b8cb9 +S082/S082R08.edf.event 00eb5e22d01c3eaeb1626660a99e0c6fbf2f4423eed750224cc549752b7b6761 +S082/S082R08.edf 47ba5b30c2427a7dd4117f26ac5491a0bdf9a2e5a619bfc418ee59b3941042c9 +S082/S082R14.edf.event 6e13e4f708f4b769dde500a1db747206aa87951c3914cdd436fb7f07e9aa5052 +S082/S082R09.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S087/S087R11.edf.event 029131148bb6c782573739be6ec79dc67ade89f0b101169912d9c8201871bcd0 +S087/S087R04.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S087/S087R10.edf 83d8cc5669b446e85209bc1d3cbcfdd915dbbb910634d096122ef2581b2e3d6f +S087/S087R14.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S087/S087R11.edf ef3af1f11e1c90c305bb5caaa47d3ca98076a3b47378c1a81b6a85b3f2eec5ef +S087/S087R02.edf 869950862ef4df60bc0a4dde3316a8db3780262e29e65a1fd4d98ab20c9859f4 +S087/S087R13.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S087/S087R01.edf 5876a24fa07f5580ee8df69f00a069aeeeea2a22e08f3c6ba3678a3978f4a73a +S087/S087R05.edf 9acd4c1585c623ccf52ca1d8c5793168a6d736c528af283fb68116c415cecb48 +S087/S087R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S087/S087R07.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S087/S087R08.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S087/S087R09.edf 6797f700e9a9b26efa58d863349724bf7cb330f62d3653e5f87a6e8a2a9d22b8 +S087/S087R03.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S087/S087R06.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S087/S087R05.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S087/S087R14.edf 
203089c991c61a6701bfbb1e413bbd0bbb5e0a103179852422eab0640e232a14 +S087/S087R12.edf 1fc744ced6882cc083447d4228f2868ad8cd3708506491adaf6b949e1e1aefc8 +S087/S087R12.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S087/S087R04.edf 4f1599825e6aae94add2c0ebcf8bcb23a65bed258cfb89a73a06a6861fc586c3 +S087/S087R13.edf e07def80de0aad137aeb40c1ec6cc9fe1affb9e53fba10ea09a721ddbdec630f +S087/S087R06.edf bce28b9526a4dfbc28502e3b2d2586a133900fa946ae1b2d7e19752a23d08ebe +S087/S087R08.edf 58ec622ce0deaebeb43334c3f255e81b01e612751cb0b13d2471b2ce98fb67bb +S087/S087R07.edf 70be34572544b1dbb96739d53b694fd0d983a91810f9e05addf1eced8b013b3d +S087/S087R10.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S087/S087R03.edf dc60f233daf4d2a62453582210f0186514e02e4d3967af1ef8881361cc55da5b +S087/S087R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S087/S087R09.edf.event 8553d29b6f3410b4ee7ebd3f65050ee16a492ed893d0bc9a14cf35cfa904d586 +S014/S014R06.edf.event efd977506cc195b985e542f3333bc334c4793652f9b52580c96ef8a5948f4db9 +S014/S014R07.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S014/S014R02.edf 6294928380c0ad63219a2713b9f5488d570be1c0f1da72662668c1b4f5bc906d +S014/S014R13.edf.event e8abea1fb06a5ad73e91ea5c97aa84247e1c165ee2bc724e9b9c7ae6f6c017cc +S014/S014R13.edf 28ef92f5bad0bcfabde422238a74fc13e58b9e31882cd9c4ea421f806a923af9 +S014/S014R10.edf c0fdc474a78421cc6048a9c8dfc8cafa8424dd4379135b60d68a77c4ee0b2b6a +S014/S014R10.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S014/S014R09.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S014/S014R01.edf 9ff971d9225e764428182bf4833ce5a60e620b7b157df9798ea6266070442433 +S014/S014R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S014/S014R03.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S014/S014R09.edf 3214586a275de7cfaaa94b5b6ff39c4f8fdaf04ecb3adfddde30ae5327d5e8b8 +S014/S014R05.edf b675b2ff333e7661944d6f37edf44d0c104c0fe9d6c04661c96885fbc0c2edfa +S014/S014R11.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S014/S014R04.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S014/S014R06.edf 2effc21654d198aa9b98432717d66111358ee5795b929bb4c433d5d9ab066e97 +S014/S014R14.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S014/S014R12.edf e6ef184a364734f2cc0fce314c93ffa545660a89567475f56ddd979a2d0fbc39 +S014/S014R07.edf 4fc8a7f723e8577d5f0a851ee416ccdd7511aed74ae3da6bcc4bd19a2dda0d1c +S014/S014R11.edf d1d6a99dc4439690c820469616b6bc71d80be65340615246741c0d67efde59c4 +S014/S014R04.edf 7e443b4a4bb000506137ffd79eae9fd915a282f4c6c5aac71ad8b8612072227d +S014/S014R03.edf 670c197481bd1761f1393c3f33e7c286f5966f483493cb6d2e05ff23de93c6a9 +S014/S014R08.edf a63bc896da7b0fa302c3c3b0ad53a4c54f1a35fd1f66bebb2cedc81cbabc9320 +S014/S014R08.edf.event a730605f9838282a7ec09530538d4f4dc6f2f5cf73c8f2e85944d5e8297c441e +S014/S014R14.edf dc0139288de0668975cfde05cce9663a186ba1b2ccb3fce71fbd4bbbb206b5ba +S014/S014R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S014/S014R12.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S014/S014R05.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S075/S075R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S075/S075R12.edf.event 
818acb17a86d3ee6425a512fab58b363d01b6ccc783417cdad466d1ecddc506f +S075/S075R05.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S075/S075R02.edf 712ea0f0fd69d3b108cde439012068e81b4cae645d170a93d52c1ead4dd45877 +S075/S075R10.edf.event 8553d29b6f3410b4ee7ebd3f65050ee16a492ed893d0bc9a14cf35cfa904d586 +S075/S075R08.edf 12c3c5d354210080fff50cd4bb475cf89df420c18b0a1e675dee817a79b380ba +S075/S075R10.edf 5d683e7fcc9f0d168f8ba02b226ba4bfe0b00bd26216310522e24c139ef71314 +S075/S075R11.edf 8d55cffc5f065f1de6c5007d2f42393f0f4fa2d40a01de17060d03cd515d3b23 +S075/S075R04.edf.event fd77510c7ae392a42ed020605a40066a43ea3dcf80082d17cfd95141d71549ae +S075/S075R14.edf 4805ec2a2ecefc95562a0f2178e2fd3aba647559674d13fd5c46a2f2dc27ef8e +S075/S075R06.edf b07163bf94520e29c548541fd33ab783ae9953009f4357e6aa57dd6f1d81af88 +S075/S075R03.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S075/S075R07.edf.event f338197f5dd0ca078ea8eee22145e57e694f7dce6a2bbd55f5f05346ce3b3f17 +S075/S075R12.edf 6c33faf3fad9d274e8e7718c6dc2df6e8d54e5478638684530b7f854961b0eec +S075/S075R03.edf 3c7d10331971c041abdc9fc02041ab2c80384e3b84ff3f58d9d54c7572590ca1 +S075/S075R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S075/S075R09.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S075/S075R01.edf 79c65d9990c6606d45d9c3be18ae7226cc97e4757955fd8675f313ff8f91b238 +S075/S075R14.edf.event 8cd8c690df55a4167dd5e136be1c424a77655375599a9d3350d5460cabab539d +S075/S075R04.edf 87714f913602a939c24e3ce672a689ce20f966c5d19d5f699f898a94cca4c086 +S075/S075R08.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S075/S075R11.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S075/S075R13.edf 10e9456acf73b9c355684b37aaf102a2d78389c47fdf0c026c39a32e08c95c71 +S075/S075R07.edf d933c3ead467e6c662fc45bdba943f20177079470d1d06ac4cdc989d6dfadf5a +S075/S075R06.edf.event 734d5da22686d9f9052ce819f7fba86e4f5225f2d3638eb1e3795e9ebd018fe1 +S075/S075R13.edf.event 83ec130ac6a664e0d88923e1496dc0806008967b51e6158521a6beb0515b2eb8 +S075/S075R09.edf b17c615c3a63e91e8e26a4b9299c1c5bcc022c2d50932520a382c713e8f1f708 +S075/S075R05.edf aa220a279d288581e32dcb10fd8e44688aacf89de7f0dece4765f0b3c40a4229 +S010/S010R09.edf 91bcc4d9068115e0d022a6a9ebb1fe05cfcba27cc1bf025beb8a3eebb72b0e7c +S010/S010R09.edf.event a788bd6a825d960534053c6559cb4f24cb7ee7a3164017d99c1dbc55f3b22215 +S010/S010R08.edf b468e77d0c8a73377b4510220c6be95bdefd572f6ed5c4b5f539c9dc0bdef485 +S010/S010R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S010/S010R12.edf.event 820bcb0b8aa75c06572fb3677af8b965e06ca92c0ff5f4eabd0d347c7b141680 +S010/S010R06.edf 5f5c213f1f7db4bdb52d8d54e8074d7f5e73655b1e040d41a7916d3d0a00b666 +S010/S010R14.edf 138893d950405102e1536365290ac15255688eaab181170afbd5178e6714ae2e +S010/S010R14.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S010/S010R13.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S010/S010R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S010/S010R10.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S010/S010R07.edf.event e11c72f75777baaef94ad75881c37d283df8f4f917ca8c1c823cece56a31f215 +S010/S010R05.edf.event 50f12f26efafddcd994732349e4117055595d324e4dcff8fa56160baad5d5533 +S010/S010R13.edf 10e642cfacf5d55a4a90b73f8968e2dc4da6db4b8458388d87641c21e1e00c6a +S010/S010R06.edf.event 
f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S010/S010R03.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S010/S010R01.edf 0d694b503e5ea0f6088a790f09390efb1be2688ad92213f5e82fa2f343cd18c4 +S010/S010R03.edf 49016ed40a1c17164b7b56031d567ee96b00b0b3755fab072c1d78db9ccdab73 +S010/S010R11.edf 4e8a3c50558c9dce96ab060d4883d19fbfe5e859989d7c633bf9f88a6a65d467 +S010/S010R12.edf 0983609a12e6fd9b3ae99fd6968938ec5a3b012948894602233a65b720ac3975 +S010/S010R04.edf 8e68b22936cbcb7f84ed8ff037cb4a99f01064589d181c8056dbef06c1c7159b +S010/S010R02.edf d44f5f6c774771bc2b6cdd6d29c7f558e11ad11e6198cf4603f473edd4b2074a +S010/S010R05.edf e83d0bcd64dc403e430be79f865e92576b62d42ebbc07583fb1bd964238127bc +S010/S010R04.edf.event 3e275ced710e3ac50d345aab942c9fb009e11f5447566d67dfaa0345a0d96840 +S010/S010R10.edf 693b0d9240095c01eb8ec5b2d0b3887cfbb8fbfeb85a04076b79c297b5b7d42c +S010/S010R11.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S010/S010R08.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S010/S010R07.edf cf59750f6f9576e106875f257673940cbaf2b0d62e1d3b1a45260dbc46c1981b +S074/S074R02.edf 0135aacd143f05dcde96a748fb925d60823187e0b4a910051d81c91a85b72bed +S074/S074R14.edf.event 046534c84c8b0ff5bc1e42577bfd07477507c70bf5fa72787065271450df27e8 +S074/S074R07.edf d51407f00e5f51714f155025b59165e1629a6f77e1205f0f693917400489198a +S074/S074R12.edf.event ef90708131efeff70f834269381bdac1901f295f238b806a3e7faa6295beae60 +S074/S074R04.edf ea84e05235eb45067eb84f19aa09c4c5d429af1e9d865ee5eae1b2aaab46a081 +S074/S074R05.edf.event c2cda6658840270926f0e5923526e91f0b5c3b025e5e480e8128411d202279f7 +S074/S074R04.edf.event 41a37ed42798630028522e1dddfb80e81b973b74f6159b9cfbb856b13dd30d4b +S074/S074R12.edf f6ea6103b34721b8d2d8e1175d305491303cf5f028879791f14443da810d1c7a +S074/S074R06.edf.event 5bbce0aecf4877ae15055ab9b31e4f2c644ddde0790031ab0902e5c6d913e97d +S074/S074R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S074/S074R01.edf 5a9fed173c09eb34df7bfb6e79d6c15b616f2cf95c65bbed6b7b392acfa2ac3a +S074/S074R05.edf 10c0b403ec5d30a1bfdd118218814ad6a3ab807ba2f25c767aea3374b71de285 +S074/S074R13.edf.event 21d57c80128ab83ad495fc6a3f948d954b23ed08be42eabdf2d058a81fa1ebeb +S074/S074R07.edf.event 33d88435be58ca0eaa6dbf2eedec24ab55aec24d6e067979c6aa5f2549ce02bc +S074/S074R08.edf e594ad62e81f3948d52e1bf5b896598543e4c739e06df596160dd379af59b99f +S074/S074R14.edf a6b678a0c9f505a58fe9b6c769276a84a4785743a3866529febf5f489e41d6d9 +S074/S074R11.edf c7ab21c448f8059e8674b21f9bdd96311c83086c6a7faf95aec5d75c319bc01b +S074/S074R11.edf.event e6e3fae8bbcbc00e9d7d959eb1e3c2c0ab93eb9e896a0adc93c685329e429982 +S074/S074R09.edf e8d8787476b0bf24a5caa9fbc450296cc86bb92fbf25c22dfde96f893bf20c4d +S074/S074R03.edf b93be723755be84e63dd7ad2a09e396392d71a4a6c156b085a7577a1ff6fce9d +S074/S074R09.edf.event 413f7dd132c171c2c14d9a7cdebe9c5e6ea047f267c8e9fbd8669a3ad0fda05b +S074/S074R08.edf.event 6a98c81dedf4cfc225c5d5260ce569ed2e2c48b1be5698ab3cba2089b491e545 +S074/S074R03.edf.event 0c18fa49d469703f30b80a748450ef0688aa72103e201d690fb064dd55c7e540 +S074/S074R10.edf 25261ebba19c4f04b7fe9597287a49a97e91268941c9fd3609ec0a7f53fcd5e6 +S074/S074R13.edf a2816222018a1b9b734f200ea774b9e43371a8c835c729649955ec1b6ad2335b +S074/S074R10.edf.event 772fcde48d228db4376dd7486ad68145a7a40f0180612741f47a1886d5be50f4 +S074/S074R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S074/S074R06.edf 
c7213e39ce5e307c6619762169d0c07823db7fade9cd2eaa6696fa0ac658c008 +S059/S059R05.edf 80b7c06b3187db15ae83e290d7f82af308e44dcdcb6eef1a0162ec2d9760ad4c +S059/S059R04.edf 904e79d95c362eab86e46e0a1addbe3b80ba802b98b372c63b8872537fcd9bbe +S059/S059R05.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S059/S059R03.edf ceeae042f3be7a4d705c8b13b507e58d901cdb5e7bd4e367b571f85fe5fb6070 +S059/S059R13.edf 72f0530ffb191f575b29f72964e51fa108ae57f495db19dc11de1e7f3c35262f +S059/S059R09.edf 10491ecfa52bcc63056974652c8931ea19a0adb0d3ed331a1a4d58e5bdedb849 +S059/S059R07.edf d425bba7b0f3019f725f3b43c2a63d703b3524f177b6341beaefc1d479c9561b +S059/S059R09.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S059/S059R13.edf.event d6641564c0a8724abac8f919ad99064213b0fafe2c25f0d37d1cc67d83bda19c +S059/S059R06.edf c7ccb0ab2969a4f1d5c522e66299c08d8db2c39c9ad977a9be7613132b4bb46d +S059/S059R08.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S059/S059R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S059/S059R10.edf 20becd179b2aff43d00c5c5a11faf39a820cb59c8d43f2668d9458dddda9ddd8 +S059/S059R06.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S059/S059R01.edf b8b6001fbd9853ab9cf3cc7dc2c40fe0547fd8904044d4bb5b654a083d6f1584 +S059/S059R14.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S059/S059R02.edf 0135175166b23559b8ff05f4e3a4fa0ba0fa24a65b411e0bc1e20dfda9d89074 +S059/S059R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S059/S059R10.edf.event 927b59b2e19d89df8169362057a329c5b70fd65d1d2d0a77e56546927bc02281 +S059/S059R11.edf 6841c44bb69451d341157e05f64d00cc51ddc1d9076bf6903c5aaaff5aeed129 +S059/S059R08.edf 9c96400ddc6cddc13df50bc49a564365e8f3e24fd9fac3b6fa494cb939c9c627 +S059/S059R03.edf.event 60cee9d6dd15c5919b0d3d4f0618922c82d19e6490ea88b6fcd09fc7631fac71 +S059/S059R12.edf ed9b66b031f545783a98ccc77915ee74b1f9b0471a6edb601408b63b7320121f +S059/S059R12.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S059/S059R07.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S059/S059R11.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S059/S059R14.edf 341c60903abc18408b921bef74bfaa5dbed75d886f13c55b25090afa23690c16 +S059/S059R04.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 \ No newline at end of file diff --git a/python/libs/mne/data/extinction_coef.mat b/python/libs/mne/data/extinction_coef.mat new file mode 100644 index 0000000..0ea91c9 Binary files /dev/null and b/python/libs/mne/data/extinction_coef.mat differ diff --git a/python/libs/mne/data/fsaverage/fsaverage-fiducials.fif b/python/libs/mne/data/fsaverage/fsaverage-fiducials.fif new file mode 100644 index 0000000..faaae97 Binary files /dev/null and b/python/libs/mne/data/fsaverage/fsaverage-fiducials.fif differ diff --git a/python/libs/mne/data/fsaverage/fsaverage-head.fif b/python/libs/mne/data/fsaverage/fsaverage-head.fif new file mode 100644 index 0000000..b66d16a Binary files /dev/null and b/python/libs/mne/data/fsaverage/fsaverage-head.fif differ diff --git a/python/libs/mne/data/fsaverage/fsaverage-inner_skull-bem.fif b/python/libs/mne/data/fsaverage/fsaverage-inner_skull-bem.fif new file mode 100644 index 0000000..6f23294 Binary files /dev/null and b/python/libs/mne/data/fsaverage/fsaverage-inner_skull-bem.fif differ diff --git 
a/python/libs/mne/data/fsaverage/fsaverage-trans.fif b/python/libs/mne/data/fsaverage/fsaverage-trans.fif new file mode 100644 index 0000000..92b5573 Binary files /dev/null and b/python/libs/mne/data/fsaverage/fsaverage-trans.fif differ diff --git a/python/libs/mne/data/helmets/122m.fif.gz b/python/libs/mne/data/helmets/122m.fif.gz new file mode 100644 index 0000000..79d1773 Binary files /dev/null and b/python/libs/mne/data/helmets/122m.fif.gz differ diff --git a/python/libs/mne/data/helmets/306m.fif.gz b/python/libs/mne/data/helmets/306m.fif.gz new file mode 100644 index 0000000..e57e840 Binary files /dev/null and b/python/libs/mne/data/helmets/306m.fif.gz differ diff --git a/python/libs/mne/data/helmets/306m_rt.fif.gz b/python/libs/mne/data/helmets/306m_rt.fif.gz new file mode 100644 index 0000000..60d025b Binary files /dev/null and b/python/libs/mne/data/helmets/306m_rt.fif.gz differ diff --git a/python/libs/mne/data/helmets/BabySQUID.fif.gz b/python/libs/mne/data/helmets/BabySQUID.fif.gz new file mode 100644 index 0000000..3269ffb Binary files /dev/null and b/python/libs/mne/data/helmets/BabySQUID.fif.gz differ diff --git a/python/libs/mne/data/helmets/CTF_275.fif.gz b/python/libs/mne/data/helmets/CTF_275.fif.gz new file mode 100644 index 0000000..4b36411 Binary files /dev/null and b/python/libs/mne/data/helmets/CTF_275.fif.gz differ diff --git a/python/libs/mne/data/helmets/KIT.fif.gz b/python/libs/mne/data/helmets/KIT.fif.gz new file mode 100644 index 0000000..b508585 Binary files /dev/null and b/python/libs/mne/data/helmets/KIT.fif.gz differ diff --git a/python/libs/mne/data/helmets/Magnes_2500wh.fif.gz b/python/libs/mne/data/helmets/Magnes_2500wh.fif.gz new file mode 100644 index 0000000..27275ae Binary files /dev/null and b/python/libs/mne/data/helmets/Magnes_2500wh.fif.gz differ diff --git a/python/libs/mne/data/helmets/Magnes_3600wh.fif.gz b/python/libs/mne/data/helmets/Magnes_3600wh.fif.gz new file mode 100644 index 0000000..c665595 Binary files /dev/null and b/python/libs/mne/data/helmets/Magnes_3600wh.fif.gz differ diff --git a/python/libs/mne/data/icos.fif.gz b/python/libs/mne/data/icos.fif.gz new file mode 100644 index 0000000..99e526b Binary files /dev/null and b/python/libs/mne/data/icos.fif.gz differ diff --git a/python/libs/mne/data/image/custom_layout.lout b/python/libs/mne/data/image/custom_layout.lout new file mode 100644 index 0000000..ab5b814 --- /dev/null +++ b/python/libs/mne/data/image/custom_layout.lout @@ -0,0 +1,257 @@ + 0.00 0.00 0.01 0.02 +000 0.79 0.46 0.07 0.05 0 +001 0.78 0.48 0.07 0.05 1 +002 0.76 0.51 0.07 0.05 2 +003 0.74 0.53 0.07 0.05 3 +004 0.72 0.55 0.07 0.05 4 +005 0.71 0.57 0.07 0.05 5 +006 0.69 0.59 0.07 0.05 6 +007 0.67 0.62 0.07 0.05 7 +008 0.66 0.64 0.07 0.05 8 +009 0.64 0.66 0.07 0.05 9 +010 0.62 0.68 0.07 0.05 10 +011 0.61 0.69 0.07 0.05 11 +012 0.59 0.71 0.07 0.05 12 +013 0.58 0.73 0.07 0.05 13 +014 0.56 0.75 0.07 0.05 14 +015 0.54 0.77 0.07 0.05 15 +016 0.77 0.44 0.07 0.05 16 +017 0.75 0.46 0.07 0.05 17 +018 0.73 0.49 0.07 0.05 18 +019 0.72 0.51 0.07 0.05 19 +020 0.70 0.54 0.07 0.05 20 +021 0.68 0.56 0.07 0.05 21 +022 0.66 0.58 0.07 0.05 22 +023 0.65 0.60 0.07 0.05 23 +024 0.63 0.62 0.07 0.05 24 +025 0.62 0.64 0.07 0.05 25 +026 0.60 0.66 0.07 0.05 26 +027 0.58 0.68 0.07 0.05 27 +028 0.57 0.70 0.07 0.05 28 +029 0.55 0.71 0.07 0.05 29 +030 0.53 0.73 0.07 0.05 30 +031 0.52 0.75 0.07 0.05 31 +032 0.75 0.42 0.07 0.05 32 +033 0.73 0.45 0.07 0.05 33 +034 0.71 0.47 0.07 0.05 34 +035 0.69 0.50 0.07 0.05 35 +036 0.68 0.52 0.07 0.05 36 +037 0.66 
0.54 0.07 0.05 37 +038 0.64 0.57 0.07 0.05 38 +039 0.62 0.58 0.07 0.05 39 +040 0.61 0.61 0.07 0.05 40 +041 0.59 0.62 0.07 0.05 41 +042 0.58 0.64 0.07 0.05 42 +043 0.56 0.66 0.07 0.05 43 +044 0.54 0.68 0.07 0.05 44 +045 0.53 0.70 0.07 0.05 45 +046 0.51 0.72 0.07 0.05 46 +047 0.50 0.74 0.07 0.05 47 +048 0.72 0.41 0.07 0.05 48 +049 0.71 0.43 0.07 0.05 49 +050 0.69 0.46 0.07 0.05 50 +051 0.67 0.48 0.07 0.05 51 +052 0.65 0.50 0.07 0.05 52 +053 0.63 0.52 0.07 0.05 53 +054 0.62 0.55 0.07 0.05 54 +055 0.60 0.57 0.07 0.05 55 +056 0.58 0.59 0.07 0.05 56 +057 0.57 0.61 0.07 0.05 57 +058 0.55 0.63 0.07 0.05 58 +059 0.54 0.65 0.07 0.05 59 +060 0.52 0.67 0.07 0.05 60 +061 0.51 0.69 0.07 0.05 61 +062 0.49 0.71 0.07 0.05 62 +063 0.47 0.73 0.07 0.05 63 +064 0.70 0.39 0.07 0.05 64 +065 0.68 0.41 0.07 0.05 65 +066 0.66 0.44 0.07 0.05 66 +067 0.65 0.46 0.07 0.05 67 +068 0.63 0.49 0.07 0.05 68 +069 0.61 0.51 0.07 0.05 69 +070 0.59 0.53 0.07 0.05 70 +071 0.58 0.55 0.07 0.05 71 +072 0.56 0.57 0.07 0.05 72 +073 0.55 0.59 0.07 0.05 73 +074 0.53 0.61 0.07 0.05 74 +075 0.51 0.64 0.07 0.05 75 +076 0.50 0.66 0.07 0.05 76 +077 0.48 0.68 0.07 0.05 77 +078 0.47 0.69 0.07 0.05 78 +079 0.45 0.72 0.07 0.05 79 +080 0.68 0.38 0.07 0.05 80 +081 0.66 0.40 0.07 0.05 81 +082 0.64 0.42 0.07 0.05 82 +083 0.62 0.44 0.07 0.05 83 +084 0.60 0.47 0.07 0.05 84 +085 0.59 0.49 0.07 0.05 85 +086 0.57 0.51 0.07 0.05 86 +087 0.55 0.54 0.07 0.05 87 +088 0.54 0.56 0.07 0.05 88 +089 0.52 0.58 0.07 0.05 89 +090 0.50 0.60 0.07 0.05 90 +091 0.49 0.62 0.07 0.05 91 +092 0.47 0.64 0.07 0.05 92 +093 0.46 0.66 0.07 0.05 93 +094 0.44 0.68 0.07 0.05 94 +095 0.42 0.70 0.07 0.05 95 +096 0.65 0.36 0.07 0.05 96 +097 0.63 0.38 0.07 0.05 97 +098 0.61 0.41 0.07 0.05 98 +099 0.60 0.43 0.07 0.05 99 +100 0.58 0.45 0.07 0.05 100 +101 0.56 0.47 0.07 0.05 101 +102 0.55 0.50 0.07 0.05 102 +103 0.53 0.52 0.07 0.05 103 +104 0.51 0.54 0.07 0.05 104 +105 0.50 0.56 0.07 0.05 105 +106 0.48 0.58 0.07 0.05 106 +107 0.47 0.61 0.07 0.05 107 +108 0.45 0.63 0.07 0.05 108 +109 0.44 0.65 0.07 0.05 109 +110 0.42 0.67 0.07 0.05 110 +111 0.41 0.69 0.07 0.05 111 +112 0.63 0.34 0.07 0.05 112 +113 0.61 0.36 0.07 0.05 113 +114 0.59 0.39 0.07 0.05 114 +115 0.58 0.41 0.07 0.05 115 +116 0.56 0.43 0.07 0.05 116 +117 0.54 0.46 0.07 0.05 117 +118 0.52 0.48 0.07 0.05 118 +119 0.51 0.51 0.07 0.05 119 +120 0.49 0.52 0.07 0.05 120 +121 0.47 0.55 0.07 0.05 121 +122 0.46 0.57 0.07 0.05 122 +123 0.44 0.59 0.07 0.05 123 +124 0.43 0.61 0.07 0.05 124 +125 0.41 0.63 0.07 0.05 125 +126 0.40 0.65 0.07 0.05 126 +127 0.38 0.67 0.07 0.05 127 +128 0.60 0.32 0.07 0.05 128 +129 0.59 0.35 0.07 0.05 129 +130 0.56 0.37 0.07 0.05 130 +131 0.55 0.39 0.07 0.05 131 +132 0.53 0.42 0.07 0.05 132 +133 0.52 0.44 0.07 0.05 133 +134 0.50 0.46 0.07 0.05 134 +135 0.48 0.49 0.07 0.05 135 +136 0.47 0.51 0.07 0.05 136 +137 0.45 0.53 0.07 0.05 137 +138 0.43 0.56 0.07 0.05 138 +139 0.42 0.57 0.07 0.05 139 +140 0.40 0.60 0.07 0.05 140 +141 0.39 0.61 0.07 0.05 141 +142 0.37 0.63 0.07 0.05 142 +143 0.36 0.66 0.07 0.05 143 +144 0.58 0.31 0.07 0.05 144 +145 0.56 0.33 0.07 0.05 145 +146 0.54 0.35 0.07 0.05 146 +147 0.53 0.38 0.07 0.05 147 +148 0.51 0.40 0.07 0.05 148 +149 0.49 0.42 0.07 0.05 149 +150 0.48 0.45 0.07 0.05 150 +151 0.46 0.47 0.07 0.05 151 +152 0.44 0.49 0.07 0.05 152 +153 0.42 0.51 0.07 0.05 153 +154 0.41 0.53 0.07 0.05 154 +155 0.39 0.56 0.07 0.05 155 +156 0.38 0.58 0.07 0.05 156 +157 0.36 0.60 0.07 0.05 157 +158 0.35 0.62 0.07 0.05 158 +159 0.33 0.64 0.07 0.05 159 +160 0.55 0.29 0.07 0.05 160 +161 0.54 0.32 0.07 0.05 161 
+162 0.52 0.34 0.07 0.05 162 +163 0.50 0.36 0.07 0.05 163 +164 0.49 0.38 0.07 0.05 164 +165 0.47 0.41 0.07 0.05 165 +166 0.45 0.43 0.07 0.05 166 +167 0.43 0.45 0.07 0.05 167 +168 0.42 0.48 0.07 0.05 168 +169 0.40 0.50 0.07 0.05 169 +170 0.39 0.52 0.07 0.05 170 +171 0.37 0.54 0.07 0.05 171 +172 0.36 0.56 0.07 0.05 172 +173 0.34 0.58 0.07 0.05 173 +174 0.33 0.60 0.07 0.05 174 +175 0.31 0.62 0.07 0.05 175 +176 0.53 0.27 0.07 0.05 176 +177 0.52 0.30 0.07 0.05 177 +178 0.50 0.32 0.07 0.05 178 +179 0.48 0.34 0.07 0.05 179 +180 0.46 0.37 0.07 0.05 180 +181 0.45 0.39 0.07 0.05 181 +182 0.43 0.41 0.07 0.05 182 +183 0.41 0.43 0.07 0.05 183 +184 0.40 0.46 0.07 0.05 184 +185 0.38 0.48 0.07 0.05 185 +186 0.36 0.50 0.07 0.05 186 +187 0.35 0.53 0.07 0.05 187 +188 0.33 0.55 0.07 0.05 188 +189 0.32 0.57 0.07 0.05 189 +190 0.30 0.59 0.07 0.05 190 +191 0.29 0.61 0.07 0.05 191 +192 0.51 0.26 0.07 0.05 192 +193 0.49 0.28 0.07 0.05 193 +194 0.47 0.31 0.07 0.05 194 +195 0.46 0.33 0.07 0.05 195 +196 0.44 0.35 0.07 0.05 196 +197 0.42 0.37 0.07 0.05 197 +198 0.41 0.40 0.07 0.05 198 +199 0.39 0.42 0.07 0.05 199 +200 0.37 0.44 0.07 0.05 200 +201 0.36 0.46 0.07 0.05 201 +202 0.34 0.49 0.07 0.05 202 +203 0.32 0.51 0.07 0.05 203 +204 0.31 0.53 0.07 0.05 204 +205 0.29 0.55 0.07 0.05 205 +206 0.28 0.57 0.07 0.05 206 +207 0.27 0.59 0.07 0.05 207 +208 0.48 0.24 0.07 0.05 208 +209 0.47 0.26 0.07 0.05 209 +210 0.45 0.28 0.07 0.05 210 +211 0.43 0.31 0.07 0.05 211 +212 0.41 0.33 0.07 0.05 212 +213 0.40 0.35 0.07 0.05 213 +214 0.38 0.38 0.07 0.05 214 +215 0.37 0.40 0.07 0.05 215 +216 0.35 0.42 0.07 0.05 216 +217 0.33 0.45 0.07 0.05 217 +218 0.32 0.47 0.07 0.05 218 +219 0.30 0.49 0.07 0.05 219 +220 0.28 0.51 0.07 0.05 220 +221 0.27 0.53 0.07 0.05 221 +222 0.25 0.55 0.07 0.05 222 +223 0.24 0.58 0.07 0.05 223 +224 0.46 0.23 0.07 0.05 224 +225 0.45 0.25 0.07 0.05 225 +226 0.43 0.27 0.07 0.05 226 +227 0.41 0.29 0.07 0.05 227 +228 0.39 0.31 0.07 0.05 228 +229 0.38 0.34 0.07 0.05 229 +230 0.36 0.36 0.07 0.05 230 +231 0.34 0.38 0.07 0.05 231 +232 0.33 0.41 0.07 0.05 232 +233 0.31 0.43 0.07 0.05 233 +234 0.29 0.45 0.07 0.05 234 +235 0.28 0.47 0.07 0.05 235 +236 0.26 0.50 0.07 0.05 236 +237 0.25 0.52 0.07 0.05 237 +238 0.24 0.54 0.07 0.05 238 +239 0.22 0.56 0.07 0.05 239 +240 0.44 0.21 0.07 0.05 240 +241 0.42 0.23 0.07 0.05 241 +242 0.41 0.25 0.07 0.05 242 +243 0.39 0.27 0.07 0.05 243 +244 0.37 0.30 0.07 0.05 244 +245 0.35 0.32 0.07 0.05 245 +246 0.33 0.34 0.07 0.05 246 +247 0.32 0.37 0.07 0.05 247 +248 0.30 0.39 0.07 0.05 248 +249 0.28 0.41 0.07 0.05 249 +250 0.27 0.43 0.07 0.05 250 +251 0.25 0.46 0.07 0.05 251 +252 0.24 0.48 0.07 0.05 252 +253 0.23 0.50 0.07 0.05 253 +254 0.21 0.52 0.07 0.05 254 +255 0.20 0.54 0.07 0.05 255 diff --git a/python/libs/mne/data/image/mni_brain.gif b/python/libs/mne/data/image/mni_brain.gif new file mode 100644 index 0000000..3d6cc08 Binary files /dev/null and b/python/libs/mne/data/image/mni_brain.gif differ diff --git a/python/libs/mne/data/mne_analyze.sel b/python/libs/mne/data/mne_analyze.sel new file mode 100644 index 0000000..ae4bf34 --- /dev/null +++ b/python/libs/mne/data/mne_analyze.sel @@ -0,0 +1,19 @@ +# +# All channels +# +Vertex:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 0631|MEG 0431|MEG 0711|MEG 0431|MEG 0741|MEG 1821|MEG 1041|MEG 1111|MEG 0721|MEG 1141|MEG 0731|MEG 2211 +Left-temporal:MEG 0223|MEG 0222|MEG 0212|MEG 
0213|MEG 0133|MEG 0132|MEG 0112|MEG 0113|MEG 0233|MEG 0232|MEG 0243|MEG 0242|MEG 1512|MEG 1513|MEG 0143|MEG 0142|MEG 1623|MEG 1622|MEG 1613|MEG 1612|MEG 1523|MEG 1522|MEG 1543|MEG 1542|MEG 1533|MEG 1532|MEG 0221|MEG 0211|MEG 0131|MEG 0111|MEG 0231|MEG 0241|MEG 1511|MEG 0141|MEG 1621|MEG 1611|MEG 1521|MEG 1541|MEG 1531 +Right-temporal:MEG 1312|MEG 1313|MEG 1323|MEG 1322|MEG 1442|MEG 1443|MEG 1423|MEG 1422|MEG 1342|MEG 1343|MEG 1333|MEG 1332|MEG 2612|MEG 2613|MEG 1433|MEG 1432|MEG 2413|MEG 2412|MEG 2422|MEG 2423|MEG 2642|MEG 2643|MEG 2623|MEG 2622|MEG 2633|MEG 2632|MEG 1311|MEG 1321|MEG 1441|MEG 1421|MEG 1341|MEG 1331|MEG 2611|MEG 1431|MEG 2411|MEG 2421|MEG 2641|MEG 2621|MEG 2631 +Left-parietal:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0412|MEG 0413|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0442|MEG 0443|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1813|MEG 1812|MEG 1832|MEG 1833|MEG 1843|MEG 1842|MEG 1632|MEG 1633|MEG 2013|MEG 2012|MEG 0631|MEG 0421|MEG 0411|MEG 0711|MEG 0431|MEG 0441|MEG 0741|MEG 1821|MEG 1811|MEG 1831|MEG 1841|MEG 1631|MEG 2011 +Right-parietal:MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 1123|MEG 1122|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 1133|MEG 1132|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 2223|MEG 2222|MEG 2242|MEG 2243|MEG 2232|MEG 2233|MEG 2442|MEG 2443|MEG 2023|MEG 2022|MEG 1041|MEG 1111|MEG 1121|MEG 0721|MEG 1141|MEG 1131|MEG 0731|MEG 2211|MEG 2221|MEG 2241|MEG 2231|MEG 2441|MEG 2021 +Left-occipital:MEG 2042|MEG 2043|MEG 1913|MEG 1912|MEG 2113|MEG 2112|MEG 1922|MEG 1923|MEG 1942|MEG 1943|MEG 1642|MEG 1643|MEG 1933|MEG 1932|MEG 1733|MEG 1732|MEG 1723|MEG 1722|MEG 2143|MEG 2142|MEG 1742|MEG 1743|MEG 1712|MEG 1713|MEG 2041|MEG 1911|MEG 2111|MEG 1921|MEG 1941|MEG 1641|MEG 1931|MEG 1731|MEG 1721|MEG 2141|MEG 1741|MEG 1711 +Right-occipital:MEG 2032|MEG 2033|MEG 2313|MEG 2312|MEG 2342|MEG 2343|MEG 2322|MEG 2323|MEG 2433|MEG 2432|MEG 2122|MEG 2123|MEG 2333|MEG 2332|MEG 2513|MEG 2512|MEG 2523|MEG 2522|MEG 2133|MEG 2132|MEG 2542|MEG 2543|MEG 2532|MEG 2533|MEG 2031|MEG 2311|MEG 2341|MEG 2321|MEG 2431|MEG 2121|MEG 2331|MEG 2511|MEG 2521|MEG 2131|MEG 2541|MEG 2531 +Left-frontal:MEG 0522|MEG 0523|MEG 0512|MEG 0513|MEG 0312|MEG 0313|MEG 0342|MEG 0343|MEG 0122|MEG 0123|MEG 0822|MEG 0823|MEG 0533|MEG 0532|MEG 0543|MEG 0542|MEG 0322|MEG 0323|MEG 0612|MEG 0613|MEG 0333|MEG 0332|MEG 0622|MEG 0623|MEG 0643|MEG 0642|MEG 0521|MEG 0511|MEG 0311|MEG 0341|MEG 0121|MEG 0821|MEG 0531|MEG 0541|MEG 0321|MEG 0611|MEG 0331|MEG 0621|MEG 0641 +Right-frontal:MEG 0813|MEG 0812|MEG 0912|MEG 0913|MEG 0922|MEG 0923|MEG 1212|MEG 1213|MEG 1223|MEG 1222|MEG 1412|MEG 1413|MEG 0943|MEG 0942|MEG 0933|MEG 0932|MEG 1232|MEG 1233|MEG 1012|MEG 1013|MEG 1022|MEG 1023|MEG 1243|MEG 1242|MEG 1033|MEG 1032|MEG 0811|MEG 0911|MEG 0921|MEG 1211|MEG 1221|MEG 1411|MEG 0941|MEG 0931|MEG 1231|MEG 1011|MEG 1021|MEG 1241|MEG 1031 +# +# EEG in groups of 32 channels +# +EEG 1-32:EEG 001|EEG 002|EEG 003|EEG 004|EEG 005|EEG 006|EEG 007|EEG 008|EEG 009|EEG 010|EEG 011|EEG 012|EEG 013|EEG 014|EEG 015|EEG 016|EEG 017|EEG 018|EEG 019|EEG 020|EEG 021|EEG 022|EEG 023|EEG 024|EEG 025|EEG 026|EEG 027|EEG 028|EEG 029|EEG 030|EEG 031|EEG 032 +EEG 33-64:EEG 033|EEG 034|EEG 035|EEG 036|EEG 037|EEG 038|EEG 039|EEG 040|EEG 041|EEG 042|EEG 043|EEG 044|EEG 045|EEG 046|EEG 047|EEG 048|EEG 049|EEG 050|EEG 051|EEG 052|EEG 053|EEG 054|EEG 055|EEG 056|EEG 057|EEG 058|EEG 059|EEG 060|EEG 061|EEG 062|EEG 063|EEG 064 +EEG 65-96:EEG 065|EEG 066|EEG 067|EEG 068|EEG 069|EEG 070|EEG 071|EEG 072|EEG 073|EEG 074|EEG 075|EEG 076|EEG 077|EEG 078|EEG 079|EEG 080|EEG 081|EEG 
082|EEG 083|EEG 084|EEG 085|EEG 086|EEG 087|EEG 088|EEG 089|EEG 090|EEG 091|EEG 092|EEG 093|EEG 094|EEG 095|EEG 096 +EEG 97-128:EEG 097|EEG 098|EEG 099|EEG 100|EEG 101|EEG 102|EEG 103|EEG 104|EEG 105|EEG 106|EEG 107|EEG 108|EEG 109|EEG 110|EEG 111|EEG 112|EEG 113|EEG 114|EEG 115|EEG 116|EEG 117|EEG 118|EEG 119|EEG 120|EEG 121|EEG 122|EEG 123|EEG 124|EEG 125|EEG 126|EEG 127|EEG 128 \ No newline at end of file diff --git a/python/libs/mne/datasets/__init__.py b/python/libs/mne/datasets/__init__.py new file mode 100644 index 0000000..eed9393 --- /dev/null +++ b/python/libs/mne/datasets/__init__.py @@ -0,0 +1,45 @@ +"""Functions for fetching remote datasets. + +See :ref:`datasets` for more information. +""" + +from . import fieldtrip_cmc +from . import brainstorm +from . import visual_92_categories +from . import kiloword +from . import eegbci +from . import hf_sef +from . import misc +from . import mtrf +from . import sample +from . import somato +from . import multimodal +from . import fnirs_motor +from . import opm +from . import spm_face +from . import testing +from . import _fake +from . import phantom_4dbti +from . import sleep_physionet +from . import limo +from . import refmeg_noise +from . import ssvep +from . import erp_core +from . import epilepsy_ecog +from ._fetch import fetch_dataset +from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation, + fetch_aparc_sub_parcellation, has_dataset) +from ._fsaverage.base import fetch_fsaverage +from ._infant.base import fetch_infant_template +from ._phantom.base import fetch_phantom + +__all__ = [ + '_download_all_example_data', '_fake', 'brainstorm', 'eegbci', + 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', 'fetch_infant_template', + 'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword', + 'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample', + 'sleep_physionet', 'somato', 'spm_face', 'ssvep', 'testing', + 'visual_92_categories', 'limo', 'erp_core', 'epilepsy_ecog', + 'fetch_dataset', 'fetch_phantom', 'has_dataset', 'refmeg_noise', + 'fnirs_motor' +] diff --git a/python/libs/mne/datasets/_fake/__init__.py b/python/libs/mne/datasets/_fake/__init__.py new file mode 100644 index 0000000..57b8d21 --- /dev/null +++ b/python/libs/mne/datasets/_fake/__init__.py @@ -0,0 +1,3 @@ +"""Fake dataset for testing.""" + +from ._fake import data_path, get_version diff --git a/python/libs/mne/datasets/_fake/_fake.py b/python/libs/mne/datasets/_fake/_fake.py new file mode 100644 index 0000000..61ef767 --- /dev/null +++ b/python/libs/mne/datasets/_fake/_fake.py @@ -0,0 +1,28 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. + +from ...utils import verbose +from ..utils import (_data_path_doc, _download_mne_dataset, + _get_version, _version_doc) + + +@verbose +def data_path(path=None, force_update=False, update_path=False, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='fake', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='fake', + conf='MNE_DATASETS_FAKE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('fake') + + +get_version.__doc__ = _version_doc.format(name='fake') diff --git a/python/libs/mne/datasets/_fetch.py b/python/libs/mne/datasets/_fetch.py new file mode 100644 index 0000000..d804734 --- /dev/null +++ b/python/libs/mne/datasets/_fetch.py @@ -0,0 +1,292 @@ +# Authors: Adam Li +# +# License: BSD Style. 
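A note on the vendored ``_fetch.py`` added below: it is the pooch-based download layer that the dataset modules above (for example ``_fake.py``) reach through ``_download_mne_dataset``. As a minimal usage sketch, assuming the vendored ``mne`` package is importable and reusing the 'sample' parameters quoted verbatim in the docstring's Notes section, a call could look like::

    # Sketch only: exercises the fetch_dataset API defined below.
    # The parameter values are copied from the 'sample' example in the
    # docstring Notes; 'untar' makes fetch_dataset wrap the download in
    # pooch.Untar.
    from mne.datasets import fetch_dataset

    sample_params = {
        'dataset_name': 'sample',
        'archive_name': 'MNE-sample-data-processed.tar.gz',
        'hash': 'md5:12b75d1cb7df9dfb4ad73ed82f61094f',
        'url': 'https://osf.io/86qa2/download?version=5',
        'folder_name': 'MNE-sample-data',
        'config_key': 'MNE_DATASETS_SAMPLE_PATH',
    }

    # Downloads (if needed), untars, and returns the on-disk path plus
    # the contents of the dataset's version.txt.
    data_path, version = fetch_dataset(
        sample_params, processor='untar', return_version=True)

With the default ``processor=None`` the archive is left as-is, so ``'untar'``/``'unzip'`` (or a pooch processor instance) must be passed for compressed downloads.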
+ +import logging +import sys +import os +import os.path as op +from shutil import rmtree + +from .. import __version__ as mne_version +from ..utils import logger, warn, _safe_input +from .config import ( + _bst_license_text, + RELEASES, + TESTING_VERSIONED, + MISC_VERSIONED, +) +from .utils import _dataset_version, _do_path_update, _get_path, _mne_path +from ..fixes import _compare_version + + +_FAKE_VERSION = None  # used for monkeypatching while testing versioning + + +def fetch_dataset( + dataset_params, + processor=None, + path=None, + force_update=False, + update_path=True, + download=True, + check_version=False, + return_version=False, + accept=False, + auth=None, + token=None, +): + """Fetch an MNE-compatible dataset using pooch. + + Parameters + ---------- + dataset_params : list of dict | dict + The dataset name(s) and corresponding parameters to download the + dataset(s). Each dataset's parameter dict must contain the following keys: + ``archive_name``, ``url``, ``folder_name``, ``hash``, + ``config_key`` (optional). See Notes. + processor : None | "unzip" | "untar" | instance of pooch.Unzip | instance of pooch.Untar + What to do after downloading the file. ``"unzip"`` and ``"untar"`` will + decompress the downloaded file in place; for custom extraction (e.g., + only extracting certain files from the archive) pass an instance of + :class:`pooch.Unzip` or :class:`pooch.Untar`. If ``None`` (the + default), the files are left as-is. + path : None | str + Directory in which to put the dataset. If ``None``, the dataset + location is determined by first checking whether + ``dataset_params['config_key']`` is defined, and if so, whether that + config key exists in the MNE-Python config file. If so, the configured + path is used; if not, the location is set to the value of the + ``MNE_DATA`` config key (if it exists), or ``~/mne_data`` otherwise. + force_update : bool + Force update of the dataset even if a local copy exists. + Default is False. + update_path : bool | None + If True (default), set the mne-python config to the given + path. If None, the user is prompted. + download : bool + If False and the dataset has not been downloaded yet, it will not be + downloaded and the path will be returned as ``''`` (empty string). This + is mostly used for testing purposes and can be safely ignored by most + users. + check_version : bool + Whether to check the version of the dataset or not. Each version + of the dataset is stored in the root with a ``version.txt`` file. + return_version : bool + Whether or not to return the version of the dataset. + Defaults to False. + accept : bool + Some MNE-supplied datasets require acceptance of an additional license. + Default is ``False``. + auth : tuple | None + Optional authentication tuple containing the username and + password/token, passed to :class:`pooch.HTTPDownloader` (e.g., + ``auth=('foo', '012345')``). + token : str | None + Optional authentication token passed to :class:`pooch.HTTPDownloader`. + + Returns + ------- + data_path : instance of Path + The path to the fetched dataset. + version : str + Only returned if ``return_version`` is True.
+ + See Also + -------- + mne.get_config + mne.set_config + mne.datasets.has_dataset + + Notes + ----- + The ``dataset_params`` argument must contain the following keys: + + - ``archive_name``: The name of the (possibly compressed) file to download + - ``url``: URL from which the file can be downloaded + - ``folder_name``: the subfolder within the ``MNE_DATA`` folder in which to + save and uncompress (if needed) the file(s) + - ``hash``: the cryptographic hash type of the file followed by a colon and + then the hash value (examples: "sha256:19uheid...", "md5:upodh2io...") + - ``config_key`` (optional): key passed to :func:`mne.set_config` to store + the on-disk location of the downloaded dataset (e.g., + ``"MNE_DATASETS_EEGBCI_PATH"``). This will only work for the provided + datasets listed :ref:`here <datasets>`; do not use for user-defined + datasets. + + An example would look like:: + + {'dataset_name': 'sample', + 'archive_name': 'MNE-sample-data-processed.tar.gz', + 'hash': 'md5:12b75d1cb7df9dfb4ad73ed82f61094f', + 'url': 'https://osf.io/86qa2/download?version=5', + 'folder_name': 'MNE-sample-data', + 'config_key': 'MNE_DATASETS_SAMPLE_PATH'} + + For datasets where a single (possibly compressed) file must be downloaded, + pass a single :class:`dict` as ``dataset_params``. For datasets where + multiple files must be downloaded and (optionally) uncompressed separately, + pass a list of dicts. + """ # noqa E501 + import pooch + + if auth is not None: + if len(auth) != 2: + raise RuntimeError( + "auth should be a 2-tuple consisting " + "of a username and password/token." + ) + + # processor to uncompress files + if processor == "untar": + processor = pooch.Untar(extract_dir=path) + elif processor == "unzip": + processor = pooch.Unzip(extract_dir=path) + + if isinstance(dataset_params, dict): + dataset_params = [dataset_params] + + # extract configuration parameters + names = [params["dataset_name"] for params in dataset_params] + name = names[0] + dataset_dict = dataset_params[0] + config_key = dataset_dict.get('config_key', None) + folder_name = dataset_dict["folder_name"] + + # get download path for specific dataset + path = _get_path(path=path, key=config_key, name=name) + + # get the actual path to each dataset folder name + final_path = op.join(path, folder_name) + + # handle BrainStorm datasets, which nest the data one folder deeper + if name.startswith("bst_"): + final_path = op.join(final_path, name) + + final_path = _mne_path(final_path) + + # additional condition: check for version.txt and parse it + # check if testing or misc data is outdated; if so, redownload it + want_version = RELEASES.get(name, None) + want_version = _FAKE_VERSION if name == "fake" else want_version + + # get the version of the dataset and then check if the version is outdated + data_version = _dataset_version(final_path, name) + outdated = (want_version is not None and + _compare_version(want_version, '>', data_version)) + + if outdated: + logger.info( + f"Dataset {name} version {data_version} out of date, " + f"latest version is {want_version}" + ) + empty = _mne_path("") + + # return empty string if outdated dataset and we don't want to download + if (not force_update) and outdated and not download: + logger.info( + 'Dataset out of date, force_update=False, and download=False, ' + 'returning empty data_path') + return (empty, data_version) if return_version else empty + + # reasons to bail early (hf_sef has separate code for this): + if ( + (not force_update) + and (not outdated) + and (not name.startswith("hf_sef_"))
+ ): + # ...if target folder exists (otherwise pooch downloads every + # time because we don't save the archive files after unpacking, so + # pooch can't check its checksum) + if op.isdir(final_path): + if config_key is not None: + _do_path_update(path, update_path, config_key, name) + return (final_path, data_version) if return_version else final_path + # ...if download=False (useful for debugging) + elif not download: + return (empty, data_version) if return_version else empty + # ...if user didn't accept the license + elif name.startswith("bst_"): + if accept or "--accept-brainstorm-license" in sys.argv: + answer = "y" + else: + # If they don't have stdin, just accept the license + # https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501 + answer = _safe_input( + "%sAgree (y/[n])? " % _bst_license_text, use="y") + if answer.lower() != "y": + raise RuntimeError( + "You must agree to the license to use this " "dataset" + ) + # downloader & processors + download_params = dict(progressbar=logger.level <= logging.INFO) + if name == "fake": + download_params["progressbar"] = False + if auth is not None: + download_params["auth"] = auth + if token is not None: + download_params["headers"] = {"Authorization": f"token {token}"} + downloader = pooch.HTTPDownloader(**download_params) + + # make mappings from archive names to urls and to checksums + urls = dict() + registry = dict() + for idx, this_name in enumerate(names): + this_dataset = dataset_params[idx] + archive_name = this_dataset["archive_name"] + dataset_url = this_dataset["url"] + dataset_hash = this_dataset["hash"] + urls[archive_name] = dataset_url + registry[archive_name] = dataset_hash + + # create the download manager + fetcher = pooch.create( + path=str(final_path) if processor is None else path, + base_url="", # Full URLs are given in the `urls` dict. + version=None, # Data versioning is decoupled from MNE-Python version. + urls=urls, + registry=registry, + retry_if_failed=2, # 2 retries = 3 total attempts + ) + + # use our logger level for pooch's logger too + pooch.get_logger().setLevel(logger.getEffectiveLevel()) + + for idx in range(len(names)): + # fetch and unpack the data + archive_name = dataset_params[idx]["archive_name"] + fetcher.fetch( + fname=archive_name, downloader=downloader, processor=processor + ) + # after unpacking, remove the archive file + if processor is not None: + os.remove(op.join(path, archive_name)) + + # remove version number from "misc" and "testing" datasets folder names + if name == "misc": + rmtree(final_path, ignore_errors=True) + os.replace(op.join(path, MISC_VERSIONED), final_path) + elif name == "testing": + rmtree(final_path, ignore_errors=True) + os.replace(op.join(path, TESTING_VERSIONED), final_path) + + # maybe update the config + if config_key is not None: + old_name = "brainstorm" if name.startswith("bst_") else name + _do_path_update(path, update_path, config_key, old_name) + + # compare the version of the dataset and mne + data_version = _dataset_version(path, name) + # 0.7 < 0.7.git should be False, therefore strip + if check_version and ( + _compare_version(data_version, '<', mne_version.strip(".git")) + ): + warn( + "The {name} dataset (version {current}) is older than " + "mne-python (version {newest}). 
If the examples fail, " + "you may need to update the {name} dataset by using " + "mne.datasets.{name}.data_path(force_update=True)".format( + name=name, current=data_version, newest=mne_version + ) + ) + return (final_path, data_version) if return_version else final_path diff --git a/python/libs/mne/datasets/_fsaverage/__init__.py b/python/libs/mne/datasets/_fsaverage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/datasets/_fsaverage/base.py b/python/libs/mne/datasets/_fsaverage/base.py new file mode 100644 index 0000000..ba67a52 --- /dev/null +++ b/python/libs/mne/datasets/_fsaverage/base.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson +# License: BSD Style. + +import os +import os.path as op + +from ..utils import _manifest_check_download, _get_path +from ...utils import verbose, get_subjects_dir, set_config + +FSAVERAGE_MANIFEST_PATH = op.dirname(__file__) + + +@verbose +def fetch_fsaverage(subjects_dir=None, *, verbose=None): + """Fetch and update fsaverage. + + Parameters + ---------- + subjects_dir : str | None + The path to use as the subjects directory in the MNE-Python + config file. None will use the existing config variable (i.e., + will not change anything), and if it does not exist, will use + ``~/mne_data/MNE-fsaverage-data``. + %(verbose)s + + Returns + ------- + fs_dir : str + The fsaverage directory. + (essentially ``subjects_dir + '/fsaverage'``). + + Notes + ----- + This function is designed to provide + + 1. All modern (Freesurfer 6) fsaverage subject files + 2. All MNE fsaverage parcellations + 3. fsaverage head surface, fiducials, head<->MRI trans, 1- and 3-layer + BEMs (and surfaces) + + This function will compare the contents of ``subjects_dir/fsaverage`` + to the ones provided in the remote zip file. If any are missing, + the zip file is downloaded and files are updated. No files will + be overwritten. + + .. versionadded:: 0.18 + """ + # Code used to create the BEM (other files taken from MNE-sample-data): + # + # $ mne watershed_bem -s fsaverage -d $PWD --verbose info --copy + # $ python + # >>> bem = mne.make_bem_model('fsaverage', subjects_dir='.', verbose=True) + # >>> mne.write_bem_surfaces( + # ... 'fsaverage/bem/fsaverage-5120-5120-5120-bem.fif', bem) + # >>> sol = mne.make_bem_solution(bem, verbose=True) + # >>> mne.write_bem_solution( + # ... 'fsaverage/bem/fsaverage-5120-5120-5120-bem-sol.fif', sol) + # >>> import os + # >>> import os.path as op + # >>> names = sorted(op.join(r, f) + # ... for r, d, files in os.walk('fsaverage') + # ... 
for f in files) + # with open('fsaverage.txt', 'w') as fid: + # fid.write('\n'.join(names)) + # + subjects_dir = _set_montage_coreg_path(subjects_dir) + subjects_dir = op.abspath(op.expanduser(subjects_dir)) + fs_dir = op.join(subjects_dir, 'fsaverage') + os.makedirs(fs_dir, exist_ok=True) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), + destination=op.join(subjects_dir), + url='https://osf.io/3bxqt/download?version=2', + hash_='5133fe92b7b8f03ae19219d5f46e4177', + ) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), + destination=op.join(subjects_dir, 'fsaverage'), + url='https://osf.io/7ve8g/download?version=4', + hash_='b31509cdcf7908af6a83dc5ee8f49fb1', + ) + return fs_dir + + +def _get_create_subjects_dir(subjects_dir): + subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) + if subjects_dir is None: + subjects_dir = _get_path(None, 'MNE_DATA', 'montage coregistration') + subjects_dir = op.join(subjects_dir, 'MNE-fsaverage-data') + os.makedirs(subjects_dir, exist_ok=True) + return subjects_dir + + +def _set_montage_coreg_path(subjects_dir=None): + """Set a subject directory suitable for montage(-only) coregistration. + + Parameters + ---------- + subjects_dir : str | None + The path to use as the subjects directory in the MNE-Python + config file. None will use the existing config variable (i.e., + will not change anything), and if it does not exist, will use + ``~/mne_data/MNE-fsaverage-data``. + + Returns + ------- + subjects_dir : str + The subjects directory that was used. + + See Also + -------- + mne.datasets.fetch_fsaverage + mne.get_config + mne.set_config + + Notes + ----- + If you plan to only do EEG-montage based coregistrations with fsaverage + without any MRI warping, this function can facilitate the process. + Essentially it sets the default value for ``subjects_dir`` in MNE + functions to be ``~/mne_data/MNE-fsaverage-data`` (assuming it has + not already been set to some other value). + + .. 
versionadded:: 0.18 + """ + subjects_dir = _get_create_subjects_dir(subjects_dir) + old_subjects_dir = get_subjects_dir(None, raise_error=False) + if old_subjects_dir is None: + set_config('SUBJECTS_DIR', subjects_dir) + return subjects_dir diff --git a/python/libs/mne/datasets/_fsaverage/bem.txt b/python/libs/mne/datasets/_fsaverage/bem.txt new file mode 100644 index 0000000..0f4ade5 --- /dev/null +++ b/python/libs/mne/datasets/_fsaverage/bem.txt @@ -0,0 +1,12 @@ +bem/fsaverage-fiducials.fif +bem/fsaverage-5120-5120-5120-bem.fif +bem/fsaverage-head.fif +bem/outer_skin.surf +bem/brain.surf +bem/fsaverage-trans.fif +bem/fsaverage-ico-5-src.fif +bem/fsaverage-vol-5-src.fif +bem/outer_skull.surf +bem/inner_skull.surf +bem/fsaverage-5120-5120-5120-bem-sol.fif +bem/fsaverage-inner_skull-bem.fif diff --git a/python/libs/mne/datasets/_fsaverage/root.txt b/python/libs/mne/datasets/_fsaverage/root.txt new file mode 100644 index 0000000..a6d3281 --- /dev/null +++ b/python/libs/mne/datasets/_fsaverage/root.txt @@ -0,0 +1,179 @@ +fsaverage/bem/fsaverage-head-dense.fif +fsaverage/bem/fsaverage-head-medium.fif +fsaverage/bem/fsaverage-head.fif +fsaverage/bem/fsaverage-ico-5-src.fif +fsaverage/label/lh.BA1.label +fsaverage/label/lh.BA2.label +fsaverage/label/lh.BA3a.label +fsaverage/label/lh.BA3b.label +fsaverage/label/lh.BA44.label +fsaverage/label/lh.BA45.label +fsaverage/label/lh.BA4a.label +fsaverage/label/lh.BA4p.label +fsaverage/label/lh.BA6.label +fsaverage/label/lh.HCPMMP1.annot +fsaverage/label/lh.HCPMMP1_combined.annot +fsaverage/label/lh.MT.label +fsaverage/label/lh.Medial_wall.label +fsaverage/label/lh.PALS_B12.labels.gii +fsaverage/label/lh.PALS_B12_Brodmann.annot +fsaverage/label/lh.PALS_B12_Lobes.annot +fsaverage/label/lh.PALS_B12_OrbitoFrontal.annot +fsaverage/label/lh.PALS_B12_Visuotopic.annot +fsaverage/label/lh.V1.label +fsaverage/label/lh.V2.label +fsaverage/label/lh.Yeo2011_17Networks_N1000.annot +fsaverage/label/lh.Yeo2011_7Networks_N1000.annot +fsaverage/label/lh.aparc.a2005s.annot +fsaverage/label/lh.aparc.a2009s.annot +fsaverage/label/lh.aparc.annot +fsaverage/label/lh.aparc.label +fsaverage/label/lh.aparc_sub.annot +fsaverage/label/lh.cortex.label +fsaverage/label/lh.entorhinal.label +fsaverage/label/lh.oasis.chubs.annot +fsaverage/label/rh.BA1.label +fsaverage/label/rh.BA2.label +fsaverage/label/rh.BA3a.label +fsaverage/label/rh.BA3b.label +fsaverage/label/rh.BA44.label +fsaverage/label/rh.BA45.label +fsaverage/label/rh.BA4a.label +fsaverage/label/rh.BA4p.label +fsaverage/label/rh.BA6.label +fsaverage/label/rh.HCPMMP1.annot +fsaverage/label/rh.HCPMMP1_combined.annot +fsaverage/label/rh.MT.label +fsaverage/label/rh.Medial_wall.label +fsaverage/label/rh.PALS_B12.labels.gii +fsaverage/label/rh.PALS_B12_Brodmann.annot +fsaverage/label/rh.PALS_B12_Lobes.annot +fsaverage/label/rh.PALS_B12_OrbitoFrontal.annot +fsaverage/label/rh.PALS_B12_Visuotopic.annot +fsaverage/label/rh.V1.label +fsaverage/label/rh.V2.label +fsaverage/label/rh.Yeo2011_17Networks_N1000.annot +fsaverage/label/rh.Yeo2011_7Networks_N1000.annot +fsaverage/label/rh.aparc.a2005s.annot +fsaverage/label/rh.aparc.a2009s.annot +fsaverage/label/rh.aparc.annot +fsaverage/label/rh.aparc.label +fsaverage/label/rh.aparc_sub.annot +fsaverage/label/rh.cortex.label +fsaverage/label/rh.entorhinal.label +fsaverage/label/rh.oasis.chubs.annot +fsaverage/mri.2mm/README +fsaverage/mri.2mm/T1.mgz +fsaverage/mri.2mm/aseg.mgz +fsaverage/mri.2mm/brain.mgz +fsaverage/mri.2mm/brainmask.mgz +fsaverage/mri.2mm/mni305.cor.mgz 
+fsaverage/mri.2mm/orig.mgz +fsaverage/mri.2mm/reg.2mm.dat +fsaverage/mri.2mm/reg.2mm.mni152.dat +fsaverage/mri.2mm/subcort.mask.mgz +fsaverage/mri.2mm/subcort.prob.mgz +fsaverage/mri/T1.mgz +fsaverage/mri/aparc+aseg.mgz +fsaverage/mri/aparc.a2005s+aseg.mgz +fsaverage/mri/aparc.a2009s+aseg.mgz +fsaverage/mri/aseg.mgz +fsaverage/mri/brain.mgz +fsaverage/mri/brainmask.mgz +fsaverage/mri/lh.ribbon.mgz +fsaverage/mri/mni305.cor.mgz +fsaverage/mri/orig.mgz +fsaverage/mri/p.aseg.mgz +fsaverage/mri/rh.ribbon.mgz +fsaverage/mri/ribbon.mgz +fsaverage/mri/seghead.mgz +fsaverage/mri/subcort.prob.log +fsaverage/mri/subcort.prob.mgz +fsaverage/mri/transforms/reg.mni152.2mm.dat +fsaverage/mri/transforms/talairach.xfm +fsaverage/scripts/build-stamp.txt +fsaverage/scripts/csurfdir +fsaverage/scripts/make_average_surface.log +fsaverage/scripts/make_average_volume.log +fsaverage/scripts/mkheadsurf.log +fsaverage/scripts/mris_inflate.log +fsaverage/scripts/mris_inflate_lh.log +fsaverage/scripts/mris_inflate_rh.log +fsaverage/scripts/recon-all-status.log +fsaverage/scripts/recon-all.cmd +fsaverage/scripts/recon-all.done +fsaverage/scripts/recon-all.env +fsaverage/scripts/recon-all.env.bak +fsaverage/scripts/recon-all.local-copy +fsaverage/scripts/recon-all.log +fsaverage/surf/lh.area +fsaverage/surf/lh.area.seghead +fsaverage/surf/lh.avg_curv +fsaverage/surf/lh.avg_sulc +fsaverage/surf/lh.avg_thickness +fsaverage/surf/lh.cortex.patch.3d +fsaverage/surf/lh.cortex.patch.flat +fsaverage/surf/lh.curv +fsaverage/surf/lh.curv.seghead +fsaverage/surf/lh.fsaverage_sym.sphere.reg +fsaverage/surf/lh.inflated +fsaverage/surf/lh.inflated.H +fsaverage/surf/lh.inflated.K +fsaverage/surf/lh.inflated_avg +fsaverage/surf/lh.inflated_pre +fsaverage/surf/lh.orig +fsaverage/surf/lh.orig.avg.area.mgh +fsaverage/surf/lh.orig_avg +fsaverage/surf/lh.pial +fsaverage/surf/lh.pial.avg.area.mgh +fsaverage/surf/lh.pial_avg +fsaverage/surf/lh.pial_semi_inflated +fsaverage/surf/lh.seghead +fsaverage/surf/lh.seghead.inflated +fsaverage/surf/lh.smoothwm +fsaverage/surf/lh.sphere +fsaverage/surf/lh.sphere.left_right +fsaverage/surf/lh.sphere.reg +fsaverage/surf/lh.sphere.reg.avg +fsaverage/surf/lh.sulc +fsaverage/surf/lh.sulc.seghead +fsaverage/surf/lh.thickness +fsaverage/surf/lh.white +fsaverage/surf/lh.white.avg.area.mgh +fsaverage/surf/lh.white_avg +fsaverage/surf/lh.white_avg.H +fsaverage/surf/lh.white_avg.K +fsaverage/surf/mris_preproc.surface.lh.log +fsaverage/surf/mris_preproc.surface.rh.log +fsaverage/surf/rh.area +fsaverage/surf/rh.avg_curv +fsaverage/surf/rh.avg_sulc +fsaverage/surf/rh.avg_thickness +fsaverage/surf/rh.cortex.patch.3d +fsaverage/surf/rh.cortex.patch.flat +fsaverage/surf/rh.curv +fsaverage/surf/rh.fsaverage_sym.sphere.reg +fsaverage/surf/rh.inflated +fsaverage/surf/rh.inflated.H +fsaverage/surf/rh.inflated.K +fsaverage/surf/rh.inflated_avg +fsaverage/surf/rh.inflated_pre +fsaverage/surf/rh.orig +fsaverage/surf/rh.orig.avg.area.mgh +fsaverage/surf/rh.orig_avg +fsaverage/surf/rh.pial +fsaverage/surf/rh.pial.avg.area.mgh +fsaverage/surf/rh.pial_avg +fsaverage/surf/rh.pial_semi_inflated +fsaverage/surf/rh.smoothwm +fsaverage/surf/rh.sphere +fsaverage/surf/rh.sphere.left_right +fsaverage/surf/rh.sphere.reg +fsaverage/surf/rh.sphere.reg.avg +fsaverage/surf/rh.sulc +fsaverage/surf/rh.thickness +fsaverage/surf/rh.white +fsaverage/surf/rh.white.avg.area.mgh +fsaverage/surf/rh.white_avg +fsaverage/surf/rh.white_avg.H +fsaverage/surf/rh.white_avg.K diff --git a/python/libs/mne/datasets/_infant/ANTS1-0Months3T.txt 
b/python/libs/mne/datasets/_infant/ANTS1-0Months3T.txt new file mode 100644 index 0000000..fc77ace --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS1-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS1-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS1-0Months3T-5120-5120-5120-bem.fif +bem/ANTS1-0Months3T-fiducials.fif +bem/ANTS1-0Months3T-head.fif +bem/ANTS1-0Months3T-oct-6-src.fif +bem/ANTS1-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS10-5Months3T.txt b/python/libs/mne/datasets/_infant/ANTS10-5Months3T.txt new file mode 100644 index 0000000..cec0a3e --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS10-5Months3T.txt @@ -0,0 +1,115 @@ +bem/ANTS10-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS10-5Months3T-5120-5120-5120-bem.fif +bem/ANTS10-5Months3T-fiducials.fif +bem/ANTS10-5Months3T-head.fif +bem/ANTS10-5Months3T-oct-6-src.fif +bem/ANTS10-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot 
+label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS12-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS12-0Months3T.txt new file mode 100644 index 0000000..d1fdbbc --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS12-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS12-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS12-0Months3T-5120-5120-5120-bem.fif +bem/ANTS12-0Months3T-fiducials.fif +bem/ANTS12-0Months3T-head.fif +bem/ANTS12-0Months3T-oct-6-src.fif +bem/ANTS12-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt 
+mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS15-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS15-0Months3T.txt new file mode 100644 index 0000000..50487c0 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS15-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS15-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS15-0Months3T-5120-5120-5120-bem.fif +bem/ANTS15-0Months3T-fiducials.fif +bem/ANTS15-0Months3T-head.fif +bem/ANTS15-0Months3T-oct-6-src.fif +bem/ANTS15-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz 
+mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS18-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS18-0Months3T.txt new file mode 100644 index 0000000..8f386c8 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS18-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS18-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS18-0Months3T-5120-5120-5120-bem.fif +bem/ANTS18-0Months3T-fiducials.fif +bem/ANTS18-0Months3T-head.fif +bem/ANTS18-0Months3T-oct-6-src.fif +bem/ANTS18-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated 
+surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS2-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS2-0Months3T.txt new file mode 100644 index 0000000..2a6b9c2 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS2-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Months3T-5120-5120-5120-bem.fif +bem/ANTS2-0Months3T-fiducials.fif +bem/ANTS2-0Months3T-head.fif +bem/ANTS2-0Months3T-oct-6-src.fif +bem/ANTS2-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 
+surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS2-0Weeks3T.txt b/python/libs/mne/datasets/_infant/ANTS2-0Weeks3T.txt new file mode 100644 index 0000000..e940f24 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS2-0Weeks3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Weeks3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Weeks3T-5120-5120-5120-bem.fif +bem/ANTS2-0Weeks3T-fiducials.fif +bem/ANTS2-0Weeks3T-head.fif +bem/ANTS2-0Weeks3T-oct-6-src.fif +bem/ANTS2-0Weeks3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS2-0Years3T.txt b/python/libs/mne/datasets/_infant/ANTS2-0Years3T.txt new file mode 100644 index 0000000..7763969 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS2-0Years3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz 
+bem/ANTS2-0Years3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Years3T-5120-5120-5120-bem.fif +bem/ANTS2-0Years3T-fiducials.fif +bem/ANTS2-0Years3T-head.fif +bem/ANTS2-0Years3T-oct-6-src.fif +bem/ANTS2-0Years3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS3-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS3-0Months3T.txt new file mode 100644 index 0000000..29a7148 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS3-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS3-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS3-0Months3T-5120-5120-5120-bem.fif +bem/ANTS3-0Months3T-fiducials.fif +bem/ANTS3-0Months3T-head.fif +bem/ANTS3-0Months3T-oct-6-src.fif +bem/ANTS3-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot 
+label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS4-5Months3T.txt b/python/libs/mne/datasets/_infant/ANTS4-5Months3T.txt new file mode 100644 index 0000000..b918849 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS4-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS4-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS4-5Months3T-5120-5120-5120-bem.fif +bem/ANTS4-5Months3T-fiducials.fif +bem/ANTS4-5Months3T-head.fif +bem/ANTS4-5Months3T-oct-6-src.fif +bem/ANTS4-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt 
+mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS6-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS6-0Months3T.txt new file mode 100644 index 0000000..3235de4 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS6-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS6-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS6-0Months3T-5120-5120-5120-bem.fif +bem/ANTS6-0Months3T-fiducials.fif +bem/ANTS6-0Months3T-head.fif +bem/ANTS6-0Months3T-oct-6-src.fif +bem/ANTS6-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz 
+mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS7-5Months3T.txt b/python/libs/mne/datasets/_infant/ANTS7-5Months3T.txt new file mode 100644 index 0000000..8b38563 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS7-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS7-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS7-5Months3T-5120-5120-5120-bem.fif +bem/ANTS7-5Months3T-fiducials.fif +bem/ANTS7-5Months3T-head.fif +bem/ANTS7-5Months3T-oct-6-src.fif +bem/ANTS7-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated 
+surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/python/libs/mne/datasets/_infant/ANTS9-0Months3T.txt b/python/libs/mne/datasets/_infant/ANTS9-0Months3T.txt new file mode 100644 index 0000000..8d37f25 --- /dev/null +++ b/python/libs/mne/datasets/_infant/ANTS9-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS9-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS9-0Months3T-5120-5120-5120-bem.fif +bem/ANTS9-0Months3T-fiducials.fif +bem/ANTS9-0Months3T-head.fif +bem/ANTS9-0Months3T-oct-6-src.fif +bem/ANTS9-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H 
+surf/rh.inflated.K
+surf/rh.input
+surf/rh.orig
+surf/rh.orig.euler.txt
+surf/rh.orig_corrected
+surf/rh.pial
+surf/rh.qsphere
+surf/rh.qsphere.nofix
+surf/rh.smoothwm
+surf/rh.smoothwm1
+surf/rh.smoothwm2
+surf/rh.smoothwm3
+surf/rh.smoothwm4
+surf/rh.smoothwm5
+surf/rh.sphere
+surf/rh.sphere.reg
+surf/rh.sulc
+surf/rh.thickness
+surf/rh.white
\ No newline at end of file
diff --git a/python/libs/mne/datasets/_infant/base.py b/python/libs/mne/datasets/_infant/base.py
new file mode 100644
index 0000000..f4b1d34
--- /dev/null
+++ b/python/libs/mne/datasets/_infant/base.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson
+# License: BSD Style.
+
+import os
+import os.path as op
+
+from ..utils import _manifest_check_download
+from ...utils import verbose, get_subjects_dir, _check_option, _validate_type
+
+_AGES = '2wk 1mo 2mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr'
+# https://github.com/christian-oreilly/infant_template_paper/releases
+_ORIGINAL_URL = 'https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/{subject}.zip'  # noqa: E501
+# Formatted the same way as md5sum *.zip on Ubuntu:
+_ORIGINAL_HASHES = """
+851737d5f8f246883f2aef9819c6ec29 ANTS10-5Months3T.zip
+32ab6d025f4311433a82e81374f1a045 ANTS1-0Months3T.zip
+48ef349e7cc542fdf63ff36d7958ab57 ANTS12-0Months3T.zip
+bba22c95aa97988c6e8892d6169ed317 ANTS15-0Months3T.zip
+e1bfe5e3ef380592822ced446a4008c7 ANTS18-0Months3T.zip
+fa7bee6c0985b9cd15ba53820cd72ccd ANTS2-0Months3T.zip
+2ad90540cdf42837c09f8ce829458a35 ANTS2-0Weeks3T.zip
+73e6a8b2579b7959a96f7d294ffb7393 ANTS2-0Years3T.zip
+cb7b9752894e16a4938ddfe220f6286a ANTS3-0Months3T.zip
+16b2a6804c7d5443cfba2ad6f7d4ac6a ANTS4-5Months3T.zip
+dbdf2a9976121f2b106da96775690da3 ANTS6-0Months3T.zip
+75fe37a1bc80ed6793a8abb47681d5ab ANTS7-5Months3T.zip
+790f7dba0a264262e6c1c2dfdf216215 ANTS9-0Months3T.zip
+"""
+_MANIFEST_PATH = op.dirname(__file__)
+
+
+@verbose
+def fetch_infant_template(age, subjects_dir=None, *, verbose=None):
+    """Fetch and update an infant MRI template.
+
+    Parameters
+    ----------
+    age : str
+        Age to download. Can be one of ``{'2wk', '1mo', '2mo', '3mo', '4.5mo',
+        '6mo', '7.5mo', '9mo', '10.5mo', '12mo', '15mo', '18mo', '2yr'}``.
+    subjects_dir : str | None
+        The path to download the template data to.
+    %(verbose)s
+
+    Returns
+    -------
+    subject : str
+        The standard subject name, e.g. ``ANTS4-5Months3T``.
+
+    Notes
+    -----
+    If you use these templates in your work, please cite
+    :footcite:`OReillyEtAl2021` and :footcite:`RichardsEtAl2016`.
+
+    .. versionadded:: 0.23
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    # Code used to create the lists:
+    #
+    # $ for name in 2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years; do wget https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/ANTS${name}3T.zip; done  # noqa: E501
+    # $ md5sum ANTS*.zip
+    # $ python
+    # >>> import os.path as op
+    # >>> import zipfile
+    # >>> names = [f'ANTS{name}3T' for name in '2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years'.split()]  # noqa: E501
+    # >>> for name in names:
+    # ...     with zipfile.ZipFile(f'{name}.zip', 'r') as zip:
+    # ...         names = sorted(name for name in zip.namelist() if not zipfile.Path(zip, name).is_dir())  # noqa: E501
+    # ...         with open(f'{name}.txt', 'w') as fid:
+    # ...             fid.write('\n'.join(names))
+    _validate_type(age, str, 'age')
+    _check_option('age', age, _AGES.split())
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subjects_dir = op.abspath(subjects_dir)
+    unit = dict(wk='Weeks', mo='Months', yr='Years')[age[-2:]]
+    first = age[:-2].split('.')[0]
+    dash = '-5' if '.5' in age else '-0'
+    subject = f'ANTS{first}{dash}{unit}3T'
+    # Actually get and create the files
+    subj_dir = op.join(subjects_dir, subject)
+    os.makedirs(subj_dir, exist_ok=True)
+    # .zip -> hash mapping
+    orig_hashes = dict(line.strip().split()[::-1]
+                       for line in _ORIGINAL_HASHES.strip().splitlines())
+    _manifest_check_download(
+        manifest_path=op.join(_MANIFEST_PATH, f'{subject}.txt'),
+        destination=subj_dir,
+        url=_ORIGINAL_URL.format(subject=subject),
+        hash_=orig_hashes[f'{subject}.zip'],
+    )
+    return subject
diff --git a/python/libs/mne/datasets/_phantom/__init__.py b/python/libs/mne/datasets/_phantom/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/python/libs/mne/datasets/_phantom/base.py b/python/libs/mne/datasets/_phantom/base.py
new file mode 100644
index 0000000..d7416d4
--- /dev/null
+++ b/python/libs/mne/datasets/_phantom/base.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson
+# License: BSD Style.
+
+import os
+import os.path as op
+
+from ..utils import _manifest_check_download
+from ...utils import verbose, get_subjects_dir, _check_option, _validate_type
+
+PHANTOM_MANIFEST_PATH = op.dirname(__file__)
+
+
+@verbose
+def fetch_phantom(kind, subjects_dir=None, *, verbose=None):
+    """Fetch and update a phantom subject.
+
+    Parameters
+    ----------
+    kind : str
+        The kind of phantom to fetch. Currently only ``'otaniemi'`` is
+        supported.
+    %(subjects_dir)s
+    %(verbose)s
+
+    Returns
+    -------
+    subject_dir : str
+        The resulting phantom subject directory.
+
+    See Also
+    --------
+    mne.dipole.get_phantom_dipoles
+
+    Notes
+    -----
+    This function is designed to provide a head surface and T1.mgz for
+    the 32-dipole Otaniemi phantom. The VectorView/TRIUX phantom has the same
+    basic outside geometry, but different internal dipole positions.
+
+    Unlike most FreeSurfer subjects, the Otaniemi phantom scan was aligned
+    to the "head" coordinate frame, so an identity head<->MRI :term:`trans`
+    is appropriate.
+
+    .. versionadded:: 0.24
+    """
+    phantoms = dict(
+        otaniemi=dict(url='https://osf.io/j5czy/download?version=1',
                      hash='42d17db5b1db3e30327ffb4cf2649de8'),
+    )
+    _validate_type(kind, str, 'kind')
+    _check_option('kind', kind, list(phantoms))
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject = f'phantom_{kind}'
+    subject_dir = op.join(subjects_dir, subject)
+    os.makedirs(subject_dir, exist_ok=True)
+    _manifest_check_download(
+        manifest_path=op.join(PHANTOM_MANIFEST_PATH, f'{subject}.txt'),
+        destination=subjects_dir,
+        url=phantoms[kind]['url'],
+        hash_=phantoms[kind]['hash'],
+    )
+    return subject_dir
diff --git a/python/libs/mne/datasets/_phantom/phantom_otaniemi.txt b/python/libs/mne/datasets/_phantom/phantom_otaniemi.txt
new file mode 100644
index 0000000..84f8302
--- /dev/null
+++ b/python/libs/mne/datasets/_phantom/phantom_otaniemi.txt
@@ -0,0 +1,3 @@
+phantom_otaniemi/bem/phantom_otaniemi-fiducials.fif
+phantom_otaniemi/mri/T1.mgz
+phantom_otaniemi/surf/lh.seghead
diff --git a/python/libs/mne/datasets/brainstorm/__init__.py b/python/libs/mne/datasets/brainstorm/__init__.py
new file mode 100644
index 0000000..8dcf9b7
--- /dev/null
+++ b/python/libs/mne/datasets/brainstorm/__init__.py
@@ -0,0 +1,4 @@
+"""Brainstorm datasets."""
+
+from . import (bst_raw, bst_resting, bst_auditory, bst_phantom_ctf,
+               bst_phantom_elekta)
diff --git a/python/libs/mne/datasets/brainstorm/bst_auditory.py b/python/libs/mne/datasets/brainstorm/bst_auditory.py
new file mode 100644
index 0000000..41c2f07
--- /dev/null
+++ b/python/libs/mne/datasets/brainstorm/bst_auditory.py
@@ -0,0 +1,50 @@
+# Authors: Mainak Jas
+#
+# License: BSD-3-Clause
+from ...utils import verbose
+from ..utils import (_get_version, _version_doc,
+                     _data_path_doc_accept, _download_mne_dataset)
+
+_description = """
+URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
+    - One subject, two acquisition runs of 6 minutes each
+    - Subject stimulated binaurally with intra-aural earphones
+      (air tubes+transducers)
+    - Each run contains:
+      - 200 regular beeps (440Hz)
+      - 40 easy deviant beeps (554.4Hz, 4 semitones higher)
+    - Random inter-stimulus interval: between 0.7s and 1.7s, uniformly
+      distributed
+    - The subject presses a button when detecting a deviant with the right
+      index finger
+    - Auditory stimuli generated with the Matlab Psychophysics toolbox
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, accept=False, *, verbose=None):  # noqa: D103
+    return _download_mne_dataset(
+        name='bst_auditory', processor='nested_untar', path=path,
+        force_update=force_update, update_path=update_path,
+        download=download, accept=accept)
+
+
+_data_path_doc = _data_path_doc_accept.format(
+    name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_auditory) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():  # noqa: D103
+    return _get_version('bst_auditory')
+
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_auditory) dataset."""
+    for desc in _description.splitlines():
+        print(desc)
diff --git a/python/libs/mne/datasets/brainstorm/bst_phantom_ctf.py b/python/libs/mne/datasets/brainstorm/bst_phantom_ctf.py
new file mode 100644
index 0000000..87300a8
--- /dev/null
+++ b/python/libs/mne/datasets/brainstorm/bst_phantom_ctf.py
@@ -0,0 +1,39 @@
+# Authors: Eric Larson
+#
+# License: BSD-3-Clause
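+#
+# A minimal usage sketch (it assumes only the public helpers defined in
+# this file; downloading requires accepting the MEG Lab license quoted in
+# config.py, hence ``accept=True``):
+#
+#     from mne.datasets.brainstorm import bst_phantom_ctf
+#
+#     path = bst_phantom_ctf.data_path(accept=True)  # fetch on first call
+#     bst_phantom_ctf.description()                  # print the notes above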
+from ...utils import verbose +from ..utils import (_get_version, _version_doc, + _data_path_doc_accept, _download_mne_dataset) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf +""" + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, accept=False, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='bst_phantom_ctf', processor='nested_untar', path=path, + force_update=force_update, update_path=update_path, + download=download, accept=accept) + + +_data_path_doc = _data_path_doc_accept.format( + name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') +_data_path_doc = _data_path_doc.replace('brainstorm dataset', + 'brainstorm (bst_phantom_ctf) dataset') +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version('bst_phantom_ctf') + + +get_version.__doc__ = _version_doc.format(name='brainstorm') + + +def description(): + """Get description of brainstorm (bst_phantom_ctf) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/python/libs/mne/datasets/brainstorm/bst_phantom_elekta.py b/python/libs/mne/datasets/brainstorm/bst_phantom_elekta.py new file mode 100644 index 0000000..40f9266 --- /dev/null +++ b/python/libs/mne/datasets/brainstorm/bst_phantom_elekta.py @@ -0,0 +1,40 @@ +# Authors: Eric Larson +# +# License: BSD-3-Clause +from ...utils import verbose +from ..utils import (_get_version, _version_doc, + _data_path_doc_accept, _download_mne_dataset) + +_description = u""" +URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta +""" + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, accept=False, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='bst_phantom_elekta', processor='nested_untar', path=path, + force_update=force_update, update_path=update_path, + download=download, accept=accept) + + +_data_path_doc = _data_path_doc_accept.format( + name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') +_data_path_doc = _data_path_doc.replace('brainstorm dataset', + 'brainstorm (bst_phantom_elekta) ' + 'dataset') +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version('bst_phantom_elekta') + + +get_version.__doc__ = _version_doc.format(name='brainstorm') + + +def description(): + """Get description of brainstorm (bst_phantom_elekta) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/python/libs/mne/datasets/brainstorm/bst_raw.py b/python/libs/mne/datasets/brainstorm/bst_raw.py new file mode 100644 index 0000000..0616ca1 --- /dev/null +++ b/python/libs/mne/datasets/brainstorm/bst_raw.py @@ -0,0 +1,68 @@ +# Authors: Mainak Jas +# +# License: BSD-3-Clause +from functools import partial + +from ...utils import verbose, get_config +from ..utils import (has_dataset, _get_version, _version_doc, + _data_path_doc_accept, _download_mne_dataset) + + +has_brainstorm_data = partial(has_dataset, name='bst_raw') + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf + - One subject, one acquisition run of 6 minutes + - Subject stimulated using Digitimer Constant Current Stimulator + (model DS7A) + - The run contains 200 electric stimulations randomly distributed between + left and right: + - 102 stimulations of the left hand + - 98 stimulations of the right hand + - Inter-stimulus interval: jittered between [1500, 2000]ms + - Stimuli generated using PsychToolBox on 
Windows PC (TTL pulse generated + with the parallel port connected to the Digitimer via the rear panel BNC) +""" + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, accept=False, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='bst_raw', processor='nested_untar', path=path, + force_update=force_update, update_path=update_path, + download=download, accept=accept) + + +_data_path_doc = _data_path_doc_accept.format( + name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') +_data_path_doc = _data_path_doc.replace('brainstorm dataset', + 'brainstorm (bst_raw) dataset') +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version('bst_raw') + + +get_version.__doc__ = _version_doc.format(name='brainstorm') + + +def description(): # noqa: D103 + """Get description of brainstorm (bst_raw) dataset.""" + for desc in _description.splitlines(): + print(desc) + + +def _skip_bstraw_data(): + skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == + 'true') + skip = skip_testing or not has_brainstorm_data() + return skip + + +def requires_bstraw_data(func): + """Skip testing data test.""" + import pytest + return pytest.mark.skipif(_skip_bstraw_data(), + reason='Requires brainstorm dataset')(func) diff --git a/python/libs/mne/datasets/brainstorm/bst_resting.py b/python/libs/mne/datasets/brainstorm/bst_resting.py new file mode 100644 index 0000000..e0eb226 --- /dev/null +++ b/python/libs/mne/datasets/brainstorm/bst_resting.py @@ -0,0 +1,42 @@ +# Authors: Mainak Jas +# +# License: BSD-3-Clause +from ...utils import verbose +from ..utils import (_get_version, _version_doc, + _data_path_doc_accept, _download_mne_dataset) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/DatasetResting + - One subject + - Two runs of 10 min of resting state recordings + - Eyes open +""" + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, accept=False, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='bst_resting', processor='nested_untar', path=path, + force_update=force_update, update_path=update_path, + download=download, accept=accept) + + +_data_path_doc = _data_path_doc_accept.format( + name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') +_data_path_doc = _data_path_doc.replace('brainstorm dataset', + 'brainstorm (bst_resting) dataset') +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version('bst_resting') + + +get_version.__doc__ = _version_doc.format(name='brainstorm') + + +def description(): + """Get description of brainstorm (bst_resting) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/python/libs/mne/datasets/config.py b/python/libs/mne/datasets/config.py new file mode 100644 index 0000000..ceeab6a --- /dev/null +++ b/python/libs/mne/datasets/config.py @@ -0,0 +1,325 @@ +# Authors: Adam Li +# Daniel McCloy +# +# License: BSD Style. + + +_bst_license_text = """ +License +------- +This tutorial dataset (EEG and MRI data) remains a property of the MEG Lab, +McConnell Brain Imaging Center, Montreal Neurological Institute, +McGill University, Canada. Its use and transfer outside the Brainstorm +tutorial, e.g. for research purposes, is prohibited without written consent +from the MEG Lab. 
+ +If you reference this dataset in your publications, please: + + 1) acknowledge its authors: Elizabeth Bock, Esther Florin, Francois Tadel + and Sylvain Baillet, and + 2) cite Brainstorm as indicated on the website: + http://neuroimage.usc.edu/brainstorm + +For questions, please contact Francois Tadel (francois.tadel@mcgill.ca). +""" + +_hcp_mmp_license_text = """ +License +------- +I request access to data collected by the Washington University - University +of Minnesota Consortium of the Human Connectome Project (WU-Minn HCP), and +I agree to the following: + +1. I will not attempt to establish the identity of or attempt to contact any + of the included human subjects. + +2. I understand that under no circumstances will the code that would link + these data to Protected Health Information be given to me, nor will any + additional information about individual human subjects be released to me + under these Open Access Data Use Terms. + +3. I will comply with all relevant rules and regulations imposed by my + institution. This may mean that I need my research to be approved or + declared exempt by a committee that oversees research on human subjects, + e.g. my IRB or Ethics Committee. The released HCP data are not considered + de-identified, insofar as certain combinations of HCP Restricted Data + (available through a separate process) might allow identification of + individuals. Different committees operate under different national, state + and local laws and may interpret regulations differently, so it is + important to ask about this. If needed and upon request, the HCP will + provide a certificate stating that you have accepted the HCP Open Access + Data Use Terms. + +4. I may redistribute original WU-Minn HCP Open Access data and any derived + data as long as the data are redistributed under these same Data Use Terms. + +5. I will acknowledge the use of WU-Minn HCP data and data derived from + WU-Minn HCP data when publicly presenting any results or algorithms + that benefitted from their use. + + 1. Papers, book chapters, books, posters, oral presentations, and all + other printed and digital presentations of results derived from HCP + data should contain the following wording in the acknowledgments + section: "Data were provided [in part] by the Human Connectome + Project, WU-Minn Consortium (Principal Investigators: David Van Essen + and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and + Centers that support the NIH Blueprint for Neuroscience Research; and + by the McDonnell Center for Systems Neuroscience at Washington + University." + + 2. Authors of publications or presentations using WU-Minn HCP data + should cite relevant publications describing the methods used by the + HCP to acquire and process the data. The specific publications that + are appropriate to cite in any given study will depend on what HCP + data were used and for what purposes. An annotated and appropriately + up-to-date list of publications that may warrant consideration is + available at http://www.humanconnectome.org/about/acknowledgehcp.html + + 3. The WU-Minn HCP Consortium as a whole should not be included as an + author of publications or presentations if this authorship would be + based solely on the use of WU-Minn HCP data. + +6. Failure to abide by these guidelines will result in termination of my + privileges to access WU-Minn HCP data. 
+""" + +# To update the `testing` or `misc` datasets, push or merge commits to their +# respective repos, and make a new release of the dataset on GitHub. Then +# update the checksum in the MNE_DATASETS dict below, and change version +# here: ↓↓↓↓↓ ↓↓↓ +RELEASES = dict(testing='0.132', misc='0.23') +TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}' +MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}' + +# To update any other dataset besides `testing` or `misc`, upload the new +# version of the data archive itself (e.g., to https://osf.io or wherever) and +# then update the corresponding checksum in the MNE_DATASETS dict entry below. +MNE_DATASETS = dict() + +# MANDATORY KEYS: +# - archive_name : the name of the compressed file that is downloaded +# - hash : the checksum type followed by a colon and then the checksum value +# (examples: "sha256:19uheid...", "md5:upodh2io...") +# - url : URL from which the file can be downloaded +# - folder_name : the subfolder within the MNE data folder in which to save and +# uncompress (if needed) the file(s) +# +# OPTIONAL KEYS: +# - config_key : key to use with `mne.set_config` to store the on-disk location +# of the downloaded dataset (ex: "MNE_DATASETS_EEGBCI_PATH"). + +# Testing and misc are at the top as they're updated most often +MNE_DATASETS['testing'] = dict( + archive_name=f'{TESTING_VERSIONED}.tar.gz', # 'mne-testing-data', + hash='md5:2ff8bcd18053af3ee0587dce9d6ab516', + url=('https://codeload.github.com/mne-tools/mne-testing-data/' + f'tar.gz/{RELEASES["testing"]}'), + folder_name='MNE-testing-data', + config_key='MNE_DATASETS_TESTING_PATH', +) +MNE_DATASETS['misc'] = dict( + archive_name=f'{MISC_VERSIONED}.tar.gz', # 'mne-misc-data', + hash='md5:01e409d82ff11ca8b19a27c4f7ee6794', + url=('https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/' + f'{RELEASES["misc"]}'), + folder_name='MNE-misc-data', + config_key='MNE_DATASETS_MISC_PATH' +) + +MNE_DATASETS['fnirs_motor'] = dict( + archive_name='MNE-fNIRS-motor-data.tgz', + hash='md5:c4935d19ddab35422a69f3326a01fef8', + url='https://osf.io/dj3eh/download?version=1', + folder_name='MNE-fNIRS-motor-data', + config_key='MNE_DATASETS_FNIRS_MOTOR_PATH', +) + +MNE_DATASETS['kiloword'] = dict( + archive_name='MNE-kiloword-data.tar.gz', + hash='md5:3a124170795abbd2e48aae8727e719a8', + url='https://osf.io/qkvf9/download?version=1', + folder_name='MNE-kiloword-data', + config_key='MNE_DATASETS_KILOWORD_PATH', +) + +MNE_DATASETS['multimodal'] = dict( + archive_name='MNE-multimodal-data.tar.gz', + hash='md5:26ec847ae9ab80f58f204d09e2c08367', + url='https://ndownloader.figshare.com/files/5999598', + folder_name='MNE-multimodal-data', + config_key='MNE_DATASETS_MULTIMODAL_PATH', +) + +MNE_DATASETS['opm'] = dict( + archive_name='MNE-OPM-data.tar.gz', + hash='md5:370ad1dcfd5c47e029e692c85358a374', + url='https://osf.io/p6ae7/download?version=2', + folder_name='MNE-OPM-data', + config_key='MNE_DATASETS_OPM_PATH', +) + +MNE_DATASETS['phantom_4dbti'] = dict( + archive_name='MNE-phantom-4DBTi.zip', + hash='md5:938a601440f3ffa780d20a17bae039ff', + url='https://osf.io/v2brw/download?version=2', + folder_name='MNE-phantom-4DBTi', + config_key='MNE_DATASETS_PHANTOM_4DBTI_PATH', +) + +MNE_DATASETS['sample'] = dict( + archive_name='MNE-sample-data-processed.tar.gz', + hash='md5:e8f30c4516abdc12a0c08e6bae57409c', + url='https://osf.io/86qa2/download?version=6', + folder_name='MNE-sample-data', + config_key='MNE_DATASETS_SAMPLE_PATH', +) + +MNE_DATASETS['somato'] = dict( + 
archive_name='MNE-somato-data.tar.gz', + hash='md5:32fd2f6c8c7eb0784a1de6435273c48b', + url='https://osf.io/tp4sg/download?version=7', + folder_name='MNE-somato-data', + config_key='MNE_DATASETS_SOMATO_PATH' +) + +MNE_DATASETS['spm'] = dict( + archive_name='MNE-spm-face.tar.gz', + hash='md5:9f43f67150e3b694b523a21eb929ea75', + url='https://osf.io/je4s8/download?version=2', + folder_name='MNE-spm-face', + config_key='MNE_DATASETS_SPM_FACE_PATH', +) + +# Visual 92 categories has the dataset split into 2 files. +# We define a dictionary holding the items with the same +# value across both files: folder name and configuration key. +MNE_DATASETS['visual_92_categories'] = dict( + folder_name='MNE-visual_92_categories-data', + config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', +) +MNE_DATASETS['visual_92_categories_1'] = dict( + archive_name='MNE-visual_92_categories-data-part1.tar.gz', + hash='md5:74f50bbeb65740903eadc229c9fa759f', + url='https://osf.io/8ejrs/download?version=1', + folder_name='MNE-visual_92_categories-data', + config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', +) +MNE_DATASETS['visual_92_categories_2'] = dict( + archive_name='MNE-visual_92_categories-data-part2.tar.gz', + hash='md5:203410a98afc9df9ae8ba9f933370e20', + url='https://osf.io/t4yjp/download?version=1', + folder_name='MNE-visual_92_categories-data', + config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', +) + +MNE_DATASETS['mtrf'] = dict( + archive_name='mTRF_1.5.zip', + hash='md5:273a390ebbc48da2c3184b01a82e4636', + url='https://osf.io/h85s2/download?version=1', + folder_name='mTRF_1.5', + config_key='MNE_DATASETS_MTRF_PATH' +) +MNE_DATASETS['refmeg_noise'] = dict( + archive_name='sample_reference_MEG_noise-raw.zip', + hash='md5:779fecd890d98b73a4832e717d7c7c45', + url='https://osf.io/drt6v/download?version=1', + folder_name='MNE-refmeg-noise-data', + config_key='MNE_DATASETS_REFMEG_NOISE_PATH' +) + +MNE_DATASETS['ssvep'] = dict( + archive_name='ssvep_example_data.zip', + hash='md5:af866bbc0f921114ac9d683494fe87d6', + url='https://osf.io/z8h6k/download?version=5', + folder_name='ssvep-example-data', + config_key='MNE_DATASETS_SSVEP_PATH' +) + +MNE_DATASETS['erp_core'] = dict( + archive_name='MNE-ERP-CORE-data.tar.gz', + hash='md5:5866c0d6213bd7ac97f254c776f6c4b1', + url='https://osf.io/rzgba/download?version=1', + folder_name='MNE-ERP-CORE-data', + config_key='MNE_DATASETS_ERP_CORE_PATH', +) + +MNE_DATASETS['epilepsy_ecog'] = dict( + archive_name='MNE-epilepsy-ecog-data.tar.gz', + hash='md5:ffb139174afa0f71ec98adbbb1729dea', + url='https://osf.io/z4epq/download?version=1', + folder_name='MNE-epilepsy-ecog-data', + config_key='MNE_DATASETS_EPILEPSY_ECOG_PATH', +) + +# Fieldtrip CMC dataset +MNE_DATASETS['fieldtrip_cmc'] = dict( + archive_name='SubjectCMC.zip', + hash='md5:6f9fd6520f9a66e20994423808d2528c', + url='https://osf.io/j9b6s/download?version=1', + folder_name='MNE-fieldtrip_cmc-data', + config_key='MNE_DATASETS_FIELDTRIP_CMC_PATH' +) + +# brainstorm datasets: +MNE_DATASETS['bst_auditory'] = dict( + archive_name='bst_auditory.tar.gz', + hash='md5:fa371a889a5688258896bfa29dd1700b', + url='https://osf.io/5t9n8/download?version=1', + folder_name='MNE-brainstorm-data', + config_key='MNE_DATASETS_BRAINSTORM_PATH', +) +MNE_DATASETS['bst_phantom_ctf'] = dict( + archive_name='bst_phantom_ctf.tar.gz', + hash='md5:80819cb7f5b92d1a5289db3fb6acb33c', + url='https://osf.io/sxr8y/download?version=1', + folder_name='MNE-brainstorm-data', + config_key='MNE_DATASETS_BRAINSTORM_PATH', +) +MNE_DATASETS['bst_phantom_elekta'] 
= dict( + archive_name='bst_phantom_elekta.tar.gz', + hash='md5:1badccbe17998d18cc373526e86a7aaf', + url='https://osf.io/dpcku/download?version=1', + folder_name='MNE-brainstorm-data', + config_key='MNE_DATASETS_BRAINSTORM_PATH', +) +MNE_DATASETS['bst_raw'] = dict( + archive_name='bst_raw.tar.gz', + hash='md5:fa2efaaec3f3d462b319bc24898f440c', + url='https://osf.io/9675n/download?version=2', + folder_name='MNE-brainstorm-data', + config_key='MNE_DATASETS_BRAINSTORM_PATH', +) +MNE_DATASETS['bst_resting'] = dict( + archive_name='bst_resting.tar.gz', + hash='md5:70fc7bf9c3b97c4f2eab6260ee4a0430', + url='https://osf.io/m7bd3/download?version=3', + folder_name='MNE-brainstorm-data', + config_key='MNE_DATASETS_BRAINSTORM_PATH', +) + +# HF-SEF +MNE_DATASETS['hf_sef_raw'] = dict( + archive_name='hf_sef_raw.tar.gz', + hash='md5:33934351e558542bafa9b262ac071168', + url='https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz', + folder_name='hf_sef', + config_key='MNE_DATASETS_HF_SEF_PATH', +) +MNE_DATASETS['hf_sef_evoked'] = dict( + archive_name='hf_sef_evoked.tar.gz', + hash='md5:13d34cb5db584e00868677d8fb0aab2b', + url=('https://zenodo.org/record/3523071/files/' + 'hf_sef_evoked.tar.gz'), + folder_name='hf_sef', + config_key='MNE_DATASETS_HF_SEF_PATH', +) + +# "fake" dataset (for testing) +MNE_DATASETS['fake'] = dict( + archive_name='foo.tgz', + hash='md5:3194e9f7b46039bb050a74f3e1ae9908', + url=('https://github.com/mne-tools/mne-testing-data/raw/master/' + 'datasets/foo.tgz'), + folder_name='foo', + config_key='MNE_DATASETS_FAKE_PATH' +) diff --git a/python/libs/mne/datasets/eegbci/__init__.py b/python/libs/mne/datasets/eegbci/__init__.py new file mode 100644 index 0000000..7be4fbc --- /dev/null +++ b/python/libs/mne/datasets/eegbci/__init__.py @@ -0,0 +1,3 @@ +"""EEG Motor Movement/Imagery Dataset.""" + +from .eegbci import data_path, load_data, standardize diff --git a/python/libs/mne/datasets/eegbci/eegbci.py b/python/libs/mne/datasets/eegbci/eegbci.py new file mode 100644 index 0000000..2664085 --- /dev/null +++ b/python/libs/mne/datasets/eegbci/eegbci.py @@ -0,0 +1,221 @@ +# Author: Martin Billinger +# Adam Li +# Daniel McCloy +# License: BSD Style. + +import os +from os import path as op +import pkg_resources +import re + +from ..utils import _get_path, _do_path_update, _mne_path +from ...utils import _url_to_local_path, verbose + + +EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/' + + +@verbose +def data_path(url, path=None, force_update=False, update_path=None, *, + verbose=None): + """Get path to local copy of EEGMMI dataset URL. + + This is a low-level function useful for getting a local copy of a + remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`. + + Parameters + ---------- + url : str + The dataset to use. + path : None | str + Location of where to look for the EEGBCI data storing location. + If None, the environment variable or config parameter + ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the + "~/mne_data" directory is used. If the EEGBCI dataset + is not found under the given path, the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + path : list of Path + Local path to the given data file. 
This path is contained inside a list
+        of length one, for compatibility.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import eegbci
+        >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'
+        >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets')  # doctest:+SKIP
+
+    This would download the given EEGBCI data file to the 'datasets' folder,
+    and prompt the user to save the 'datasets' path to the mne-python config,
+    if it isn't there already.
+
+    References
+    ----------
+    .. footbibliography::
+    """  # noqa: E501
+    import pooch
+
+    key = 'MNE_DATASETS_EEGBCI_PATH'
+    name = 'EEGBCI'
+    path = _get_path(path, key, name)
+    fname = 'MNE-eegbci-data'
+    destination = _url_to_local_path(url, op.join(path, fname))
+    destinations = [destination]
+
+    # Fetch the file
+    if not op.isfile(destination) or force_update:
+        if op.isfile(destination):
+            os.remove(destination)
+        if not op.isdir(op.dirname(destination)):
+            os.makedirs(op.dirname(destination))
+        pooch.retrieve(
+            url=url,
+            known_hash=None,  # no checksum available for ad-hoc URLs
+            path=destination,
+            fname=fname
+        )
+
+    # Offer to update the path
+    _do_path_update(path, update_path, key, name)
+    destinations = [_mne_path(dest) for dest in destinations]
+    return destinations
+
+
+@verbose
+def load_data(subject, runs, path=None, force_update=False, update_path=None,
+              base_url=EEGMI_URL, verbose=None):  # noqa: D301
+    """Get paths to local copies of EEGBCI dataset files.
+
+    This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also
+    available at PhysioNet :footcite:`GoldbergerEtAl2000`.
+
+    Parameters
+    ----------
+    subject : int
+        The subject to use. Can be in the range of 1-109 (inclusive).
+    runs : int | list of int
+        The runs to use. See Notes for details.
+    path : None | str
+        Location of where to look for the EEGBCI data storing location.
+        If None, the environment variable or config parameter
+        ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the EEGBCI dataset
+        is not found under the given path, the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    base_url : str
+        The URL root for the data.
+    %(verbose)s
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    Notes
+    -----
+    The run numbers correspond to:
+
+    ========= ===================================
+    run       task
+    ========= ===================================
+    1         Baseline, eyes open
+    2         Baseline, eyes closed
+    3, 7, 11  Motor execution: left vs right hand
+    4, 8, 12  Motor imagery: left vs right hand
+    5, 9, 13  Motor execution: hands vs feet
+    6, 10, 14 Motor imagery: hands vs feet
+    ========= ===================================
+
+    For example, one could do::
+
+        >>> from mne.datasets import eegbci
+        >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets')  # doctest:+SKIP
+
+    This would download runs 4, 10, and 14 (hand/foot motor imagery) from
+    subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the
+    user to save the 'datasets' path to the mne-python config, if it isn't
+    there already.
+
+    References
+    ----------
+    ..
footbibliography:: + """ # noqa: E501 + import pooch + + if not hasattr(runs, '__iter__'): + runs = [runs] + + # get local storage path + config_key = 'MNE_DATASETS_EEGBCI_PATH' + folder = 'MNE-eegbci-data' + name = 'EEGBCI' + path = _get_path(path, config_key, name) + + # extract path parts + pattern = r'(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?' + match = re.compile(pattern).match(base_url) + if match is None: + raise ValueError('base_url does not match the expected EEGMI folder ' + 'structure. Please notify MNE-Python developers.') + base_path = op.join(path, folder, *match.groups()) + + # create the download manager + fetcher = pooch.create( + path=base_path, + base_url=base_url, + version=None, # Data versioning is decoupled from MNE-Python version. + registry=None, # Registry is loaded from file, below. + retry_if_failed=2 # 2 retries = 3 total attempts + ) + + # load the checksum registry + registry = pkg_resources.resource_stream( + 'mne', op.join('data', 'eegbci_checksums.txt')) + fetcher.load_registry(registry) + + # fetch the file(s) + data_paths = [] + for run in runs: + file_part = f'S{subject:03d}/S{subject:03d}R{run:02d}.edf' + destination = op.join(base_path, file_part) + if force_update and op.isfile(destination): + os.remove(destination) + data_paths.append(fetcher.fetch(file_part)) + # update path in config if desired + _do_path_update(path, update_path, config_key, name) + return data_paths + + +def standardize(raw): + """Standardize channel positions and names. + + Parameters + ---------- + raw : instance of Raw + The raw data to standardize. Operates in-place. + """ + rename = dict() + for name in raw.ch_names: + std_name = name.strip('.') + std_name = std_name.upper() + if std_name.endswith('Z'): + std_name = std_name[:-1] + 'z' + if std_name.startswith('FP'): + std_name = 'Fp' + std_name[2:] + rename[name] = std_name + raw.rename_channels(rename) diff --git a/python/libs/mne/datasets/epilepsy_ecog/__init__.py b/python/libs/mne/datasets/epilepsy_ecog/__init__.py new file mode 100644 index 0000000..10982c2 --- /dev/null +++ b/python/libs/mne/datasets/epilepsy_ecog/__init__.py @@ -0,0 +1,3 @@ +"""Clinical epilepsy datasets.""" + +from ._data import data_path, get_version diff --git a/python/libs/mne/datasets/epilepsy_ecog/_data.py b/python/libs/mne/datasets/epilepsy_ecog/_data.py new file mode 100644 index 0000000..33535c1 --- /dev/null +++ b/python/libs/mne/datasets/epilepsy_ecog/_data.py @@ -0,0 +1,27 @@ +# Authors: Adam Li +# Alex Rockhill +# License: BSD Style. 
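+#
+# A minimal usage sketch (it assumes only the helpers defined in this file;
+# the folder name comes from the 'epilepsy_ecog' entry in config.py):
+#
+#     from mne.datasets import epilepsy_ecog
+#
+#     root = epilepsy_ecog.data_path()    # fetches MNE-epilepsy-ecog-data once
+#     print(epilepsy_ecog.get_version())  # version recorded for the download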
+ +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='epilepsy_ecog', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format( + name='epilepsy_ecog', conf='MNE_DATASETS_EPILEPSY_ECOG_PATH') + + +def get_version(): # noqa: D103 + return _get_version('epilepsy_ecog') + + +get_version.__doc__ = _version_doc.format(name='epilepsy_ecog') diff --git a/python/libs/mne/datasets/erp_core/__init__.py b/python/libs/mne/datasets/erp_core/__init__.py new file mode 100644 index 0000000..9e25883 --- /dev/null +++ b/python/libs/mne/datasets/erp_core/__init__.py @@ -0,0 +1,3 @@ +"""ERP-CORE EEG dataset.""" + +from .erp_core import data_path, get_version diff --git a/python/libs/mne/datasets/erp_core/erp_core.py b/python/libs/mne/datasets/erp_core/erp_core.py new file mode 100644 index 0000000..76bd62c --- /dev/null +++ b/python/libs/mne/datasets/erp_core/erp_core.py @@ -0,0 +1,23 @@ +from ...utils import verbose +from ..utils import (_data_path_doc, + _get_version, _version_doc, _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='erp_core', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='erp_core', + conf='MNE_DATASETS_ERP_CORE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('erp_core') + + +get_version.__doc__ = _version_doc.format(name='erp_core') diff --git a/python/libs/mne/datasets/fieldtrip_cmc/__init__.py b/python/libs/mne/datasets/fieldtrip_cmc/__init__.py new file mode 100644 index 0000000..328d81f --- /dev/null +++ b/python/libs/mne/datasets/fieldtrip_cmc/__init__.py @@ -0,0 +1,3 @@ +"""fieldtrip Cortico-Muscular Coherence (CMC) Dataset.""" + +from .fieldtrip_cmc import data_path, get_version diff --git a/python/libs/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py b/python/libs/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py new file mode 100644 index 0000000..d7abe1c --- /dev/null +++ b/python/libs/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py @@ -0,0 +1,27 @@ +# Authors: Chris Holdgraf +# Alexandre Barachant +# +# License: BSD Style. 
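+#
+# A minimal usage sketch (it assumes only the helpers defined in this file):
+#
+#     from mne.datasets import fieldtrip_cmc
+#
+#     # the first call unzips SubjectCMC.zip into MNE-fieldtrip_cmc-data
+#     root = fieldtrip_cmc.data_path()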
+from ...utils import verbose
+from ..utils import (_data_path_doc,
+                     _get_version, _version_doc, _download_mne_dataset)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, *, verbose=None):  # noqa: D103
+    return _download_mne_dataset(
+        name='fieldtrip_cmc', processor='nested_unzip', path=path,
+        force_update=force_update, update_path=update_path,
+        download=download)
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name='fieldtrip_cmc', conf='MNE_DATASETS_FIELDTRIP_CMC_PATH')
+
+
+def get_version():  # noqa: D103
+    return _get_version('fieldtrip_cmc')
+
+
+get_version.__doc__ = _version_doc.format(name='fieldtrip_cmc')
diff --git a/python/libs/mne/datasets/fnirs_motor/__init__.py b/python/libs/mne/datasets/fnirs_motor/__init__.py
new file mode 100644
index 0000000..66ec175
--- /dev/null
+++ b/python/libs/mne/datasets/fnirs_motor/__init__.py
@@ -0,0 +1,3 @@
+"""fNIRS motor dataset."""
+
+from .fnirs_motor import data_path, get_version
diff --git a/python/libs/mne/datasets/fnirs_motor/fnirs_motor.py b/python/libs/mne/datasets/fnirs_motor/fnirs_motor.py
new file mode 100644
index 0000000..ce0294f
--- /dev/null
+++ b/python/libs/mne/datasets/fnirs_motor/fnirs_motor.py
@@ -0,0 +1,26 @@
+# Authors: Eric Larson
+# License: BSD Style.
+
+from ...utils import verbose
+from ..utils import (_data_path_doc, _get_version, _version_doc,
+                     _download_mne_dataset)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, *, verbose=None):  # noqa: D103
+    return _download_mne_dataset(
+        name='fnirs_motor', processor='untar', path=path,
+        force_update=force_update, update_path=update_path,
+        download=download)
+
+
+data_path.__doc__ = _data_path_doc.format(name='fnirs_motor',
+                                          conf='MNE_DATASETS_FNIRS_MOTOR_PATH')
+
+
+def get_version():  # noqa: D103
+    return _get_version('fnirs_motor')
+
+
+get_version.__doc__ = _version_doc.format(name='fnirs_motor')
diff --git a/python/libs/mne/datasets/hf_sef/__init__.py b/python/libs/mne/datasets/hf_sef/__init__.py
new file mode 100644
index 0000000..08fe8ca
--- /dev/null
+++ b/python/libs/mne/datasets/hf_sef/__init__.py
@@ -0,0 +1,3 @@
+"""HF-SEF dataset."""
+
+from .hf_sef import data_path
diff --git a/python/libs/mne/datasets/hf_sef/hf_sef.py b/python/libs/mne/datasets/hf_sef/hf_sef.py
new file mode 100644
index 0000000..63d97df
--- /dev/null
+++ b/python/libs/mne/datasets/hf_sef/hf_sef.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Authors: Jussi Nurminen
+# License: BSD Style.
+
+
+import os.path as op
+import os
+from ...utils import verbose, _check_option
+from ..utils import _get_path, _do_path_update, _download_mne_dataset
+from ..config import MNE_DATASETS
+
+
+@verbose
+def data_path(dataset='evoked', path=None, force_update=False,
+              update_path=True, *, verbose=None):
+    """Get path to local copy of the high frequency SEF dataset.
+
+    Gets a local copy of the high frequency SEF MEG dataset
+    :footcite:`NurminenEtAl2017`.
+
+    Parameters
+    ----------
+    dataset : 'evoked' | 'raw'
+        Whether to get the main dataset (evoked, structural and the rest) or
+        the separate dataset containing raw MEG data only.
+    path : None | str
+        Where to look for the HF-SEF data storing location.
+        If None, the environment variable or config parameter
+        ``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the HF-SEF dataset
+        is not found under the given path, the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    %(verbose)s
+
+    Returns
+    -------
+    path : str
+        Local path to the directory where the HF-SEF data is stored.
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    _check_option('dataset', dataset, ('evoked', 'raw'))
+    if dataset == 'raw':
+        data_dict = MNE_DATASETS['hf_sef_raw']
+        data_dict['dataset_name'] = 'hf_sef_raw'
+    else:
+        data_dict = MNE_DATASETS['hf_sef_evoked']
+        data_dict['dataset_name'] = 'hf_sef_evoked'
+    config_key = data_dict['config_key']
+    folder_name = data_dict['folder_name']
+
+    # get download path for specific dataset
+    path = _get_path(path=path, key=config_key, name=folder_name)
+    final_path = op.join(path, folder_name)
+    megdir = op.join(final_path, 'MEG', 'subject_a')
+    has_raw = (dataset == 'raw' and op.isdir(megdir) and
+               any('raw' in filename for filename in os.listdir(megdir)))
+    has_evoked = (dataset == 'evoked' and
+                  op.isdir(op.join(final_path, 'subjects')))
+    # data already present and no forced update: just record the path
+    if (has_raw or has_evoked) and not force_update:
+        _do_path_update(path, update_path, config_key,
+                        folder_name)
+        return final_path
+
+    # instantiate processor that untars the file
+    data_path = _download_mne_dataset(name=data_dict['dataset_name'],
+                                      processor='untar', path=path,
+                                      force_update=force_update,
+                                      update_path=update_path, download=True)
+    return data_path
diff --git a/python/libs/mne/datasets/kiloword/__init__.py b/python/libs/mne/datasets/kiloword/__init__.py
new file mode 100644
index 0000000..18a22f9
--- /dev/null
+++ b/python/libs/mne/datasets/kiloword/__init__.py
@@ -0,0 +1,3 @@
+"""MNE kiloword dataset."""
+
+from .kiloword import data_path, get_version
diff --git a/python/libs/mne/datasets/kiloword/kiloword.py b/python/libs/mne/datasets/kiloword/kiloword.py
new file mode 100644
index 0000000..c011365
--- /dev/null
+++ b/python/libs/mne/datasets/kiloword/kiloword.py
@@ -0,0 +1,57 @@
+# License: BSD Style.
+
+from ...utils import verbose
+from ..utils import (_get_version, _version_doc, _download_mne_dataset)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, *, verbose=None):
+    """Get path to local copy of the kiloword dataset.
+
+    This is the dataset from :footcite:`DufauEtAl2015`.
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the kiloword data storing
+        location. If None, the environment variable or config parameter
+        MNE_DATASETS_KILOWORD_PATH is used. If it doesn't exist,
+        the "mne-python/examples" directory is used. If the
+        kiloword dataset is not found under the given path (e.g.,
+        as "mne-python/examples/MNE-kiloword-data"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    download : bool
+        If False and the kiloword dataset has not been downloaded yet,
+        it will not be downloaded and the path will be returned as
+        '' (empty string). This is mostly used for debugging purposes
+        and can be safely ignored by most users.
+    %(verbose)s
+
+    Returns
+    -------
+    path : list of Path
+        Local path to the given data file. This path is contained inside
+        a list of length one, for compatibility.
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    return _download_mne_dataset(
+        name='kiloword', processor='untar', path=path,
+        force_update=force_update, update_path=update_path,
+        download=download)
+
+
+def get_version():
+    """Get dataset version."""
+    return _get_version('kiloword')
+
+
+get_version.__doc__ = _version_doc.format(name='kiloword')
diff --git a/python/libs/mne/datasets/limo/__init__.py b/python/libs/mne/datasets/limo/__init__.py
new file mode 100644
index 0000000..f83eac6
--- /dev/null
+++ b/python/libs/mne/datasets/limo/__init__.py
@@ -0,0 +1,3 @@
+"""LIMO Dataset."""
+
+from .limo import data_path, load_data
diff --git a/python/libs/mne/datasets/limo/limo.py b/python/libs/mne/datasets/limo/limo.py
new file mode 100644
index 0000000..143a9dd
--- /dev/null
+++ b/python/libs/mne/datasets/limo/limo.py
@@ -0,0 +1,288 @@
+# Authors: Jose C. Garcia Alanis
+#
+# License: BSD-3-Clause
+
+import os
+import os.path as op
+
+import numpy as np
+
+from ...channels import make_standard_montage
+from ...epochs import EpochsArray
+from ...io.meas_info import create_info
+from ...utils import _check_pandas_installed, verbose
+from ..utils import _get_path, _do_path_update, logger
+
+
+# root url for LIMO files
+root_url = 'https://files.de-1.osf.io/v1/resources/52rea/providers/osfstorage/'
+
+
+@verbose
+def data_path(subject, path=None, force_update=False, update_path=None, *,
+              verbose=None):
+    """Get path to local copy of LIMO dataset URL.
+
+    This is a low-level function useful for getting a local copy of the
+    remote LIMO dataset :footcite:`Rousselet2016`. The complete dataset is
+    available at datashare.is.ed.ac.uk/.
+
+    Parameters
+    ----------
+    subject : int
+        Subject to download. Must be an :class:`int` in the range from 1
+        to 18 (inclusive).
+    path : None | str
+        Location of where to look for the LIMO data storing directory.
+        If None, the environment variable or config parameter
+        ``MNE_DATASETS_LIMO_PATH`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the LIMO dataset
+        is not found under the given path, the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_LIMO_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    %(verbose)s
+
+    Returns
+    -------
+    path : str
+        Local path to the given data file.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import limo
+        >>> limo.data_path(subject=1, path=os.getenv('HOME') + '/datasets')  # doctest:+SKIP
+
+    This would download the LIMO data file to the 'datasets' folder,
+    and prompt the user to save the 'datasets' path to the mne-python config,
+    if it isn't there already.
+
+    References
+    ----------
+    ..
footbibliography:: + """ # noqa: E501 + import pooch + + downloader = pooch.HTTPDownloader(progressbar=True) # use tqdm + + # local storage patch + config_key = 'MNE_DATASETS_LIMO_PATH' + name = 'LIMO' + subj = f'S{subject}' + path = _get_path(path, config_key, name) + base_path = op.join(path, 'MNE-limo-data') + subject_path = op.join(base_path, subj) + # the remote URLs are in the form of UUIDs: + urls = dict( + S18={'Yr.mat': '5cf839833a4d9500178a6ff8', + 'LIMO.mat': '5cf83907e650a2001ad592e4'}, + S17={'Yr.mat': '5cf838e83a4d9500168aeb76', + 'LIMO.mat': '5cf83867a542b80019c87602'}, + S16={'Yr.mat': '5cf83857e650a20019d5778f', + 'LIMO.mat': '5cf837dc3a4d9500188a64fe'}, + S15={'Yr.mat': '5cf837cce650a2001ad591e8', + 'LIMO.mat': '5cf83758a542b8001ac7d11d'}, + S14={'Yr.mat': '5cf837493a4d9500198a938f', + 'LIMO.mat': '5cf836e4a542b8001bc7cc53'}, + S13={'Yr.mat': '5cf836d23a4d9500178a6df7', + 'LIMO.mat': '5cf836543a4d9500168ae7cb'}, + S12={'Yr.mat': '5cf83643d4c7d700193e5954', + 'LIMO.mat': '5cf835193a4d9500178a6c92'}, + S11={'Yr.mat': '5cf8356ea542b8001cc81517', + 'LIMO.mat': '5cf834f7d4c7d700163daab8'}, + S10={'Yr.mat': '5cf833b0e650a20019d57454', + 'LIMO.mat': '5cf83204e650a20018d59eb2'}, + S9={'Yr.mat': '5cf83201a542b8001cc811cf', + 'LIMO.mat': '5cf8316c3a4d9500168ae13b'}, + S8={'Yr.mat': '5cf8326ce650a20017d60373', + 'LIMO.mat': '5cf8316d3a4d9500198a8dc5'}, + S7={'Yr.mat': '5cf834a03a4d9500168ae59b', + 'LIMO.mat': '5cf83069e650a20017d600d7'}, + S6={'Yr.mat': '5cf830e6a542b80019c86a70', + 'LIMO.mat': '5cf83057a542b80019c869ca'}, + S5={'Yr.mat': '5cf8115be650a20018d58041', + 'LIMO.mat': '5cf80c0bd4c7d700193e213c'}, + S4={'Yr.mat': '5cf810c9a542b80019c8450a', + 'LIMO.mat': '5cf80bf83a4d9500198a6eb4'}, + S3={'Yr.mat': '5cf80c55d4c7d700163d8f52', + 'LIMO.mat': '5cf80bdea542b80019c83cab'}, + S2={'Yr.mat': '5cde827123fec40019e01300', + 'LIMO.mat': '5cde82682a50c4001677c259'}, + S1={'Yr.mat': '5d6d3071536cf5001a8b0c78', + 'LIMO.mat': '5d6d305f6f41fc001a3151d8'}, + ) + # these can't be in the registry file (mne/data/dataset_checksums.txt) + # because of filename duplication + hashes = dict( + S18={'Yr.mat': 'md5:87f883d442737971a80fc0a35d057e51', + 'LIMO.mat': 'md5:8b4879646f65d7876fa4adf2e40162c5'}, + S17={'Yr.mat': 'md5:7b667ec9eefd7a9996f61ae270e295ee', + 'LIMO.mat': 'md5:22eaca4e6fad54431fd61b307fc426b8'}, + S16={'Yr.mat': 'md5:c877afdb4897426421577e863a45921a', + 'LIMO.mat': 'md5:86672d7afbea1e8c39305bc3f852c8c2'}, + S15={'Yr.mat': 'md5:eea9e0140af598fefc08c886a6f05de5', + 'LIMO.mat': 'md5:aed5cb71ddbfd27c6a3ac7d3e613d07f'}, + S14={'Yr.mat': 'md5:8bd842cfd8588bd5d32e72fdbe70b66e', + 'LIMO.mat': 'md5:1e07d1f36f2eefad435a77530daf2680'}, + S13={'Yr.mat': 'md5:d7925d2af7288b8a5186dfb5dbb63d34', + 'LIMO.mat': 'md5:ba891015d2f9e447955fffa9833404ca'}, + S12={'Yr.mat': 'md5:0e1d05beaa4bf2726e0d0671b78fe41e', + 'LIMO.mat': 'md5:423fd479d71097995b6614ecb11df9ad'}, + S11={'Yr.mat': 'md5:1b0016fb9832e43b71f79c1992fcbbb1', + 'LIMO.mat': 'md5:1a281348c2a41ee899f42731d30cda70'}, + S10={'Yr.mat': 'md5:13c66f60e241b9a9cc576eaf1b55a417', + 'LIMO.mat': 'md5:3c4b41e221eb352a21bbef1a7e006f06'}, + S9={'Yr.mat': 'md5:3ae1d9c3a1d9325deea2f2dddd1ab507', + 'LIMO.mat': 'md5:5e204e2a4bcfe4f535b4b1af469b37f7'}, + S8={'Yr.mat': 'md5:7e9adbca4e03d8d7ce8ea07ccecdc8fd', + 'LIMO.mat': 'md5:88313c21d34428863590e586b2bc3408'}, + S7={'Yr.mat': 'md5:6b5290a6725ecebf1022d5d2789b186d', + 'LIMO.mat': 'md5:8c769219ebc14ce3f595063e84bfc0a9'}, + S6={'Yr.mat': 'md5:420c858a8340bf7c28910b7b0425dc5d', + 'LIMO.mat': 
'md5:9cf4e1a405366d6bd0cc6d996e32fd63'}, + S5={'Yr.mat': 'md5:946436cfb474c8debae56ffb1685ecf3', + 'LIMO.mat': 'md5:241fac95d3a79d2cea081391fb7078bd'}, + S4={'Yr.mat': 'md5:c8216af78ac87b739e86e57b345cafdd', + 'LIMO.mat': 'md5:8e10ef36c2e075edc2f787581ba33459'}, + S3={'Yr.mat': 'md5:ff02e885b65b7b807146f259a30b1b5e', + 'LIMO.mat': 'md5:59b5fb3a9749003133608b5871309e2c'}, + S2={'Yr.mat': 'md5:a4329022e57fd07ceceb7d1735fd2718', + 'LIMO.mat': 'md5:98b284b567f2dd395c936366e404f2c6'}, + S1={'Yr.mat': 'md5:076c0ae78fb71d43409c1877707df30e', + 'LIMO.mat': 'md5:136c8cf89f8f111a11f531bd9fa6ae69'}, + ) + # create the download manager + fetcher = pooch.create( + path=subject_path, + base_url='', + version=None, # Data versioning is decoupled from MNE-Python version. + registry=hashes[subj], + urls={key: f'{root_url}{uuid}' for key, uuid in urls[subj].items()}, + retry_if_failed=2 # 2 retries = 3 total attempts + ) + # use our logger level for pooch's logger too + pooch.get_logger().setLevel(logger.getEffectiveLevel()) + # fetch the data + for fname in ('LIMO.mat', 'Yr.mat'): + destination = op.join(subject_path, fname) + if force_update and op.isfile(destination): + os.remove(destination) + # fetch the remote file (if local file missing or has hash mismatch) + fetcher.fetch(fname=fname, downloader=downloader) + # update path in config if desired + _do_path_update(path, update_path, config_key, name) + return base_path + + +@verbose +def load_data(subject, path=None, force_update=False, update_path=None, + verbose=None): + """Fetch a subject's epochs data for the LIMO dataset. + + Parameters + ---------- + subject : int + Subject to use. Must be of class int in the range from 1 to 18. + path : str + Location of where to look for the LIMO data. + If None, the environment variable or config parameter + ``MNE_DATASETS_LIMO_PATH`` is used. If it doesn't exist, the + "~/mne_data" directory is used. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_LIMO_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs.
+ """ # noqa: E501 + pd = _check_pandas_installed() + from scipy.io import loadmat + + # subject in question + if isinstance(subject, int) and 1 <= subject <= 18: + subj = 'S%i' % subject + else: + raise ValueError('subject must be an int in the range from 1 to 18') + + # set limo path, download and decompress files if not found + limo_path = data_path(subject, path, force_update, update_path) + + # -- 1) import .mat files + # epochs info + fname_info = op.join(limo_path, subj, 'LIMO.mat') + data_info = loadmat(fname_info) + # number of epochs per condition + design = data_info['LIMO']['design'][0][0]['X'][0][0] + data_info = data_info['LIMO']['data'][0][0][0][0] + # epochs data + fname_eeg = op.join(limo_path, subj, 'Yr.mat') + data = loadmat(fname_eeg) + + # -- 2) get epochs information from structure + # sampling rate + sfreq = data_info['sampling_rate'][0][0] + # tmin and tmax + tmin = data_info['start'][0][0] + # create events matrix + sample = np.arange(len(design)) + prev_id = np.zeros(len(design)) + ev_id = design[:, 1] + events = np.array([sample, prev_id, ev_id]).astype(int).T + # event ids, such that Face B == 1 + event_id = {'Face/A': 0, 'Face/B': 1} + + # -- 3) extract channel labels from LIMO structure + # get individual labels + labels = data_info['chanlocs']['labels'] + labels = [label for label, *_ in labels[0]] + # get montage + montage = make_standard_montage('biosemi128') + # add external electrodes (e.g., eogs) + ch_names = montage.ch_names + ['EXG1', 'EXG2', 'EXG3', 'EXG4'] + # match individual labels to labels in montage + found_inds = [ind for ind, name in enumerate(ch_names) if name in labels] + missing_chans = [name for name in ch_names if name not in labels] + assert labels == [ch_names[ind] for ind in found_inds] + + # -- 4) extract data from subjects Yr structure + # data is stored as channels x time points x epochs + # data['Yr'].shape # <-- see here + # transpose to epochs x channels time points + data = np.transpose(data['Yr'], (2, 0, 1)) + # initialize data in expected order + temp_data = np.empty((data.shape[0], len(ch_names), data.shape[2])) + # copy over the non-missing data + for source, target in enumerate(found_inds): + # avoid copy when fancy indexing + temp_data[:, target, :] = data[:, source, :] + # data to V (to match MNE's format) + data = temp_data / 1e6 + # create list containing channel types + types = ["eog" if ch.startswith("EXG") else "eeg" for ch in ch_names] + + # -- 5) Create custom info for mne epochs structure + # create info + info = create_info(ch_names, sfreq, types).set_montage(montage) + # get faces and noise variables from design matrix + event_list = list(events[:, 2]) + faces = ['B' if event else 'A' for event in event_list] + noise = list(design[:, 2]) + # create epochs metadata + metadata = {'face': faces, 'phase-coherence': noise} + metadata = pd.DataFrame(metadata) + + # -- 6) Create custom epochs array + epochs = EpochsArray(data, info, events, tmin, event_id, metadata=metadata) + epochs.info['bads'] = missing_chans # missing channels are marked as bad. 
+ + return epochs diff --git a/python/libs/mne/datasets/misc/__init__.py b/python/libs/mne/datasets/misc/__init__.py new file mode 100644 index 0000000..884848d --- /dev/null +++ b/python/libs/mne/datasets/misc/__init__.py @@ -0,0 +1,3 @@ +"""MNE misc dataset.""" + +from ._misc import data_path, _pytest_mark diff --git a/python/libs/mne/datasets/misc/_misc.py b/python/libs/mne/datasets/misc/_misc.py new file mode 100644 index 0000000..85f6533 --- /dev/null +++ b/python/libs/mne/datasets/misc/_misc.py @@ -0,0 +1,26 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. + +from ...utils import verbose +from ..utils import has_dataset, _data_path_doc, _download_mne_dataset + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='misc', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +def _pytest_mark(): + import pytest + return pytest.mark.skipif( + not has_dataset(name='misc'), reason='Requires misc dataset') + + +data_path.__doc__ = _data_path_doc.format(name='misc', + conf='MNE_DATASETS_MISC_PATH') diff --git a/python/libs/mne/datasets/mtrf/__init__.py b/python/libs/mne/datasets/mtrf/__init__.py new file mode 100644 index 0000000..dffa76e --- /dev/null +++ b/python/libs/mne/datasets/mtrf/__init__.py @@ -0,0 +1,3 @@ +"""mTRF Dataset.""" + +from .mtrf import data_path, get_version diff --git a/python/libs/mne/datasets/mtrf/mtrf.py b/python/libs/mne/datasets/mtrf/mtrf.py new file mode 100644 index 0000000..bfc5cd0 --- /dev/null +++ b/python/libs/mne/datasets/mtrf/mtrf.py @@ -0,0 +1,30 @@ +# Authors: Chris Holdgraf +# +# License: BSD Style. + +from ...utils import verbose +from ..utils import (_data_path_doc, + _get_version, _version_doc, _download_mne_dataset) + + +data_name = 'mtrf' + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name=data_name, processor='unzip', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name=data_name, + conf='MNE_DATASETS_MTRF_PATH') + + +def get_version(): # noqa: D103 + return _get_version(data_name) + + +get_version.__doc__ = _version_doc.format(name=data_name) diff --git a/python/libs/mne/datasets/multimodal/__init__.py b/python/libs/mne/datasets/multimodal/__init__.py new file mode 100644 index 0000000..753f0cf --- /dev/null +++ b/python/libs/mne/datasets/multimodal/__init__.py @@ -0,0 +1,3 @@ +"""Multimodal dataset.""" + +from .multimodal import data_path, get_version diff --git a/python/libs/mne/datasets/multimodal/multimodal.py b/python/libs/mne/datasets/multimodal/multimodal.py new file mode 100644 index 0000000..4ef0fd3 --- /dev/null +++ b/python/libs/mne/datasets/multimodal/multimodal.py @@ -0,0 +1,28 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. 
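# Editor's note: this module and the ones that follow (opm, phantom_4dbti,
# refmeg_noise, sample, somato, spm_face, ssvep) are thin wrappers around
# _download_mne_dataset(), all exposing the same data_path()/get_version()
# surface. A minimal usage sketch -- illustrative only, assuming network
# access and the default ~/mne_data storage location:
if __name__ == '__main__':  # demo guard: never runs on library import
    from mne.datasets import multimodal
    local_dir = multimodal.data_path()  # downloads the archive on first call
    print('multimodal dataset at', local_dir,
          'version', multimodal.get_version())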
+ +from ...utils import verbose + from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='multimodal', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='multimodal', + conf='MNE_DATASETS_MULTIMODAL_PATH') + + +def get_version(): # noqa: D103 + return _get_version('multimodal') + + +get_version.__doc__ = _version_doc.format(name='multimodal') diff --git a/python/libs/mne/datasets/opm/__init__.py b/python/libs/mne/datasets/opm/__init__.py new file mode 100644 index 0000000..6ff15e6 --- /dev/null +++ b/python/libs/mne/datasets/opm/__init__.py @@ -0,0 +1,3 @@ +"""OPM dataset.""" + +from .opm import data_path, get_version diff --git a/python/libs/mne/datasets/opm/opm.py b/python/libs/mne/datasets/opm/opm.py new file mode 100644 index 0000000..014e91f --- /dev/null +++ b/python/libs/mne/datasets/opm/opm.py @@ -0,0 +1,28 @@ +# Authors: Alexandre Gramfort + # Martin Luessi + # Eric Larson + # License: BSD Style. + +from ...utils import verbose + from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='opm', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='opm', + conf='MNE_DATASETS_OPM_PATH') + + +def get_version(): # noqa: D103 + return _get_version('opm') + + +get_version.__doc__ = _version_doc.format(name='opm') diff --git a/python/libs/mne/datasets/phantom_4dbti/__init__.py b/python/libs/mne/datasets/phantom_4dbti/__init__.py new file mode 100644 index 0000000..0d9323a --- /dev/null +++ b/python/libs/mne/datasets/phantom_4dbti/__init__.py @@ -0,0 +1,3 @@ +"""4D-BTi phantom dataset.""" + +from .phantom_4dbti import data_path, get_version diff --git a/python/libs/mne/datasets/phantom_4dbti/phantom_4dbti.py b/python/libs/mne/datasets/phantom_4dbti/phantom_4dbti.py new file mode 100644 index 0000000..2154dee --- /dev/null +++ b/python/libs/mne/datasets/phantom_4dbti/phantom_4dbti.py @@ -0,0 +1,27 @@ +# Authors: Alexandre Gramfort + # + # License: BSD Style.
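# Editor's note: _download_mne_dataset() hides the same pooch machinery that
# limo.py above spells out by hand: a registry mapping file names to known
# hashes plus a URL map, then fetch() to download, verify and cache. A
# self-contained sketch of that pattern (the example.com URL, file name and
# hash below are hypothetical placeholders, not real project data):
#
#     import pooch
#     fetcher = pooch.create(
#         path='~/mne_data/demo',
#         base_url='https://example.com/data/',
#         registry={'demo.mat': 'md5:d41d8cd98f00b204e9800998ecf8427e'},
#     )
#     local_file = fetcher.fetch('demo.mat')  # re-downloads on hash mismatch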
+ +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='phantom_4dbti', processor='unzip', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format( + name='phantom_4dbti', conf='MNE_DATASETS_PHANTOM_4DBTI_PATH') + + +def get_version(): # noqa: D103 + return _get_version('phantom_4dbti') + + +get_version.__doc__ = _version_doc.format(name='phantom_4dbti') diff --git a/python/libs/mne/datasets/refmeg_noise/__init__.py b/python/libs/mne/datasets/refmeg_noise/__init__.py new file mode 100644 index 0000000..00460d1 --- /dev/null +++ b/python/libs/mne/datasets/refmeg_noise/__init__.py @@ -0,0 +1,3 @@ +"""MEG reference-noise data set.""" + +from .refmeg_noise import data_path, get_version diff --git a/python/libs/mne/datasets/refmeg_noise/refmeg_noise.py b/python/libs/mne/datasets/refmeg_noise/refmeg_noise.py new file mode 100644 index 0000000..2027a31 --- /dev/null +++ b/python/libs/mne/datasets/refmeg_noise/refmeg_noise.py @@ -0,0 +1,26 @@ +# Authors: Jeff Hanna +# License: BSD Style. + +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='refmeg_noise', processor='unzip', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format( + name='refmeg_noise', conf='MNE_DATASETS_REFMEG_NOISE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('refmeg_noise') + + +get_version.__doc__ = _version_doc.format(name='refmeg_noise') diff --git a/python/libs/mne/datasets/sample/__init__.py b/python/libs/mne/datasets/sample/__init__.py new file mode 100644 index 0000000..c94c6d5 --- /dev/null +++ b/python/libs/mne/datasets/sample/__init__.py @@ -0,0 +1,3 @@ +"""MNE sample dataset.""" + +from .sample import data_path, get_version diff --git a/python/libs/mne/datasets/sample/sample.py b/python/libs/mne/datasets/sample/sample.py new file mode 100644 index 0000000..4876b7b --- /dev/null +++ b/python/libs/mne/datasets/sample/sample.py @@ -0,0 +1,28 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. 
+ +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='sample', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='sample', + conf='MNE_DATASETS_SAMPLE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('sample') + + +get_version.__doc__ = _version_doc.format(name='sample') diff --git a/python/libs/mne/datasets/sleep_physionet/SHA1SUMS b/python/libs/mne/datasets/sleep_physionet/SHA1SUMS new file mode 100644 index 0000000..1edcecc --- /dev/null +++ b/python/libs/mne/datasets/sleep_physionet/SHA1SUMS @@ -0,0 +1,394 @@ +adabd3b01fc7bb75c523a974f38ee3ae4e57b40f SC4001E0-PSG.edf +21c998eadc8b1e3ea6727d3585186b8f76e7e70b SC4001EC-Hypnogram.edf +c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d SC4002E0-PSG.edf +386230188a3552b1fc90bba0fb7476ceaca174b6 SC4002EC-Hypnogram.edf +4d17451f7847355bcab17584de05e7e1df58c660 SC4011E0-PSG.edf +d582a3cbe2db481a362af890bc5a2f5ca7c878dc SC4011EH-Hypnogram.edf +a47d525f5147904b6890231e2ad338359c7ab94c SC4012E0-PSG.edf +fa99f60d7f54617cdd1128aff4f21c4daed763c7 SC4012EC-Hypnogram.edf +8b135afa7fb93bb5f1998fda50355944777c245e SC4021E0-PSG.edf +91043cfe46695088b17b6a02937b25efd674c3fb SC4021EH-Hypnogram.edf +d739e142b3b328c71b4752149901805dcd6d7e19 SC4022E0-PSG.edf +0c46a03699dd00e8f92a7edff99ebc4642cb3d48 SC4022EJ-Hypnogram.edf +85e58dc1e3303537dade8c5827ab58328239c384 SC4031E0-PSG.edf +6363d8b0fdc48cf396c9abf054bb4a9696d38bdb SC4031EC-Hypnogram.edf +43963d300642b3aa840e8c468f321b8162601772 SC4032E0-PSG.edf +7925514bc8d2ef3f1103130f08f7b3afd2136b88 SC4032EP-Hypnogram.edf +04d2b88d25f2ae4a65ba44cd9145bd12800a0e20 SC4041E0-PSG.edf +f148821669bd3588187b3b430bd79adf569f86d1 SC4041EC-Hypnogram.edf +76253d964d7797540ffd791e6e136023ed67a485 SC4042E0-PSG.edf +9873df429f971f8a4b720a454f6c0472b8a25ebb SC4042EC-Hypnogram.edf +ea073451b65ce8a6f1a02a8cc2b89d1a162ca0ae SC4051E0-PSG.edf +4159ef8a3e119d6dcc1bede806f6fbc017b27a0f SC4051EC-Hypnogram.edf +5a2efbd21be9b745fd534394eb2503caca7dc53f SC4052E0-PSG.edf +0e96482d44762df4da65dc4fdb970b342264d22a SC4052EC-Hypnogram.edf +1736736e585807c14f1ae8bc87a94cae222c5170 SC4061E0-PSG.edf +4bf99622c67c281b25ceccd35e7050328a2946e8 SC4061EC-Hypnogram.edf +763c7ac059f1771a0165e5cb351b176afb1cfe15 SC4062E0-PSG.edf +14f07411cd04d3b4b522d37c129334955287ff5f SC4062EC-Hypnogram.edf +1374b34f6139b6ff7e865d8243eef39ba334ef50 SC4071E0-PSG.edf +608024fd19a140ad233a4680e07c2495a74b69c2 SC4071EC-Hypnogram.edf +1c570644243d79396df612fa2b9bc027b24430e4 SC4072E0-PSG.edf +a8da6c20b9b48189f05ab537886b59dd141374d2 SC4072EH-Hypnogram.edf +0e1cc2c4e1da14ab94515e3e7e75e8ad30ec99cb SC4081E0-PSG.edf +9ec663ffa5c17afcaca59d7829d77b9165102237 SC4081EC-Hypnogram.edf +d57d4aa7cbc5045f611a3a3e342b501e086ea426 SC4082E0-PSG.edf +d43c785dba43063d7baa332671c6bac9c832b5b7 SC4082EP-Hypnogram.edf +b3502e0bd54683e973182c791aa962b804e79633 SC4091E0-PSG.edf +7aa63b408c769a4a983a908b6ba41d87dd743c6e SC4091EC-Hypnogram.edf +246e35852119b33d197db2f7bcfb1b46a5270a03 SC4092E0-PSG.edf +9d85766a83231b1c6076cb293367ccc354c57eeb SC4092EC-Hypnogram.edf +3ae168ff2c9c0c56f51205fdb10f05a4c6b2064e SC4101E0-PSG.edf +60d9c3913881e11b06ad99e9870bd1ca4d93c952 SC4101EC-Hypnogram.edf +86f307190961eaab0214fdc0213f8fe05812c7a5 SC4102E0-PSG.edf 
+8072e2d52bc6c19b45fbd921550e5243bc5a1de7 SC4102EC-Hypnogram.edf +e490956b4dce01c46ba88a2b847f091bb54ea16e SC4111E0-PSG.edf +12db1920e2f6083c8ab1f2c24fe35dfa03715e4a SC4111EC-Hypnogram.edf +ca24dc464df61144627588b29d35a85fcc7ac984 SC4112E0-PSG.edf +54dbc39015b0a445b51189987a00e08cc27d8f0c SC4112EC-Hypnogram.edf +33c72025a7a215ea5e255f4254cb0f93b1313369 SC4121E0-PSG.edf +daa57ece807cb5325c6d1ce059f0e8a8d1c85391 SC4121EC-Hypnogram.edf +34f5145ab62dcc5a53ba18735519e5bb2b13841a SC4122E0-PSG.edf +b7af1a32d8ca15e8185e4c94213ffc18ad7f6e8a SC4122EV-Hypnogram.edf +42ff97035aae6dd34ca9437857c48ac6f2ab97df SC4131E0-PSG.edf +5beef85170bdbb5cf2eea24a79f0f5c2c3975c4b SC4131EC-Hypnogram.edf +83493e1c32d441c9e5ee3de6a024bfb5e7ab9f5f SC4141E0-PSG.edf +511d398f22b9b2b304de27c40740a41584ff6af2 SC4141EU-Hypnogram.edf +63d13828b7ebe0d2ed7f491d2b5520e928b9b55d SC4142E0-PSG.edf +6f123e6fdc90a01b83e694d9744a6d27f3c87b25 SC4142EU-Hypnogram.edf +5a92d49699d4de369d66d9462e91b0dcb3312649 SC4151E0-PSG.edf +37dcbd339c95322d028b3a5466812697041cc373 SC4151EC-Hypnogram.edf +778626489bc4fe2c9137d2d361876d97dce97e5e SC4152E0-PSG.edf +294cdc47cd3d165031f7041c17f18dd013d216cb SC4152EC-Hypnogram.edf +e56ff3aa366fe9a04a0fdfdd4cd862e77e8ac807 SC4161E0-PSG.edf +56711b1bfed292032491f5cce57494629286a131 SC4161EC-Hypnogram.edf +722692f9940f3a1bccb9b4488c4477edf7fb128f SC4162E0-PSG.edf +c85647fb4bc1f382fe46bf9aaf579dc483115885 SC4162EC-Hypnogram.edf +f1a65522cb7d6c71ac47742535a12c88e2019dad SC4171E0-PSG.edf +dd257c8d922f08c2c8ca5236c9bf54da887c68e5 SC4171EU-Hypnogram.edf +572b81bc24c2c9482e6fc7ba9202a7bf253655e1 SC4172E0-PSG.edf +c9a3b590748d7d6c7ad97c62222bd53d8ebaf630 SC4172EC-Hypnogram.edf +23674d20572853eb6d988d24378c52123f66500c SC4181E0-PSG.edf +51fc3df2df7d4da654f3e18ed1b233d0c60cfa80 SC4181EC-Hypnogram.edf +83e8cbe882ba863da9fd3c11393c95b6fec5b7a5 SC4182E0-PSG.edf +43c487955edddb4ee2f60193a097c68c25c5dd4d SC4182EC-Hypnogram.edf +d6da621dbb20dec3494a38c7d2a0363793ac5ebe SC4191E0-PSG.edf +defc7b9368c2d3c4ab4a294757843825a83cdb5d SC4191EP-Hypnogram.edf +941353118732321d0246a1d58d72e903bd2f0d8f SC4192E0-PSG.edf +97b91b3067c5ecde766042fc2cff9e22f8023371 SC4192EV-Hypnogram.edf +38a0be6e45ddd9b1f17d09964a32e005dc5a6519 SC4201E0-PSG.edf +83822f9970d3959ad2e0613492ae39bd0fae6068 SC4201EC-Hypnogram.edf +aa69f5bd47c2ae03c9d38bfe6d0e58408744b885 SC4202E0-PSG.edf +5c5c63016b43421a523d1efcb34247e90aa6318b SC4202EC-Hypnogram.edf +c106ad072dbc975a3742f7eff151219870f0c794 SC4211E0-PSG.edf +9126937ea8a414d6ae9bc4a4194d841a891fa8a8 SC4211EC-Hypnogram.edf +a06ecb3f0a7b2c306f5ae4dbd83685f877cd945b SC4212E0-PSG.edf +a85f178b69a1cda47d11dd1e5394dfdcb58de1d4 SC4212EC-Hypnogram.edf +8733ea022d3778259a436507156cf3360ad8be06 SC4221E0-PSG.edf +b158eda4f81772095c129be77f8e60ec9d81b884 SC4221EJ-Hypnogram.edf +211410fab6381da0dfaef4134d5a05eec935a4ec SC4222E0-PSG.edf +1488fbfbc149499dafa8dafff4f7504053af429f SC4222EC-Hypnogram.edf +d96f1f35b2f77c7de706036c6e4114139e07b307 SC4231E0-PSG.edf +9f6df70676d6cddcf069ceb7f408a7989af99ce2 SC4231EJ-Hypnogram.edf +6b493fa424c1329ea1c13543d08ba82a9f1e85b6 SC4232E0-PSG.edf +d8ca7d694b3c48ab9d983b9cf67e17744c6b50fb SC4232EV-Hypnogram.edf +58719e53fe18d2fc4cb1776ab5d43306beb1325d SC4241E0-PSG.edf +fb1432e303a8f99a2256ce682db95d88772c479f SC4241EC-Hypnogram.edf +5a6277972c5f03572ed99d9ff63fb637945be778 SC4242E0-PSG.edf +bbbf097f4cc6560fc20c903fba2c7055e1549f85 SC4242EA-Hypnogram.edf +7dbc0289707ff70662d367d65de7bec188484d1b SC4251E0-PSG.edf +e38be8134e4a36eb418ca1f06a1fe02b52d0ebf1 SC4251EP-Hypnogram.edf 
+cb3922910ea03d06c1fc5c8f15b71339dc26bc9d SC4252E0-PSG.edf +4cb7a383736e09125a82ef7e4f17b41130c7ac00 SC4252EU-Hypnogram.edf +b81c9bd1875b33713b5eb56b58f1e120841b507f SC4261F0-PSG.edf +501eda59557bb99d530d01bdad3579f1e1158991 SC4261FM-Hypnogram.edf +c9f9ad7cd751d5be91396886a2b64a7c1de564ee SC4262F0-PSG.edf +7ccd12803c5fc602ac1929ff3afd914b894b9143 SC4262FC-Hypnogram.edf +20994715d34edb26113180ee330ce287dbf57b60 SC4271F0-PSG.edf +26c5c7f3a5c350d3505af2857835ce81252c5990 SC4271FC-Hypnogram.edf +9e79eb465e34b7eb6fe27ae3ce35d28d6693d44b SC4272F0-PSG.edf +956fe4b45d29a8999faf280a6168e332afab6abc SC4272FM-Hypnogram.edf +51811913d7854f95c319076e670d988687ca667c SC4281G0-PSG.edf +d188150831e912081dbeda2695231177200c39f9 SC4281GC-Hypnogram.edf +e9f080a766a9b7a247f228e44e9c4ec67e571c95 SC4282G0-PSG.edf +12d777787dd1975eef9015329fd774b2bfa1d53a SC4282GC-Hypnogram.edf +f81c7574a5e5829e006d0b705bf5208a3349c9c7 SC4291G0-PSG.edf +577c1345f6d070d975db5016048722f78b1b414e SC4291GA-Hypnogram.edf +7416f44a3b149b4ca1fc3e53d546a093a7333bb5 SC4292G0-PSG.edf +6e111a15160a31609761f742315df800b1311b3b SC4292GC-Hypnogram.edf +7818e5a02afa89e913111d91ecd651aa3e786e5d SC4301E0-PSG.edf +d49df84bfea28bb241c09b922cd2dc64f57c5ae5 SC4301EC-Hypnogram.edf +d52859ba6a7ded3364b0d8ef2b722e1d3edda060 SC4302E0-PSG.edf +b3d6f687831ee32f6df1da59f2d568c13f9c09d0 SC4302EV-Hypnogram.edf +b62f5104bddf452f4700c85997e51bec17f0243b SC4311E0-PSG.edf +812c34844e834b97949019741fa7f835d973725d SC4311EC-Hypnogram.edf +b0a9b4922665734773abbaba06e7aab32010b862 SC4312E0-PSG.edf +fca1935a8974eac27803e3125cea177995deca11 SC4312EM-Hypnogram.edf +335381ae310e9f1f053c37763eeee74d7d873471 SC4321E0-PSG.edf +67ba7d3b97354deb31db095e748ea3a4014fae2c SC4321EC-Hypnogram.edf +c9fdcfcce7e603b3289b7417891987fd67f6d921 SC4322E0-PSG.edf +40cf9a6397a52c7deda693ca596e928cc2b9f4e9 SC4322EC-Hypnogram.edf +f37cb4df27286e38c604cae943169ff29b1473fc SC4331F0-PSG.edf +ca943e2b73c6404f929c372ebd817b7b3b71b4dd SC4331FV-Hypnogram.edf +5bce6ea9b2d6c9bfb41065e92bf9cc05a11b5b75 SC4332F0-PSG.edf +e4595b0313d5320b0bffefa43260485e19977e3c SC4332FC-Hypnogram.edf +17de25c8f023fe632aa403a6d9525c1cde8eaef5 SC4341F0-PSG.edf +81ba3c0d8320c9ee306f678b4bc9e6e266165886 SC4341FA-Hypnogram.edf +b659037447a1871f4ba72bbe496cfbe507330530 SC4342F0-PSG.edf +e8e74c0905e89a59022ce0814ca9a050748ec9ae SC4342FA-Hypnogram.edf +631900bef36d359a0f5807a7e1b202f80b0427ac SC4351F0-PSG.edf +a15cdf3973b77198d8276dc505dbb35cb39a9b4a SC4351FA-Hypnogram.edf +325423a85890dcc921253bde7c7027d66f14033e SC4352F0-PSG.edf +1e0583b2a58432c964506ff44752d597753658c9 SC4352FV-Hypnogram.edf +30b90aaf965938d569ea362f66e2afa0c08c7017 SC4362F0-PSG.edf +fb870d50ce3f4d961d8b061a83d21e5467e4ae6c SC4362FC-Hypnogram.edf +0dc56fce13b6317f197d0b17c04f5be4af1c964f SC4371F0-PSG.edf +c19b6cbfdf3a33169ce9b4a5dc94f93b696a21ba SC4371FA-Hypnogram.edf +c024c491dd836ed0169300e7171c276fd14b1c44 SC4372F0-PSG.edf +97b2915a8a343efc7b785998c0532beaea2fbe91 SC4372FC-Hypnogram.edf +6098d2b501b82ca0ddc8893547c6990e204e8ba6 SC4381F0-PSG.edf +fdbf653a4a675843c97d0a76ef5e4cebf5d2dbcb SC4381FC-Hypnogram.edf +40ce0168d5f546fcd445996ab614f43823a7c2b1 SC4382F0-PSG.edf +796f8507254c2d8d345171c077dbd855e112eb47 SC4382FW-Hypnogram.edf +28fd8ad1aee307847e2eb579763ebca18e56f540 SC4401E0-PSG.edf +65b5671a89871351ee3da7ea800aad276a445b2a SC4401EC-Hypnogram.edf +3d4bafa57933cfb20c342e8cc54c15916a621454 SC4402E0-PSG.edf +037efea0fc8a6dfa8f85fa1f2fa6fd9a19f2c830 SC4402EW-Hypnogram.edf +30a533b67fdb2adac6a4e83088a07fe1bbaddb6c SC4411E0-PSG.edf 
+5df1bf20d4f29b95a2bdde853b2a157dd9530a8a SC4411EJ-Hypnogram.edf +bc8e6ea829f14da5396a4b250394c1b72d6631c3 SC4412E0-PSG.edf +f46b1dcfe4f4e3c9d4d4c8516dab9759f9c1224e SC4412EM-Hypnogram.edf +e8a5d9e0f160ae7bd0b35d75d77b4c872daa30f8 SC4421E0-PSG.edf +d2e34f9bcaac7af23da4448f742ac6ea3c895ed9 SC4421EA-Hypnogram.edf +80f246adffb92a3785f91368a77b0250aa040462 SC4422E0-PSG.edf +709251cc7ae6556544c153caf9dac7f82bba113b SC4422EA-Hypnogram.edf +194ae942cf80764e81b4cdabeed9e5a57916aab3 SC4431E0-PSG.edf +497ad7e671edab6e7adc9d35a6aa45b7fd9a706b SC4431EM-Hypnogram.edf +c45a66d27ea03bf448903fe30f17838e9a0fa0de SC4432E0-PSG.edf +10fe276e215f9406c0ddedaa48651cf480892476 SC4432EM-Hypnogram.edf +e3a09d832cb79b0095d7a311ef1b6ed7c569b79d SC4441E0-PSG.edf +68d4e44ad54069701972df66d8a81b4ca434bf2f SC4441EC-Hypnogram.edf +fe51d45e9f3e64a61fa8a5e5274b2e4951a9de43 SC4442E0-PSG.edf +efc2b86bb796b0143f61667402612dfbb85cbb78 SC4442EV-Hypnogram.edf +315db0f9d91988ddc2b198f89cc22f96190eff71 SC4451F0-PSG.edf +bc1f755c3367e378091c44481948a72fc7a928e5 SC4451FY-Hypnogram.edf +a06350e1c85b61c30c3d7d5dc640121b416fe30d SC4452F0-PSG.edf +0286d52cdf898ed8e3b17bb26b9c50ef512daf4d SC4452FW-Hypnogram.edf +e4295014c6d4474d8f7f7792c2ea088eb9e43e9f SC4461F0-PSG.edf +8980e770e58e5704bd36124f6b6bd8d5e3506e12 SC4461FA-Hypnogram.edf +53b69cb41339bc69144eaa5a5a42c2937f237fc9 SC4462F0-PSG.edf +0c6d3974e140c1e62ed2cadaed395781575af042 SC4462FJ-Hypnogram.edf +05d71b55de4c86791195391b1cec8b35e447922d SC4471F0-PSG.edf +ee235454dbfe947432f3f813c9a6384f6e42d36a SC4471FA-Hypnogram.edf +7a12c0d6f3005998472b128e06dd645a8619dae7 SC4472F0-PSG.edf +d234d5d6c396bf7ef0a2106a59ee8204429aa3c5 SC4472FA-Hypnogram.edf +c15f6a0e1802dcf74ecec41745677a4932375faf SC4481F0-PSG.edf +50fce6396aceaf35d9d7e16175053a3b78f214d0 SC4481FV-Hypnogram.edf +34d71530fd1da925ba20b4c48a07f7b18153e0c7 SC4482F0-PSG.edf +e3c48563e63eed27b071d4a7b37c45a0f9dc7eef SC4482FJ-Hypnogram.edf +23ea1f5f299c6cd99d434f014d7490621dbbc854 SC4491G0-PSG.edf +36c6c8112524c7bc9553db37601b38984946209b SC4491GJ-Hypnogram.edf +02c975bfc0773928095239b80d00ac5a7ea5880f SC4492G0-PSG.edf +3673eaad8396ef0ec36cb4299541c30653b72e1f SC4492GJ-Hypnogram.edf +1c31fc02412029bc7369979b8c9f5956420748f5 SC4501E0-PSG.edf +eb2621c1670a42eb38dfa86a9bc3326818365f3d SC4501EW-Hypnogram.edf +ff9eae25afa73115e2b184a68e3a72a39efd37e6 SC4502E0-PSG.edf +7605a1893701925ea0fdd047926bbd6c7c043875 SC4502EM-Hypnogram.edf +e12eb259c2894d45b8d0b2f0e75810c2de02237d SC4511E0-PSG.edf +e549275e9182b9e36ade5abb721098e235ecb164 SC4511EJ-Hypnogram.edf +53c5d982139d248736f6dd7ff3f97f635647eacd SC4512E0-PSG.edf +e22966c263f6ae7444704881f5249f6fb5dee0c1 SC4512EW-Hypnogram.edf +af70ffdbd3012615923f6a4901e7c0dd3a0fd8ca SC4522E0-PSG.edf +57af3eaed541229dcb2478c6050f0582e020f878 SC4522EM-Hypnogram.edf +71222ac5b7784ed1d3a79ee3e9036431d6eba9bd SC4531E0-PSG.edf +934dbfeb29f4f4db4b61e36fb8ddab4ddbf4ff94 SC4531EM-Hypnogram.edf +2d472fb64da5d05a546f780da876b90ad26208f9 SC4532E0-PSG.edf +708b43e7d43a6f5719f48c11bd6a81b037aabfc4 SC4532EV-Hypnogram.edf +4d3ec2f85149bb10fed1013831c3aa1f58049229 SC4541F0-PSG.edf +a301385e6fbde02c83f2545f17cdf75d594d37ce SC4541FA-Hypnogram.edf +2909f5b0d3fdb89e19d42b406798e9cbb4615bb6 SC4542F0-PSG.edf +9548ed641fb961fa46706339891a9453b731369f SC4542FW-Hypnogram.edf +0bf97e463cbcefb7df48bca712f29dcc74223330 SC4551F0-PSG.edf +e50b44e6b049baaeb528c31563642b2a2b933834 SC4551FC-Hypnogram.edf +dfa0adaae50110bdd0077483c31d57956020fcb9 SC4552F0-PSG.edf +7380403f8d72fa4c30013cd026cc1dad23ac2b3e SC4552FW-Hypnogram.edf 
+1a9baf1b072ca9d2784a404292169ff3177ea83f SC4561F0-PSG.edf +b31a2dfe652508df46f6afe03ab904c333f7b818 SC4561FJ-Hypnogram.edf +4c7081edf572cadee51d30174cd65aa6c658f5a9 SC4562F0-PSG.edf +676ab92dbc6532f67d672f80337c71f817fd3a6d SC4562FJ-Hypnogram.edf +e67f3bd381ddfb96d584f6c6d6f6762087d6553d SC4571F0-PSG.edf +08ee39eb94d819968512297ca883f9bca046de9c SC4571FV-Hypnogram.edf +deb2aef7a6a4b502c819345a7151ffc2529d4ba7 SC4572F0-PSG.edf +7a38cbe581167dfec27a15935e6d386b228616fa SC4572FC-Hypnogram.edf +16a1edbd6a089386fd7de72aef802182d0a2959d SC4581G0-PSG.edf +bfc729575cfdf5f409be2de47dad4e00d43195bf SC4581GM-Hypnogram.edf +9da93f4c2459dd4fe2e5ee6a171904d4f604cd6e SC4582G0-PSG.edf +acbade13cfae4fc5fbda2d0766feea83d114aa23 SC4582GP-Hypnogram.edf +017793b040df8a860df0e43e3e0a496e2cb3f9c1 SC4591G0-PSG.edf +f3bb949a7f82acb7fd3d8f35e92efee1402a383f SC4591GY-Hypnogram.edf +1e284bddd7952862327c83092db21805e6ab6c38 SC4592G0-PSG.edf +58d1678e9ec9f49c9c6a15031dee26d802026851 SC4592GY-Hypnogram.edf +ece6d6ce09fac6fc521cf3f1b536f1ea2a8a1778 SC4601E0-PSG.edf +8f77b05fe58f43cdfdcdba7cc3d27abcac7d37f2 SC4601EC-Hypnogram.edf +0e50df304ced29651267f43689ce49e063f808d6 SC4602E0-PSG.edf +1c52de92668fe4c89cd5e270e17017ef47880991 SC4602EJ-Hypnogram.edf +2cc6e418c0b7af472aa34d2bbd5ece85bdb6a879 SC4611E0-PSG.edf +f5715ab48f24221c28c1d5c45508c8bb58c912ec SC4611EG-Hypnogram.edf +6593e1af07101fa4c5bce8984296858be17e7d4f SC4612E0-PSG.edf +cedb61bbe7a273b12f45579963d5a84f2ab21811 SC4612EA-Hypnogram.edf +31cd2cae56977c6b872311f2a6e60827748b973d SC4621E0-PSG.edf +7acc5296b33ca4eee8d6577064c8c651ee96e527 SC4621EV-Hypnogram.edf +7a7e226d47dccd959305e3f633686335c8e66557 SC4622E0-PSG.edf +9957c9c9e0c705aac0f7125f411b2531a722601c SC4622EJ-Hypnogram.edf +6dfb32aa4c94968a52d61b90a38573d178669bfb SC4631E0-PSG.edf +48e28f93fc71ffc539776196f9d9d1365415e0b4 SC4631EM-Hypnogram.edf +3baa8081b30cc3dfece9d550289dfc94812530d5 SC4632E0-PSG.edf +cd2765ebdabc66cb4ac2320d02e3b7ab0340ede4 SC4632EA-Hypnogram.edf +0e5d109a929490cbecf59573577a97df07a05cd0 SC4641E0-PSG.edf +7b896dc5b34d71381d8462001dc3e05b145cf48c SC4641EP-Hypnogram.edf +03169b7ee9de83b2e17e9bd0d6274965e9518b37 SC4642E0-PSG.edf +d8a870d26e468a643eaebe3275e5e2912690c0d8 SC4642EP-Hypnogram.edf +f2134a2ad001bc146f3e2d9d76cb7f00f03bbe52 SC4651E0-PSG.edf +fad4311c7e11a9aa9a73a8e48d6fa966db61e71d SC4651EP-Hypnogram.edf +aa66553cb0132634d7d11ffe7fab80aa5119b3d7 SC4652E0-PSG.edf +6ed9c4f66c03e56f86730ddd8986f3600c040d4a SC4652EG-Hypnogram.edf +c6057505d2acf7b08371e266cf0fca1bfeb1e4e1 SC4661E0-PSG.edf +06474e72126d2a00c1968e70730e1deac060f94e SC4661EJ-Hypnogram.edf +24d278194360dc78ebd0cfe940fb4d5f7f93ccbc SC4662E0-PSG.edf +07ca0fbfb6030289a089f84e50d7bbfd043f31ad SC4662EJ-Hypnogram.edf +4357aa9fedf0b53896d41e5dccd7b525f7212177 SC4671G0-PSG.edf +459889157743c434933194446af5168cb145dfcb SC4671GJ-Hypnogram.edf +fd86b31a5c22176e1887e2fac460edce42bd2fdf SC4672G0-PSG.edf +dedb182b8c063cefabf1763eb19cd26d0608017f SC4672GV-Hypnogram.edf +3f60b5ad5e1092e90c38f2072b3c041bd7313550 SC4701E0-PSG.edf +196a388f60ee4aecfa982f89e2db03ff91e906e7 SC4701EC-Hypnogram.edf +a6853fee26b1541f85be7ddc3f42f06ccfe2fcfc SC4702E0-PSG.edf +464f7382ec11703b5bc6512930fdfbb1ab6d030a SC4702EA-Hypnogram.edf +e97d691bfecf770ca4e47289b846886c16ef19fb SC4711E0-PSG.edf +81ec5d0288f36c4368e5f06f21980f99774bf533 SC4711EC-Hypnogram.edf +9b99be6cb45af22bdbead7ea01f1375631c9b365 SC4712E0-PSG.edf +66b121441a45ae19852b7002fd78c2caf236631a SC4712EA-Hypnogram.edf +5c9caa01cc1f8065f87195c9f2dc2aeebf83c03d SC4721E0-PSG.edf 
+efe62b1e8bac1ea08dbf12374ca6812a6f271d5e SC4721EC-Hypnogram.edf +a473f32a6075e9ed830a8e9a246129e05959e8b7 SC4722E0-PSG.edf +efb2358de27da4219f64f7bfb37912dc9efb0281 SC4722EM-Hypnogram.edf +b03e4a2df4d086778f3426ed7b6c5bf800cbfe92 SC4731E0-PSG.edf +eb3dc65d7184d676a6678a70b18730d11a414588 SC4731EM-Hypnogram.edf +574ff5c0634137f7d5c51eb5f7626b451f1f9b9d SC4732E0-PSG.edf +77a523ca9ef4698885b681bf4e27d28dc5c58424 SC4732EJ-Hypnogram.edf +e6ff7462f4ce401e9aff9b3d9c93f0710bc37678 SC4741E0-PSG.edf +bda4d1ab190f4160ec7a3f4420e30d718f02369e SC4741EA-Hypnogram.edf +2b09f78a2f276061c8758a55585fae7355b38111 SC4742E0-PSG.edf +d4bb4266859c2f92ae8ba96111d59d8ab467f6a0 SC4742EC-Hypnogram.edf +17c356a283b026e507331209512453573bcfebe5 SC4751E0-PSG.edf +d35737e86979127ea01b95dcecea018dd2e44f45 SC4751EC-Hypnogram.edf +b650a49d6e3bb81971e4689c720ee079404857e6 SC4752E0-PSG.edf +3d1c86d8d7ecb6ff79ee12cb950690e929394161 SC4752EM-Hypnogram.edf +8bde3f0d5ab6a592f229dfd7886341b3f800bdb3 SC4761E0-PSG.edf +3dbf15f28a293ac89dcf458d844a8c6443aaf1e6 SC4761EP-Hypnogram.edf +7bdc8eacf1a6502c8f007b08556b7e8b52180d44 SC4762E0-PSG.edf +f6ae10f082a10ead671bfd5fdc50f62c42b9f10d SC4762EG-Hypnogram.edf +ac8c2be9175cb02e00cccb5d5df2acfaf05971cc SC4771G0-PSG.edf +09e80b973502d89368d7823ad4aec7417b735f6e SC4771GC-Hypnogram.edf +eea8671791936358037e5d096491865069989a85 SC4772G0-PSG.edf +25a3b8859091a70ca0cff9ebb777879aa156689e SC4772GC-Hypnogram.edf +0ce00a144dd9bc1b0e20cd30e6501a3852e4dbef SC4801G0-PSG.edf +f82d2b8e45723f2a69f8c30286cc68486b0792a6 SC4801GC-Hypnogram.edf +8959ada929c07945757bd6c9ef0267e7c9427a66 SC4802G0-PSG.edf +41ff2d1118425f5828342c07aa58b9d346755b1a SC4802GV-Hypnogram.edf +dcae3307af54ccf5349945e2fa493464de0a5da2 SC4811G0-PSG.edf +2406ce37b86fc3c7492a3ebe89ae58d15686b33d SC4811GG-Hypnogram.edf +fd93757cf6bcf45854fca960a067612352e05547 SC4812G0-PSG.edf +244b3bbb4987db0a9cef85950d14899ab9a3aec4 SC4812GV-Hypnogram.edf +9008c6ffc917fb90a3d399e768fe3c563a144a2f SC4821G0-PSG.edf +59534244c603cd5c3c27db26ae2f014983ec6c9b SC4821GC-Hypnogram.edf +84f9a60f6b0e7ac33388d8f6492096bcfa60bc18 SC4822G0-PSG.edf +8d14c371bc290658469729addee4461866bb67e2 SC4822GC-Hypnogram.edf +b9d11484126ebff1884034396d6a20c62c0ef48d ST7011J0-PSG.edf +ff28e5e01296cefed49ae0c27cfb3ebc42e710bf ST7011JP-Hypnogram.edf +b97c67d2ec40721349fd6faea32ea7155a11940a ST7012J0-PSG.edf +7a98a0ebba9e5e8fc4aac9ab82849385570d7789 ST7012JP-Hypnogram.edf +552e579d96e6c4ae083c7e1422e11b945ebcdabd ST7021J0-PSG.edf +635b07240047ade50649ff0f72ccde792f464f09 ST7021JM-Hypnogram.edf +ebabfa224599201d9baf91311f78f6410971810f ST7022J0-PSG.edf +228c608743abcc28f8c4946e8394ecf8e6ada89c ST7022JM-Hypnogram.edf +41f8e344b9872d93c8c2f2da283252231584b08f ST7041J0-PSG.edf +422655bae4525d121bd45fead048207be9b34c4b ST7041JO-Hypnogram.edf +229ee3bb4d060332c219c3dc1153732ab5499d57 ST7042J0-PSG.edf +eff297358a0c9d175109ba692ac3f9f4cd2c08ed ST7042JO-Hypnogram.edf +17b186214e8944667571f52098564e377b32d695 ST7051J0-PSG.edf +d7696bd1b891dd85e96e20ea727dcebe49ab6dfd ST7051JA-Hypnogram.edf +489fcb38c07688192d9c0eae5455d95241028ad8 ST7052J0-PSG.edf +64f2718c004e64ab598979da139b90452febc9bf ST7052JA-Hypnogram.edf +9fb2b4ed47a6d4b2f0b60a354123e491e8738b19 ST7061J0-PSG.edf +fd9214d026453fce71efa2975ea732e1c1458f69 ST7061JR-Hypnogram.edf +afc5599194648da5568dafa1a811818e77df4842 ST7062J0-PSG.edf +c2a4abe15f08f230b734a328494ab0d2ae9dc786 ST7062JR-Hypnogram.edf +010a65ad86b79d19c372a421f0e7c975e56278c8 ST7071J0-PSG.edf +bc08c797bb7aaf92de1c869d46c6dd4590939996 ST7071JA-Hypnogram.edf 
+15c5aa5591e35d60ba25044cdd4b3d748d3c0cfc ST7072J0-PSG.edf +1a7813b7a2389c0346e3844835590b9cb2f40f56 ST7072JA-Hypnogram.edf +cb66a0493d90d0d1204936e3e7c944ed536265e3 ST7081J0-PSG.edf +8259b52c62203b85268d23b3a2d87605fdcfa2a6 ST7081JW-Hypnogram.edf +b1cb29c7a7321b7e628d04a477338c4f62f0c093 ST7082J0-PSG.edf +bc33c3aba61c0fa937ef56d4ce7b1468c80663b5 ST7082JW-Hypnogram.edf +b046dd63d92339914eca0489d8a4c566b69e7723 ST7091J0-PSG.edf +af845641a8118d004bcfa6b597f23517e3a752e9 ST7091JE-Hypnogram.edf +2986f4d64f5118c5e356a2abe6bf86521ffde339 ST7092J0-PSG.edf +ec89bb908ff70e123ffa94bc2c11bb1ce54bcb6a ST7092JE-Hypnogram.edf +5662b560f095b8397303cced87e43d407a0d18f7 ST7101J0-PSG.edf +5919542c566d882fbf947c66f4858ad17199103a ST7101JE-Hypnogram.edf +f697a140f18d1005107fcbb7c81d85a5e8cb6ec6 ST7102J0-PSG.edf +1f05e92c9ca076350f981d0ec75ad720606bacbc ST7102JE-Hypnogram.edf +e2bf9db482f230a56372603d23fb12f5c56062f7 ST7111J0-PSG.edf +5964553fe07cbca302526b2153a2507f7d02fab8 ST7111JE-Hypnogram.edf +d3c7907b9b1e4f087f31bd655548b8673b6ec735 ST7112J0-PSG.edf +e4d8406eaca361d2c5d9953b3c67ed1098dd5925 ST7112JE-Hypnogram.edf +6e90bac48e48f71e5572944a364009eab6ea818d ST7121J0-PSG.edf +a991ed3d8be6d55ee563545077f3d280466a4989 ST7121JE-Hypnogram.edf +ae7426c464296ec0a839ccaa9763e3f2c57f41f1 ST7122J0-PSG.edf +b6c2c21e3cf17b371b31af78c64f28aa5811e36f ST7122JE-Hypnogram.edf +d0d6c83b76f627b067e0daac3c181e3666f8ab08 ST7131J0-PSG.edf +91ee1bd29b156b33e03cb8c324a8fac15ec06674 ST7131JR-Hypnogram.edf +54a50dcc40e3d6677b80c629b2f908339d9a7c3e ST7132J0-PSG.edf +028a5c4ed911d67a17b45f12966b32c46949d374 ST7132JR-Hypnogram.edf +6bf8feeabc2259d15f1f535abda90caacc8d4a86 ST7141J0-PSG.edf +203e78e02a92a9f85f07790398f64c66f248e5cc ST7141JE-Hypnogram.edf +b42eb28089bbdcbf3244dead53fd01d5f5ac3ddf ST7142J0-PSG.edf +1f7cc3a1923dd6a3504c82d76f820555ad0b6a1b ST7142JE-Hypnogram.edf +c0df1253b6509c4b4ed9e1283f26cf206a8c725c ST7151J0-PSG.edf +cfcb0089e22244bc5047f61e72a39735cbdc36cf ST7151JA-Hypnogram.edf +faefa07a1ca180861d6f26d5f35285c009dca21e ST7152J0-PSG.edf +27e9b4527eea33ded9072db3c6626f94a966da58 ST7152JA-Hypnogram.edf +8a4f1c44a17b5d665cc30f1141d003043274ac2b ST7161J0-PSG.edf +5a1ef1d375b01f83264e84db4af58acded68f15e ST7161JM-Hypnogram.edf +66925c8fa9f6da18f8590dcf2a6174cfe46e912d ST7162J0-PSG.edf +18b3d7eb9685ec8131fc0a8f81ba6205122595dc ST7162JM-Hypnogram.edf +67c47cb92de8806c60303a4baa87ca6cf52a2245 ST7171J0-PSG.edf +13c371fc4384751cc4bdd3044c6a0813ea12816e ST7171JA-Hypnogram.edf +a46118a5ca9cfaa62ca11c6a8b079e82877305ef ST7172J0-PSG.edf +8de0f3f59dd27d07f5f6a74216814ced08f104b5 ST7172JA-Hypnogram.edf +501f2f9d9ebe15e6dfc86fda6e90f9a54a39660a ST7181J0-PSG.edf +483aa0b448393d61043c98c204c93d4c60abb6bd ST7181JR-Hypnogram.edf +0eab40d3687a2cf708e48137eab26c0c43b75773 ST7182J0-PSG.edf +50efc607882659f8229db773703f5b973b471ed4 ST7182JR-Hypnogram.edf +b1b10cd45a7c0f91286c6fc3f755e59af483bac1 ST7191J0-PSG.edf +e7fcb89cf0f1484ab114bf40dcf2bf4cd413696b ST7191JR-Hypnogram.edf +e80de913aa41b987a43d94cf8f0106d61e4e883b ST7192J0-PSG.edf +def09a7d469984005b0c8414b7995ae8e269fd15 ST7192JR-Hypnogram.edf +454233ae9e6a948848030c5f4d9e60dfcb0facde ST7201J0-PSG.edf +17a0e8aebb885a960a74343bace57d2ab0b6296a ST7201JO-Hypnogram.edf +1e97e392968415da67432842c952344b6d3cdc8c ST7202J0-PSG.edf +ed26efdb6b2d9e815f2a725970262cb9c15c7b98 ST7202JO-Hypnogram.edf +c6582cfa8fcf6542a688fa8842011a93d86f2c60 ST7211J0-PSG.edf +b8756397056f623674c3b03db808b2c8c64b0a0a ST7211JJ-Hypnogram.edf +389f3920b39b4b9ad4fba6f91198299b7c6f6676 ST7212J0-PSG.edf 
+e25e47adf0c0f09df542ef061272ed9569fb80ea ST7212JJ-Hypnogram.edf +58315bec82d381dec56bf96924a94014462bb608 ST7221J0-PSG.edf +7656827835362b7b44b296bad83ff6001e14f489 ST7221JA-Hypnogram.edf +4961a08b87416246b8b8186190eca0e96da6a50d ST7222J0-PSG.edf +da840db60086e43a2429fb1322ede5e5976b3cda ST7222JA-Hypnogram.edf +7a850ce4bc6bd14ea072f3a45b002f8015cf2f14 ST7241J0-PSG.edf +bbaac4f2c2f330f70583eb179d855fcf42b4fbff ST7241JO-Hypnogram.edf +5c8bd182bfc9609929094769718b2835fe1099ad ST7242J0-PSG.edf +f70b3dfce2c14f01221a66a4acb522df1affffdb ST7242JO-Hypnogram.edf diff --git a/python/libs/mne/datasets/sleep_physionet/__init__.py b/python/libs/mne/datasets/sleep_physionet/__init__.py new file mode 100644 index 0000000..04536a9 --- /dev/null +++ b/python/libs/mne/datasets/sleep_physionet/__init__.py @@ -0,0 +1 @@ +from . import age, temazepam, _utils diff --git a/python/libs/mne/datasets/sleep_physionet/_utils.py b/python/libs/mne/datasets/sleep_physionet/_utils.py new file mode 100644 index 0000000..85e6088 --- /dev/null +++ b/python/libs/mne/datasets/sleep_physionet/_utils.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Joan Massich +# +# License: BSD Style. + +import os +import os.path as op + +import numpy as np + +from ...utils import (verbose, _TempDir, _check_pandas_installed, + _on_missing) +from ..utils import _get_path + +AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv') +TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__), + 'temazepam_records.csv') + +TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls' # noqa: E501 +TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885' + +AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls' # noqa: E501 +AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f' + +sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS') + + +def _fetch_one(fname, hashsum, path, force_update, base_url): + import pooch + # Fetch the file + url = base_url + '/' + fname + destination = op.join(path, fname) + if not op.isfile(destination) or force_update: + if op.isfile(destination): + os.remove(destination) + if not op.isdir(op.dirname(destination)): + os.makedirs(op.dirname(destination)) + pooch.retrieve( + url=url, + known_hash=f"sha1:{hashsum}", + path=path, + fname=fname + ) + return destination + + +@verbose +def _data_path(path=None, verbose=None): + """Get path to local copy of EEG Physionet age Polysomnography dataset URL. + + This is a low-level function useful for getting a local copy of a + remote Polysomnography dataset :footcite:`KempEtAl2000` which is available + at PhysioNet :footcite:`GoldbergerEtAl2000`. + + Parameters + ---------- + path : None | str + Location of where to look for the data storing location. + If None, the environment variable or config parameter + ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data" + directory is used. If the dataset is not found under the given path, + the data will be automatically downloaded to the specified folder. + %(verbose)s + + Returns + ------- + path : list of Path + Local path to the given data file. This path is contained inside a list + of length one, for compatibility. + + References + ---------- + .. 
footbibliography:: + """ # noqa: E501 + key = 'PHYSIONET_SLEEP_PATH' + name = 'PHYSIONET_SLEEP' + path = _get_path(path, key, name) + return op.join(path, 'physionet-sleep-data') + + +def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS): + """Helper function to download Physionet's temazepam dataset records.""" + import pooch + + pd = _check_pandas_installed() + tmp = _TempDir() + + # Download subjects info. + subjects_fname = op.join(tmp, 'ST-subjects.xls') + pooch.retrieve( + url=TEMAZEPAM_RECORDS_URL, + known_hash=f"sha1:{TEMAZEPAM_RECORDS_URL_SHA1}", + path=tmp, + fname=op.basename(subjects_fname) + ) + + # Load and massage the checksums. + sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, + names=['sha', 'fname'], engine='python') + select_age_records = (sha1_df.fname.str.startswith('ST') & + sha1_df.fname.str.endswith('edf')) + sha1_df = sha1_df[select_age_records] + sha1_df['id'] = [name[:6] for name in sha1_df.fname] + + # Load and massage the data. + data = pd.read_excel(subjects_fname, header=[0, 1]) + data = data.set_index(('Subject - age - sex', 'Nr')) + data.index.name = 'subject' + data.columns.names = [None, None] + data = (data.set_index([('Subject - age - sex', 'Age'), + ('Subject - age - sex', 'M1/F2')], append=True) + .stack(level=0).reset_index()) + + data = data.rename(columns={('Subject - age - sex', 'Age'): 'age', + ('Subject - age - sex', 'M1/F2'): 'sex', + 'level_3': 'drug'}) + data['id'] = ['ST7{:02d}{:1d}'.format(s, n) + for s, n in zip(data.subject, data['night nr'])] + + data = pd.merge(sha1_df, data, how='outer', on='id') + data['record type'] = (data.fname.str.split('-', expand=True)[1] + .str.split('.', expand=True)[0] + .astype('category')) + + data = data.set_index(['id', 'subject', 'age', 'sex', 'drug', + 'lights off', 'night nr', 'record type']).unstack() + data.columns = [l1 + '_' + l2 for l1, l2 in data.columns] + data = data.reset_index().drop(columns=['id']) + + data['sex'] = (data.sex.astype('category') + .cat.rename_categories({1: 'male', 2: 'female'})) + + data['drug'] = data['drug'].str.split(expand=True)[0] + data['subject_orig'] = data['subject'] + data['subject'] = data.index // 2 # to make sure index is from 0 to 21 + + # Save the data. + data.to_csv(fname, index=False) + + +def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS): + """Helper function to download Physionet's age dataset records.""" + import pooch + pd = _check_pandas_installed() + tmp = _TempDir() + + # Download subjects info. + subjects_fname = op.join(tmp, 'SC-subjects.xls') + pooch.retrieve( + url=AGE_RECORDS_URL, + known_hash=f"sha1:{AGE_RECORDS_URL_SHA1}", + path=tmp, + fname=op.basename(subjects_fname) + ) + + # Load and massage the checksums. + sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, + names=['sha', 'fname'], engine='python') + select_age_records = (sha1_df.fname.str.startswith('SC') & + sha1_df.fname.str.endswith('edf')) + sha1_df = sha1_df[select_age_records] + sha1_df['id'] = [name[:6] for name in sha1_df.fname] + + # Load and massage the data.
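    # Editor's note (descriptive comment, not upstream code): the block below
    #   1. reads PhysioNet's SC-subjects.xls metadata sheet,
    #   2. maps the numeric sex codes (the sheet encodes F=1) onto
    #      'female'/'male' labels,
    #   3. rebuilds each record id as 'SC4<subject:02d><night>' so the rows
    #      can be joined against the SHA1SUMS table parsed above, and
    #   4. writes the merged table to age_records.csv for fetch_data() to use.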
+ data = pd.read_excel(subjects_fname) + data = data.rename(index=str, columns={'sex (F=1)': 'sex', + 'LightsOff': 'lights off'}) + data['sex'] = (data.sex.astype('category') + .cat.rename_categories({1: 'female', 2: 'male'})) + + data['id'] = ['SC4{:02d}{:1d}'.format(s, n) + for s, n in zip(data.subject, data.night)] + + data = data.set_index('id').join(sha1_df.set_index('id')).dropna() + + data['record type'] = (data.fname.str.split('-', expand=True)[1] + .str.split('.', expand=True)[0] + .astype('category')) + + data = data.reset_index().drop(columns=['id']) + data = data[['subject', 'night', 'record type', 'age', 'sex', 'lights off', + 'sha', 'fname']] + + # Save the data. + data.to_csv(fname, index=False) + + +def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'): + """Check whether subjects are available. + + Parameters + ---------- + subjects : list + Subject numbers to be checked. + n_subjects : int + Number of subjects available. + missing : list | None + Subject numbers that are missing. + on_missing : 'raise' | 'warn' | 'ignore' + What to do if one or several subjects are not available. Valid keys + are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing + is 'warn' it will proceed but warn, if 'ignore' it will proceed + silently. + """ + valid_subjects = np.arange(n_subjects) + if missing is not None: + valid_subjects = np.setdiff1d(valid_subjects, missing) + unknown_subjects = np.setdiff1d(subjects, valid_subjects) + if unknown_subjects.size > 0: + subjects_list = ', '.join([str(s) for s in unknown_subjects]) + msg = (f'This dataset contains subjects 0 to {n_subjects - 1} with ' + f'missing subjects {missing}. Unknown subjects: ' + f'{subjects_list}.') + _on_missing(on_missing, msg) diff --git a/python/libs/mne/datasets/sleep_physionet/age.py b/python/libs/mne/datasets/sleep_physionet/age.py new file mode 100644 index 0000000..2f54403 --- /dev/null +++ b/python/libs/mne/datasets/sleep_physionet/age.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort + # Joan Massich + # + # License: BSD Style. + + import numpy as np + + from ...utils import verbose + from ._utils import _fetch_one, _data_path, _on_missing, AGE_SLEEP_RECORDS + from ._utils import _check_subjects + + data_path = _data_path # expose _data_path(..) as data_path(..) + + BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/' # noqa: E501 + + + @verbose + def fetch_data(subjects, recording=(1, 2), path=None, force_update=False, + base_url=BASE_URL, on_missing='raise', *, verbose=None): # noqa: D301, E501 + """Get paths to local copies of PhysioNet Polysomnography dataset files. + + This will fetch data from the publicly available subjects from PhysioNet's + study of age effects on sleep in healthy subjects + :footcite:`MourtazaevEtAl1995,GoldbergerEtAl2000`. This + corresponds to a subset of 153 recordings from 37 males and 41 females that + were 25-101 years old at the time of the recordings. There are two night + recordings per subject except for subjects 13, 36 and 52 which have one + record missing each due to missing recording hardware. + + See more details in + `physionet website `_. + + Parameters + ---------- + subjects : list of int + The subjects to use. Can be in the range of 0-82 (inclusive), however + the following subjects are not available: 39, 68, 69, 78 and 79. + recording : list of int + The night recording indices. Valid values are: [1], [2], or [1, 2].
+ The following recordings are not available: recording 1 for subject 36 + and 52, and recording 2 for subject 13. + path : None | str + Location of where to look for the PhysioNet data storing location. + If None, the environment variable or config parameter + ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data" + directory is used. If the Polysomnography dataset is not found under + the given path, the data will be automatically downloaded to the + specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + base_url : str + The URL root. + on_missing : 'raise' | 'warn' | 'ignore' + What to do if one or several recordings are not available. Valid keys + are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing + is 'warn' it will proceed but warn, if 'ignore' it will proceed + silently. + %(verbose)s + + Returns + ------- + paths : list + List of local data paths of the given type. + + See Also + -------- + mne.datasets.sleep_physionet.temazepam.fetch_data + + Notes + ----- + For example, one could do: + + >>> from mne.datasets import sleep_physionet + >>> sleep_physionet.age.fetch_data(subjects=[0]) # doctest: +SKIP + + This would download data for subject 0 if it isn't there already. + + References + ---------- + .. footbibliography:: + """ # noqa: E501 + records = np.loadtxt(AGE_SLEEP_RECORDS, + skiprows=1, + delimiter=',', + usecols=(0, 1, 2, 6, 7), + dtype={'names': ('subject', 'record', 'type', 'sha', + 'fname'), + 'formats': (' +# Joan Massich + # + # License: BSD Style. + + import numpy as np + + from ...utils import verbose + from ._utils import _fetch_one, _data_path, TEMAZEPAM_SLEEP_RECORDS + from ._utils import _check_subjects + + data_path = _data_path # expose _data_path(..) as data_path(..) + + BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/' # noqa: E501 + + + @verbose + def fetch_data(subjects, path=None, force_update=False, base_url=BASE_URL, *, + verbose=None): + """Get paths to local copies of PhysioNet Polysomnography dataset files. + + This will fetch data from the publicly available subjects from PhysioNet's + study of Temazepam effects on sleep :footcite:`KempEtAl2000`. This + corresponds to a set of 22 subjects. Subjects had mild difficulty falling + asleep but were otherwise healthy. + + See more details in the `physionet website + `_ + :footcite:`GoldbergerEtAl2000`. + + Parameters + ---------- + subjects : list of int + The subjects to use. Can be in the range of 0-21 (inclusive). + path : None | str + Location of where to look for the PhysioNet data storing location. + If None, the environment variable or config parameter + ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data" + directory is used. If the Polysomnography dataset is not found under + the given path, the data will be automatically downloaded to the + specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + base_url : str + The base URL to download from. + %(verbose)s + + Returns + ------- + paths : list + List of local data paths of the given type. + + See Also + -------- + mne.datasets.sleep_physionet.age.fetch_data + + Notes + ----- + For example, one could do: + + >>> from mne.datasets import sleep_physionet + >>> sleep_physionet.temazepam.fetch_data(subjects=[1]) # doctest: +SKIP + + This would download data for subject 1 if it isn't there already. + + References + ---------- + ..
footbibliography:: + """ + records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS, + skiprows=1, + delimiter=',', + usecols=(0, 3, 6, 7, 8, 9), + dtype={'names': ('subject', 'record', 'hyp sha', + 'psg sha', 'hyp fname', 'psg fname'), + 'formats': (' +# Joan Massich +# +# License: BSD Style. + +import os.path as op +import numpy as np +import pytest + +from numpy.testing import assert_array_equal +import pooch + +from mne.utils import requires_good_network +from mne.utils import requires_pandas, requires_version +from mne.datasets.sleep_physionet import age, temazepam +from mne.datasets.sleep_physionet._utils import _update_sleep_temazepam_records +from mne.datasets.sleep_physionet._utils import _update_sleep_age_records +from mne.datasets.sleep_physionet._utils import AGE_SLEEP_RECORDS +from mne.datasets.sleep_physionet._utils import TEMAZEPAM_SLEEP_RECORDS + + +@pytest.fixture(scope='session') +def physionet_tmpdir(tmp_path_factory): + """Fixture exposing a temporary directory for testing.""" + return str(tmp_path_factory.mktemp('physionet_files')) + + +class _FakeFetch: + + def __init__(self): + self.call_args_list = list() + + def __call__(self, *args, **kwargs): + self.call_args_list.append((args, kwargs)) + + @property + def call_count(self): + return len(self.call_args_list) + + +def _keep_basename_only(path_structure): + return np.vectorize(op.basename)(np.array(path_structure)) + + +def _get_expected_url(name): + base = 'https://physionet.org/physiobank/database/sleep-edfx/' + midle = 'sleep-cassette/' if name.startswith('SC') else 'sleep-telemetry/' + return base + midle + '/' + name + + +def _get_expected_path(base, name): + return op.join(base, name) + + +def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs, + base_path): + # Check mocked_func has been called the right amount of times. + assert mocked_func.call_count == len(call_fname_hash_pairs) + + # Check it has been called with the right parameters in the right + # order. 
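    # Editor's note (descriptive comment, not upstream code): pooch.retrieve()
    # was replaced by a _FakeFetch instance, so each recorded kwargs dict is
    # inspected below: known_hash is split as '<type>:<digest>' and the URL,
    # destination path, digest and hash type are compared against the
    # expected SHA1SUMS entries.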
+ for idx, current in enumerate(call_fname_hash_pairs): + _, call_kwargs = mocked_func.call_args_list[idx] + hash_type, hash = call_kwargs['known_hash'].split(':') + assert call_kwargs['url'] == _get_expected_url(current['name']) + assert op.join(call_kwargs['path'], call_kwargs['fname']) == \ + _get_expected_path(base_path, current['name']) + assert hash == current['hash'] + assert hash_type == 'sha1' + + +@pytest.mark.timeout(60) +@pytest.mark.xfail(strict=False) +@requires_good_network +@requires_pandas +@requires_version('xlrd', '0.9') +def test_run_update_age_records(tmp_path): + """Test Sleep Physionet URL handling.""" + import pandas as pd + fname = op.join(str(tmp_path), "records.csv") + _update_sleep_age_records(fname) + data = pd.read_csv(fname) + pd.testing.assert_frame_equal(data, pd.read_csv(AGE_SLEEP_RECORDS)) + + +@pytest.mark.parametrize('subject', [39, 68, 69, 78, 79, 83]) +def test_sleep_physionet_age_missing_subjects(physionet_tmpdir, subject, + download_is_error): + """Test handling of missing subjects in Sleep Physionet age fetcher.""" + with pytest.raises( + ValueError, match='This dataset contains subjects 0 to 82'): + age.fetch_data( + subjects=[subject], recording=[1], on_missing='raise', + path=physionet_tmpdir) + with pytest.warns(RuntimeWarning, + match='This dataset contains subjects 0 to 82'): + age.fetch_data( + subjects=[subject], recording=[1], on_missing='warn', + path=physionet_tmpdir) + paths = age.fetch_data( + subjects=[subject], recording=[1], on_missing='ignore', + path=physionet_tmpdir) + assert paths == [] + + +@pytest.mark.parametrize('subject,recording', [(13, 2), (36, 1), (52, 1)]) +def test_sleep_physionet_age_missing_recordings(physionet_tmpdir, subject, + recording, download_is_error): + """Test handling of missing recordings in Sleep Physionet age fetcher.""" + with pytest.raises( + ValueError, match=f'Requested recording {recording} for subject'): + age.fetch_data(subjects=[subject], recording=[recording], + on_missing='raise', path=physionet_tmpdir) + with pytest.warns(RuntimeWarning, + match=f'Requested recording {recording} for subject'): + age.fetch_data(subjects=[subject], recording=[recording], + on_missing='warn', path=physionet_tmpdir) + paths = age.fetch_data(subjects=[subject], recording=[recording], + on_missing='ignore', path=physionet_tmpdir) + assert paths == [] + + +def test_sleep_physionet_age(physionet_tmpdir, monkeypatch, download_is_error): + """Test Sleep Physionet URL handling.""" + # check download_is_error patching + with pytest.raises(AssertionError, match='Test should not download'): + age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) + # then patch + my_func = _FakeFetch() + monkeypatch.setattr(pooch, 'retrieve', my_func) + + paths = age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) + assert_array_equal(_keep_basename_only(paths), + [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf']]) + + paths = age.fetch_data(subjects=[0, 1], recording=[1], + path=physionet_tmpdir) + assert_array_equal(_keep_basename_only(paths), + [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], + ['SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf']]) + + paths = age.fetch_data(subjects=[0], recording=[1, 2], + path=physionet_tmpdir) + assert_array_equal(_keep_basename_only(paths), + [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], + ['SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf']]) + + EXPECTED_CALLS = ( + {'name': 'SC4001E0-PSG.edf', + 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, + {'name': 
'SC4001EC-Hypnogram.edf', + 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, + {'name': 'SC4001E0-PSG.edf', + 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, + {'name': 'SC4001EC-Hypnogram.edf', + 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, + {'name': 'SC4011E0-PSG.edf', + 'hash': '4d17451f7847355bcab17584de05e7e1df58c660'}, + {'name': 'SC4011EH-Hypnogram.edf', + 'hash': 'd582a3cbe2db481a362af890bc5a2f5ca7c878dc'}, + {'name': 'SC4001E0-PSG.edf', + 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, + {'name': 'SC4001EC-Hypnogram.edf', + 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, + {'name': 'SC4002E0-PSG.edf', + 'hash': 'c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d'}, + {'name': 'SC4002EC-Hypnogram.edf', + 'hash': '386230188a3552b1fc90bba0fb7476ceaca174b6'}) + base_path = age.data_path(path=physionet_tmpdir) + _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) + + +@pytest.mark.xfail(strict=False) +@requires_good_network +@requires_pandas +@requires_version('xlrd', '0.9') +def test_run_update_temazepam_records(tmp_path): + """Test Sleep Physionet URL handling.""" + import pandas as pd + fname = op.join(str(tmp_path), "records.csv") + _update_sleep_temazepam_records(fname) + data = pd.read_csv(fname) + + pd.testing.assert_frame_equal( + data, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS)) + + +def test_sleep_physionet_temazepam(physionet_tmpdir, monkeypatch): + """Test Sleep Physionet URL handling.""" + my_func = _FakeFetch() + monkeypatch.setattr(pooch, 'retrieve', my_func) + + paths = temazepam.fetch_data(subjects=[0], path=physionet_tmpdir) + assert_array_equal(_keep_basename_only(paths), + [['ST7011J0-PSG.edf', 'ST7011JP-Hypnogram.edf']]) + + EXPECTED_CALLS = ( + {'name': 'ST7011J0-PSG.edf', + 'hash': 'b9d11484126ebff1884034396d6a20c62c0ef48d'}, + {'name': 'ST7011JP-Hypnogram.edf', + 'hash': 'ff28e5e01296cefed49ae0c27cfb3ebc42e710bf'}) + base_path = temazepam.data_path(path=physionet_tmpdir) + _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) + + with pytest.raises( + ValueError, match='This dataset contains subjects 0 to 21'): + paths = temazepam.fetch_data(subjects=[22], path=physionet_tmpdir) diff --git a/python/libs/mne/datasets/somato/__init__.py b/python/libs/mne/datasets/somato/__init__.py new file mode 100644 index 0000000..4777bbe --- /dev/null +++ b/python/libs/mne/datasets/somato/__init__.py @@ -0,0 +1,3 @@ +"""Somatosensory dataset.""" + +from .somato import data_path, get_version diff --git a/python/libs/mne/datasets/somato/somato.py b/python/libs/mne/datasets/somato/somato.py new file mode 100644 index 0000000..db69ac4 --- /dev/null +++ b/python/libs/mne/datasets/somato/somato.py @@ -0,0 +1,28 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. 
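+#
+# Usage sketch (illustrative only; the exact file layout under the
+# download directory is an assumption, not something this module defines):
+#
+#     from mne.datasets import somato
+#     root = somato.data_path()  # downloads on first call, then caches
+#     raw_fname = root / 'sub-01' / 'meg' / 'sub-01_task-somato_meg.fif'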
+ +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='somato', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='somato', + conf='MNE_DATASETS_SOMATO_PATH') + + +def get_version(): # noqa: D103 + return _get_version('somato') + + +get_version.__doc__ = _version_doc.format(name='somato') diff --git a/python/libs/mne/datasets/spm_face/__init__.py b/python/libs/mne/datasets/spm_face/__init__.py new file mode 100644 index 0000000..dfe2edd --- /dev/null +++ b/python/libs/mne/datasets/spm_face/__init__.py @@ -0,0 +1,3 @@ +"""SPM face dataset.""" + +from .spm_data import data_path, has_spm_data, get_version, requires_spm_data diff --git a/python/libs/mne/datasets/spm_face/spm_data.py b/python/libs/mne/datasets/spm_face/spm_data.py new file mode 100644 index 0000000..2c71fc1 --- /dev/null +++ b/python/libs/mne/datasets/spm_face/spm_data.py @@ -0,0 +1,46 @@ +# Authors: Denis Engemann +# +# License: BSD Style. + +from functools import partial + +from ...utils import verbose, get_config +from ..utils import (has_dataset, _data_path_doc, _get_version, + _version_doc, _download_mne_dataset) + + +has_spm_data = partial(has_dataset, name='spm') + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='spm', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='spm', + conf='MNE_DATASETS_SPM_DATA_PATH') + + +def get_version(): # noqa: D103 + return _get_version('spm') + + +get_version.__doc__ = _version_doc.format(name='spm') + + +def _skip_spm_data(): + skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == + 'true') + skip = skip_testing or not has_spm_data() + return skip + + +def requires_spm_data(func): + """Skip testing data test.""" + import pytest + return pytest.mark.skipif(_skip_spm_data(), + reason='Requires spm dataset')(func) diff --git a/python/libs/mne/datasets/ssvep/__init__.py b/python/libs/mne/datasets/ssvep/__init__.py new file mode 100644 index 0000000..a7a3d1d --- /dev/null +++ b/python/libs/mne/datasets/ssvep/__init__.py @@ -0,0 +1,3 @@ +"""SSVEP dataset.""" + +from .ssvep import data_path, get_version diff --git a/python/libs/mne/datasets/ssvep/ssvep.py b/python/libs/mne/datasets/ssvep/ssvep.py new file mode 100644 index 0000000..d914ec9 --- /dev/null +++ b/python/libs/mne/datasets/ssvep/ssvep.py @@ -0,0 +1,26 @@ +# Authors: Dominik Welke +# License: BSD Style. 
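+#
+# Unlike somato above (untarred), this dataset is shipped as a zip
+# archive, hence processor='unzip' in data_path() below. A hypothetical
+# call (the BIDS-style path is an assumption about the extracted layout):
+#
+#     from mne.datasets import ssvep
+#     root = ssvep.data_path()
+#     fname = (root / 'sub-02' / 'ses-01' / 'eeg' /
+#              'sub-02_ses-01_task-ssvep_eeg.vhdr')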
+ +from ...utils import verbose +from ..utils import (_data_path_doc, _get_version, _version_doc, + _download_mne_dataset) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + return _download_mne_dataset( + name='ssvep', processor='unzip', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='ssvep', + conf='MNE_DATASETS_SSVEP_PATH') + + +def get_version(): # noqa: D103 + return _get_version('ssvep') + + +get_version.__doc__ = _version_doc.format(name='ssvep') diff --git a/python/libs/mne/datasets/testing/__init__.py b/python/libs/mne/datasets/testing/__init__.py new file mode 100644 index 0000000..b56c5f3 --- /dev/null +++ b/python/libs/mne/datasets/testing/__init__.py @@ -0,0 +1,4 @@ +"""MNE testing dataset.""" + +from ._testing import (data_path, requires_testing_data, get_version, + _pytest_param, _pytest_mark) diff --git a/python/libs/mne/datasets/testing/_testing.py b/python/libs/mne/datasets/testing/_testing.py new file mode 100644 index 0000000..7332fb5 --- /dev/null +++ b/python/libs/mne/datasets/testing/_testing.py @@ -0,0 +1,68 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Eric Larson +# License: BSD Style. + +from functools import partial + +from ...utils import verbose, get_config +from ..utils import (has_dataset, _data_path_doc, _get_version, + _version_doc, _download_mne_dataset) + +has_testing_data = partial(has_dataset, name='testing') + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): # noqa: D103 + # Make sure we don't do something stupid + if download and \ + get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == 'true': + raise RuntimeError('Cannot download data if skipping is forced') + + return _download_mne_dataset( + name='testing', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='testing', + conf='MNE_DATASETS_TESTING_PATH') + + +def get_version(): # noqa: D103 + return _get_version('testing') + + +get_version.__doc__ = _version_doc.format(name='testing') + + +# Allow forcing of testing dataset skip (for Debian tests) using: +# `make test-no-testing-data` +def _skip_testing_data(): + skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == + 'true') + skip = skip_testing or not has_testing_data() + return skip + + +def requires_testing_data(func): + """Skip testing data test.""" + return _pytest_mark()(func) + + +def _pytest_param(*args, **kwargs): + if len(args) == 0: + args = ('testing_data',) + import pytest + # turn anything that uses testing data into an auto-skipper by + # setting params=[testing._pytest_param()], or by parametrizing functions + # with testing._pytest_param(whatever) + kwargs['marks'] = kwargs.get('marks', list()) + [_pytest_mark()] + return pytest.param(*args, **kwargs) + + +def _pytest_mark(): + import pytest + return pytest.mark.skipif( + _skip_testing_data(), reason='Requires testing dataset') diff --git a/python/libs/mne/datasets/tests/__init__.py b/python/libs/mne/datasets/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/datasets/tests/test_datasets.py b/python/libs/mne/datasets/tests/test_datasets.py new file mode 100644 index 0000000..37e1030 --- /dev/null +++ b/python/libs/mne/datasets/tests/test_datasets.py @@ -0,0 +1,280 @@ +from 
functools import partial +import os +from os import path as op +import re +import shutil +import zipfile + +import pooch +import pytest + +from mne import datasets, read_labels_from_annot, write_labels_to_annot +from mne.datasets import (testing, fetch_infant_template, fetch_phantom, + fetch_dataset) +from mne.datasets._fsaverage.base import _set_montage_coreg_path +from mne.datasets._infant import base as infant_base +from mne.datasets._phantom import base as phantom_base +from mne.datasets.utils import _manifest_check_download, _mne_path + +from mne.utils import (requires_good_network, + get_subjects_dir, ArgvSetter, _pl, use_log_level, + catch_logging, hashfunc) + + +subjects_dir = testing.data_path(download=False) / 'subjects' + + +def test_datasets_basic(tmp_path, monkeypatch): + """Test simple dataset functions.""" + # XXX 'hf_sef' and 'misc' do not conform to these standards + for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', + 'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal', + 'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword', + 'mtrf', 'phantom_4dbti', + 'visual_92_categories', 'fieldtrip_cmc'): + if dname.startswith('bst'): + dataset = getattr(datasets.brainstorm, dname) + else: + dataset = getattr(datasets, dname) + if str(dataset.data_path(download=False)) != '.': + assert isinstance(dataset.get_version(), str) + assert datasets.has_dataset(dname) + else: + assert dataset.get_version() is None + assert not datasets.has_dataset(dname) + print('%s: %s' % (dname, datasets.has_dataset(dname))) + tempdir = str(tmp_path) + # Explicitly test one that isn't preset (given the config) + monkeypatch.setenv('MNE_DATASETS_SAMPLE_PATH', tempdir) + dataset = datasets.sample + assert str(dataset.data_path(download=False)) == '.' + assert dataset.get_version() != '' + assert dataset.get_version() is None + # don't let it read from the config file to get the directory, + # force it to look for the default + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', tempdir) + monkeypatch.delenv('SUBJECTS_DIR', raising=False) + assert (str(datasets.utils._get_path(None, 'foo', 'bar')) == + op.join(tempdir, 'mne_data')) + assert get_subjects_dir(None) is None + _set_montage_coreg_path() + sd = get_subjects_dir() + assert sd.endswith('MNE-fsaverage-data') + monkeypatch.setenv('MNE_DATA', str(tmp_path / 'foo')) + with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): + testing.data_path(download=False) + + +@requires_good_network +def test_downloads(tmp_path, monkeypatch, capsys): + """Test dataset URL and version handling.""" + # Try actually downloading a dataset + kwargs = dict(path=str(tmp_path), verbose=True) + # XXX we shouldn't need to disable capsys here, but there's a pytest bug + # that we're hitting (https://github.com/pytest-dev/pytest/issues/5997) + # now that we use pooch + with capsys.disabled(): + path = datasets._fake.data_path(update_path=False, **kwargs) + assert op.isdir(path) + assert op.isfile(op.join(path, 'bar')) + assert not datasets.has_dataset('fake') # not in the desired path + assert datasets._fake.get_version() is None + assert datasets.utils._get_version('fake') is None + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) + with pytest.warns(RuntimeWarning, match='non-standard config'): + new_path = datasets._fake.data_path(update_path=True, **kwargs) + assert path == new_path + out, _ = capsys.readouterr() + assert 'Downloading' not in out + # No version: shown as existing but unknown version + assert datasets.has_dataset('fake') + # XXX 
logic bug, should be "unknown"
+    assert datasets._fake.get_version() == '0.0'
+    # With a version but no required one: shown as existing and gives version
+    fname = tmp_path / 'foo' / 'version.txt'
+    with open(fname, 'w') as fid:
+        fid.write('0.1')
+    assert datasets.has_dataset('fake')
+    assert datasets._fake.get_version() == '0.1'
+    datasets._fake.data_path(download=False, **kwargs)
+    out, _ = capsys.readouterr()
+    assert 'out of date' not in out
+    # With the required version: shown as existing with the required version
+    monkeypatch.setattr(datasets._fetch, '_FAKE_VERSION', '0.1')
+    assert datasets.has_dataset('fake')
+    assert datasets._fake.get_version() == '0.1'
+    datasets._fake.data_path(download=False, **kwargs)
+    out, _ = capsys.readouterr()
+    assert 'out of date' not in out
+    monkeypatch.setattr(datasets._fetch, '_FAKE_VERSION', '0.2')
+    # With an older version:
+    # 1. Marked as not actually being present
+    assert not datasets.has_dataset('fake')
+    # 2. Will try to update when `data_path` gets called, with logged message
+    want_msg = 'Correctly trying to download newer version'
+
+    def _error_download(self, fname, downloader, processor):
+        url = self.get_url(fname)
+        full_path = self.abspath / fname
+        assert 'foo.tgz' in url
+        assert str(tmp_path) in str(full_path)
+        raise RuntimeError(want_msg)
+
+    monkeypatch.setattr(pooch.Pooch, 'fetch', _error_download)
+    with pytest.raises(RuntimeError, match=want_msg):
+        datasets._fake.data_path(**kwargs)
+    out, _ = capsys.readouterr()
+    assert re.match(r'.* 0\.1 .*out of date.* 0\.2.*', out, re.MULTILINE), out
+
+
+@pytest.mark.slowtest
+@testing.requires_testing_data
+@requires_good_network
+def test_fetch_parcellations(tmp_path):
+    """Test fetching parcellations."""
+    this_subjects_dir = str(tmp_path)
+    os.mkdir(op.join(this_subjects_dir, 'fsaverage'))
+    os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'label'))
+    os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'surf'))
+    for hemi in ('lh', 'rh'):
+        shutil.copyfile(
+            op.join(subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi),
+            op.join(this_subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi))
+    # speed up by pretending we have one of them
+    with open(op.join(this_subjects_dir, 'fsaverage', 'label',
+                      'lh.aparc_sub.annot'), 'wb'):
+        pass
+    datasets.fetch_aparc_sub_parcellation(subjects_dir=this_subjects_dir)
+    with ArgvSetter(('--accept-hcpmmp-license',)):
+        datasets.fetch_hcp_mmp_parcellation(subjects_dir=this_subjects_dir)
+    for hemi in ('lh', 'rh'):
+        assert op.isfile(op.join(this_subjects_dir, 'fsaverage', 'label',
+                                 '%s.aparc_sub.annot' % hemi))
+    # test our annot round-trips here
+    kwargs = dict(subject='fsaverage', hemi='both', sort=False,
+                  subjects_dir=this_subjects_dir)
+    labels = read_labels_from_annot(parc='HCPMMP1', **kwargs)
+    write_labels_to_annot(
+        labels, parc='HCPMMP1_round',
+        table_name='./left.fsaverage164.label.gii', **kwargs)
+    orig = op.join(this_subjects_dir, 'fsaverage', 'label', 'lh.HCPMMP1.annot')
+    first = hashfunc(orig)
+    new = orig[:-6] + '_round.annot'
+    second = hashfunc(new)
+    assert first == second
+
+
+_zip_fnames = ['foo/foo.txt', 'foo/bar.txt', 'foo/baz.txt']
+
+
+def _fake_zip_fetch(url, path, fname, known_hash):
+    fname = op.join(path, fname)
+    with zipfile.ZipFile(fname, 'w') as zipf:
+        with zipf.open('foo/', 'w'):
+            pass
+        for fname in _zip_fnames:
+            with zipf.open(fname, 'w'):
+                pass
+
+
+@pytest.mark.parametrize('n_have', range(len(_zip_fnames)))
+def test_manifest_check_download(tmp_path, n_have, monkeypatch):
+    """Test our manifest 
downloader.""" + monkeypatch.setattr(pooch, 'retrieve', _fake_zip_fetch) + destination = op.join(str(tmp_path), 'empty') + manifest_path = op.join(str(tmp_path), 'manifest.txt') + with open(manifest_path, 'w') as fid: + for fname in _zip_fnames: + fid.write('%s\n' % fname) + assert n_have in range(len(_zip_fnames) + 1) + assert not op.isdir(destination) + if n_have > 0: + os.makedirs(op.join(destination, 'foo')) + assert op.isdir(op.join(destination, 'foo')) + for fname in _zip_fnames: + assert not op.isfile(op.join(destination, fname)) + for fname in _zip_fnames[:n_have]: + with open(op.join(destination, fname), 'w'): + pass + with catch_logging() as log: + with use_log_level(True): + # we mock the pooch.retrieve so these are not used + url = hash_ = '' + _manifest_check_download(manifest_path, destination, url, hash_) + log = log.getvalue() + n_missing = 3 - n_have + assert ('%d file%s missing from' % (n_missing, _pl(n_missing))) in log + for want in ('Extracting missing', 'Successfully '): + if n_missing > 0: + assert want in log + else: + assert want not in log + assert op.isdir(destination) + for fname in _zip_fnames: + assert op.isfile(op.join(destination, fname)) + + +def _fake_mcd(manifest_path, destination, url, hash_, name=None, + fake_files=False): + if name is None: + name = url.split('/')[-1].split('.')[0] + assert name in url + assert name in destination + assert name in manifest_path + assert len(hash_) == 32 + if fake_files: + with open(manifest_path) as fid: + for path in fid: + path = path.strip() + if not path: + continue + fname = op.join(destination, path) + os.makedirs(op.dirname(fname), exist_ok=True) + with open(fname, 'wb'): + pass + + +def test_infant(tmp_path, monkeypatch): + """Test fetch_infant_template.""" + monkeypatch.setattr(infant_base, '_manifest_check_download', _fake_mcd) + fetch_infant_template('12mo', subjects_dir=tmp_path) + with pytest.raises(ValueError, match='Invalid value for'): + fetch_infant_template('0mo', subjects_dir=tmp_path) + + +def test_phantom(tmp_path, monkeypatch): + """Test phantom data downloading.""" + # The Otaniemi file is only ~6MB, so in principle maybe we could test + # an actual download here. But it doesn't seem worth it given that + # CircleCI will at least test the VectorView one, and this file should + # not change often. + monkeypatch.setattr(phantom_base, '_manifest_check_download', + partial(_fake_mcd, name='phantom_otaniemi', + fake_files=True)) + fetch_phantom('otaniemi', subjects_dir=tmp_path) + assert op.isfile(tmp_path / 'phantom_otaniemi' / 'mri' / 'T1.mgz') + + +def test_fetch_uncompressed_file(tmp_path): + """Test downloading an uncompressed file with our fetch function.""" + dataset_dict = dict( + dataset_name='license', + url=('https://raw.githubusercontent.com/mne-tools/mne-python/main/' + 'LICENSE.txt'), + archive_name='LICENSE.foo', + folder_name=op.join(tmp_path, 'foo'), + hash=None) + fetch_dataset(dataset_dict, path=None, force_update=True) + assert (tmp_path / 'foo' / 'LICENSE.foo').is_file() + + +def test_mne_path(): + """Test our Path wrapping.""" + path = _mne_path("") + assert str(path) == '.' 
+    with pytest.deprecated_call(match=r'pathlib\.Path object'):
+        assert path + 'me' == f'.{op.sep}me'
+    assert str(path / 'me') == 'me'
+    assert str('me' / path) == 'me'
+    with pytest.raises(TypeError, match='can only concatenate str'):
+        'me' + path  # our paths were absolute, so this should raise an error diff --git a/python/libs/mne/datasets/utils.py b/python/libs/mne/datasets/utils.py new file mode 100644 index 0000000..cba8e18 --- /dev/null +++ b/python/libs/mne/datasets/utils.py @@ -0,0 +1,556 @@
+# Authors: Alexandre Gramfort
+#          Martin Luessi
+#          Eric Larson
+#          Denis Engemann
+#          Stefan Appelhoff
+#          Adam Li
+#          Daniel McCloy
+#
+# License: BSD Style.
+
+from collections import OrderedDict
+import os
+import os.path as op
+from pathlib import WindowsPath, PosixPath
+import sys
+import zipfile
+import tempfile
+
+import numpy as np
+
+from .config import _hcp_mmp_license_text, MNE_DATASETS
+from ..label import read_labels_from_annot, Label, write_labels_to_annot
+from ..utils import (get_config, set_config, logger, _validate_type, warn,
+                     verbose, get_subjects_dir, _pl, _safe_input)
+from ..utils.docs import docdict, _docformat
+
+
+_data_path_doc = """Get path to local copy of {name} dataset.
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the {name} dataset.
+        If None, the environment variable or config parameter
+        ``{conf}`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the {name} dataset
+        is not found under the given path, the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the {name} dataset even if a local copy exists.
+        Default is False.
+    update_path : bool | None
+        If True (default), set the ``{conf}`` in mne-python
+        config to the given path. If None, the user is prompted.
+    download : bool
+        If False and the {name} dataset has not been downloaded yet,
+        it will not be downloaded and the path will be returned as
+        '' (empty string). This is mostly used for debugging purposes
+        and can be safely ignored by most users.
+    %(verbose)s
+
+    Returns
+    -------
+    path : instance of Path
+        Path to {name} dataset directory.
+"""
+_data_path_doc_accept = _data_path_doc.split('%(verbose)s')
+_data_path_doc_accept[-1] = '%(verbose)s' + _data_path_doc_accept[-1]
+_data_path_doc_accept.insert(1, '    %(accept)s')
+_data_path_doc_accept = ''.join(_data_path_doc_accept)
+_data_path_doc = _docformat(_data_path_doc, docdict)
+_data_path_doc_accept = _docformat(_data_path_doc_accept, docdict)
+
+_version_doc = """Get version of the local {name} dataset.
+
+    Returns
+    -------
+    version : str | None
+        Version of the {name} local dataset, or None if the dataset
+        does not exist locally.
+"""
+
+
+def _dataset_version(path, name):
+    """Get the version of the dataset."""
+    ver_fname = op.join(path, 'version.txt')
+    if op.exists(ver_fname):
+        with open(ver_fname, 'r') as fid:
+            version = fid.readline().strip()  # version is on first line
+    else:
+        logger.debug(f'Version file missing: {ver_fname}')
+        # Sample dataset versioning was introduced after 0.3
+        # SPM dataset was introduced with 0.7
+        versions = dict(sample='0.3', spm='0.7')
+        version = versions.get(name, '0.0')
+    return version
+
+
+def _get_path(path, key, name):
+    """Get a dataset path."""
+    # 1. Input
+    _validate_type(path, ('path-like', None), path)
+    if path is not None:
+        return path
+    # 2. get_config(key) — unless key is None or "" (special get_config values)
+    # 3. 
get_config('MNE_DATA') + path = get_config(key or 'MNE_DATA', get_config('MNE_DATA')) + if path is not None: + if not op.exists(path): + msg = (f"Download location {path} as specified by MNE_DATA does " + f"not exist. Either create this directory manually and try " + f"again, or set MNE_DATA to an existing directory.") + raise FileNotFoundError(msg) + return _mne_path(path) + # 4. ~/mne_data (but use a fake home during testing so we don't + # unnecessarily create ~/mne_data) + logger.info('Using default location ~/mne_data for %s...' % name) + path = op.join(os.getenv('_MNE_FAKE_HOME_DIR', + op.expanduser("~")), 'mne_data') + if not op.exists(path): + logger.info('Creating ~/mne_data') + try: + os.mkdir(path) + except OSError: + raise OSError("User does not have write permissions " + "at '%s', try giving the path as an " + "argument to data_path() where user has " + "write permissions, for ex:data_path" + "('/home/xyz/me2/')" % (path)) + return _mne_path(path) + + +def _do_path_update(path, update_path, key, name): + """Update path.""" + path = op.abspath(path) + identical = get_config(key, '', use_env=False) == path + if not identical: + if update_path is None: + update_path = True + if '--update-dataset-path' in sys.argv: + answer = 'y' + else: + msg = ('Do you want to set the path:\n %s\nas the default ' + '%s dataset path in the mne-python config [y]/n? ' + % (path, name)) + answer = _safe_input(msg, alt='pass update_path=True') + if answer.lower() == 'n': + update_path = False + + if update_path: + set_config(key, str(path), set_env=False) + return path + + +def _download_mne_dataset(name, processor, path, force_update, + update_path, download, accept=False): + """Aux function for downloading internal MNE datasets.""" + import pooch + from mne.datasets._fetch import fetch_dataset + + # import pooch library for handling the dataset downloading + dataset_params = MNE_DATASETS[name] + dataset_params['dataset_name'] = name + config_key = MNE_DATASETS[name]['config_key'] + folder_name = MNE_DATASETS[name]['folder_name'] + + # get download path for specific dataset + path = _get_path(path=path, key=config_key, name=name) + + # instantiate processor that unzips file + if processor == 'nested_untar': + processor_ = pooch.Untar(extract_dir=op.join(path, folder_name)) + elif processor == 'nested_unzip': + processor_ = pooch.Unzip(extract_dir=op.join(path, folder_name)) + else: + processor_ = processor + + # handle case of multiple sub-datasets with different urls + if name == 'visual_92_categories': + dataset_params = [] + for name in ['visual_92_categories_1', 'visual_92_categories_2']: + this_dataset = MNE_DATASETS[name] + this_dataset['dataset_name'] = name + dataset_params.append(this_dataset) + + return fetch_dataset(dataset_params=dataset_params, processor=processor_, + path=path, force_update=force_update, + update_path=update_path, download=download, + accept=accept) + + +def _get_version(name): + """Get a dataset version.""" + from mne.datasets._fetch import fetch_dataset + + if not has_dataset(name): + return None + dataset_params = MNE_DATASETS[name] + dataset_params['dataset_name'] = name + config_key = MNE_DATASETS[name]['config_key'] + + # get download path for specific dataset + path = _get_path(path=None, key=config_key, name=name) + + return fetch_dataset(dataset_params, path=path, + return_version=True)[1] + + +def has_dataset(name): + """Check for presence of a dataset. + + Parameters + ---------- + name : str | dict + The dataset to check. 
Strings refer to one of the supported datasets
+        listed :ref:`here `. A :class:`dict` can be used to check for
+        user-defined datasets (see the Notes section of :func:`fetch_dataset`),
+        and must contain keys ``dataset_name``, ``archive_name``, ``url``,
+        ``folder_name``, ``hash``.
+
+    Returns
+    -------
+    has : bool
+        True if the dataset is present.
+    """
+    from mne.datasets._fetch import fetch_dataset
+
+    if isinstance(name, dict):
+        dataset_name = name['dataset_name']
+        dataset_params = name
+    else:
+        dataset_name = 'spm' if name == 'spm_face' else name
+        dataset_params = MNE_DATASETS[dataset_name]
+        dataset_params['dataset_name'] = dataset_name
+
+    config_key = dataset_params['config_key']
+
+    # get download path for specific dataset
+    path = _get_path(path=None, key=config_key, name=dataset_name)
+
+    dp = fetch_dataset(dataset_params, path=path, download=False,
+                       check_version=False)
+    if dataset_name.startswith('bst_'):
+        check = dataset_name
+    else:
+        check = MNE_DATASETS[dataset_name]['folder_name']
+    return str(dp).endswith(check)
+
+
+@verbose
+def _download_all_example_data(verbose=True):
+    """Download all datasets used in examples and tutorials."""
+    # This function is designed primarily to be used by CircleCI, to:
+    #
+    # 1. Streamline data downloading
+    # 2. Make CircleCI fail early (rather than later) if some necessary data
+    #    cannot be retrieved.
+    # 3. Avoid download statuses and timing biases in rendered examples.
+    #
+    # verbose=True by default so we get nice status messages.
+    # Consider adding datasets from here to CircleCI for PR-auto-build
+    from . import (sample, testing, misc, spm_face, somato, brainstorm,
+                   eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc,
+                   kiloword, phantom_4dbti, sleep_physionet, limo,
+                   fnirs_motor, refmeg_noise, fetch_infant_template,
+                   fetch_fsaverage, ssvep, erp_core, epilepsy_ecog,
+                   fetch_phantom)
+    sample_path = sample.data_path()
+    testing.data_path()
+    misc.data_path()
+    spm_face.data_path()
+    somato.data_path()
+    hf_sef.data_path()
+    multimodal.data_path()
+    fnirs_motor.data_path()
+    opm.data_path()
+    mtrf.data_path()
+    fieldtrip_cmc.data_path()
+    kiloword.data_path()
+    phantom_4dbti.data_path()
+    refmeg_noise.data_path()
+    ssvep.data_path()
+    epilepsy_ecog.data_path()
+    brainstorm.bst_raw.data_path(accept=True)
+    brainstorm.bst_auditory.data_path(accept=True)
+    brainstorm.bst_resting.data_path(accept=True)
+    phantom_path = brainstorm.bst_phantom_elekta.data_path(accept=True)
+    fetch_phantom('otaniemi', subjects_dir=phantom_path)
+    brainstorm.bst_phantom_ctf.data_path(accept=True)
+    eegbci.load_data(1, [6, 10, 14], update_path=True)
+    for subj in range(4):
+        eegbci.load_data(subj + 1, runs=[3], update_path=True)
+    sleep_physionet.age.fetch_data(subjects=[0, 1], recording=[1])
+    # If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one
+    # (probably on CircleCI, or otherwise advanced user)
+    fetch_fsaverage(None)
+    fetch_infant_template('6mo')
+    fetch_hcp_mmp_parcellation(
+        subjects_dir=sample_path / 'subjects', accept=True)
+    limo.load_data(subject=1, update_path=True)
+
+    erp_core.data_path()
+
+
+@verbose
+def fetch_aparc_sub_parcellation(subjects_dir=None, verbose=None):
+    """Fetch the modified subdivided aparc parcellation.
+
+    This will download and install the subdivided aparc parcellation
+    :footcite:`KhanEtAl2018` files for
+    FreeSurfer's fsaverage to the specified directory.
+
+    Parameters
+    ----------
+    subjects_dir : str | None
+        The subjects directory to use. 
The file will be placed in + ``subjects_dir + '/fsaverage/label'``. + %(verbose)s + + References + ---------- + .. footbibliography:: + """ + import pooch + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + destination = op.join(subjects_dir, 'fsaverage', 'label') + urls = dict(lh='https://osf.io/p92yb/download', + rh='https://osf.io/4kxny/download') + hashes = dict(lh='9e4d8d6b90242b7e4b0145353436ef77', + rh='dd6464db8e7762d969fc1d8087cd211b') + for hemi in ('lh', 'rh'): + fname = f'{hemi}.aparc_sub.annot' + fpath = op.join(destination, fname) + if not op.isfile(fpath): + pooch.retrieve( + url=urls[hemi], + known_hash=f"md5:{hashes[hemi]}", + path=destination, + fname=fname + ) + + +@verbose +def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, + accept=False, verbose=None): + """Fetch the HCP-MMP parcellation. + + This will download and install the HCP-MMP parcellation + :footcite:`GlasserEtAl2016` files for FreeSurfer's fsaverage + :footcite:`Mills2016` to the specified directory. + + Parameters + ---------- + subjects_dir : str | None + The subjects directory to use. The file will be placed in + ``subjects_dir + '/fsaverage/label'``. + combine : bool + If True, also produce the combined/reduced set of 23 labels per + hemisphere as ``HCPMMP1_combined.annot`` + :footcite:`GlasserEtAl2016supp`. + %(accept)s + %(verbose)s + + Notes + ----- + Use of this parcellation is subject to terms of use on the + `HCP-MMP webpage `_. + + References + ---------- + .. footbibliography:: + """ + import pooch + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + destination = op.join(subjects_dir, 'fsaverage', 'label') + fnames = [op.join(destination, '%s.HCPMMP1.annot' % hemi) + for hemi in ('lh', 'rh')] + urls = dict(lh='https://ndownloader.figshare.com/files/5528816', + rh='https://ndownloader.figshare.com/files/5528819') + hashes = dict(lh='46a102b59b2fb1bb4bd62d51bf02e975', + rh='75e96b331940227bbcb07c1c791c2463') + if not all(op.isfile(fname) for fname in fnames): + if accept or '--accept-hcpmmp-license' in sys.argv: + answer = 'y' + else: + answer = _safe_input('%s\nAgree (y/[n])? 
' % _hcp_mmp_license_text) + if answer.lower() != 'y': + raise RuntimeError('You must agree to the license to use this ' + 'dataset') + for hemi, fpath in zip(('lh', 'rh'), fnames): + if not op.isfile(fpath): + fname = op.basename(fpath) + pooch.retrieve( + url=urls[hemi], + known_hash=f"md5:{hashes[hemi]}", + path=destination, + fname=fname + ) + + if combine: + fnames = [op.join(destination, '%s.HCPMMP1_combined.annot' % hemi) + for hemi in ('lh', 'rh')] + if all(op.isfile(fname) for fname in fnames): + return + # otherwise, let's make them + logger.info('Creating combined labels') + groups = OrderedDict([ + ('Primary Visual Cortex (V1)', + ('V1',)), + ('Early Visual Cortex', + ('V2', 'V3', 'V4')), + ('Dorsal Stream Visual Cortex', + ('V3A', 'V3B', 'V6', 'V6A', 'V7', 'IPS1')), + ('Ventral Stream Visual Cortex', + ('V8', 'VVC', 'PIT', 'FFC', 'VMV1', 'VMV2', 'VMV3')), + ('MT+ Complex and Neighboring Visual Areas', + ('V3CD', 'LO1', 'LO2', 'LO3', 'V4t', 'FST', 'MT', 'MST', 'PH')), + ('Somatosensory and Motor Cortex', + ('4', '3a', '3b', '1', '2')), + ('Paracentral Lobular and Mid Cingulate Cortex', + ('24dd', '24dv', '6mp', '6ma', 'SCEF', '5m', '5L', '5mv',)), + ('Premotor Cortex', + ('55b', '6d', '6a', 'FEF', '6v', '6r', 'PEF')), + ('Posterior Opercular Cortex', + ('43', 'FOP1', 'OP4', 'OP1', 'OP2-3', 'PFcm')), + ('Early Auditory Cortex', + ('A1', 'LBelt', 'MBelt', 'PBelt', 'RI')), + ('Auditory Association Cortex', + ('A4', 'A5', 'STSdp', 'STSda', 'STSvp', 'STSva', 'STGa', 'TA2',)), + ('Insular and Frontal Opercular Cortex', + ('52', 'PI', 'Ig', 'PoI1', 'PoI2', 'FOP2', 'FOP3', + 'MI', 'AVI', 'AAIC', 'Pir', 'FOP4', 'FOP5')), + ('Medial Temporal Cortex', + ('H', 'PreS', 'EC', 'PeEc', 'PHA1', 'PHA2', 'PHA3',)), + ('Lateral Temporal Cortex', + ('PHT', 'TE1p', 'TE1m', 'TE1a', 'TE2p', 'TE2a', + 'TGv', 'TGd', 'TF',)), + ('Temporo-Parieto-Occipital Junction', + ('TPOJ1', 'TPOJ2', 'TPOJ3', 'STV', 'PSL',)), + ('Superior Parietal Cortex', + ('LIPv', 'LIPd', 'VIP', 'AIP', 'MIP', + '7PC', '7AL', '7Am', '7PL', '7Pm',)), + ('Inferior Parietal Cortex', + ('PGp', 'PGs', 'PGi', 'PFm', 'PF', 'PFt', 'PFop', + 'IP0', 'IP1', 'IP2',)), + ('Posterior Cingulate Cortex', + ('DVT', 'ProS', 'POS1', 'POS2', 'RSC', 'v23ab', 'd23ab', + '31pv', '31pd', '31a', '23d', '23c', 'PCV', '7m',)), + ('Anterior Cingulate and Medial Prefrontal Cortex', + ('33pr', 'p24pr', 'a24pr', 'p24', 'a24', 'p32pr', 'a32pr', 'd32', + 'p32', 's32', '8BM', '9m', '10v', '10r', '25',)), + ('Orbital and Polar Frontal Cortex', + ('47s', '47m', 'a47r', '11l', '13l', + 'a10p', 'p10p', '10pp', '10d', 'OFC', 'pOFC',)), + ('Inferior Frontal Cortex', + ('44', '45', 'IFJp', 'IFJa', 'IFSp', 'IFSa', '47l', 'p47r',)), + ('DorsoLateral Prefrontal Cortex', + ('8C', '8Av', 'i6-8', 's6-8', 'SFL', '8BL', '9p', '9a', '8Ad', + 'p9-46v', 'a9-46v', '46', '9-46d',)), + ('???', + ('???',))]) + assert len(groups) == 23 + labels_out = list() + + for hemi in ('lh', 'rh'): + labels = read_labels_from_annot('fsaverage', 'HCPMMP1', hemi=hemi, + subjects_dir=subjects_dir, + sort=False) + label_names = [ + '???' 
if label.name.startswith('???') else + label.name.split('_')[1] for label in labels] + used = np.zeros(len(labels), bool) + for key, want in groups.items(): + assert '\t' not in key + these_labels = [li for li, label_name in enumerate(label_names) + if label_name in want] + assert not used[these_labels].any() + assert len(these_labels) == len(want) + used[these_labels] = True + these_labels = [labels[li] for li in these_labels] + # take a weighted average to get the color + # (here color == task activation) + w = np.array([len(label.vertices) for label in these_labels]) + w = w / float(w.sum()) + color = np.dot(w, [label.color for label in these_labels]) + these_labels = sum(these_labels, + Label([], subject='fsaverage', hemi=hemi)) + these_labels.name = key + these_labels.color = color + labels_out.append(these_labels) + assert used.all() + assert len(labels_out) == 46 + for hemi, side in (('lh', 'left'), ('rh', 'right')): + table_name = './%s.fsaverage164.label.gii' % (side,) + write_labels_to_annot(labels_out, 'fsaverage', 'HCPMMP1_combined', + hemi=hemi, subjects_dir=subjects_dir, + sort=False, table_name=table_name) + + +def _manifest_check_download(manifest_path, destination, url, hash_): + import pooch + + with open(manifest_path, 'r') as fid: + names = [name.strip() for name in fid.readlines()] + manifest_path = op.basename(manifest_path) + need = list() + for name in names: + if not op.isfile(op.join(destination, name)): + need.append(name) + logger.info('%d file%s missing from %s in %s' + % (len(need), _pl(need), manifest_path, destination)) + if len(need) > 0: + with tempfile.TemporaryDirectory() as path: + logger.info('Downloading missing files remotely') + + fname_path = op.join(path, 'temp.zip') + pooch.retrieve( + url=url, + known_hash=f"md5:{hash_}", + path=path, + fname=op.basename(fname_path) + ) + + logger.info('Extracting missing file%s' % (_pl(need),)) + with zipfile.ZipFile(fname_path, 'r') as ff: + members = set(f for f in ff.namelist() if not f.endswith('/')) + missing = sorted(members.symmetric_difference(set(names))) + if len(missing): + raise RuntimeError('Zip file did not have correct names:' + '\n%s' % ('\n'.join(missing))) + for name in need: + ff.extract(name, path=destination) + logger.info('Successfully extracted %d file%s' + % (len(need), _pl(need))) + + +# Adapted from pathlib.Path.__new__ +def _mne_path(path): + klass = MNEWindowsPath if os.name == 'nt' else MNEPosixPath + out = klass._from_parts((path,)) + if not out._flavour.is_supported: + raise NotImplementedError("cannot instantiate %r on your system" + % (klass.__name__,)) + return out + + +class _PathAdd: + + def __add__(self, other): + if isinstance(other, str): + warn('data_path functions now return pathlib.Path objects which ' + 'do not natively support the plus (+) operator, switch to ' + 'using forward slash (/) instead. 
Support for plus will be ' + 'removed in 1.2.', DeprecationWarning) + return f'{str(self)}{op.sep}{other}' + raise NotImplementedError + + +class MNEWindowsPath(_PathAdd, WindowsPath): # noqa: D101 + pass + + +class MNEPosixPath(_PathAdd, PosixPath): # noqa: D101 + pass diff --git a/python/libs/mne/datasets/visual_92_categories/__init__.py b/python/libs/mne/datasets/visual_92_categories/__init__.py new file mode 100644 index 0000000..a0b26c1 --- /dev/null +++ b/python/libs/mne/datasets/visual_92_categories/__init__.py @@ -0,0 +1,3 @@ +"""MNE visual_92_categories dataset.""" + +from .visual_92_categories import data_path, get_version diff --git a/python/libs/mne/datasets/visual_92_categories/visual_92_categories.py b/python/libs/mne/datasets/visual_92_categories/visual_92_categories.py new file mode 100644 index 0000000..df687aa --- /dev/null +++ b/python/libs/mne/datasets/visual_92_categories/visual_92_categories.py @@ -0,0 +1,60 @@ +# License: BSD Style. + +from ...utils import verbose +from ..utils import (_download_mne_dataset, _data_path_doc, _get_version, + _version_doc) + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, *, verbose=None): + """ + Get path to local copy of visual_92_categories dataset. + + .. note:: The dataset contains four fif-files, the trigger files and the T1 + mri image. This dataset is rather big in size (more than 5 GB). + + Parameters + ---------- + path : None | str + Location of where to look for the visual_92_categories data storing + location. If None, the environment variable or config parameter + MNE_DATASETS_VISUAL_92_CATEGORIES_PATH is used. If it doesn't exist, + the "mne-python/examples" directory is used. If the + visual_92_categories dataset is not found under the given path (e.g., + as "mne-python/examples/MNE-visual_92_categories-data"), the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_VISUAL_92_CATEGORIES_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + path : instance of Path + Local path to the given data file. + + Notes + ----- + The visual_92_categories dataset is documented in the following publication + Radoslaw M. Cichy, Dimitrios Pantazis, Aude Oliva (2014) Resolving + human object recognition in space and time. 
doi: 10.1038/NN.3635 + """ + return _download_mne_dataset( + name='visual_92_categories', processor='untar', path=path, + force_update=force_update, update_path=update_path, + download=download) + + +data_path.__doc__ = _data_path_doc.format( + name='visual_92_categories', conf='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH') + + +def get_version(): + """Get dataset version.""" + return _get_version('visual_92_categories') + + +get_version.__doc__ = _version_doc.format(name='visual_92_categories') diff --git a/python/libs/mne/decoding/__init__.py b/python/libs/mne/decoding/__init__.py new file mode 100644 index 0000000..2b01362 --- /dev/null +++ b/python/libs/mne/decoding/__init__.py @@ -0,0 +1,14 @@ +"""Decoding and encoding, including machine learning and receptive fields.""" + +from .transformer import (PSDEstimator, Vectorizer, + UnsupervisedSpatialFilter, TemporalFilter, + Scaler, FilterEstimator) +from .mixin import TransformerMixin +from .base import BaseEstimator, LinearModel, get_coef, cross_val_multiscore +from .csp import CSP, SPoC +from .ssd import SSD +from .ems import compute_ems, EMS +from .time_frequency import TimeFrequency +from .receptive_field import ReceptiveField +from .time_delaying_ridge import TimeDelayingRidge +from .search_light import SlidingEstimator, GeneralizingEstimator diff --git a/python/libs/mne/decoding/base.py b/python/libs/mne/decoding/base.py new file mode 100644 index 0000000..85331da --- /dev/null +++ b/python/libs/mne/decoding/base.py @@ -0,0 +1,538 @@ +"""Base class copy from sklearn.base.""" +# Authors: Gael Varoquaux +# Romain Trachel +# Alexandre Gramfort +# Jean-Remi King +# +# License: BSD-3-Clause + +import numpy as np +import time +import numbers +from ..parallel import parallel_func +from ..fixes import BaseEstimator, is_classifier, _get_check_scoring +from ..utils import logger, warn, fill_doc + + +class LinearModel(BaseEstimator): + """Compute and store patterns from linear models. + + The linear model coefficients (filters) are used to extract discriminant + neural sources from the measured data. This class computes the + corresponding patterns of these linear filters to make them more + interpretable :footcite:`HaufeEtAl2014`. + + Parameters + ---------- + model : object | None + A linear model from scikit-learn with a fit method + that updates a ``coef_`` attribute. + If None the model will be LogisticRegression. + + Attributes + ---------- + filters_ : ndarray, shape ([n_targets], n_features) + If fit, the filters used to decompose the data. + patterns_ : ndarray, shape ([n_targets], n_features) + If fit, the patterns used to restore M/EEG signals. + + See Also + -------- + CSP + mne.preprocessing.ICA + mne.preprocessing.Xdawn + + Notes + ----- + .. versionadded:: 0.10 + + References + ---------- + .. footbibliography:: + """ + + def __init__(self, model=None): # noqa: D102 + if model is None: + from sklearn.linear_model import LogisticRegression + model = LogisticRegression(solver='liblinear') + + self.model = model + self._estimator_type = getattr(model, "_estimator_type", None) + + def fit(self, X, y, **fit_params): + """Estimate the coefficients of the linear model. + + Save the coefficients in the attribute ``filters_`` and + computes the attribute ``patterns_``. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The training input samples to estimate the linear coefficients. + y : array, shape (n_samples, [n_targets]) + The target values. 
+ **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + self : instance of LinearModel + Returns the modified instance. + """ + X, y = np.asarray(X), np.asarray(y) + if X.ndim != 2: + raise ValueError('LinearModel only accepts 2-dimensional X, got ' + '%s instead.' % (X.shape,)) + if y.ndim > 2: + raise ValueError('LinearModel only accepts up to 2-dimensional y, ' + 'got %s instead.' % (y.shape,)) + + # fit the Model + self.model.fit(X, y, **fit_params) + + # Computes patterns using Haufe's trick: A = Cov_X . W . Precision_Y + + inv_Y = 1. + X = X - X.mean(0, keepdims=True) + if y.ndim == 2 and y.shape[1] != 1: + y = y - y.mean(0, keepdims=True) + inv_Y = np.linalg.pinv(np.cov(y.T)) + self.patterns_ = np.cov(X.T).dot(self.filters_.T.dot(inv_Y)).T + + return self + + @property + def filters_(self): + if hasattr(self.model, 'coef_'): + # Standard Linear Model + filters = self.model.coef_ + elif hasattr(self.model.best_estimator_, 'coef_'): + # Linear Model with GridSearchCV + filters = self.model.best_estimator_.coef_ + else: + raise ValueError('model does not have a `coef_` attribute.') + if filters.ndim == 2 and filters.shape[0] == 1: + filters = filters[0] + return filters + + def transform(self, X): + """Transform the data using the linear model. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The data to transform. + + Returns + ------- + y_pred : array, shape (n_samples,) + The predicted targets. + """ + return self.model.transform(X) + + def fit_transform(self, X, y): + """Fit the data and transform it using the linear model. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The training input samples to estimate the linear coefficients. + y : array, shape (n_samples,) + The target values. + + Returns + ------- + y_pred : array, shape (n_samples,) + The predicted targets. + """ + return self.fit(X, y).transform(X) + + def predict(self, X): + """Compute predictions of y from X. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The data used to compute the predictions. + + Returns + ------- + y_pred : array, shape (n_samples,) + The predictions. + """ + return self.model.predict(X) + + def predict_proba(self, X): + """Compute probabilistic predictions of y from X. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The data used to compute the predictions. + + Returns + ------- + y_pred : array, shape (n_samples, n_classes) + The probabilities. + """ + return self.model.predict_proba(X) + + def decision_function(self, X): + """Compute distance from the decision function of y from X. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The data used to compute the predictions. + + Returns + ------- + y_pred : array, shape (n_samples, n_classes) + The distances. + """ + return self.model.decision_function(X) + + def score(self, X, y): + """Score the linear model computed on the given test data. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The data to transform. + y : array, shape (n_samples,) + The target values. + + Returns + ------- + score : float + Score of the linear model. 
+ """ + return self.model.score(X, y) + + +def _set_cv(cv, estimator=None, X=None, y=None): + """Set the default CV depending on whether clf is classifier/regressor.""" + # Detect whether classification or regression + if estimator in ['classifier', 'regressor']: + est_is_classifier = estimator == 'classifier' + else: + est_is_classifier = is_classifier(estimator) + # Setup CV + from sklearn import model_selection as models + from sklearn.model_selection import (check_cv, StratifiedKFold, KFold) + if isinstance(cv, (int, np.int64)): + XFold = StratifiedKFold if est_is_classifier else KFold + cv = XFold(n_splits=cv) + elif isinstance(cv, str): + if not hasattr(models, cv): + raise ValueError('Unknown cross-validation') + cv = getattr(models, cv) + cv = cv() + cv = check_cv(cv=cv, y=y, classifier=est_is_classifier) + + # Extract train and test set to retrieve them at predict time + cv_splits = [(train, test) for train, test in + cv.split(X=np.zeros_like(y), y=y)] + + if not np.all([len(train) for train, _ in cv_splits]): + raise ValueError('Some folds do not have any train epochs.') + + return cv, cv_splits + + +def _check_estimator(estimator, get_params=True): + """Check whether an object has the methods required by sklearn.""" + valid_methods = ('predict', 'transform', 'predict_proba', + 'decision_function') + if ( + (not hasattr(estimator, 'fit')) or + (not any(hasattr(estimator, method) for method in valid_methods)) + ): + raise ValueError('estimator must be a scikit-learn transformer or ' + 'an estimator with the fit and a predict-like (e.g. ' + 'predict_proba) or a transform method.') + + if get_params and not hasattr(estimator, 'get_params'): + raise ValueError('estimator must be a scikit-learn transformer or an ' + 'estimator with the get_params method that allows ' + 'cloning.') + + +def _get_inverse_funcs(estimator, terminal=True): + """Retrieve the inverse functions of an pipeline or an estimator.""" + inverse_func = [False] + if hasattr(estimator, 'steps'): + # if pipeline, retrieve all steps by nesting + inverse_func = list() + for _, est in estimator.steps: + inverse_func.extend(_get_inverse_funcs(est, terminal=False)) + elif hasattr(estimator, 'inverse_transform'): + # if not pipeline attempt to retrieve inverse function + inverse_func = [estimator.inverse_transform] + + # If terminal node, check that that the last estimator is a classifier, + # and remove it from the transformers. + if terminal: + last_is_estimator = inverse_func[-1] is False + all_invertible = not(False in inverse_func[:-1]) + if last_is_estimator and all_invertible: + # keep all inverse transformation and remove last estimation + inverse_func = inverse_func[:-1] + else: + inverse_func = list() + + return inverse_func + + +def get_coef(estimator, attr='filters_', inverse_transform=False): + """Retrieve the coefficients of an estimator ending with a Linear Model. + + This is typically useful to retrieve "spatial filters" or "spatial + patterns" of decoding models :footcite:`HaufeEtAl2014`. + + Parameters + ---------- + estimator : object | None + An estimator from scikit-learn. + attr : str + The name of the coefficient attribute to retrieve, typically + ``'filters_'`` (default) or ``'patterns_'``. + inverse_transform : bool + If True, returns the coefficients after inverse transforming them with + the transformer steps of the estimator. + + Returns + ------- + coef : array + The coefficients. + + References + ---------- + .. 
footbibliography:: + """ + # Get the coefficients of the last estimator in case of nested pipeline + est = estimator + while hasattr(est, 'steps'): + est = est.steps[-1][1] + + squeeze_first_dim = False + + # If SlidingEstimator, loop across estimators + if hasattr(est, 'estimators_'): + coef = list() + for this_est in est.estimators_: + coef.append(get_coef(this_est, attr, inverse_transform)) + coef = np.transpose(coef) + coef = coef[np.newaxis] # fake a sample dimension + squeeze_first_dim = True + elif not hasattr(est, attr): + raise ValueError('This estimator does not have a %s attribute:\n%s' + % (attr, est)) + else: + coef = getattr(est, attr) + + if coef.ndim == 1: + coef = coef[np.newaxis] + squeeze_first_dim = True + + # inverse pattern e.g. to get back physical units + if inverse_transform: + if not hasattr(estimator, 'steps') and not hasattr(est, 'estimators_'): + raise ValueError('inverse_transform can only be applied onto ' + 'pipeline estimators.') + # The inverse_transform parameter will call this method on any + # estimator contained in the pipeline, in reverse order. + for inverse_func in _get_inverse_funcs(estimator)[::-1]: + coef = inverse_func(coef) + + if squeeze_first_dim: + coef = coef[0] + + return coef + + +@fill_doc +def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None, + cv=None, n_jobs=1, verbose=0, fit_params=None, + pre_dispatch='2*n_jobs'): + """Evaluate a score by cross-validation. + + Parameters + ---------- + estimator : instance of sklearn.base.BaseEstimator + The object to use to fit the data. + Must implement the 'fit' method. + X : array-like, shape (n_samples, n_dimensional_features,) + The data to fit. Can be, for example a list, or an array at least 2d. + y : array-like, shape (n_samples, n_targets,) + The target variable to try to predict in the case of + supervised learning. + groups : array-like, with shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + scoring : str, callable | None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + Note that when using an estimator which inherently returns + multidimensional output - in particular, SlidingEstimator + or GeneralizingEstimator - you should set the scorer + there, not here. + cv : int, cross-validation generator | iterable + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a ``(Stratified)KFold``, + - An object to be used as a cross-validation generator. + - An iterable yielding train, test splits. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, + :class:`sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`sklearn.model_selection.KFold` is used. + %(n_jobs)s + verbose : int, optional + The verbosity level. + fit_params : dict, optional + Parameters to pass to the fit method of the estimator. + pre_dispatch : int, or str, optional + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. 
Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + - An int, giving the exact number of total jobs that are + spawned + - A string, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + Returns + ------- + scores : array of float, shape (n_splits,) | shape (n_splits, n_scores) + Array of scores of the estimator for each run of the cross validation. + """ + # This code is copied from sklearn + + from sklearn.base import clone + from sklearn.utils import indexable + from sklearn.model_selection._split import check_cv + check_scoring = _get_check_scoring() + + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + cv_iter = list(cv.split(X, y, groups)) + scorer = check_scoring(estimator, scoring=scoring) + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. + # Note: this parallelization is implemented using MNE Parallel + parallel, p_func, n_jobs = parallel_func(_fit_and_score, n_jobs, + pre_dispatch=pre_dispatch) + scores = parallel(p_func(clone(estimator), X, y, scorer, train, test, + 0, None, fit_params) + for train, test in cv_iter) + return np.array(scores)[:, 0, ...] # flatten over joblib output. + + +def _fit_and_score(estimator, X, y, scorer, train, test, verbose, + parameters, fit_params, return_train_score=False, + return_parameters=False, return_n_test_samples=False, + return_times=False, error_score='raise'): + """Fit estimator and compute scores for a given dataset split.""" + # This code is adapted from sklearn + from ..fixes import _check_fit_params + from sklearn.utils.metaestimators import _safe_split + from sklearn.utils.validation import _num_samples + + if verbose > 1: + if parameters is None: + msg = '' + else: + msg = '%s' % (', '.join('%s=%s' % (k, v) + for k, v in parameters.items())) + print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) + + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_fit_params(X, fit_params, train) + + if parameters is not None: + estimator.set_params(**parameters) + + start_time = time.time() + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + try: + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + + except Exception as e: + # Note fit time as time until error + fit_time = time.time() - start_time + score_time = 0.0 + if error_score == 'raise': + raise + elif isinstance(error_score, numbers.Number): + test_score = error_score + if return_train_score: + train_score = error_score + warn("Classifier fit failed. The score on this train-test" + " partition for these parameters will be set to %f. " + "Details: \n%r" % (error_score, e)) + else: + raise ValueError("error_score must be the string 'raise' or a" + " numeric value. 
(Hint: if using 'raise', please" + " make sure that it has been spelled correctly.)") + + else: + fit_time = time.time() - start_time + test_score = _score(estimator, X_test, y_test, scorer) + score_time = time.time() - start_time - fit_time + if return_train_score: + train_score = _score(estimator, X_train, y_train, scorer) + + if verbose > 2: + msg += ", score=%f" % test_score + if verbose > 1: + total_time = score_time + fit_time + end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time)) + print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) + + ret = [train_score, test_score] if return_train_score else [test_score] + + if return_n_test_samples: + ret.append(_num_samples(X_test)) + if return_times: + ret.extend([fit_time, score_time]) + if return_parameters: + ret.append(parameters) + return ret + + +def _score(estimator, X_test, y_test, scorer): + """Compute the score of an estimator on a given test set. + + This code is the same as sklearn.model_selection._validation._score + but accepts to output arrays instead of floats. + """ + if y_test is None: + score = scorer(estimator, X_test) + else: + score = scorer(estimator, X_test, y_test) + if hasattr(score, 'item'): + try: + # e.g. unwrap memmapped scalars + score = score.item() + except ValueError: + # non-scalar? + pass + return score diff --git a/python/libs/mne/decoding/csp.py b/python/libs/mne/decoding/csp.py new file mode 100644 index 0000000..b7f1305 --- /dev/null +++ b/python/libs/mne/decoding/csp.py @@ -0,0 +1,829 @@ +# -*- coding: utf-8 -*- +# Authors: Romain Trachel +# Alexandre Gramfort +# Alexandre Barachant +# Clemens Brunner +# Jean-Remi King +# +# License: BSD-3-Clause + +import copy as cp + +import numpy as np + +from .base import BaseEstimator +from .mixin import TransformerMixin +from ..cov import _regularized_covariance +from ..fixes import pinv +from ..utils import fill_doc, _check_option, _validate_type, copy_doc + + +@fill_doc +class CSP(TransformerMixin, BaseEstimator): + """M/EEG signal decomposition using the Common Spatial Patterns (CSP). + + This class can be used as a supervised decomposition to estimate spatial + filters for feature extraction. CSP in the context of EEG was first + described in :footcite:`KolesEtAl1990`; a comprehensive tutorial on CSP can + be found in :footcite:`BlankertzEtAl2008`. Multi-class solving is + implemented from :footcite:`Grosse-WentrupBuss2008`. + + Parameters + ---------- + n_components : int (default 4) + The number of components to decompose M/EEG signals. This number should + be set by cross-validation. + reg : float | str | None (default None) + If not None (same as ``'empirical'``, default), allow regularization + for covariance estimation. If float (between 0 and 1), shrinkage is + used. For str values, ``reg`` will be passed as ``method`` to + :func:`mne.compute_covariance`. + log : None | bool (default None) + If ``transform_into`` equals ``'average_power'`` and ``log`` is None or + True, then apply a log transform to standardize features, else features + are z-scored. If ``transform_into`` is ``'csp_space'``, ``log`` must be + None. + cov_est : 'concat' | 'epoch' (default 'concat') + If ``'concat'``, covariance matrices are estimated on concatenated + epochs for each class. If ``'epoch'``, covariance matrices are + estimated on each epoch separately and then averaged over each class. 
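As a quick sketch of how the ``cross_val_multiscore`` helper defined earlier in this hunk is meant to be used with a per-time-point estimator (the synthetic shapes, classifier choice, and fold count below are illustrative assumptions, not part of the diff):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from mne.decoding import SlidingEstimator, cross_val_multiscore

rng = np.random.RandomState(0)
X = rng.randn(40, 8, 50)    # (n_epochs, n_channels, n_times)
y = rng.randint(0, 2, 40)   # binary labels

# One logistic regression per time point; as the docstring above notes,
# the scorer is set on the SlidingEstimator, not on cross_val_multiscore.
clf = SlidingEstimator(
    make_pipeline(StandardScaler(), LogisticRegression()),
    scoring='roc_auc')
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
print(scores.shape)         # (n_splits, n_times)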
+ transform_into : 'average_power' | 'csp_space' (default 'average_power') + If 'average_power' then ``self.transform`` will return the average + power of each spatial filter. If ``'csp_space'``, ``self.transform`` + will return the data in CSP space. + norm_trace : bool (default False) + Normalize class covariance by its trace. Trace normalization is a step + of the original CSP algorithm :footcite:`KolesEtAl1990` to eliminate + magnitude variations in the EEG between individuals. It is not applied + in more recent work :footcite:`BlankertzEtAl2008`, + :footcite:`Grosse-WentrupBuss2008` and can have a negative impact on + pattern order. + cov_method_params : dict | None + Parameters to pass to :func:`mne.compute_covariance`. + + .. versionadded:: 0.16 + %(rank_none)s + + .. versionadded:: 0.17 + component_order : 'mutual_info' | 'alternate' (default 'mutual_info') + If ``'mutual_info'`` order components by decreasing mutual information + (in the two-class case this uses a simplification which orders + components by decreasing absolute deviation of the eigenvalues from 0.5 + :footcite:`BarachantEtAl2010`). For the two-class case, ``'alternate'`` + orders components by starting with the largest eigenvalue, followed by + the smallest, the second-to-largest, the second-to-smallest, and so on + :footcite:`BlankertzEtAl2008`. + + .. versionadded:: 0.21 + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_channels) + If fit, the CSP components used to decompose the data, else None. + patterns_ : ndarray, shape (n_channels, n_channels) + If fit, the CSP patterns used to restore M/EEG signals, else None. + mean_ : ndarray, shape (n_components,) + If fit, the mean squared power for each component. + std_ : ndarray, shape (n_components,) + If fit, the std squared power for each component. + + See Also + -------- + mne.preprocessing.Xdawn, SPoC + + References + ---------- + .. footbibliography:: + """ + + def __init__(self, n_components=4, reg=None, log=None, cov_est='concat', + transform_into='average_power', norm_trace=False, + cov_method_params=None, rank=None, + component_order='mutual_info'): + # Init default CSP + if not isinstance(n_components, int): + raise ValueError('n_components must be an integer.') + self.n_components = n_components + self.rank = rank + self.reg = reg + + # Init default cov_est + if not (cov_est == "concat" or cov_est == "epoch"): + raise ValueError("unknown covariance estimation method") + self.cov_est = cov_est + + # Init default transform_into + self.transform_into = _check_option('transform_into', transform_into, + ['average_power', 'csp_space']) + + # Init default log + if transform_into == 'average_power': + if log is not None and not isinstance(log, bool): + raise ValueError('log must be a boolean if transform_into == ' + '"average_power".') + else: + if log is not None: + raise ValueError('log must be a None if transform_into == ' + '"csp_space".') + self.log = log + + _validate_type(norm_trace, bool, 'norm_trace') + self.norm_trace = norm_trace + self.cov_method_params = cov_method_params + self.component_order = _check_option('component_order', + component_order, + ('mutual_info', 'alternate')) + + def _check_Xy(self, X, y=None): + """Check input data.""" + if not isinstance(X, np.ndarray): + raise ValueError("X should be of type ndarray (got %s)." 
+ % type(X)) + if y is not None: + if len(X) != len(y) or len(y) < 1: + raise ValueError('X and y must have the same length.') + if X.ndim < 3: + raise ValueError('X must have at least 3 dimensions.') + + def fit(self, X, y): + """Estimate the CSP decomposition on epochs. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_times) + The data on which to estimate the CSP. + y : array, shape (n_epochs,) + The class for each epoch. + + Returns + ------- + self : instance of CSP + Returns the modified instance. + """ + self._check_Xy(X, y) + + self._classes = np.unique(y) + n_classes = len(self._classes) + if n_classes < 2: + raise ValueError("n_classes must be >= 2.") + if n_classes > 2 and self.component_order == 'alternate': + raise ValueError("component_order='alternate' requires two " + "classes, but data contains {} classes; use " + "component_order='mutual_info' " + "instead.".format(n_classes)) + + covs, sample_weights = self._compute_covariance_matrices(X, y) + eigen_vectors, eigen_values = self._decompose_covs(covs, + sample_weights) + ix = self._order_components(covs, sample_weights, eigen_vectors, + eigen_values, self.component_order) + + eigen_vectors = eigen_vectors[:, ix] + + self.filters_ = eigen_vectors.T + self.patterns_ = pinv(eigen_vectors) + + pick_filters = self.filters_[:self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean power) + X = (X ** 2).mean(axis=2) + + # To standardize features + self.mean_ = X.mean(axis=0) + self.std_ = X.std(axis=0) + + return self + + def transform(self, X): + """Estimate epochs sources given the CSP filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : ndarray + If self.transform_into == 'average_power' then returns the power of + CSP features averaged over time and shape (n_epochs, n_sources) + If self.transform_into == 'csp_space' then returns the data in CSP + space and shape is (n_epochs, n_sources, n_times). + """ + if not isinstance(X, np.ndarray): + raise ValueError("X should be of type ndarray (got %s)." % type(X)) + if self.filters_ is None: + raise RuntimeError('No filters available. Please first fit CSP ' + 'decomposition.') + + pick_filters = self.filters_[:self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean band power) + if self.transform_into == 'average_power': + X = (X ** 2).mean(axis=2) + log = True if self.log is None else self.log + if log: + X = np.log(X) + else: + X -= self.mean_ + X /= self.std_ + return X + + @copy_doc(TransformerMixin.fit_transform) + def fit_transform(self, X, y, **fit_params): # noqa: D102 + return super().fit_transform(X, y=y, **fit_params) + + @fill_doc + def plot_patterns(self, info, components=None, ch_type=None, + vmin=None, vmax=None, cmap='RdBu_r', sensors=True, + colorbar=True, scalings=None, units='a.u.', res=64, + size=1, cbar_fmt='%3.1f', name_format='CSP%01d', + show=True, show_names=False, title=None, mask=None, + mask_params=None, outlines='head', contours=6, + image_interp='bilinear', average=None, + sphere=None): + """Plot topographic patterns of components. + + The patterns explain how the measured data was generated from the + neural sources (a.k.a. the forward model). + + Parameters + ---------- + %(info_not_none)s Used for fitting. If not available, consider using + :func:`mne.create_info`. + components : float | array of float | None + The patterns to plot. 
If None, n_components will be shown. + ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None + The channel type to plot. For 'grad', the gradiometers are + collected in pairs and the RMS for each pair is plotted. + If None, then first available channel type from order given + above is used. Defaults to None. + vmin : float | callable + The value specifying the lower bound of the color range. + If None, and vmax is None, -vmax is used. Else np.min(data). + If callable, the output equals vmin(data). + vmax : float | callable + The value specifying the upper bound of the color range. + If None, the maximum absolute value is used. If vmin is None, + but vmax is not, default np.min(data). + If callable, the output equals vmax(data). + cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None + Colormap to use. If tuple, the first value indicates the colormap + to use and the second value is a boolean defining interactivity. In + interactive mode the colors are adjustable by clicking and dragging + the colorbar with left and right mouse button. Left mouse button + moves the scale up and down and right mouse button adjusts the + range. Hitting space bar resets the range. Up and down arrows can + be used to change the colormap. If None, 'Reds' is used for all + positive data, otherwise defaults to 'RdBu_r'. If 'interactive', + translates to (None, True). Defaults to 'RdBu_r'. + + .. warning:: Interactive mode works smoothly only for a small + amount of topomaps. + sensors : bool | str + Add markers for sensor locations to the plot. Accepts matplotlib + plot format string (e.g., 'r+' for red plusses). If True, + a circle will be used (via .add_artist). Defaults to True. + colorbar : bool + Plot a colorbar. + scalings : dict | float | None + The scalings of the channel types to be applied for plotting. + If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``. + units : dict | str | None + The unit of the channel type used for colorbar label. If + scale is None the unit is automatically determined. + res : int + The resolution of the topomap image (n pixels along each side). + size : float + Side length per topomap in inches. + cbar_fmt : str + String format for colorbar values. + name_format : str + String format for topomap values. Defaults to "CSP%%01d". + show : bool + Show figure if True. + show_names : bool | callable + If True, show channel names on top of the map. If a callable is + passed, channel names will be formatted using the callable; e.g., + to delete the prefix 'MEG ' from all channel names, pass the + function lambda x: x.replace('MEG ', ''). If ``mask`` is not None, + only significant sensors will be shown. + title : str | None + Title. If None (default), no title is displayed. + %(mask_patterns_topomap)s + %(mask_params_topomap)s + %(outlines_topomap)s + contours : int | array of float + The number of contour lines to draw. If 0, no contours will be + drawn. When an integer, matplotlib ticker locator is used to find + suitable values for the contour thresholds (may sometimes be + inaccurate, use array for accuracy). If an array, the values + represent the levels for the contours. Defaults to 6. + image_interp : str + The image interpolation to be used. + All matplotlib options are accepted. + average : float | None + The time window around a given time to be used for averaging + (seconds). For example, 0.01 would translate into window that + starts 5 ms before and ends 5 ms after a given time point. + Defaults to None, which means no averaging. 
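A minimal usage sketch for the ``fit``/``transform`` pair implemented above, on synthetic data (shapes and hyper-parameters are illustrative; CSP needs at least two classes):

import numpy as np
from mne.decoding import CSP

rng = np.random.RandomState(42)
X = rng.randn(30, 6, 100)                      # (n_epochs, n_channels, n_times)
y = np.r_[np.zeros(15), np.ones(15)].astype(int)

csp = CSP(n_components=4, log=True)
features = csp.fit_transform(X, y)
print(features.shape)       # (30, 4): log band power per CSP component
print(csp.filters_.shape)   # (6, 6): one spatial filter per row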
+ %(sphere_topomap_auto)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. + """ + from .. import EvokedArray + if components is None: + components = np.arange(self.n_components) + + # set sampling frequency to have 1 component per time point + info = cp.deepcopy(info) + with info._unlock(): + info['sfreq'] = 1. + # create an evoked + patterns = EvokedArray(self.patterns_.T, info, tmin=0) + # the call plot_topomap + return patterns.plot_topomap( + times=components, ch_type=ch_type, + vmin=vmin, vmax=vmax, cmap=cmap, colorbar=colorbar, res=res, + cbar_fmt=cbar_fmt, sensors=sensors, + scalings=scalings, units=units, time_unit='s', + time_format=name_format, size=size, show_names=show_names, + title=title, mask_params=mask_params, mask=mask, outlines=outlines, + contours=contours, image_interp=image_interp, show=show, + average=average, sphere=sphere) + + @fill_doc + def plot_filters(self, info, components=None, ch_type=None, + vmin=None, vmax=None, cmap='RdBu_r', sensors=True, + colorbar=True, scalings=None, units='a.u.', res=64, + size=1, cbar_fmt='%3.1f', name_format='CSP%01d', + show=True, show_names=False, title=None, mask=None, + mask_params=None, outlines='head', contours=6, + image_interp='bilinear', average=None): + """Plot topographic filters of components. + + The filters are used to extract discriminant neural sources from + the measured data (a.k.a. the backward model). + + Parameters + ---------- + %(info_not_none)s Used for fitting. If not available, consider using + :func:`mne.create_info`. + components : float | array of float | None + The patterns to plot. If None, n_components will be shown. + ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None + The channel type to plot. For 'grad', the gradiometers are + collected in pairs and the RMS for each pair is plotted. + If None, then first available channel type from order given + above is used. Defaults to None. + vmin : float | callable + The value specifying the lower bound of the color range. + If None, and vmax is None, -vmax is used. Else np.min(data). + If callable, the output equals vmin(data). + vmax : float | callable + The value specifying the upper bound of the color range. + If None, the maximum absolute value is used. If vmin is None, + but vmax is not, defaults to np.min(data). + If callable, the output equals vmax(data). + cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None + Colormap to use. If tuple, the first value indicates the colormap + to use and the second value is a boolean defining interactivity. In + interactive mode the colors are adjustable by clicking and dragging + the colorbar with left and right mouse button. Left mouse button + moves the scale up and down and right mouse button adjusts the + range. Hitting space bar resets the range. Up and down arrows can + be used to change the colormap. If None, 'Reds' is used for all + positive data, otherwise defaults to 'RdBu_r'. If 'interactive', + translates to (None, True). Defaults to 'RdBu_r'. + + .. warning:: Interactive mode works smoothly only for a small + amount of topomaps. + sensors : bool | str + Add markers for sensor locations to the plot. Accepts matplotlib + plot format string (e.g., 'r+' for red plusses). If True, + a circle will be used (via .add_artist). Defaults to True. + colorbar : bool + Plot a colorbar. + scalings : dict | float | None + The scalings of the channel types to be applied for plotting. + If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``. 
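The plotting helpers here reuse ``EvokedArray`` by pretending each component is a time point (``sfreq = 1`` Hz, so component k sits at "time" k seconds). A stripped-down sketch of that trick, assuming the 6-channel ``csp`` fitted in the previous example and hypothetical 10-20 channel names:

import mne

# Hypothetical EEG info matching the fitted `csp`; sfreq=1 means
# plot_topomap(times=...) effectively selects components, not times.
info = mne.create_info(['Fz', 'Cz', 'Pz', 'Oz', 'C3', 'C4'],
                       sfreq=1., ch_types='eeg')
info.set_montage('standard_1020')
patterns = mne.EvokedArray(csp.patterns_.T, info, tmin=0)
patterns.plot_topomap(times=[0, 1, 2, 3], time_unit='s')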
+ units : dict | str | None + The unit of the channel type used for colorbar label. If + scale is None the unit is automatically determined. + res : int + The resolution of the topomap image (n pixels along each side). + size : float + Side length per topomap in inches. + cbar_fmt : str + String format for colorbar values. + name_format : str + String format for topomap values. Defaults to "CSP%%01d". + show : bool + Show figure if True. + show_names : bool | callable + If True, show channel names on top of the map. If a callable is + passed, channel names will be formatted using the callable; e.g., + to delete the prefix 'MEG ' from all channel names, pass the + function lambda x: x.replace('MEG ', ''). If ``mask`` is not None, + only significant sensors will be shown. + title : str | None + Title. If None (default), no title is displayed. + mask : ndarray of bool, shape (n_channels, n_times) | None + The channels to be marked as significant at a given time point. + Indices set to `True` will be considered. Defaults to None. + mask_params : dict | None + Additional plotting parameters for plotting significant sensors. + Default (None) equals:: + + dict(marker='o', markerfacecolor='w', markeredgecolor='k', + linewidth=0, markersize=4) + %(outlines_topomap)s + contours : int | array of float + The number of contour lines to draw. If 0, no contours will be + drawn. When an integer, matplotlib ticker locator is used to find + suitable values for the contour thresholds (may sometimes be + inaccurate, use array for accuracy). If an array, the values + represent the levels for the contours. Defaults to 6. + image_interp : str + The image interpolation to be used. + All matplotlib options are accepted. + average : float | None + The time window around a given time to be used for averaging + (seconds). For example, 0.01 would translate into window that + starts 5 ms before and ends 5 ms after a given time point. + Defaults to None, which means no averaging. + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. + """ + from .. import EvokedArray + if components is None: + components = np.arange(self.n_components) + + # set sampling frequency to have 1 component per time point + info = cp.deepcopy(info) + with info._unlock(): + info['sfreq'] = 1. 
+ # create an evoked + filters = EvokedArray(self.filters_.T, info, tmin=0) + # the call plot_topomap + return filters.plot_topomap( + times=components, ch_type=ch_type, vmin=vmin, + vmax=vmax, cmap=cmap, colorbar=colorbar, res=res, + cbar_fmt=cbar_fmt, sensors=sensors, scalings=scalings, units=units, + time_unit='s', time_format=name_format, size=size, + show_names=show_names, title=title, mask_params=mask_params, + mask=mask, outlines=outlines, contours=contours, + image_interp=image_interp, show=show, average=average) + + def _compute_covariance_matrices(self, X, y): + _, n_channels, _ = X.shape + + if self.cov_est == "concat": + cov_estimator = self._concat_cov + elif self.cov_est == "epoch": + cov_estimator = self._epoch_cov + + covs = [] + sample_weights = [] + for this_class in self._classes: + cov, weight = cov_estimator(X[y == this_class]) + + if self.norm_trace: + cov /= np.trace(cov) + + covs.append(cov) + sample_weights.append(weight) + + return np.stack(covs), np.array(sample_weights) + + def _concat_cov(self, x_class): + """Concatenate epochs before computing the covariance.""" + _, n_channels, _ = x_class.shape + + x_class = np.transpose(x_class, [1, 0, 2]) + x_class = x_class.reshape(n_channels, -1) + cov = _regularized_covariance( + x_class, reg=self.reg, method_params=self.cov_method_params, + rank=self.rank) + weight = x_class.shape[0] + + return cov, weight + + def _epoch_cov(self, x_class): + """Mean of per-epoch covariances.""" + cov = sum(_regularized_covariance( + this_X, reg=self.reg, + method_params=self.cov_method_params, + rank=self.rank) for this_X in x_class) + cov /= len(x_class) + weight = len(x_class) + + return cov, weight + + def _decompose_covs(self, covs, sample_weights): + from scipy import linalg + n_classes = len(covs) + if n_classes == 2: + eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0)) + else: + # The multiclass case is adapted from + # http://github.com/alexandrebarachant/pyRiemann + eigen_vectors, D = _ajd_pham(covs) + eigen_vectors = self._normalize_eigenvectors(eigen_vectors.T, covs, + sample_weights) + eigen_values = None + return eigen_vectors, eigen_values + + def _compute_mutual_info(self, covs, sample_weights, eigen_vectors): + class_probas = sample_weights / sample_weights.sum() + + mutual_info = [] + for jj in range(eigen_vectors.shape[1]): + aa, bb = 0, 0 + for (cov, prob) in zip(covs, class_probas): + tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov), + eigen_vectors[:, jj]) + aa += prob * np.log(np.sqrt(tmp)) + bb += prob * (tmp ** 2 - 1) + mi = - (aa + (3.0 / 16) * (bb ** 2)) + mutual_info.append(mi) + + return mutual_info + + def _normalize_eigenvectors(self, eigen_vectors, covs, sample_weights): + # Here we apply an euclidean mean. 
See pyRiemann for other metrics + mean_cov = np.average(covs, axis=0, weights=sample_weights) + + for ii in range(eigen_vectors.shape[1]): + tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov), + eigen_vectors[:, ii]) + eigen_vectors[:, ii] /= np.sqrt(tmp) + return eigen_vectors + + def _order_components(self, covs, sample_weights, eigen_vectors, + eigen_values, component_order): + n_classes = len(self._classes) + if component_order == 'mutual_info' and n_classes > 2: + mutual_info = self._compute_mutual_info(covs, sample_weights, + eigen_vectors) + ix = np.argsort(mutual_info)[::-1] + elif component_order == 'mutual_info' and n_classes == 2: + ix = np.argsort(np.abs(eigen_values - 0.5))[::-1] + elif component_order == 'alternate' and n_classes == 2: + i = np.argsort(eigen_values) + ix = np.empty_like(i) + ix[1::2] = i[:len(i) // 2] + ix[0::2] = i[len(i) // 2:][::-1] + return ix + + +def _ajd_pham(X, eps=1e-6, max_iter=15): + """Approximate joint diagonalization based on Pham's algorithm. + + This is a direct implementation of the PHAM's AJD algorithm [1]. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_channels) + A set of covariance matrices to diagonalize. + eps : float, default 1e-6 + The tolerance for stopping criterion. + max_iter : int, default 1000 + The maximum number of iteration to reach convergence. + + Returns + ------- + V : ndarray, shape (n_channels, n_channels) + The diagonalizer. + D : ndarray, shape (n_epochs, n_channels, n_channels) + The set of quasi diagonal matrices. + + References + ---------- + .. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive + definite Hermitian matrices." SIAM Journal on Matrix Analysis and + Applications 22, no. 4 (2001): 1136-1152. + + """ + # Adapted from http://github.com/alexandrebarachant/pyRiemann + n_epochs = X.shape[0] + + # Reshape input matrix + A = np.concatenate(X, axis=0).T + + # Init variables + n_times, n_m = A.shape + V = np.eye(n_times) + epsilon = n_times * (n_times - 1) * eps + + for it in range(max_iter): + decr = 0 + for ii in range(1, n_times): + for jj in range(ii): + Ii = np.arange(ii, n_m, n_times) + Ij = np.arange(jj, n_m, n_times) + + c1 = A[ii, Ii] + c2 = A[jj, Ij] + + g12 = np.mean(A[ii, Ij] / c1) + g21 = np.mean(A[ii, Ij] / c2) + + omega21 = np.mean(c1 / c2) + omega12 = np.mean(c2 / c1) + omega = np.sqrt(omega12 * omega21) + + tmp = np.sqrt(omega21 / omega12) + tmp1 = (tmp * g12 + g21) / (omega + 1) + tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9) + + h12 = tmp1 + tmp2 + h21 = np.conj((tmp1 - tmp2) / tmp) + + decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0 + + tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21) + tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21)) + tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]]) + + A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :]) + tmp = np.c_[A[:, Ii], A[:, Ij]] + tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F') + tmp = np.dot(tmp, tau.T) + + tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F') + A[:, Ii] = tmp[:, :n_epochs] + A[:, Ij] = tmp[:, n_epochs:] + V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :]) + if decr < epsilon: + break + D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2) + return V, D + + +@fill_doc +class SPoC(CSP): + """Implementation of the SPoC spatial filtering. 
+ + Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows to + extract spatial filters and + patterns by using a target (continuous) variable in the decomposition + process in order to give preference to components whose power correlates + with the target variable. + + SPoC can be seen as an extension of the CSP driven by a continuous + variable rather than a discrete variable. Typical applications include + extraction of motor patterns using EMG power or audio patterns using sound + envelope. + + Parameters + ---------- + n_components : int + The number of components to decompose M/EEG signals. + reg : float | str | None (default None) + If not None (same as ``'empirical'``, default), allow + regularization for covariance estimation. + If float, shrinkage is used (0 <= shrinkage <= 1). + For str options, ``reg`` will be passed to ``method`` to + :func:`mne.compute_covariance`. + log : None | bool (default None) + If transform_into == 'average_power' and log is None or True, then + applies a log transform to standardize the features, else the features + are z-scored. If transform_into == 'csp_space', then log must be None. + transform_into : {'average_power', 'csp_space'} + If 'average_power' then self.transform will return the average power of + each spatial filter. If 'csp_space' self.transform will return the data + in CSP space. Defaults to 'average_power'. + cov_method_params : dict | None + Parameters to pass to :func:`mne.compute_covariance`. + + .. versionadded:: 0.16 + %(rank_none)s + + .. versionadded:: 0.17 + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_channels) + If fit, the SPoC spatial filters, else None. + patterns_ : ndarray, shape (n_channels, n_channels) + If fit, the SPoC spatial patterns, else None. + mean_ : ndarray, shape (n_components,) + If fit, the mean squared power for each component. + std_ : ndarray, shape (n_components,) + If fit, the std squared power for each component. + + See Also + -------- + mne.preprocessing.Xdawn, CSP + + References + ---------- + .. footbibliography:: + """ + + def __init__(self, n_components=4, reg=None, log=None, + transform_into='average_power', cov_method_params=None, + rank=None): + """Init of SPoC.""" + super(SPoC, self).__init__(n_components=n_components, reg=reg, log=log, + cov_est="epoch", norm_trace=False, + transform_into=transform_into, rank=rank, + cov_method_params=cov_method_params) + # Covariance estimation have to be done on the single epoch level, + # unlike CSP where covariance estimation can also be achieved through + # concatenation of all epochs from the same class. + delattr(self, 'cov_est') + delattr(self, 'norm_trace') + + def fit(self, X, y): + """Estimate the SPoC decomposition on epochs. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_times) + The data on which to estimate the SPoC. + y : array, shape (n_epochs,) + The class for each epoch. + + Returns + ------- + self : instance of SPoC + Returns the modified instance. 
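Since SPoC is driven by a continuous target rather than class labels, a usage sketch looks like CSP but with a float-valued ``y`` (synthetic data, illustrative shapes):

import numpy as np
from mne.decoding import SPoC

rng = np.random.RandomState(1)
X = rng.randn(50, 6, 100)   # (n_epochs, n_channels, n_times)
y = rng.randn(50)           # continuous target, e.g. EMG power per epoch

spoc = SPoC(n_components=2)
power = spoc.fit_transform(X, y)
print(power.shape)          # (50, 2): log power of the SPoC components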
+ """ + from scipy import linalg + self._check_Xy(X, y) + + if len(np.unique(y)) < 2: + raise ValueError("y must have at least two distinct values.") + + # The following code is directly copied from pyRiemann + + # Normalize target variable + target = y.astype(np.float64) + target -= target.mean() + target /= target.std() + + n_epochs, n_channels = X.shape[:2] + + # Estimate single trial covariance + covs = np.empty((n_epochs, n_channels, n_channels)) + for ii, epoch in enumerate(X): + covs[ii] = _regularized_covariance( + epoch, reg=self.reg, method_params=self.cov_method_params, + rank=self.rank) + + C = covs.mean(0) + Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0) + + # solve eigenvalue decomposition + evals, evecs = linalg.eigh(Cz, C) + evals = evals.real + evecs = evecs.real + # sort vectors + ix = np.argsort(np.abs(evals))[::-1] + + # sort eigenvectors + evecs = evecs[:, ix].T + + # spatial patterns + self.patterns_ = linalg.pinv(evecs).T # n_channels x n_channels + self.filters_ = evecs # n_channels x n_channels + + pick_filters = self.filters_[:self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean band power) + X = (X ** 2).mean(axis=-1) + + # To standardize features + self.mean_ = X.mean(axis=0) + self.std_ = X.std(axis=0) + + return self + + def transform(self, X): + """Estimate epochs sources given the SPoC filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : ndarray + If self.transform_into == 'average_power' then returns the power of + CSP features averaged over time and shape (n_epochs, n_sources) + If self.transform_into == 'csp_space' then returns the data in CSP + space and shape is (n_epochs, n_sources, n_times). + """ + return super(SPoC, self).transform(X) diff --git a/python/libs/mne/decoding/ems.py b/python/libs/mne/decoding/ems.py new file mode 100644 index 0000000..083f725 --- /dev/null +++ b/python/libs/mne/decoding/ems.py @@ -0,0 +1,217 @@ +# Author: Denis Engemann +# Alexandre Gramfort +# Jean-Remi King +# +# License: BSD-3-Clause + +from collections import Counter + +import numpy as np + +from .mixin import TransformerMixin, EstimatorMixin +from .base import _set_cv +from ..io.pick import _picks_to_idx +from ..parallel import parallel_func +from ..utils import logger, verbose +from .. import pick_types, pick_info + + +class EMS(TransformerMixin, EstimatorMixin): + """Transformer to compute event-matched spatial filters. + + This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire + time course. No time + window needs to be specified. The result is a spatial filter at each + time point and a corresponding time course. Intuitively, the result + gives the similarity between the filter at each time point and the + data vector (sensors) at that time point. + + .. note:: EMS only works for binary classification. + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_times) + The set of spatial filters. + classes_ : ndarray, shape (n_classes,) + The target classes. + + References + ---------- + .. footbibliography:: + """ + + def __repr__(self): # noqa: D105 + if hasattr(self, 'filters_'): + return '' % ( + len(self.filters_), len(self.classes_)) + else: + return '' + + def fit(self, X, y): + """Fit the spatial filters. + + .. note : EMS is fitted on data normalized by channel type before the + fitting of the spatial filters. 
+ + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The training data. + y : array of int, shape (n_epochs) + The target classes. + + Returns + ------- + self : instance of EMS + Returns self. + """ + classes = np.unique(y) + if len(classes) != 2: + raise ValueError('EMS only works for binary classification.') + self.classes_ = classes + filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0) + filters /= np.linalg.norm(filters, axis=0)[None, :] + self.filters_ = filters + return self + + def transform(self, X): + """Transform the data by the spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The input data. + + Returns + ------- + X : array, shape (n_epochs, n_times) + The input data transformed by the spatial filters. + """ + Xt = np.sum(X * self.filters_, axis=1) + return Xt + + +@verbose +def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, cv=None, + verbose=None): + """Compute event-matched spatial filter on epochs. + + This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire + time course. No time + window needs to be specified. The result is a spatial filter at each + time point and a corresponding time course. Intuitively, the result + gives the similarity between the filter at each time point and the + data vector (sensors) at that time point. + + .. note : EMS only works for binary classification. + + .. note : The present function applies a leave-one-out cross-validation, + following Schurger et al's paper. However, we recommend using + a stratified k-fold cross-validation. Indeed, leave-one-out tends + to overfit and cannot be used to estimate the variance of the + prediction within a given fold. + + .. note : Because of the leave-one-out, this function needs an equal + number of epochs in each of the two conditions. + + Parameters + ---------- + epochs : instance of mne.Epochs + The epochs. + conditions : list of str | None, default None + If a list of strings, strings must match the epochs.event_id's key as + well as the number of conditions supported by the objective_function. + If None keys in epochs.event_id are used. + %(picks_good_data)s + %(n_jobs)s + cv : cross-validation object | str | None, default LeaveOneOut + The cross-validation scheme. + %(verbose)s + + Returns + ------- + surrogate_trials : ndarray, shape (n_trials // 2, n_times) + The trial surrogates. + mean_spatial_filter : ndarray, shape (n_channels, n_times) + The set of spatial filters. + conditions : ndarray, shape (n_classes,) + The conditions used. Values correspond to original event ids. + + References + ---------- + .. footbibliography:: + """ + logger.info('...computing surrogate time series. This can take some time') + + # Default to leave-one-out cv + cv = 'LeaveOneOut' if cv is None else cv + picks = _picks_to_idx(epochs.info, picks) + + if not len(set(Counter(epochs.events[:, 2]).values())) == 1: + raise ValueError('The same number of epochs is required by ' + 'this function. 
Please consider ' + '`epochs.equalize_event_counts`') + + if conditions is None: + conditions = epochs.event_id.keys() + epochs = epochs.copy() + else: + epochs = epochs[conditions] + + epochs.drop_bad() + + if len(conditions) != 2: + raise ValueError('Currently this function expects exactly 2 ' + 'conditions but you gave me %i' % + len(conditions)) + + ev = epochs.events[:, 2] + # Special care to avoid path dependent mappings and orders + conditions = list(sorted(conditions)) + cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions] + + info = pick_info(epochs.info, picks) + data = epochs.get_data(picks=picks) + + # Scale (z-score) the data by channel type + # XXX the z-scoring is applied outside the CV, which is not standard. + for ch_type in ['mag', 'grad', 'eeg']: + if ch_type in epochs: + # FIXME should be applied to all sort of data channels + if ch_type == 'eeg': + this_picks = pick_types(info, meg=False, eeg=True) + else: + this_picks = pick_types(info, meg=ch_type, eeg=False) + data[:, this_picks] /= np.std(data[:, this_picks]) + + # Setup cross-validation. Need to use _set_cv to deal with sklearn + # deprecation of cv objects. + y = epochs.events[:, 2] + _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y) + + parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs) + # FIXME this parallelization should be removed. + # 1) it's numpy computation so it's already efficient, + # 2) it duplicates the data in RAM, + # 3) the computation is already super fast. + out = parallel(p_func(_ems_diff, data, cond_idx, train, test) + for train, test in cv_splits) + + surrogate_trials, spatial_filter = zip(*out) + surrogate_trials = np.array(surrogate_trials) + spatial_filter = np.mean(spatial_filter, axis=0) + + return surrogate_trials, spatial_filter, epochs.events[:, 2] + + +def _ems_diff(data0, data1): + """Compute the default diff objective function.""" + return np.mean(data0, axis=0) - np.mean(data1, axis=0) + + +def _run_ems(objective_function, data, cond_idx, train, test): + """Run EMS.""" + d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx)) + d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :] + # compute surrogates + return np.sum(data[test[0]] * d, axis=0), d diff --git a/python/libs/mne/decoding/mixin.py b/python/libs/mne/decoding/mixin.py new file mode 100644 index 0000000..b2c491b --- /dev/null +++ b/python/libs/mne/decoding/mixin.py @@ -0,0 +1,85 @@ + + +class TransformerMixin(object): + """Mixin class for all transformers in scikit-learn.""" + + def fit_transform(self, X, y=None, **fit_params): + """Fit to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters + ``fit_params``, and returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + Training set. + y : array, shape (n_samples,) + Target values or class labels. + **fit_params : dict + Additional fitting parameters passed to the ``fit`` method.. + + Returns + ------- + X_new : array, shape (n_samples, n_features_new) + Transformed array. 
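The ``EMS`` transformer defined earlier can also be used directly on arrays, outside ``compute_ems``; a small sketch on synthetic standard-normal data (so the per-channel-type scaling that ``compute_ems`` performs is skipped here):

import numpy as np
from mne.decoding import EMS

rng = np.random.RandomState(2)
X = rng.randn(20, 5, 60)    # (n_epochs, n_channels, n_times)
y = np.r_[np.zeros(10), np.ones(10)].astype(int)

ems = EMS()
ems.fit(X, y)               # filters_: unit-normed difference of class means
surrogates = ems.transform(X)
print(surrogates.shape)     # (20, 60): one surrogate time course per epoch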
+ """ + # non-optimized default implementation; override when a better + # method is possible for a given clustering algorithm + if y is None: + # fit method of arity 1 (unsupervised transformation) + return self.fit(X, **fit_params).transform(X) + else: + # fit method of arity 2 (supervised transformation) + return self.fit(X, y, **fit_params).transform(X) + + +class EstimatorMixin(object): + """Mixin class for estimators.""" + + def get_params(self, deep=True): + """Get the estimator params. + + Parameters + ---------- + deep : bool + Deep. + """ + return + + def set_params(self, **params): + """Set parameters (mimics sklearn API). + + Parameters + ---------- + **params : dict + Extra parameters. + + Returns + ------- + inst : object + The instance. + """ + if not params: + return self + valid_params = self.get_params(deep=True) + for key, value in params.items(): + split = key.split('__', 1) + if len(split) > 1: + # nested objects case + name, sub_name = split + if name not in valid_params: + raise ValueError('Invalid parameter %s for estimator %s. ' + 'Check the list of available parameters ' + 'with `estimator.get_params().keys()`.' % + (name, self)) + sub_object = valid_params[name] + sub_object.set_params(**{sub_name: value}) + else: + # simple objects case + if key not in valid_params: + raise ValueError('Invalid parameter %s for estimator %s. ' + 'Check the list of available parameters ' + 'with `estimator.get_params().keys()`.' % + (key, self.__class__.__name__)) + setattr(self, key, value) + return self diff --git a/python/libs/mne/decoding/receptive_field.py b/python/libs/mne/decoding/receptive_field.py new file mode 100644 index 0000000..c395438 --- /dev/null +++ b/python/libs/mne/decoding/receptive_field.py @@ -0,0 +1,483 @@ +# -*- coding: utf-8 -*- +# Authors: Chris Holdgraf +# Eric Larson + +# License: BSD-3-Clause + +import numbers + +import numpy as np + +from .base import get_coef, BaseEstimator, _check_estimator +from .time_delaying_ridge import TimeDelayingRidge +from ..fixes import is_regressor +from ..utils import _validate_type, verbose, fill_doc, _VerboseDep + + +@fill_doc +class ReceptiveField(BaseEstimator, _VerboseDep): + """Fit a receptive field model. + + This allows you to fit an encoding model (stimulus to brain) or a decoding + model (brain to stimulus) using time-lagged input features (for example, a + spectro- or spatio-temporal receptive field, or STRF) + :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,CrosseEtAl2016,HoldgrafEtAl2016`. + + Parameters + ---------- + tmin : float + The starting lag, in seconds (or samples if ``sfreq`` == 1). + tmax : float + The ending lag, in seconds (or samples if ``sfreq`` == 1). + Must be >= tmin. + sfreq : float + The sampling frequency used to convert times into samples. + feature_names : array, shape (n_features,) | None + Names for input features to the model. If None, feature names will + be auto-generated from the shape of input data after running `fit`. + estimator : instance of sklearn.base.BaseEstimator | float | None + The model used in fitting inputs and outputs. This can be any + scikit-learn-style model that contains a fit and predict method. If a + float is passed, it will be interpreted as the ``alpha`` parameter + to be passed to a Ridge regression model. If `None`, then a Ridge + regression model with an alpha of 0 will be used. + fit_intercept : bool | None + If True (default), the sample mean is removed before fitting. 
+ If ``estimator`` is a :class:`sklearn.base.BaseEstimator`, + this must be None or match ``estimator.fit_intercept``. + scoring : ['r2', 'corrcoef'] + Defines how predictions will be scored. Currently must be one of + 'r2' (coefficient of determination) or 'corrcoef' (the correlation + coefficient). + patterns : bool + If True, inverse coefficients will be computed upon fitting using the + covariance matrix of the inputs, and the cross-covariance of the + inputs/outputs, according to :footcite:`HaufeEtAl2014`. Defaults to + False. + n_jobs : int | str + Number of jobs to run in parallel. Can be 'cuda' if CuPy + is installed properly and ``estimator is None``. + + .. versionadded:: 0.18 + edge_correction : bool + If True (default), correct the autocorrelation coefficients for + non-zero delays for the fact that fewer samples are available. + Disabling this speeds up performance at the cost of accuracy + depending on the relationship between epoch length and model + duration. Only used if ``estimator`` is float or None. + + .. versionadded:: 0.18 + %(verbose)s + + Attributes + ---------- + coef_ : array, shape ([n_outputs, ]n_features, n_delays) + The coefficients from the model fit, reshaped for easy visualization. + During :meth:`mne.decoding.ReceptiveField.fit`, if ``y`` has one + dimension (time), the ``n_outputs`` dimension here is omitted. + patterns_ : array, shape ([n_outputs, ]n_features, n_delays) + If fit, the inverted coefficients from the model. + delays_ : array, shape (n_delays,), dtype int + The delays used to fit the model, in indices. To return the delays + in seconds, use ``self.delays_ / self.sfreq`` + valid_samples_ : slice + The rows to keep during model fitting after removing rows with + missing values due to time delaying. This can be used to get an + output equivalent to using :func:`numpy.convolve` or + :func:`numpy.correlate` with ``mode='valid'``. + + See Also + -------- + mne.decoding.TimeDelayingRidge + + Notes + ----- + For a causal system, the encoding model will have significant + non-zero values only at positive lags. In other words, lags point + backward in time relative to the input, so positive lags correspond + to previous input time samples, while negative lags correspond to + future input time samples. + + References + ---------- + .. footbibliography:: + """ # noqa E501 + + @verbose + def __init__(self, tmin, tmax, sfreq, feature_names=None, estimator=None, + fit_intercept=None, scoring='r2', patterns=False, + n_jobs=1, edge_correction=True, verbose=None): + self.feature_names = feature_names + self.sfreq = float(sfreq) + self.tmin = tmin + self.tmax = tmax + self.estimator = 0. 
if estimator is None else estimator + self.fit_intercept = fit_intercept + self.scoring = scoring + self.patterns = patterns + self.n_jobs = n_jobs + self.edge_correction = edge_correction + + def __repr__(self): # noqa: D105 + s = "tmin, tmax : (%.3f, %.3f), " % (self.tmin, self.tmax) + estimator = self.estimator + if not isinstance(estimator, str): + estimator = type(self.estimator) + s += "estimator : %s, " % (estimator,) + if hasattr(self, 'coef_'): + if self.feature_names is not None: + feats = self.feature_names + if len(feats) == 1: + s += "feature: %s, " % feats[0] + else: + s += "features : [%s, ..., %s], " % (feats[0], feats[-1]) + s += "fit: True" + else: + s += "fit: False" + if hasattr(self, 'scores_'): + s += "scored (%s)" % self.scoring + return "" % s + + def _delay_and_reshape(self, X, y=None): + """Delay and reshape the variables.""" + if not isinstance(self.estimator_, TimeDelayingRidge): + # X is now shape (n_times, n_epochs, n_feats, n_delays) + X = _delay_time_series(X, self.tmin, self.tmax, self.sfreq, + fill_mean=self.fit_intercept) + X = _reshape_for_est(X) + # Concat times + epochs + if y is not None: + y = y.reshape(-1, y.shape[-1], order='F') + return X, y + + def fit(self, X, y): + """Fit a receptive field model. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_features) + The input features for the model. + y : array, shape (n_times[, n_epochs][, n_outputs]) + The output features for the model. + + Returns + ------- + self : instance + The instance so you can chain operations. + """ + from scipy import linalg + if self.scoring not in _SCORERS.keys(): + raise ValueError('scoring must be one of %s, got' + '%s ' % (sorted(_SCORERS.keys()), self.scoring)) + from sklearn.base import clone + X, y, _, self._y_dim = self._check_dimensions(X, y) + + if self.tmin > self.tmax: + raise ValueError('tmin (%s) must be at most tmax (%s)' + % (self.tmin, self.tmax)) + # Initialize delays + self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq) + + # Define the slice that we should use in the middle + self.valid_samples_ = _delays_to_slice(self.delays_) + + if isinstance(self.estimator, numbers.Real): + if self.fit_intercept is None: + self.fit_intercept = True + estimator = TimeDelayingRidge( + self.tmin, self.tmax, self.sfreq, alpha=self.estimator, + fit_intercept=self.fit_intercept, n_jobs=self.n_jobs, + edge_correction=self.edge_correction) + elif is_regressor(self.estimator): + estimator = clone(self.estimator) + if self.fit_intercept is not None and \ + estimator.fit_intercept != self.fit_intercept: + raise ValueError( + 'Estimator fit_intercept (%s) != initialization ' + 'fit_intercept (%s), initialize ReceptiveField with the ' + 'same fit_intercept value or use fit_intercept=None' + % (estimator.fit_intercept, self.fit_intercept)) + self.fit_intercept = estimator.fit_intercept + else: + raise ValueError('`estimator` must be a float or an instance' + ' of `BaseEstimator`,' + ' got type %s.' 
% type(self.estimator)) + self.estimator_ = estimator + del estimator + _check_estimator(self.estimator_) + + # Create input features + n_times, n_epochs, n_feats = X.shape + n_outputs = y.shape[-1] + n_delays = len(self.delays_) + + # Update feature names if we have none + if ((self.feature_names is not None) and + (len(self.feature_names) != n_feats)): + raise ValueError('n_features in X does not match feature names ' + '(%s != %s)' % (n_feats, len(self.feature_names))) + + # Create input features + X, y = self._delay_and_reshape(X, y) + + self.estimator_.fit(X, y) + coef = get_coef(self.estimator_, 'coef_') # (n_targets, n_features) + shape = [n_feats, n_delays] + if self._y_dim > 1: + shape.insert(0, -1) + self.coef_ = coef.reshape(shape) + + # Inverse-transform model weights + if self.patterns: + if isinstance(self.estimator_, TimeDelayingRidge): + cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1) + y = y.reshape(-1, y.shape[-1], order='F') + else: + X = X - X.mean(0, keepdims=True) + cov_ = np.cov(X.T) + del X + + # Inverse output covariance + if y.ndim == 2 and y.shape[1] != 1: + y = y - y.mean(0, keepdims=True) + inv_Y = linalg.pinv(np.cov(y.T)) + else: + inv_Y = 1. / float(n_times * n_epochs - 1) + del y + + # Inverse coef according to Haufe's method + # patterns has shape (n_feats * n_delays, n_outputs) + coef = np.reshape(self.coef_, (n_feats * n_delays, n_outputs)) + patterns = cov_.dot(coef.dot(inv_Y)) + self.patterns_ = patterns.reshape(shape) + + return self + + def predict(self, X): + """Generate predictions with a receptive field. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_channels) + The input features for the model. + + Returns + ------- + y_pred : array, shape (n_times[, n_epochs][, n_outputs]) + The output predictions. "Note that valid samples (those + unaffected by edge artifacts during the time delaying step) can + be obtained using ``y_pred[rf.valid_samples_]``. + """ + if not hasattr(self, 'delays_'): + raise ValueError('Estimator has not been fit yet.') + X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3] + del _ + # convert to sklearn and back + pred_shape = X.shape[:-1] + if self._y_dim > 1: + pred_shape = pred_shape + (self.coef_.shape[0],) + X, _ = self._delay_and_reshape(X) + y_pred = self.estimator_.predict(X) + y_pred = y_pred.reshape(pred_shape, order='F') + shape = list(y_pred.shape) + if X_dim <= 2: + shape.pop(1) # epochs + extra = 0 + else: + extra = 1 + shape = shape[:self._y_dim + extra] + y_pred.shape = shape + return y_pred + + def score(self, X, y): + """Score predictions generated with a receptive field. + + This calls ``self.predict``, then masks the output of this + and ``y` with ``self.valid_samples_``. Finally, it passes + this to a :mod:`sklearn.metrics` scorer. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_channels) + The input features for the model. + y : array, shape (n_times[, n_epochs][, n_outputs]) + Used for scikit-learn compatibility. + + Returns + ------- + scores : list of float, shape (n_outputs,) + The scores estimated by the model for each output (e.g. mean + R2 of ``predict(X)``). 
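A minimal synthetic sketch of the ``fit``/``score`` cycle above; passing a float as ``estimator`` selects the ``TimeDelayingRidge`` path, and the lag window, sampling rate, and toy response below are illustrative assumptions:

import numpy as np
from mne.decoding import ReceptiveField

rng = np.random.RandomState(3)
n_times, n_feats = 1000, 3
X = rng.randn(n_times, n_feats)                       # stimulus features
w = rng.randn(n_feats)
y = np.convolve(X @ w, np.hanning(10), mode='same')   # smeared response

rf = ReceptiveField(tmin=-0.1, tmax=0.4, sfreq=100., estimator=1.0)
rf.fit(X, y)
print(rf.coef_.shape)   # (n_feats, n_delays)
print(rf.score(X, y))   # R^2, computed within rf.valid_samples_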
+ """ + # Create our scoring object + scorer_ = _SCORERS[self.scoring] + + # Generate predictions, then reshape so we can mask time + X, y = self._check_dimensions(X, y, predict=True)[:2] + n_times, n_epochs, n_outputs = y.shape + y_pred = self.predict(X) + y_pred = y_pred[self.valid_samples_] + y = y[self.valid_samples_] + + # Re-vectorize and call scorer + y = y.reshape([-1, n_outputs], order='F') + y_pred = y_pred.reshape([-1, n_outputs], order='F') + assert y.shape == y_pred.shape + scores = scorer_(y, y_pred, multioutput='raw_values') + return scores + + def _check_dimensions(self, X, y, predict=False): + X_dim = X.ndim + y_dim = y.ndim if y is not None else 0 + if X_dim == 2: + # Ensure we have a 3D input by adding singleton epochs dimension + X = X[:, np.newaxis, :] + if y is not None: + if y_dim == 1: + y = y[:, np.newaxis, np.newaxis] # epochs, outputs + elif y_dim == 2: + y = y[:, np.newaxis, :] # epochs + else: + raise ValueError('y must be shape (n_times[, n_epochs]' + '[,n_outputs], got %s' % (y.shape,)) + elif X.ndim == 3: + if y is not None: + if y.ndim == 2: + y = y[:, :, np.newaxis] # Add an outputs dim + elif y.ndim != 3: + raise ValueError('If X has 3 dimensions, ' + 'y must have 2 or 3 dimensions') + else: + raise ValueError('X must be shape (n_times[, n_epochs],' + ' n_features), got %s' % (X.shape,)) + if y is not None: + if X.shape[0] != y.shape[0]: + raise ValueError('X and y do not have the same n_times\n' + '%s != %s' % (X.shape[0], y.shape[0])) + if X.shape[1] != y.shape[1]: + raise ValueError('X and y do not have the same n_epochs\n' + '%s != %s' % (X.shape[1], y.shape[1])) + if predict and y.shape[-1] != len(self.estimator_.coef_): + raise ValueError('Number of outputs does not match' + ' estimator coefficients dimensions') + return X, y, X_dim, y_dim + + +def _delay_time_series(X, tmin, tmax, sfreq, fill_mean=False): + """Return a time-lagged input time series. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_features) + The time series to delay. Must be 2D or 3D. + tmin : int | float + The starting lag. + tmax : int | float + The ending lag. + Must be >= tmin. + sfreq : int | float + The sampling frequency of the series. Defaults to 1.0. + fill_mean : bool + If True, the fill value will be the mean along the time dimension + of the feature, and each cropped and delayed segment of data + will be shifted to have the same mean value (ensuring that mean + subtraction works properly). If False, the fill value will be zero. + + Returns + ------- + delayed : array, shape(n_times[, n_epochs][, n_features], n_delays) + The delayed data. It has the same shape as X, with an extra dimension + appended to the end. + + Examples + -------- + >>> tmin, tmax = -0.1, 0.2 + >>> sfreq = 10. + >>> x = np.arange(1, 6) + >>> x_del = _delay_time_series(x, tmin, tmax, sfreq) + >>> print(x_del) # doctest:+SKIP + [[2. 1. 0. 0.] + [3. 2. 1. 0.] + [4. 3. 2. 1.] + [5. 4. 3. 2.] + [0. 5. 4. 
3.]] + """ + _check_delayer_params(tmin, tmax, sfreq) + delays = _times_to_delays(tmin, tmax, sfreq) + # Iterate through indices and append + delayed = np.zeros(X.shape + (len(delays),)) + if fill_mean: + mean_value = X.mean(axis=0) + if X.ndim == 3: + mean_value = np.mean(mean_value, axis=0) + delayed[:] = mean_value[:, np.newaxis] + for ii, ix_delay in enumerate(delays): + # Create zeros to populate w/ delays + if ix_delay < 0: + out = delayed[:ix_delay, ..., ii] + use_X = X[-ix_delay:] + elif ix_delay > 0: + out = delayed[ix_delay:, ..., ii] + use_X = X[:-ix_delay] + else: # == 0 + out = delayed[..., ii] + use_X = X + out[:] = use_X + if fill_mean: + out[:] += (mean_value - use_X.mean(axis=0)) + return delayed + + +def _times_to_delays(tmin, tmax, sfreq): + """Convert a tmin/tmax in seconds to delays.""" + # Convert seconds to samples + delays = np.arange(int(np.round(tmin * sfreq)), + int(np.round(tmax * sfreq) + 1)) + return delays + + +def _delays_to_slice(delays): + """Find the slice to be taken in order to remove missing values.""" + # Negative values == cut off rows at the end + min_delay = None if delays[-1] <= 0 else delays[-1] + # Positive values == cut off rows at the end + max_delay = None if delays[0] >= 0 else delays[0] + return slice(min_delay, max_delay) + + +def _check_delayer_params(tmin, tmax, sfreq): + """Check delayer input parameters. For future custom delay support.""" + _validate_type(sfreq, 'numeric', '`sfreq`') + + for tlim in (tmin, tmax): + _validate_type(tlim, 'numeric', 'tmin/tmax') + if not tmin <= tmax: + raise ValueError('tmin must be <= tmax') + + +def _reshape_for_est(X_del): + """Convert X_del to a sklearn-compatible shape.""" + n_times, n_epochs, n_feats, n_delays = X_del.shape + X_del = X_del.reshape(n_times, n_epochs, -1) # concatenate feats + X_del = X_del.reshape(n_times * n_epochs, -1, order='F') + return X_del + + +# Create a correlation scikit-learn-style scorer +def _corr_score(y_true, y, multioutput=None): + from scipy.stats import pearsonr + assert multioutput == 'raw_values' + for this_y in (y_true, y): + if this_y.ndim != 2: + raise ValueError('inputs must be shape (samples, outputs), got %s' + % (this_y.shape,)) + return np.array([pearsonr(y_true[:, ii], y[:, ii])[0] + for ii in range(y.shape[-1])]) + + +def _r2_score(y_true, y, multioutput=None): + from sklearn.metrics import r2_score + return r2_score(y_true, y, multioutput=multioutput) + + +_SCORERS = {'r2': _r2_score, 'corrcoef': _corr_score} diff --git a/python/libs/mne/decoding/search_light.py b/python/libs/mne/decoding/search_light.py new file mode 100644 index 0000000..84bcdd1 --- /dev/null +++ b/python/libs/mne/decoding/search_light.py @@ -0,0 +1,672 @@ +# Author: Jean-Remi King +# +# License: BSD-3-Clause + +import numpy as np + +from .mixin import TransformerMixin +from .base import BaseEstimator, _check_estimator +from ..fixes import _get_check_scoring +from ..parallel import parallel_func +from ..utils import (_validate_type, array_split_idx, ProgressBar, + verbose, fill_doc, _VerboseDep) + + +@fill_doc +class SlidingEstimator(BaseEstimator, TransformerMixin, _VerboseDep): + """Search Light. + + Fit, predict and score a series of models to each subset of the dataset + along the last dimension. Each entry in the last dimension is referred + to as a task. + + Parameters + ---------- + %(base_estimator)s + %(scoring)s + %(n_jobs)s + %(verbose)s + + Attributes + ---------- + estimators_ : array-like, shape (n_tasks,) + List of fitted scikit-learn estimators (one per task). 
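A worked check of the delaying helpers shown above, under the assumption that the private functions are importable from the vendored module path; with ``tmin=-0.1, tmax=0.2, sfreq=10`` the integer delays are ``[-1, 0, 1, 2]`` samples:

import numpy as np
from mne.decoding.receptive_field import (_delay_time_series,
                                          _delays_to_slice,
                                          _times_to_delays)

x = np.arange(1., 6.)
delays = _times_to_delays(-0.1, 0.2, sfreq=10.)   # [-1  0  1  2]
x_del = _delay_time_series(x, -0.1, 0.2, sfreq=10.)
print(x_del)                             # matches the doctest above
print(x_del[_delays_to_slice(delays)])   # rows free of edge zeros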
+ """ + + @verbose + def __init__(self, base_estimator, scoring=None, n_jobs=1, *, + verbose=None): # noqa: D102 + _check_estimator(base_estimator) + self._estimator_type = getattr(base_estimator, "_estimator_type", None) + self.base_estimator = base_estimator + self.n_jobs = n_jobs + self.scoring = scoring + + _validate_type(self.n_jobs, 'int', 'n_jobs') + + def __repr__(self): # noqa: D105 + repr_str = '<' + super(SlidingEstimator, self).__repr__() + if hasattr(self, 'estimators_'): + repr_str = repr_str[:-1] + repr_str += ', fitted with %i estimators' % len(self.estimators_) + return repr_str + '>' + + def fit(self, X, y, **fit_params): + """Fit a series of independent estimators to the dataset. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The training input samples. For each data slice, a clone estimator + is fitted independently. The feature dimension can be + multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + self : object + Return self. + """ + self._check_Xy(X, y) + self.estimators_ = list() + self.fit_params = fit_params + # For fitting, the parallelization is across estimators. + parallel, p_func, n_jobs = parallel_func(_sl_fit, self.n_jobs, + verbose=False) + n_jobs = min(n_jobs, X.shape[-1]) + mesg = 'Fitting %s' % (self.__class__.__name__,) + with ProgressBar(X.shape[-1], mesg=mesg) as pb: + estimators = parallel( + p_func(self.base_estimator, split, y, pb.subset(pb_idx), + **fit_params) + for pb_idx, split in array_split_idx(X, n_jobs, axis=-1)) + + # Each parallel job can have a different number of training estimators + # We can't directly concatenate them because of sklearn's Bagging API + # (see scikit-learn #9720) + self.estimators_ = np.empty(X.shape[-1], dtype=object) + idx = 0 + for job_estimators in estimators: + for est in job_estimators: + self.estimators_[idx] = est + idx += 1 + return self + + def fit_transform(self, X, y, **fit_params): + """Fit and transform a series of independent estimators to the dataset. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The training input samples. For each task, a clone estimator + is fitted independently. The feature dimension can be + multidimensional, e.g.:: + + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets) + The predicted values for each estimator. + """ # noqa: E501 + return self.fit(X, y, **fit_params).transform(X) + + def _transform(self, X, method): + """Aux. function to make parallel predictions/transformation.""" + self._check_Xy(X) + method = _check_method(self.base_estimator, method) + if X.shape[-1] != len(self.estimators_): + raise ValueError('The number of estimators does not match ' + 'X.shape[-1]') + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. 
+ mesg = 'Transforming %s' % (self.__class__.__name__,) + parallel, p_func, n_jobs = parallel_func( + _sl_transform, self.n_jobs, verbose=False) + n_jobs = min(n_jobs, X.shape[-1]) + X_splits = np.array_split(X, n_jobs, axis=-1) + idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs)) + with ProgressBar(X.shape[-1], mesg=mesg) as pb: + y_pred = parallel(p_func(est, x, method, pb.subset(pb_idx)) + for pb_idx, est, x in zip( + idx, est_splits, X_splits)) + + y_pred = np.concatenate(y_pred, axis=1) + return y_pred + + def transform(self, X): + """Transform each data slice/task with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice/task, the corresponding + estimator makes a transformation of the data, e.g. + ``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + Xt : array, shape (n_samples, n_estimators) + The transformed values generated by each estimator. + """ # noqa: E501 + return self._transform(X, 'transform') + + def predict(self, X): + """Predict each data slice/task with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + makes the sample predictions, e.g.: + ``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets) + Predicted values for each estimator/data slice. + """ # noqa: E501 + return self._transform(X, 'predict') + + def predict_proba(self, X): + """Predict each data slice with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + makes the sample probabilistic predictions, e.g.: + ``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + y_pred : array, shape (n_samples, n_tasks, n_classes) + Predicted probabilities for each estimator/data slice/task. + """ # noqa: E501 + return self._transform(X, 'predict_proba') + + def decision_function(self, X): + """Estimate distances of each data slice to the hyperplanes. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + outputs the distance to the hyperplane, e.g.: + ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2) + Predicted distances for each estimator/data slice. 
+ + Notes + ----- + This requires base_estimator to have a ``decision_function`` method. + """ # noqa: E501 + return self._transform(X, 'decision_function') + + def _check_Xy(self, X, y=None): + """Aux. function to check input data.""" + if y is not None: + if len(X) != len(y) or len(y) < 1: + raise ValueError('X and y must have the same length.') + if X.ndim < 3: + raise ValueError('X must have at least 3 dimensions.') + + def score(self, X, y): + """Score each estimator on each task. + + The number of tasks in X should match the number of tasks/estimators + given at fit time, i.e. we need + ``X.shape[-1] == len(self.estimators_)``. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + scores the prediction, e.g.: + ``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_tasks,) + Score for each estimator/task. + """ # noqa: E501 + check_scoring = _get_check_scoring() + + self._check_Xy(X) + if X.shape[-1] != len(self.estimators_): + raise ValueError('The number of estimators does not match ' + 'X.shape[-1]') + + scoring = check_scoring(self.base_estimator, self.scoring) + y = _fix_auc(scoring, y) + + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. + parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs) + n_jobs = min(n_jobs, X.shape[-1]) + X_splits = np.array_split(X, n_jobs, axis=-1) + est_splits = np.array_split(self.estimators_, n_jobs) + score = parallel(p_func(est, scoring, x, y) + for (est, x) in zip(est_splits, X_splits)) + + score = np.concatenate(score, axis=0) + return score + + @property + def classes_(self): + if not hasattr(self.estimators_[0], 'classes_'): + raise AttributeError('classes_ attribute available only if ' + 'base_estimator has it, and estimator %s does' + ' not' % (self.estimators_[0],)) + return self.estimators_[0].classes_ + + + @fill_doc + def _sl_fit(estimator, X, y, pb, **fit_params): + """Aux. function to fit SlidingEstimator in parallel. + + Fit a clone estimator to each slice of data. + + Parameters + ---------- + %(base_estimator)s + X : array, shape (n_samples, nd_features, n_estimators) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_samples,) + The target values. + fit_params : dict | None + Parameters to pass to the fit method of the estimator. + + Returns + ------- + estimators_ : list of estimators + The fitted estimators. + """ + from sklearn.base import clone + estimators_ = list() + for ii in range(X.shape[-1]): + est = clone(estimator) + est.fit(X[..., ii], y, **fit_params) + estimators_.append(est) + pb.update(ii + 1) + return estimators_ + + + def _sl_transform(estimators, X, method, pb): + """Aux. function to transform SlidingEstimator in parallel. + + Apply transform/predict/decision_function etc. to each slice of data. + + Parameters + ---------- + estimators : list of estimators + The fitted estimators. + X : array, shape (n_samples, nd_features, n_estimators) + The target data. The feature dimension can be multidimensional e.g. 
+ X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + method : str + The estimator method to use (e.g. 'predict', 'transform'). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2) + The transformations for each slice of data. + """ # noqa: E501 + for ii, est in enumerate(estimators): + transform = getattr(est, method) + _y_pred = transform(X[..., ii]) + # Initialize array of predictions on the first transform iteration + if ii == 0: + y_pred = _sl_init_pred(_y_pred, X) + y_pred[:, ii, ...] = _y_pred + pb.update(ii + 1) + return y_pred + + + def _sl_init_pred(y_pred, X): + """Aux. function to SlidingEstimator to initialize y_pred.""" + n_sample, n_tasks = X.shape[0], X.shape[-1] + y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype) + return y_pred + + + def _sl_score(estimators, scoring, X, y): + """Aux. function to score SlidingEstimator in parallel. + + Predict and score each slice of data. + + Parameters + ---------- + estimators : list, shape (n_tasks,) + The fitted estimators. + X : array, shape (n_samples, nd_features, n_tasks) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks) + scoring : callable, str or None + If scoring is None (default), the predictions are internally + generated by estimator.score(). Else, we must first get the + predictions to pass them to an ad-hoc scorer. + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_tasks,) + The score for each task / slice of data. + """ + n_tasks = X.shape[-1] + score = np.zeros(n_tasks) + for ii, est in enumerate(estimators): + score[ii] = scoring(est, X[..., ii], y) + return score + + + def _check_method(estimator, method): + """Check that an estimator has the method attribute. + + If method == 'transform' and estimator does not have 'transform', use + 'predict' instead. + """ + if method == 'transform' and not hasattr(estimator, 'transform'): + method = 'predict' + if not hasattr(estimator, method): + raise ValueError('base_estimator does not have `%s` method.' % method) + return method + + + @fill_doc + class GeneralizingEstimator(SlidingEstimator): + """Generalization Light. + + Fit a search-light along the last dimension and use the fitted + estimators to apply a systematic cross-task generalization. + + Parameters + ---------- + %(base_estimator)s + %(scoring)s + %(n_jobs)s + %(verbose)s + """ + + def __repr__(self): # noqa: D105 + repr_str = super(GeneralizingEstimator, self).__repr__() + if hasattr(self, 'estimators_'): + repr_str = repr_str[:-1] + repr_str += ', fitted with %i estimators>' % len(self.estimators_) + return repr_str + + def _transform(self, X, method): + """Aux. function to make parallel predictions/transformation.""" + self._check_Xy(X) + method = _check_method(self.base_estimator, method) + mesg = 'Transforming %s' % (self.__class__.__name__,) + parallel, p_func, n_jobs = parallel_func( + _gl_transform, self.n_jobs, verbose=False) + n_jobs = min(n_jobs, X.shape[-1]) + with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb: + y_pred = parallel( + p_func(self.estimators_, x_split, method, pb.subset(pb_idx)) + for pb_idx, x_split in array_split_idx( + X, n_jobs, axis=-1, n_per_split=len(self.estimators_))) + + y_pred = np.concatenate(y_pred, axis=2) + return y_pred + + def transform(self, X): + """Transform each data slice with all possible estimators. 
+ + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The input samples. For estimator the corresponding data slice is + used to make a transformation. The feature dimension can be + multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators). + + Returns + ------- + Xt : array, shape (n_samples, n_estimators, n_slices) + The transformed values generated by each estimator. + """ + return self._transform(X, 'transform') + + def predict(self, X): + """Predict each data slice with all possible estimators. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. For each data slice, a fitted estimator + predicts each slice of the data independently. The feature + dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets) + The predicted values for each estimator. + """ # noqa: E501 + return self._transform(X, 'predict') + + def predict_proba(self, X): + """Estimate probabilistic estimates of each data slice with all possible estimators. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. For each data slice, a fitted estimator + predicts a slice of the data. The feature dimension can be + multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes) + The predicted values for each estimator. + + Notes + ----- + This requires ``base_estimator`` to have a ``predict_proba`` method. + """ # noqa: E501 + return self._transform(X, 'predict_proba') + + def decision_function(self, X): + """Estimate distances of each data slice to all hyperplanes. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. Each estimator outputs the distance to + its hyperplane, e.g.: + ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2) + The predicted values for each estimator. + + Notes + ----- + This requires ``base_estimator`` to have a ``decision_function`` + method. + """ # noqa: E501 + return self._transform(X, 'decision_function') + + def score(self, X, y): + """Score each of the estimators on the tested dimensions. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The input samples. For each data slice, the corresponding estimator + scores the prediction, e.g.: + ``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``. + The feature dimension can be multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_samples, n_estimators, n_slices) + Score for each estimator / data slice couple. + """ # noqa: E501 + check_scoring = _get_check_scoring() + self._check_Xy(X) + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. 
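+ # (editor's note) unlike SlidingEstimator.score, every fitted estimator + # is scored on every slice here, yielding the full generalization + # matrix (e.g. training time by testing time when slices are time + # points).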
+ mesg = 'Scoring %s' % (self.__class__.__name__,) + parallel, p_func, n_jobs = parallel_func(_gl_score, self.n_jobs, + verbose=False) + n_jobs = min(n_jobs, X.shape[-1]) + scoring = check_scoring(self.base_estimator, self.scoring) + y = _fix_auc(scoring, y) + with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb: + score = parallel(p_func(self.estimators_, scoring, x, y, + pb.subset(pb_idx)) + for pb_idx, x in array_split_idx( + X, n_jobs, axis=-1, + n_per_split=len(self.estimators_))) + + score = np.concatenate(score, axis=1) + return score + + +def _gl_transform(estimators, X, method, pb): + """Transform the dataset. + + This will apply each estimator to all slices of the data. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. For each data slice, a clone estimator + is fitted independently. The feature dimension can be multidimensional + e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + + Returns + ------- + Xt : array, shape (n_samples, n_slices) + The transformed values generated by each estimator. + """ + n_sample, n_iter = X.shape[0], X.shape[-1] + for ii, est in enumerate(estimators): + # stack generalized data for faster prediction + X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)]) + X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]]) + transform = getattr(est, method) + _y_pred = transform(X_stack) + # unstack generalizations + if _y_pred.ndim == 2: + _y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]]) + else: + shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int) + _y_pred = np.reshape(_y_pred, shape) + # Initialize array of predictions on the first transform iteration + if ii == 0: + y_pred = _gl_init_pred(_y_pred, X, len(estimators)) + y_pred[:, ii, ...] = _y_pred + pb.update((ii + 1) * n_iter) + return y_pred + + +def _gl_init_pred(y_pred, X, n_train): + """Aux. function to GeneralizingEstimator to initialize y_pred.""" + n_sample, n_iter = X.shape[0], X.shape[-1] + if y_pred.ndim == 3: + y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]), + y_pred.dtype) + else: + y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype) + return y_pred + + +def _gl_score(estimators, scoring, X, y, pb): + """Score GeneralizingEstimator in parallel. + + Predict and score each slice of data. + + Parameters + ---------- + estimators : list of estimators + The fitted estimators. + scoring : callable, string or None + If scoring is None (default), the predictions are internally + generated by estimator.score(). Else, we must first get the + predictions to pass them to ad-hoc scorer. + X : array, shape (n_samples, nd_features, n_slices) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_estimators, n_slices) + The score for each slice of data. + """ + # FIXME: The level parallelization may be a bit high, and might be memory + # consuming. Perhaps need to lower it down to the loop across X slices. 
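+ # (editor's note) the nested loop below makes one scoring call per + # (estimator, slice) pair, i.e. len(estimators) * X.shape[-1] calls in + # total, which is what pb.update tracks.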
+ score_shape = [len(estimators), X.shape[-1]] + for jj in range(X.shape[-1]): + for ii, est in enumerate(estimators): + _score = scoring(est, X[..., jj], y) + # Initialize array of predictions on the first score iteration + if (ii == 0) and (jj == 0): + dtype = type(_score) + score = np.zeros(score_shape, dtype) + score[ii, jj, ...] = _score + pb.update(jj * len(estimators) + ii + 1) + return score + + +def _fix_auc(scoring, y): + from sklearn.preprocessing import LabelEncoder + # This fixes sklearn's inability to compute roc_auc when y not in [0, 1] + # scikit-learn/scikit-learn#6874 + if scoring is not None: + score_func = getattr(scoring, '_score_func', None) + kwargs = getattr(scoring, '_kwargs', {}) + if (getattr(score_func, '__name__', '') == 'roc_auc_score' and + kwargs.get('multi_class', 'raise') == 'raise'): + if np.ndim(y) != 1 or len(set(y)) != 2: + raise ValueError('roc_auc scoring can only be computed for ' + 'two-class problems.') + y = LabelEncoder().fit_transform(y) + return y diff --git a/python/libs/mne/decoding/ssd.py b/python/libs/mne/decoding/ssd.py new file mode 100644 index 0000000..f6ac42b --- /dev/null +++ b/python/libs/mne/decoding/ssd.py @@ -0,0 +1,292 @@ +# Author: Denis A. Engemann +# Victoria Peterson +# License: BSD-3-Clause + +import numpy as np + +from ..filter import filter_data +from ..cov import _regularized_covariance +from . import TransformerMixin, BaseEstimator +from ..time_frequency import psd_array_welch +from ..utils import _time_mask, fill_doc, _validate_type, _check_option +from ..io.pick import _get_channel_types, _picks_to_idx + + +@fill_doc +class SSD(BaseEstimator, TransformerMixin): + """ + M/EEG signal decomposition using the Spatio-Spectral Decomposition (SSD). + + SSD seeks to maximize the power at a frequency band of interest while + simultaneously minimizing it at the flanking (surrounding) frequency bins + (considered noise). It extremizes the covariance matrices associated with + signal and noise :footcite:`NikulinEtAl2011`. + + SSD can either be used as a dimensionality reduction method or a + ‘denoised’ low rank factorization method :footcite:`HaufeEtAl2014b`. + + Parameters + ---------- + %(info_not_none)s Must match the input data. + filt_params_signal : dict + Filtering for the frequencies of interest. + filt_params_noise : dict + Filtering for the frequencies of non-interest. + reg : float | str | None (default) + Which covariance estimator to use. + If not None (same as 'empirical'), allow regularization for + covariance estimation. If float, shrinkage is used + (0 <= shrinkage <= 1). For str options, reg will be passed to + method to :func:`mne.compute_covariance`. + n_components : int | None (default None) + The number of components to extract from the signal. + If n_components is None, no dimensionality reduction is applied. + picks : array of int | None (default None) + The indices of good channels. + sort_by_spectral_ratio : bool (default False) + If set to True, the components are sorted accordingly + to the spectral ratio. + See Eq. (24) in :footcite:`NikulinEtAl2011`. + return_filtered : bool (default True) + If return_filtered is True, data is bandpassed and projected onto + the SSD components. + n_fft : int (default None) + If sort_by_spectral_ratio is set to True, then the SSD sources will be + sorted accordingly to their spectral ratio which is calculated based on + :func:`mne.time_frequency.psd_array_welch` function. The n_fft parameter + set the length of FFT used. 
+ See :func:`mne.time_frequency.psd_array_welch` for more information. + cov_method_params : dict | None (default None) + As in :class:`mne.decoding.SPoC` + The default is None. + rank : None | dict | ‘info’ | ‘full’ + As in :class:`mne.decoding.SPoC` + This controls the rank computation that can be read from the + measurement info or estimated from the data. + See Notes of :func:`mne.compute_rank` for details. + We recommend to use 'full' when working with epoched data. + + Attributes + ---------- + filters_ : array, shape (n_channels, n_components) + The spatial filters to be multiplied with the signal. + patterns_ : array, shape (n_components, n_channels) + The patterns for reconstructing the signal from the filtered data. + + References + ---------- + .. footbibliography:: + """ + + def __init__(self, info, filt_params_signal, filt_params_noise, + reg=None, n_components=None, picks=None, + sort_by_spectral_ratio=True, return_filtered=False, + n_fft=None, cov_method_params=None, rank=None): + """Initialize instance.""" + dicts = {"signal": filt_params_signal, "noise": filt_params_noise} + for param, dd in [('l', 0), ('h', 0), ('l', 1), ('h', 1)]: + key = ('signal', 'noise')[dd] + if param + '_freq' not in dicts[key]: + raise ValueError( + '%s must be defined in filter parameters for %s' + % (param + '_freq', key)) + val = dicts[key][param + '_freq'] + if not isinstance(val, (int, float)): + _validate_type(val, ('numeric',), f'{key} {param}_freq') + # check freq bands + if (filt_params_noise['l_freq'] > filt_params_signal['l_freq'] or + filt_params_signal['h_freq'] > filt_params_noise['h_freq']): + raise ValueError('Wrongly specified frequency bands!\n' + 'The signal band-pass must be within the noise ' + 'band-pass!') + self.picks_ = _picks_to_idx(info, picks, none='data', exclude='bads') + del picks + ch_types = _get_channel_types(info, picks=self.picks_, unique=True) + if len(ch_types) > 1: + raise ValueError('At this point SSD only supports fitting ' + 'single channel types. Your info has %i types' % + (len(ch_types))) + self.info = info + self.freqs_signal = (filt_params_signal['l_freq'], + filt_params_signal['h_freq']) + self.freqs_noise = (filt_params_noise['l_freq'], + filt_params_noise['h_freq']) + self.filt_params_signal = filt_params_signal + self.filt_params_noise = filt_params_noise + # check if boolean + if not isinstance(sort_by_spectral_ratio, (bool)): + raise ValueError('sort_by_spectral_ratio must be boolean') + self.sort_by_spectral_ratio = sort_by_spectral_ratio + if n_fft is None: + self.n_fft = int(self.info['sfreq']) + else: + self.n_fft = int(n_fft) + # check if boolean + if not isinstance(return_filtered, (bool)): + raise ValueError('return_filtered must be boolean') + self.return_filtered = return_filtered + self.reg = reg + self.n_components = n_components + self.rank = rank + self.cov_method_params = cov_method_params + + def _check_X(self, X): + """Check input data.""" + _validate_type(X, np.ndarray, 'X') + _check_option('X.ndim', X.ndim, (2, 3)) + n_chan = X.shape[-2] + if n_chan != self.info['nchan']: + raise ValueError('Info must match the input data.' + 'Found %i channels but expected %i.' % + (n_chan, self.info['nchan'])) + + def fit(self, X, y=None): + """Estimate the SSD decomposition on raw or epoched data. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array + obtained from continuous data or 3D array obtained from epoched + data. 
+ y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + self : instance of SSD + Returns the modified instance. + """ + from scipy.linalg import eigh + self._check_X(X) + X_aux = X[..., self.picks_, :] + + X_signal = filter_data( + X_aux, self.info['sfreq'], **self.filt_params_signal) + X_noise = filter_data( + X_aux, self.info['sfreq'], **self.filt_params_noise) + X_noise -= X_signal + if X.ndim == 3: + X_signal = np.hstack(X_signal) + X_noise = np.hstack(X_noise) + + cov_signal = _regularized_covariance( + X_signal, reg=self.reg, method_params=self.cov_method_params, + rank=self.rank, info=self.info) + cov_noise = _regularized_covariance( + X_noise, reg=self.reg, method_params=self.cov_method_params, + rank=self.rank, info=self.info) + + eigvals_, eigvects_ = eigh(cov_signal, cov_noise) + # sort in descending order + ix = np.argsort(eigvals_)[::-1] + self.eigvals_ = eigvals_[ix] + self.filters_ = eigvects_[:, ix] + self.patterns_ = np.linalg.pinv(self.filters_) + # We assume that ordering by spectral ratio is more important + # than the initial ordering. This ordering should also be learned + # when fitting. + X_ssd = self.filters_.T @ X[..., self.picks_, :] + sorter_spec = Ellipsis + if self.sort_by_spectral_ratio: + _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) + self.sorter_spec = sorter_spec + return self + + def transform(self, X): + """Estimate epochs sources given the SSD filters. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array + obtained from continuous data or 3D array obtained from epoched + data. + + Returns + ------- + X_ssd : array, shape ([n_epochs, ]n_components, n_times) + The processed data. + """ + self._check_X(X) + if self.filters_ is None: + raise RuntimeError('No filters available. Please first call fit') + if self.return_filtered: + X_aux = X[..., self.picks_, :] + X = filter_data(X_aux, self.info['sfreq'], + **self.filt_params_signal) + X_ssd = self.filters_.T @ X[..., self.picks_, :] + if X.ndim == 2: + X_ssd = X_ssd[self.sorter_spec][:self.n_components] + else: + X_ssd = X_ssd[:, self.sorter_spec, :][:, :self.n_components, :] + return X_ssd + + def get_spectral_ratio(self, ssd_sources): + """Get the spectral signal-to-noise ratio for each spatial filter. + + Spectral ratio measure for selecting the best n_components. + See :footcite:`NikulinEtAl2011`, Eq. (24). + + Parameters + ---------- + ssd_sources : array + Data projected to SSD space. + + Returns + ------- + spec_ratio : array, shape (n_channels,) + Array with the spectral ratio value for each component. + sorter_spec : array, shape (n_channels,) + Array of indices for sorting spec_ratio. + + References + ---------- + .. 
footbibliography:: + """ + psd, freqs = psd_array_welch( + ssd_sources, sfreq=self.info['sfreq'], n_fft=self.n_fft) + sig_idx = _time_mask(freqs, *self.freqs_signal) + noise_idx = _time_mask(freqs, *self.freqs_noise) + if psd.ndim == 3: + mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0) + mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0) + spec_ratio = mean_sig / mean_noise + else: + mean_sig = psd[:, sig_idx].mean(axis=1) + mean_noise = psd[:, noise_idx].mean(axis=1) + spec_ratio = mean_sig / mean_noise + sorter_spec = spec_ratio.argsort()[::-1] + return spec_ratio, sorter_spec + + def inverse_transform(self): + """Not implemented yet.""" + raise NotImplementedError('inverse_transform is not yet available.') + + def apply(self, X): + """Remove selected components from the signal. + + This procedure will reconstruct M/EEG signals from which the dynamics + described by the excluded components is subtracted + (denoised by low-rank factorization). + See :footcite:`HaufeEtAl2014b` for more information. + + .. note:: Unlike in other classes with an apply method, + only NumPy arrays are supported (not instances of MNE objects). + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array + obtained from continuous data or 3D array obtained from epoched + data. + + Returns + ------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The processed data. + """ + X_ssd = self.transform(X) + pick_patterns = self.patterns_[self.sorter_spec][:self.n_components].T + X = pick_patterns @ X_ssd + return X diff --git a/python/libs/mne/decoding/tests/__init__.py b/python/libs/mne/decoding/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/decoding/tests/test_base.py b/python/libs/mne/decoding/tests/test_base.py new file mode 100644 index 0000000..81d33f5 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_base.py @@ -0,0 +1,427 @@ +# Author: Jean-Remi King, +# Marijn van Vliet, +# +# License: BSD-3-Clause + +import numpy as np +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_equal, assert_allclose, assert_array_less) +import pytest + +from mne import create_info, EpochsArray +from mne.fixes import is_regressor, is_classifier +from mne.utils import requires_sklearn, requires_version +from mne.decoding.base import (_get_inverse_funcs, LinearModel, get_coef, + cross_val_multiscore, BaseEstimator) +from mne.decoding.search_light import SlidingEstimator +from mne.decoding import (Scaler, TransformerMixin, Vectorizer, + GeneralizingEstimator) + + +def _make_data(n_samples=1000, n_features=5, n_targets=3): + """Generate some testing data. + + Parameters + ---------- + n_samples : int + The number of samples. + n_features : int + The number of features. + n_targets : int + The number of targets. + + Returns + ------- + X : ndarray, shape (n_samples, n_features) + The measured data. + Y : ndarray, shape (n_samples, n_targets) + The latent variables generating the data. + A : ndarray, shape (n_features, n_targets) + The forward model, mapping the latent variables (=Y) to the measured + data (=X). + """ + # Define Y latent factors + np.random.seed(0) + cov_Y = np.eye(n_targets) * 10 + np.random.rand(n_targets, n_targets) + cov_Y = (cov_Y + cov_Y.T) / 2. 
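+ # (editor's note) averaging cov_Y with its transpose above symmetrizes + # it, so np.random.multivariate_normal below receives a valid + # covariance matrix.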
+ mean_Y = np.random.rand(n_targets) + Y = np.random.multivariate_normal(mean_Y, cov_Y, size=n_samples) + + # The Forward model + A = np.random.randn(n_features, n_targets) + + X = Y.dot(A.T) + X += np.random.randn(n_samples, n_features) # add noise + X += np.random.rand(n_features) # Put an offset + + return X, Y, A + + +@requires_sklearn +def test_get_coef(): + """Test getting linear coefficients (filters/patterns) from estimators.""" + from sklearn.base import TransformerMixin, BaseEstimator + from sklearn.pipeline import make_pipeline + from sklearn.preprocessing import StandardScaler + from sklearn import svm + from sklearn.linear_model import Ridge + from sklearn.model_selection import GridSearchCV + + lm_classification = LinearModel() + assert (is_classifier(lm_classification)) + + lm_regression = LinearModel(Ridge()) + assert (is_regressor(lm_regression)) + + parameters = {'kernel': ['linear'], 'C': [1, 10]} + lm_gs_classification = LinearModel( + GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1)) + assert (is_classifier(lm_gs_classification)) + + lm_gs_regression = LinearModel( + GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1)) + assert (is_regressor(lm_gs_regression)) + + # Define a classifier, an invertible transformer and an non-invertible one. + + class Clf(BaseEstimator): + def fit(self, X, y): + return self + + class NoInv(TransformerMixin): + def fit(self, X, y): + return self + + def transform(self, X): + return X + + class Inv(NoInv): + def inverse_transform(self, X): + return X + + X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1) + + # I. Test inverse function + + # Check that we retrieve the right number of inverse functions even if + # there are nested pipelines + good_estimators = [ + (1, make_pipeline(Inv(), Clf())), + (2, make_pipeline(Inv(), Inv(), Clf())), + (3, make_pipeline(Inv(), make_pipeline(Inv(), Inv()), Clf())), + ] + + for expected_n, est in good_estimators: + est.fit(X, y) + assert (expected_n == len(_get_inverse_funcs(est))) + + bad_estimators = [ + Clf(), # no preprocessing + Inv(), # final estimator isn't classifier + make_pipeline(NoInv(), Clf()), # first step isn't invertible + make_pipeline(Inv(), make_pipeline( + Inv(), NoInv()), Clf()), # nested step isn't invertible + ] + for est in bad_estimators: + est.fit(X, y) + invs = _get_inverse_funcs(est) + assert_equal(invs, list()) + + # II. Test get coef for classification/regression estimators and pipelines + rng = np.random.RandomState(0) + for clf in (lm_regression, + lm_gs_classification, + make_pipeline(StandardScaler(), lm_classification), + make_pipeline(StandardScaler(), lm_gs_regression)): + + # generate some categorical/continuous data + # according to the type of estimator. 
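+ # (editor's note) is_classifier() routes the classifying estimators to + # the binary targets built here; the regression pipelines get the + # continuous targets from _make_data in the else branch.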
+ if is_classifier(clf): + n, n_features = 1000, 3 + X = rng.rand(n, n_features) + y = np.arange(n) % 2 + else: + X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1) + y = np.ravel(y) + + clf.fit(X, y) + + # Retrieve final linear model + filters = get_coef(clf, 'filters_', False) + if hasattr(clf, 'steps'): + if hasattr(clf.steps[-1][-1].model, 'best_estimator_'): + # Linear Model with GridSearchCV + coefs = clf.steps[-1][-1].model.best_estimator_.coef_ + else: + # Standard Linear Model + coefs = clf.steps[-1][-1].model.coef_ + else: + if hasattr(clf.model, 'best_estimator_'): + # Linear Model with GridSearchCV + coefs = clf.model.best_estimator_.coef_ + else: + # Standard Linear Model + coefs = clf.model.coef_ + if coefs.ndim == 2 and coefs.shape[0] == 1: + coefs = coefs[0] + assert_array_equal(filters, coefs) + patterns = get_coef(clf, 'patterns_', False) + assert (filters[0] != patterns[0]) + n_chans = X.shape[1] + assert_array_equal(filters.shape, patterns.shape, [n_chans, n_chans]) + + # Inverse transform linear model + filters_inv = get_coef(clf, 'filters_', True) + assert (filters[0] != filters_inv[0]) + patterns_inv = get_coef(clf, 'patterns_', True) + assert (patterns[0] != patterns_inv[0]) + + +class _Noop(BaseEstimator, TransformerMixin): + + def fit(self, X, y=None): + return self + + def transform(self, X): + return X.copy() + + inverse_transform = transform + + +@requires_sklearn +@pytest.mark.parametrize('inverse', (True, False)) +@pytest.mark.parametrize('Scale, kwargs', [ + (Scaler, dict(info=None, scalings='mean')), + (_Noop, dict()), +]) +def test_get_coef_inverse_transform(inverse, Scale, kwargs): + """Test get_coef with and without inverse_transform.""" + from sklearn.linear_model import Ridge + from sklearn.pipeline import make_pipeline + lm_regression = LinearModel(Ridge()) + X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1) + # Check with search_light and combination of preprocessing ending with sl: + # slider = SlidingEstimator(make_pipeline(StandardScaler(), lm_regression)) + # XXX : line above should work but does not as only last step is + # used in get_coef ... 
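+ # (editor's note) as a workaround, only the bare LinearModel is wrapped + # in the slider below, and the scaling step is kept outside the slider + # so that get_coef can still invert it.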
+ slider = SlidingEstimator(make_pipeline(lm_regression)) + X = np.transpose([X, -X], [1, 2, 0]) # invert X across 2 time samples + clf = make_pipeline(Scale(**kwargs), slider) + clf.fit(X, y) + patterns = get_coef(clf, 'patterns_', inverse) + filters = get_coef(clf, 'filters_', inverse) + assert_array_equal(filters.shape, patterns.shape, X.shape[1:]) + # the two time samples get inverted patterns + assert_equal(patterns[0, 0], -patterns[0, 1]) + for t in [0, 1]: + filters_t = get_coef( + clf.named_steps['slidingestimator'].estimators_[t], + 'filters_', False) + if Scale is _Noop: + assert_array_equal(filters_t, filters[:, t]) + + +@requires_sklearn +@pytest.mark.parametrize('n_features', [1, 5]) +@pytest.mark.parametrize('n_targets', [1, 3]) +def test_get_coef_multiclass(n_features, n_targets): + """Test get_coef on multiclass problems.""" + # Check patterns with more than 1 regressor + from sklearn.linear_model import LinearRegression, Ridge + from sklearn.pipeline import make_pipeline + X, Y, A = _make_data( + n_samples=30000, n_features=n_features, n_targets=n_targets) + lm = LinearModel(LinearRegression()).fit(X, Y) + assert_array_equal(lm.filters_.shape, lm.patterns_.shape) + if n_targets == 1: + want_shape = (n_features,) + else: + want_shape = (n_targets, n_features) + assert_array_equal(lm.filters_.shape, want_shape) + if n_features > 1 and n_targets > 1: + assert_array_almost_equal(A, lm.patterns_.T, decimal=2) + lm = LinearModel(Ridge(alpha=0)) + clf = make_pipeline(lm) + clf.fit(X, Y) + if n_features > 1 and n_targets > 1: + assert_allclose(A, lm.patterns_.T, atol=2e-2) + coef = get_coef(clf, 'patterns_', inverse_transform=True) + assert_allclose(lm.patterns_, coef, atol=1e-5) + + # With epochs, scaler, and vectorizer (typical use case) + X_epo = X.reshape(X.shape + (1,)) + info = create_info(n_features, 1000., 'eeg') + lm = LinearModel(Ridge(alpha=1)) + clf = make_pipeline( + Scaler(info, scalings=dict(eeg=1.)), # XXX adding this step breaks + Vectorizer(), + lm, + ) + clf.fit(X_epo, Y) + if n_features > 1 and n_targets > 1: + assert_allclose(A, lm.patterns_.T, atol=2e-2) + coef = get_coef(clf, 'patterns_', inverse_transform=True) + lm_patterns_ = lm.patterns_[..., np.newaxis] + assert_allclose(lm_patterns_, coef, atol=1e-5) + + # Check can pass fitting parameters + lm.fit(X, Y, sample_weight=np.ones(len(Y))) + + +@requires_version('sklearn', '0.22') # roc_auc_ovr_weighted +@pytest.mark.parametrize('n_classes, n_channels, n_times', [ + (4, 10, 2), + (4, 3, 2), + (3, 2, 1), + (3, 1, 2), +]) +def test_get_coef_multiclass_full(n_classes, n_channels, n_times): + """Test a full example with pattern extraction.""" + from sklearn.pipeline import make_pipeline + from sklearn.linear_model import LogisticRegression + from sklearn.model_selection import StratifiedKFold + data = np.zeros((10 * n_classes, n_channels, n_times)) + # Make only the first channel informative + for ii in range(n_classes): + data[ii * 10:(ii + 1) * 10, 0] = ii + events = np.zeros((len(data), 3), int) + events[:, 0] = np.arange(len(events)) + events[:, 2] = data[:, 0, 0] + info = create_info(n_channels, 1000., 'eeg') + epochs = EpochsArray(data, info, events, tmin=0) + clf = make_pipeline( + Scaler(epochs.info), Vectorizer(), + LinearModel(LogisticRegression(random_state=0, multi_class='ovr')), + ) + scorer = 'roc_auc_ovr_weighted' + time_gen = GeneralizingEstimator(clf, scorer, verbose=True) + X = epochs.get_data() + y = epochs.events[:, 2] + n_splits = 3 + cv = StratifiedKFold(n_splits=n_splits) + scores = 
cross_val_multiscore(time_gen, X, y, cv=cv, verbose=True) + want = (n_splits,) + if n_times > 1: + want += (n_times, n_times) + assert scores.shape == want + assert_array_less(0.8, scores) + clf.fit(X, y) + patterns = get_coef(clf, 'patterns_', inverse_transform=True) + assert patterns.shape == (n_classes, n_channels, n_times) + assert_allclose(patterns[:, 1:], 0., atol=1e-7) # no other channels useful + + +@requires_sklearn +def test_linearmodel(): + """Test LinearModel class for computing filters and patterns.""" + # check categorical target fit in standard linear model + from sklearn.linear_model import LinearRegression + rng = np.random.RandomState(0) + clf = LinearModel() + n, n_features = 20, 3 + X = rng.rand(n, n_features) + y = np.arange(n) % 2 + clf.fit(X, y) + assert_equal(clf.filters_.shape, (n_features,)) + assert_equal(clf.patterns_.shape, (n_features,)) + with pytest.raises(ValueError): + wrong_X = rng.rand(n, n_features, 99) + clf.fit(wrong_X, y) + + # check categorical target fit in standard linear model with GridSearchCV + from sklearn import svm + from sklearn.model_selection import GridSearchCV + parameters = {'kernel': ['linear'], 'C': [1, 10]} + clf = LinearModel( + GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1)) + clf.fit(X, y) + assert_equal(clf.filters_.shape, (n_features,)) + assert_equal(clf.patterns_.shape, (n_features,)) + with pytest.raises(ValueError): + wrong_X = rng.rand(n, n_features, 99) + clf.fit(wrong_X, y) + + # check continuous target fit in standard linear model with GridSearchCV + n_targets = 1 + Y = rng.rand(n, n_targets) + clf = LinearModel( + GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1)) + clf.fit(X, y) + assert_equal(clf.filters_.shape, (n_features, )) + assert_equal(clf.patterns_.shape, (n_features, )) + with pytest.raises(ValueError): + wrong_y = rng.rand(n, n_features, 99) + clf.fit(X, wrong_y) + + # check multi-target fit in standard linear model + n_targets = 5 + Y = rng.rand(n, n_targets) + clf = LinearModel(LinearRegression()) + clf.fit(X, Y) + assert_equal(clf.filters_.shape, (n_targets, n_features)) + assert_equal(clf.patterns_.shape, (n_targets, n_features)) + with pytest.raises(ValueError): + wrong_y = rng.rand(n, n_features, 99) + clf.fit(X, wrong_y) + + +@requires_sklearn +def test_cross_val_multiscore(): + """Test cross_val_multiscore for computing scores on decoding over time.""" + from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score + from sklearn.linear_model import LogisticRegression, LinearRegression + + logreg = LogisticRegression(solver='liblinear', random_state=0) + + # compare to cross-val-score + X = np.random.rand(20, 3) + y = np.arange(20) % 2 + cv = KFold(2, random_state=0, shuffle=True) + clf = logreg + assert_array_equal(cross_val_score(clf, X, y, cv=cv), + cross_val_multiscore(clf, X, y, cv=cv)) + + # Test with search light + X = np.random.rand(20, 4, 3) + y = np.arange(20) % 2 + clf = SlidingEstimator(logreg, scoring='accuracy') + scores_acc = cross_val_multiscore(clf, X, y, cv=cv) + assert_array_equal(np.shape(scores_acc), [2, 3]) + + # check values + scores_acc_manual = list() + for train, test in cv.split(X, y): + clf.fit(X[train], y[train]) + scores_acc_manual.append(clf.score(X[test], y[test])) + assert_array_equal(scores_acc, scores_acc_manual) + + # check scoring metric + # raise an error if scoring is defined at cross-val-score level and + # search light, because search light does not return a 1-dimensional + # prediction. 
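+ # (editor's note) the scorer must instead be passed to SlidingEstimator + # itself, as done with scoring='roc_auc' a few lines below.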
+ pytest.raises(ValueError, cross_val_multiscore, clf, X, y, cv=cv, + scoring='roc_auc') + clf = SlidingEstimator(logreg, scoring='roc_auc') + scores_auc = cross_val_multiscore(clf, X, y, cv=cv, n_jobs=1) + scores_auc_manual = list() + for train, test in cv.split(X, y): + clf.fit(X[train], y[train]) + scores_auc_manual.append(clf.score(X[test], y[test])) + assert_array_equal(scores_auc, scores_auc_manual) + + # indirectly test that cross_val_multiscore rightly detects the type of + # estimator and generates a StratifiedKFold for classifiers and a KFold + # otherwise + X = np.random.randn(1000, 3) + y = np.ones(1000, dtype=int) + y[::2] = 0 + clf = logreg + reg = LinearRegression() + for cross_val in (cross_val_score, cross_val_multiscore): + manual = cross_val(clf, X, y, cv=StratifiedKFold(2)) + auto = cross_val(clf, X, y, cv=2) + assert_array_equal(manual, auto) + + manual = cross_val(reg, X, y, cv=KFold(2)) + auto = cross_val(reg, X, y, cv=2) + assert_array_equal(manual, auto) diff --git a/python/libs/mne/decoding/tests/test_csp.py b/python/libs/mne/decoding/tests/test_csp.py new file mode 100644 index 0000000..f995811 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_csp.py @@ -0,0 +1,358 @@ +# Authors: Alexandre Gramfort + # Romain Trachel + # Alexandre Barachant + # Jean-Remi King + # + # License: BSD-3-Clause + + import os.path as op + + import numpy as np + import pytest + from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_equal) + + from mne import io, Epochs, read_events, pick_types + from mne.decoding.csp import CSP, _ajd_pham, SPoC + from mne.utils import requires_sklearn + + data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') + raw_fname = op.join(data_dir, 'test_raw.fif') + event_name = op.join(data_dir, 'test-eve.fif') + + tmin, tmax = -0.2, 0.5 + event_id = dict(aud_l=1, vis_l=3) + # if stop is too small pca may fail in some cases, but we're okay on this file + start, stop = 0, 8 + + + def simulate_data(target, n_trials=100, n_channels=10, random_state=42): + """Simulate data according to an instantaneous mixing model. + + Data are simulated in the statistical source space, where one source is + modulated according to a target variable, before being mixed with a + random mixing matrix. + """ + rs = np.random.RandomState(random_state) + + # generate an orthogonal mixing matrix + mixing_mat = np.linalg.svd(rs.randn(n_channels, n_channels))[0] + + S = rs.randn(n_trials, n_channels, 50) + S[:, 0] *= np.atleast_2d(np.sqrt(target)).T + S[:, 1:] *= 0.01 # less noise + + X = np.dot(mixing_mat, S).transpose((1, 0, 2)) + + return X, mixing_mat + + + def deterministic_toy_data(classes=('class_a', 'class_b')): + """Generate a small deterministic toy data set. + + Four independent sources are modulated by the target class and mixed + into signal space. 
+ """ + sources_a = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]], + dtype=float) * 2 - 1 + + sources_b = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]], + dtype=float) * 2 - 1 + + sources_a[0, :] *= 1 + sources_a[1, :] *= 2 + + sources_b[2, :] *= 3 + sources_b[3, :] *= 4 + + mixing = np.array([[1.0, 0.8, 0.6, 0.4], + [0.8, 1.0, 0.8, 0.6], + [0.6, 0.8, 1.0, 0.8], + [0.4, 0.6, 0.8, 1.0]]) + + x_class_a = mixing @ sources_a + x_class_b = mixing @ sources_b + + x = np.stack([x_class_a, x_class_b]) + y = np.array(classes) + + return x, y + + +@pytest.mark.slowtest +def test_csp(): + """Test Common Spatial Patterns algorithm on epochs.""" + raw = io.read_raw_fif(raw_fname, preload=False) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[2:12:3] # subselect channels -> disable proj! + raw.add_proj([], remove_existing=True) + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True, proj=False) + epochs_data = epochs.get_data() + n_channels = epochs_data.shape[1] + y = epochs.events[:, -1] + + # Init + pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False) + for reg in ['foo', -0.1, 1.1]: + csp = CSP(reg=reg, norm_trace=False) + pytest.raises(ValueError, csp.fit, epochs_data, epochs.events[:, -1]) + for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]: + CSP(reg=reg, norm_trace=False) + for cov_est in ['foo', None]: + pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False) + with pytest.raises(TypeError, match='instance of bool'): + CSP(norm_trace='foo') + for cov_est in ['concat', 'epoch']: + CSP(cov_est=cov_est, norm_trace=False) + + n_components = 3 + # Fit + for norm_trace in [True, False]: + csp = CSP(n_components=n_components, norm_trace=norm_trace) + csp.fit(epochs_data, epochs.events[:, -1]) + + assert_equal(len(csp.mean_), n_components) + assert_equal(len(csp.std_), n_components) + + # Transform + X = csp.fit_transform(epochs_data, y) + sources = csp.transform(epochs_data) + assert (sources.shape[1] == n_components) + assert (csp.filters_.shape == (n_channels, n_channels)) + assert (csp.patterns_.shape == (n_channels, n_channels)) + assert_array_almost_equal(sources, X) + + # Test data exception + pytest.raises(ValueError, csp.fit, epochs_data, + np.zeros_like(epochs.events)) + pytest.raises(ValueError, csp.fit, epochs, y) + pytest.raises(ValueError, csp.transform, epochs) + + # Test plots + epochs.pick_types(meg='mag') + cmap = ('RdBu', True) + components = np.arange(n_components) + for plot in (csp.plot_patterns, csp.plot_filters): + plot(epochs.info, components=components, res=12, show=False, cmap=cmap) + + # Test with more than 2 classes + epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks, + event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4), + baseline=(None, 0), proj=False, preload=True) + epochs_data = epochs.get_data() + n_channels = epochs_data.shape[1] + + n_channels = epochs_data.shape[1] + for cov_est in ['concat', 'epoch']: + csp = CSP(n_components=n_components, cov_est=cov_est, norm_trace=False) + csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data) + 
assert_equal(len(csp._classes), 4) + assert_array_equal(csp.filters_.shape, [n_channels, n_channels]) + assert_array_equal(csp.patterns_.shape, [n_channels, n_channels]) + + # Test average power transform + n_components = 2 + assert (csp.transform_into == 'average_power') + feature_shape = [len(epochs_data), n_components] + X_trans = dict() + for log in (None, True, False): + csp = CSP(n_components=n_components, log=log, norm_trace=False) + assert (csp.log is log) + Xt = csp.fit_transform(epochs_data, epochs.events[:, 2]) + assert_array_equal(Xt.shape, feature_shape) + X_trans[str(log)] = Xt + # log=None => log=True + assert_array_almost_equal(X_trans['None'], X_trans['True']) + # Different normalization return different transform + assert (np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.) + # Check wrong inputs + pytest.raises(ValueError, CSP, transform_into='average_power', log='foo') + + # Test csp space transform + csp = CSP(transform_into='csp_space', norm_trace=False) + assert (csp.transform_into == 'csp_space') + for log in ('foo', True, False): + pytest.raises(ValueError, CSP, transform_into='csp_space', log=log, + norm_trace=False) + n_components = 2 + csp = CSP(n_components=n_components, transform_into='csp_space', + norm_trace=False) + Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data) + feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]] + assert_array_equal(Xt.shape, feature_shape) + + # Check mixing matrix on simulated data + y = np.array([100] * 50 + [1] * 50) + X, A = simulate_data(y) + + for cov_est in ['concat', 'epoch']: + # fit csp + csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False) + csp.fit(X, y) + + # check the first pattern match the mixing matrix + # the sign might change + corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1]) + assert np.abs(corr) > 0.99 + + # check output + out = csp.transform(X) + corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1]) + assert np.abs(corr) > 0.95 + + +@requires_sklearn +def test_regularized_csp(): + """Test Common Spatial Patterns algorithm using regularized covariance.""" + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + epochs_data = epochs.get_data() + n_channels = epochs_data.shape[1] + + n_components = 3 + reg_cov = [None, 0.05, 'ledoit_wolf', 'oas'] + for reg in reg_cov: + csp = CSP(n_components=n_components, reg=reg, norm_trace=False, + rank=None) + csp.fit(epochs_data, epochs.events[:, -1]) + y = epochs.events[:, -1] + X = csp.fit_transform(epochs_data, y) + assert (csp.filters_.shape == (n_channels, n_channels)) + assert (csp.patterns_.shape == (n_channels, n_channels)) + assert_array_almost_equal(csp.fit(epochs_data, y). 
+ transform(epochs_data), X) + + # test init exception + pytest.raises(ValueError, csp.fit, epochs_data, + np.zeros_like(epochs.events)) + pytest.raises(ValueError, csp.fit, epochs, y) + pytest.raises(ValueError, csp.transform, epochs) + + csp.n_components = n_components + sources = csp.transform(epochs_data) + assert (sources.shape[1] == n_components) + + + @requires_sklearn + def test_csp_pipeline(): + """Test if CSP works in a pipeline.""" + from sklearn.svm import SVC + from sklearn.pipeline import Pipeline + csp = CSP(reg=1, norm_trace=False) + svc = SVC() + pipe = Pipeline([("CSP", csp), ("SVC", svc)]) + pipe.set_params(CSP__reg=0.2) + assert (pipe.get_params()["CSP__reg"] == 0.2) + + + def test_ajd(): + """Test approximate joint diagonalization.""" + # The implementation should obtain the same results as the Matlab + # implementation by Pham Dinh-Tuan. + # Generate a set of covariance matrices for test purposes. + n_times, n_channels = 10, 3 + seed = np.random.RandomState(0) + diags = 2.0 + 0.1 * seed.randn(n_times, n_channels) + A = 2 * seed.rand(n_channels, n_channels) - 1 + A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T + covmats = np.empty((n_times, n_channels, n_channels)) + for i in range(n_times): + covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T) + V, D = _ajd_pham(covmats) + # Results obtained with the original Matlab implementation + V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574], + [0.694689013234610, 0.775690358505945, -1.162043086446043], + [-0.592603135588066, -0.598996925696260, 1.009550086271192]] + assert_array_almost_equal(V, V_matlab) + + + def test_spoc(): + """Test SPoC.""" + X = np.random.randn(10, 10, 20) + y = np.random.randn(10) + + spoc = SPoC(n_components=4) + spoc.fit(X, y) + Xt = spoc.transform(X) + assert_array_equal(Xt.shape, [10, 4]) + spoc = SPoC(n_components=4, transform_into='csp_space') + spoc.fit(X, y) + Xt = spoc.transform(X) + assert_array_equal(Xt.shape, [10, 4, 20]) + assert_array_equal(spoc.filters_.shape, [10, 10]) + assert_array_equal(spoc.patterns_.shape, [10, 10]) + + # check y + pytest.raises(ValueError, spoc.fit, X, y * 0) + + # Check that SPoC rejects CSP-specific input + pytest.raises(TypeError, SPoC, cov_est='epoch') + + # Check mixing matrix on simulated data + rs = np.random.RandomState(42) + y = rs.rand(100) * 50 + 1 + X, A = simulate_data(y) + + # fit spoc + spoc = SPoC(n_components=1) + spoc.fit(X, y) + + # check that the first pattern matches the mixing matrix + corr = np.abs(np.corrcoef(spoc.patterns_[0, :].T, A[:, 0])[0, 1]) + assert np.abs(corr) > 0.99 + + # check output + out = spoc.transform(X) + corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1]) + assert np.abs(corr) > 0.85 + + + def test_csp_twoclass_symmetry(): + """Test that CSP is symmetric when swapping classes.""" + x, y = deterministic_toy_data(['class_a', 'class_b']) + csp = CSP(norm_trace=False, transform_into='average_power', log=True) + log_power = csp.fit_transform(x, y) + log_power_ratio_ab = log_power[0] - log_power[1] + + x, y = deterministic_toy_data(['class_b', 'class_a']) + csp = CSP(norm_trace=False, transform_into='average_power', log=True) + log_power = csp.fit_transform(x, y) + log_power_ratio_ba = log_power[0] - log_power[1] + + assert_array_almost_equal(log_power_ratio_ab, + log_power_ratio_ba) + + + def test_csp_component_ordering(): + """Test that CSP component ordering works as expected.""" + x, y = deterministic_toy_data(['class_a', 'class_b']) + + pytest.raises(ValueError, CSP, component_order='invalid') + + # 
component_order='alternate' only works with two classes + csp = CSP(component_order='alternate') + with pytest.raises(ValueError): + csp.fit(np.zeros((3, 0, 0)), ['a', 'b', 'c']) + + p_alt = CSP(component_order='alternate').fit(x, y).patterns_ + p_mut = CSP(component_order='mutual_info').fit(x, y).patterns_ + + # This permutation of p_alt and p_mut is explained by the particular + # eigenvalues of the toy data: [0.06, 0.1, 0.5, 0.8]. + # p_alt arranges them to [0.8, 0.06, 0.5, 0.1] + # p_mut arranges them to [0.06, 0.1, 0.8, 0.5] + assert_array_almost_equal(p_alt, p_mut[[2, 0, 3, 1]]) diff --git a/python/libs/mne/decoding/tests/test_ems.py b/python/libs/mne/decoding/tests/test_ems.py new file mode 100644 index 0000000..96b8902 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_ems.py @@ -0,0 +1,84 @@ +# Author: Denis A. Engemann +# +# License: BSD-3-Clause + +import os.path as op +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +import pytest + +from mne import io, Epochs, read_events, pick_types +from mne.utils import requires_sklearn +from mne.decoding import compute_ems, EMS + +data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +curdir = op.join(op.dirname(__file__)) + +raw_fname = op.join(data_dir, 'test_raw.fif') +event_name = op.join(data_dir, 'test-eve.fif') + +tmin, tmax = -0.2, 0.5 +event_id = dict(aud_l=1, vis_l=3) + + +@requires_sklearn +def test_ems(): + """Test event-matched spatial filters.""" + from sklearn.model_selection import StratifiedKFold + raw = io.read_raw_fif(raw_fname, preload=False) + + # create unequal number of events + events = read_events(event_name) + events[-2, 2] = 3 + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l']) + epochs.equalize_event_counts(epochs.event_id) + + pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah']) + surrogates, filters, conditions = compute_ems(epochs) + assert_equal(list(set(conditions)), [1, 3]) + + events = read_events(event_name) + event_id2 = dict(aud_l=1, aud_r=2, vis_l=3) + epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + epochs.equalize_event_counts(epochs.event_id) + + n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']]) + + pytest.raises(ValueError, compute_ems, epochs) + surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l']) + assert_equal(n_expected, len(surrogates)) + assert_equal(n_expected, len(conditions)) + assert_equal(list(set(conditions)), [2, 3]) + + # test compute_ems cv + epochs = epochs['aud_r', 'vis_l'] + epochs.equalize_event_counts(epochs.event_id) + cv = StratifiedKFold(n_splits=3) + compute_ems(epochs, cv=cv) + compute_ems(epochs, cv=2) + pytest.raises(ValueError, compute_ems, epochs, cv='foo') + pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1) + raw.close() + + # EMS transformer, check that identical to compute_ems + X = epochs.get_data() + y = epochs.events[:, 2] + X = X / np.std(X) # X scaled outside cv in compute_ems + Xt, coefs = list(), list() + ems = EMS() + assert_equal(ems.__repr__(), '<EMS: not fitted.>') + # manual leave-one-out to avoid sklearn version problem + for test in range(len(y)): + train = np.setdiff1d(range(len(y)), np.atleast_1d(test)) + ems.fit(X[train], y[train]) +
coefs.append(ems.filters_) + Xt.append(ems.transform(X[[test]])) + assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>') + assert_array_almost_equal(filters, np.mean(coefs, axis=0)) + assert_array_almost_equal(surrogates, np.vstack(Xt)) diff --git a/python/libs/mne/decoding/tests/test_receptive_field.py b/python/libs/mne/decoding/tests/test_receptive_field.py new file mode 100644 index 0000000..98ae1c4 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_receptive_field.py @@ -0,0 +1,545 @@ +# Authors: Chris Holdgraf +# +# License: BSD-3-Clause +import os.path as op + +import pytest +import numpy as np + +from numpy import einsum +from numpy.fft import rfft, irfft +from numpy.testing import assert_array_equal, assert_allclose, assert_equal + +from mne.utils import requires_sklearn +from mne.decoding import ReceptiveField, TimeDelayingRidge +from mne.decoding.receptive_field import (_delay_time_series, _SCORERS, + _times_to_delays, _delays_to_slice) +from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors, + _compute_corrs) + + +data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +raw_fname = op.join(data_dir, 'test_raw.fif') +event_name = op.join(data_dir, 'test-eve.fif') + +tmin, tmax = -0.1, 0.5 +event_id = dict(aud_l=1, vis_l=3) + +# Loading raw data +n_jobs_test = (1, 'cuda') + + +def test_compute_reg_neighbors(): + """Test fast calculation of laplacian regularizer.""" + for reg_type in ( + ('ridge', 'ridge'), + ('ridge', 'laplacian'), + ('laplacian', 'ridge'), + ('laplacian', 'laplacian')): + for n_ch_x, n_delays in ( + (1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1), + (2, 2), (2, 3), (3, 2), (3, 3), + (2, 4), (4, 2), (3, 4), (4, 3), (4, 4), + (5, 4), (4, 5), (5, 5), + (20, 9), (9, 20)): + for normed in (True, False): + reg_direct = _compute_reg_neighbors( + n_ch_x, n_delays, reg_type, 'direct', normed=normed) + reg_csgraph = _compute_reg_neighbors( + n_ch_x, n_delays, reg_type, 'csgraph', normed=normed) + assert_allclose( + reg_direct, reg_csgraph, atol=1e-7, + err_msg='%s: %s' % (reg_type, (n_ch_x, n_delays))) + + +@requires_sklearn +def test_rank_deficiency(): + """Test signals that are rank deficient.""" + # See GH#4253 + from sklearn.linear_model import Ridge + N = 256 + fs = 1.
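+ # Note: the EEG simulated below is made rank-deficient on purpose (a hard + # low-pass applied in the frequency domain zeroes out the upper bins), so + # this checks that both sklearn's Ridge and a plain float alpha, which gets + # promoted to a TimeDelayingRidge estimator, can still recover the kernel.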
+ tmin, tmax = -50, 100 + reg = 0.1 + rng = np.random.RandomState(0) + eeg = rng.randn(N, 1) + eeg *= 100 + eeg = rfft(eeg, axis=0) + eeg[N // 4:] = 0 # rank-deficient lowpass + eeg = irfft(eeg, axis=0) + win = np.hanning(N // 8) + win /= win.mean() + y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same') + y += rng.randn(*y.shape) * 100 + + for est in (Ridge(reg), reg): + rf = ReceptiveField(tmin, tmax, fs, estimator=est, patterns=True) + rf.fit(eeg, y) + pred = rf.predict(eeg) + assert_equal(y.shape, pred.shape) + corr = np.corrcoef(y.ravel(), pred.ravel())[0, 1] + assert corr > 0.995 + + +def test_time_delay(): + """Test that time-delaying w/ times and samples works properly.""" + # Explicit delays + sfreq + X = np.random.RandomState(0).randn(1000, 2) + assert (X == 0).sum() == 0 # need this for later + test_tlims = [ + ((1, 2), 1), + ((1, 1), 1), + ((0, 2), 1), + ((0, 1), 1), + ((0, 0), 1), + ((-1, 2), 1), + ((-1, 1), 1), + ((-1, 0), 1), + ((-1, -1), 1), + ((-2, 2), 1), + ((-2, 1), 1), + ((-2, 0), 1), + ((-2, -1), 1), + ((-2, -1), 1), + ((0, .2), 10), + ((-.1, .1), 10)] + for (tmin, tmax), isfreq in test_tlims: + # sfreq must be int/float + with pytest.raises(TypeError, match='`sfreq` must be an instance of'): + _delay_time_series(X, tmin, tmax, sfreq=[1]) + # Delays must be int/float + with pytest.raises(TypeError, match='.*complex.*'): + _delay_time_series(X, np.complex128(tmin), tmax, 1) + # Make sure swapaxes works + start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1 + n_delays = stop - start + X_delayed = _delay_time_series(X, tmin, tmax, isfreq) + assert_equal(X_delayed.shape, (1000, 2, n_delays)) + # Make sure delay slice is correct + delays = _times_to_delays(tmin, tmax, isfreq) + assert_array_equal(delays, np.arange(start, stop)) + keep = _delays_to_slice(delays) + expected = np.where((X_delayed != 0).all(-1).all(-1))[0] + got = np.arange(len(X_delayed))[keep] + assert_array_equal(got, expected) + assert X_delayed[keep].shape[-1] > 0 + assert (X_delayed[keep] == 0).sum() == 0 + + del_zero = int(round(-tmin * isfreq)) + for ii in range(-2, 3): + idx = del_zero + ii + err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx) + if 0 <= idx < X_delayed.shape[-1]: + if ii == 0: + assert_array_equal(X_delayed[:, :, idx], X, + err_msg=err_msg) + elif ii < 0: # negative delay + assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :], + err_msg=err_msg) + assert_array_equal(X_delayed[ii:, :, idx], 0.) + else: + assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :], + err_msg=err_msg) + assert_array_equal(X_delayed[:ii, :, idx], 0.) 
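The assertions in `test_time_delay` above pin down the delay-embedding convention these files rely on: delay `d` shifts the input forward by `d` samples and pads with zeros. As a reference, here is a minimal, hypothetical re-implementation of that behavior; the name `delay_time_series` and its signature are illustrative only (the real helper is `mne.decoding.receptive_field._delay_time_series`), and the zero-padding convention is taken from the assertions above, not from the MNE source:

```python
import numpy as np


def delay_time_series(X, tmin, tmax, sfreq):
    """Sketch of delay embedding: (n_times, n_feats) -> (n_times, n_feats, n_delays)."""
    delays = np.arange(int(round(tmin * sfreq)), int(round(tmax * sfreq)) + 1)
    n_times, n_feats = X.shape
    out = np.zeros((n_times, n_feats, len(delays)))
    for di, d in enumerate(delays):
        if d == 0:
            out[:, :, di] = X            # zero delay: identity
        elif d > 0:
            out[d:, :, di] = X[:-d]      # positive delay: past samples
        else:
            out[:d, :, di] = X[-d:]      # negative delay: future samples
    return out


X = np.arange(10, dtype=float).reshape(5, 2)
print(delay_time_series(X, -1, 1, 1.).shape)  # (5, 2, 3)
```

Under this convention the only rows free of zero padding are the ones kept by `_delays_to_slice`, which is exactly what the `keep` checks above assert.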
+ + +@pytest.mark.slowtest # slow on Azure +@pytest.mark.parametrize('n_jobs', n_jobs_test) +@requires_sklearn +def test_receptive_field_basic(n_jobs): + """Test model prep and fitting.""" + from sklearn.linear_model import Ridge + # Make sure estimator pulling works + mod = Ridge() + rng = np.random.RandomState(1337) + + # Test the receptive field model + # Define parameters for the model and simulate inputs + weights + tmin, tmax = -10., 0 + n_feats = 3 + rng = np.random.RandomState(0) + X = rng.randn(10000, n_feats) + w = rng.randn(int((tmax - tmin) + 1) * n_feats) + + # Delay inputs and cut off first 4 values since they'll be cut in the fit + X_del = np.concatenate( + _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1) + y = np.dot(X_del, w) + + # Fit the model and test values + feature_names = ['feature_%i' % ii for ii in [0, 1, 2]] + rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod, + patterns=True) + rf.fit(X, y) + assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1)) + + y_pred = rf.predict(X) + assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2) + scores = rf.score(X, y) + assert scores > .99 + assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3) + # Make sure different input shapes work + rf.fit(X[:, np.newaxis:], y[:, np.newaxis]) + rf.fit(X, y[:, np.newaxis]) + with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'): + rf.fit(X[..., np.newaxis], y) + with pytest.raises(ValueError, match='X must be shape'): + rf.fit(X[:, 0], y) + with pytest.raises(ValueError, match='X and y do not have the same n_epo'): + rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis], + [1, 2, 1])) + with pytest.raises(ValueError, match='X and y do not have the same n_tim'): + rf.fit(X, y[:-2]) + with pytest.raises(ValueError, match='n_features in X does not match'): + rf.fit(X[:, :1], y) + # auto-naming features + feature_names = ['feature_%s' % ii for ii in [0, 1, 2]] + rf = ReceptiveField(tmin, tmax, 1, estimator=mod, + feature_names=feature_names) + assert_equal(rf.feature_names, feature_names) + rf = ReceptiveField(tmin, tmax, 1, estimator=mod) + rf.fit(X, y) + assert_equal(rf.feature_names, None) + # Float becomes ridge + rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0) + str(rf) # repr works before fit + rf.fit(X, y) + assert isinstance(rf.estimator_, TimeDelayingRidge) + str(rf) # repr works after fit + rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0) + rf.fit(X[:, [0]], y) + str(rf) # repr with one feature + # Should only accept estimators or floats + with pytest.raises(ValueError, match='`estimator` must be a float or'): + ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y) + with pytest.raises(ValueError, match='`estimator` must be a float or'): + ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y) + with pytest.raises(ValueError, match='tmin .* must be at most tmax'): + ReceptiveField(5, 4, 1).fit(X, y) + # scorers + for key, val in _SCORERS.items(): + rf = ReceptiveField(tmin, tmax, 1, ['one'], + estimator=0, scoring=key, patterns=True) + rf.fit(X[:, [0]], y) + y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis] + assert_allclose(val(y[:, np.newaxis], y_pred, + multioutput='raw_values'), + rf.score(X[:, [0]], y), rtol=1e-2) + with pytest.raises(ValueError, match='inputs must be shape'): + _SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values') + # Need correct scorers + with pytest.raises(ValueError, match='scoring must be one of'): + 
ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y) + + +@pytest.mark.parametrize('n_jobs', n_jobs_test) +def test_time_delaying_fast_calc(n_jobs): + """Test time delaying and fast calculations.""" + X = np.array([[1, 2, 3], [5, 7, 11]]).T + # all negative + smin, smax = 1, 2 + X_del = _delay_time_series(X, smin, smax, 1.) + # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) + X_del.shape = (X.shape[0], -1) + expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T + assert_allclose(X_del, expected) + Xt_X = np.dot(X_del.T, X_del) + expected = [[5, 2, 19, 10], [2, 1, 7, 5], [19, 7, 74, 35], [10, 5, 35, 25]] + assert_allclose(Xt_X, expected) + x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] + assert_allclose(x_xt, expected) + # all positive + smin, smax = -2, -1 + X_del = _delay_time_series(X, smin, smax, 1.) + X_del.shape = (X.shape[0], -1) + expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T + assert_allclose(X_del, expected) + Xt_X = np.dot(X_del.T, X_del) + expected = [[9, 6, 33, 21], [6, 13, 22, 47], + [33, 22, 121, 77], [21, 47, 77, 170]] + assert_allclose(Xt_X, expected) + x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] + assert_allclose(x_xt, expected) + # both sides + smin, smax = -1, 1 + X_del = _delay_time_series(X, smin, smax, 1.) + X_del.shape = (X.shape[0], -1) + expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2], + [7, 11, 0], [5, 7, 11], [0, 5, 7]]).T + assert_allclose(X_del, expected) + Xt_X = np.dot(X_del.T, X_del) + expected = [[13, 8, 3, 47, 31, 15], + [8, 14, 8, 29, 52, 31], + [3, 8, 5, 11, 29, 19], + [47, 29, 11, 170, 112, 55], + [31, 52, 29, 112, 195, 112], + [15, 31, 19, 55, 112, 74]] + assert_allclose(Xt_X, expected) + x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] + assert_allclose(x_xt, expected) + + # slightly harder to get the non-Toeplitz correction correct + X = np.array([[1, 2, 3, 5]]).T + smin, smax = 0, 3 + X_del = _delay_time_series(X, smin, smax, 1.) + X_del.shape = (X.shape[0], -1) + expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], + [0, 0, 1, 2], [0, 0, 0, 1]]).T + assert_allclose(X_del, expected) + Xt_X = np.dot(X_del.T, X_del) + expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]] + assert_allclose(Xt_X, expected) + x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] + assert_allclose(x_xt, expected) + + # even worse + X = np.array([[1, 2, 3], [5, 7, 11]]).T + smin, smax = 0, 2 + X_del = _delay_time_series(X, smin, smax, 1.) 
+ X_del.shape = (X.shape[0], -1) + expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1], + [5, 7, 11], [0, 5, 7], [0, 0, 5]]).T + assert_allclose(X_del, expected) + Xt_X = np.dot(X_del.T, X_del) + expected = np.array([[14, 8, 3, 52, 31, 15], + [8, 5, 2, 29, 19, 10], + [3, 2, 1, 11, 7, 5], + [52, 29, 11, 195, 112, 55], + [31, 19, 7, 112, 74, 35], + [15, 10, 5, 55, 35, 25]]) + assert_allclose(Xt_X, expected) + x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] + assert_allclose(x_xt, expected) + + # And a bunch of random ones for good measure + rng = np.random.RandomState(0) + X = rng.randn(25, 3) + y = np.empty((25, 2)) + vals = (0, -1, 1, -2, 2, -11, 11) + for smax in vals: + for smin in vals: + if smin > smax: + continue + for ii in range(X.shape[1]): + kernel = rng.randn(smax - smin + 1) + kernel -= np.mean(kernel) + y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same') + x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1) + X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False) + x_yt_true = einsum('tfd,to->ofd', X_del, y) + x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T + assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) + X_del.shape = (X.shape[0], -1) + x_xt_true = np.dot(X_del.T, X_del).T + assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) + + +@pytest.mark.parametrize('n_jobs', n_jobs_test) +@requires_sklearn +def test_receptive_field_1d(n_jobs): + """Test that the fast solving works like Ridge.""" + from sklearn.linear_model import Ridge + rng = np.random.RandomState(0) + x = rng.randn(500, 1) + for delay in range(-2, 3): + y = np.zeros(500) + slims = [(-2, 4)] + if delay == 0: + y[:] = x[:, 0] + elif delay < 0: + y[:delay] = x[-delay:, 0] + slims += [(-4, -1)] + else: + y[delay:] = x[:-delay, 0] + slims += [(1, 2)] + for ndim in (1, 2): + y.shape = (y.shape[0],) + (1,) * (ndim - 1) + for slim in slims: + smin, smax = slim + lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', + fit_intercept=False, n_jobs=n_jobs) + for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1, + lap): + for offset in (-100, 0, 100): + model = ReceptiveField(smin, smax, 1., + estimator=estimator, + n_jobs=n_jobs) + use_x = x + offset + model.fit(use_x, y) + if estimator is lap: + continue # these checks are too stringent + assert_allclose(model.estimator_.intercept_, -offset, + atol=1e-1) + assert_array_equal(model.delays_, + np.arange(smin, smax + 1)) + expected = (model.delays_ == delay).astype(float) + expected = expected[np.newaxis] # features + if y.ndim == 2: + expected = expected[np.newaxis] # outputs + assert_equal(model.coef_.ndim, ndim + 1) + assert_allclose(model.coef_, expected, atol=1e-3) + start = model.valid_samples_.start or 0 + stop = len(use_x) - (model.valid_samples_.stop or 0) + assert stop - start >= 495 + assert_allclose( + model.predict(use_x)[model.valid_samples_], + y[model.valid_samples_], atol=1e-2) + score = np.mean(model.score(use_x, y)) + assert score > 0.9999 + + +@pytest.mark.parametrize('n_jobs', n_jobs_test) +@requires_sklearn +def test_receptive_field_nd(n_jobs): + """Test multidimensional support.""" + from sklearn.linear_model import Ridge + # multidimensional + rng = np.random.RandomState(3) + x = rng.randn(1000, 3) + y = np.zeros((1000, 2)) + smin, smax = 0, 5 + # This is a weird assignment, but it's just a way to distribute some + # unique values at various delays, and "expected" explains how they + # should appear in the resulting RF + for ii in range(1, 5): 
+ y[ii:, ii % 2] += (-1) ** ii * ii * x[:-ii, ii % 3] + y -= np.mean(y, axis=0) + x -= np.mean(x, axis=0) + x_off = x + 1e3 + expected = [ + [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 0], + [0, 0, 2, 0, 0, 0]], + [[0, 0, 0, -3, 0, 0], + [0, -1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]], + ] + tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs) + tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs, + edge_correction=False) + for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc), + (1e-3, 1e-3, 1e-3, 5e-3, 5e-2)): + model = ReceptiveField(smin, smax, 1., + estimator=estimator) + model.fit(x, y) + assert_array_equal(model.delays_, + np.arange(smin, smax + 1)) + assert_allclose(model.coef_, expected, atol=atol) + tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo', + n_jobs=n_jobs) + model = ReceptiveField(smin, smax, 1., estimator=tdr) + with pytest.raises(ValueError, match='reg_type entries must be one of'): + model.fit(x, y) + tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'], + n_jobs=n_jobs) + model = ReceptiveField(smin, smax, 1., estimator=tdr) + with pytest.raises(ValueError, match='reg_type must have two elements'): + model.fit(x, y) + model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False) + with pytest.raises(ValueError, match='fit_intercept'): + model.fit(x, y) + + # Now check the intercept_ + tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs) + tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False, + n_jobs=n_jobs) + for estimator in (Ridge(alpha=0.), tdr, + Ridge(alpha=0., fit_intercept=False), tdr_no): + # first with no intercept in the data + model = ReceptiveField(smin, smax, 1., estimator=estimator) + model.fit(x, y) + assert_allclose(model.estimator_.intercept_, 0., atol=1e-7, + err_msg=repr(estimator)) + assert_allclose(model.coef_, expected, atol=1e-3, + err_msg=repr(estimator)) + y_pred = model.predict(x) + assert_allclose(y_pred[model.valid_samples_], + y[model.valid_samples_], + atol=1e-2, err_msg=repr(estimator)) + score = np.mean(model.score(x, y)) + assert score > 0.9999 + + # now with an intercept in the data + model.fit(x_off, y) + if estimator.fit_intercept: + val = [-6000, 4000] + itol = 0.5 + ctol = 5e-4 + else: + val = itol = 0. + ctol = 2. + assert_allclose(model.estimator_.intercept_, val, atol=itol, + err_msg=repr(estimator)) + assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol, + err_msg=repr(estimator)) + if estimator.fit_intercept: + ptol = 1e-2 + stol = 0.999999 + else: + ptol = 10 + stol = 0.6 + y_pred = model.predict(x_off)[model.valid_samples_] + assert_allclose(y_pred, y[model.valid_samples_], + atol=ptol, err_msg=repr(estimator)) + score = np.mean(model.score(x_off, y)) + assert score > stol, estimator + model = ReceptiveField(smin, smax, 1., fit_intercept=False) + model.fit(x_off, y) + assert_allclose(model.estimator_.intercept_, 0., atol=1e-7) + score = np.mean(model.score(x_off, y)) + assert score > 0.6 + + +def _make_data(n_feats, n_targets, n_samples, tmin, tmax): + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_feats) + w = rng.randn(int((tmax - tmin) + 1) * n_feats, n_targets) + # Delay inputs + X_del = np.concatenate( + _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1) + y = np.dot(X_del, w) + return X, y + + +@requires_sklearn +def test_inverse_coef(): + """Test inverse coefficients computation.""" + from sklearn.linear_model import Ridge + + tmin, tmax = 0., 10. 
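+ # For linear models the 'patterns' are the forward-model counterparts of the + # fitted coefficients (cf. Haufe et al. 2014, NeuroImage); projecting the + # coefficients onto the patterns of the same model should therefore be close + # to the identity, which is what the final assert_allclose below verifies.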
+ n_feats, n_targets, n_samples = 3, 2, 1000 + n_delays = int((tmax - tmin) + 1) + + # Check coefficient dims, for all estimator types + X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax) + tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian') + for estimator in (0., 0.01, Ridge(alpha=0.), tdr): + rf = ReceptiveField(tmin, tmax, 1., estimator=estimator, + patterns=True) + rf.fit(X, y) + inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator, + patterns=True) + inv_rf.fit(y, X) + + assert_array_equal(rf.coef_.shape, rf.patterns_.shape, + (n_targets, n_feats, n_delays)) + assert_array_equal(inv_rf.coef_.shape, inv_rf.patterns_.shape, + (n_feats, n_targets, n_delays)) + + # we should have np.dot(patterns.T,coef) ~ np.eye(n) + c0 = rf.coef_.reshape(n_targets, n_feats * n_delays) + c1 = rf.patterns_.reshape(n_targets, n_feats * n_delays) + assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2) + + +@requires_sklearn +def test_linalg_warning(): + """Test that warnings are issued when no regularization is applied.""" + from sklearn.linear_model import Ridge + n_feats, n_targets, n_samples = 5, 60, 50 + X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax) + for estimator in (0., Ridge(alpha=0.)): + rf = ReceptiveField(tmin, tmax, 1., estimator=estimator) + with pytest.warns((RuntimeWarning, UserWarning), + match='[Singular|scipy.linalg.solve]'): + rf.fit(y, X) diff --git a/python/libs/mne/decoding/tests/test_search_light.py b/python/libs/mne/decoding/tests/test_search_light.py new file mode 100644 index 0000000..1891248 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_search_light.py @@ -0,0 +1,293 @@ +# Author: Jean-Remi King, +# +# License: BSD-3-Clause + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +import pytest + +from mne.utils import requires_sklearn, _record_warnings +from mne.fixes import _get_args +from mne.decoding.search_light import SlidingEstimator, GeneralizingEstimator +from mne.decoding.transformer import Vectorizer + + +def make_data(): + """Make data.""" + n_epochs, n_chan, n_time = 50, 32, 10 + X = np.random.rand(n_epochs, n_chan, n_time) + y = np.arange(n_epochs) % 2 + for ii in range(n_time): + coef = np.random.randn(n_chan) + X[y == 0, :, ii] += coef + X[y == 1, :, ii] -= coef + return X, y + + +@requires_sklearn +def test_search_light(): + """Test SlidingEstimator.""" + from sklearn.linear_model import Ridge, LogisticRegression + from sklearn.pipeline import make_pipeline + from sklearn.metrics import roc_auc_score, make_scorer + with _record_warnings(): # NumPy module import + from sklearn.ensemble import BaggingClassifier + from sklearn.base import is_classifier + + logreg = LogisticRegression(solver='liblinear', multi_class='ovr', + random_state=0) + + X, y = make_data() + n_epochs, _, n_time = X.shape + # init + pytest.raises(ValueError, SlidingEstimator, 'foo') + sl = SlidingEstimator(Ridge()) + assert (not is_classifier(sl)) + sl = SlidingEstimator(LogisticRegression(solver='liblinear')) + assert (is_classifier(sl)) + # fit + assert_equal(sl.__repr__()[:18], '<SlidingEstimator(') + sl.fit(X, y) + assert_equal(sl.__repr__()[-28:], ', fitted with 10 estimators>') + pytest.raises(ValueError, sl.fit, X[1:], y) + pytest.raises(ValueError, sl.fit, X[:, :, 0], y) + sl.fit(X, y, sample_weight=np.ones_like(y)) + + # transforms + pytest.raises(ValueError, sl.predict, X[:, :, :2]) + y_pred = sl.predict(X) + assert (y_pred.dtype == int) + assert_array_equal(y_pred.shape, [n_epochs, n_time]) + y_proba = sl.predict_proba(X) + assert (y_proba.dtype == float) + assert_array_equal(y_proba.shape, [n_epochs,
n_time, 2]) + + # score + score = sl.score(X, y) + assert_array_equal(score.shape, [n_time]) + assert (np.sum(np.abs(score)) != 0) + assert (score.dtype == float) + + sl = SlidingEstimator(logreg) + assert_equal(sl.scoring, None) + + # Scoring method + for scoring in ['foo', 999]: + sl = SlidingEstimator(logreg, scoring=scoring) + sl.fit(X, y) + pytest.raises((ValueError, TypeError), sl.score, X, y) + + # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874 + # -- 3 class problem + sl = SlidingEstimator(logreg, scoring='roc_auc') + y = np.arange(len(X)) % 3 + sl.fit(X, y) + with pytest.raises(ValueError, match='for two-class'): + sl.score(X, y) + # But check that valid ones should work with new enough sklearn + if 'multi_class' in _get_args(roc_auc_score): + scoring = make_scorer( + roc_auc_score, needs_proba=True, multi_class='ovo') + sl = SlidingEstimator(logreg, scoring=scoring) + sl.fit(X, y) + sl.score(X, y) # smoke test + + # -- 2 class problem not in [0, 1] + y = np.arange(len(X)) % 2 + 1 + sl.fit(X, y) + score = sl.score(X, y) + assert_array_equal(score, [roc_auc_score(y - 1, _y_pred - 1) + for _y_pred in sl.decision_function(X).T]) + y = np.arange(len(X)) % 2 + + # Cannot pass a metric as a scoring parameter + sl1 = SlidingEstimator(logreg, scoring=roc_auc_score) + sl1.fit(X, y) + pytest.raises(ValueError, sl1.score, X, y) + + # Now use string as scoring + sl1 = SlidingEstimator(logreg, scoring='roc_auc') + sl1.fit(X, y) + rng = np.random.RandomState(0) + X = rng.randn(*X.shape) # randomize X to avoid AUCs in [0, 1] + score_sl = sl1.score(X, y) + assert_array_equal(score_sl.shape, [n_time]) + assert (score_sl.dtype == float) + + # Check that scoring was applied adequately + scoring = make_scorer(roc_auc_score, needs_threshold=True) + score_manual = [scoring(est, x, y) for est, x in zip( + sl1.estimators_, X.transpose(2, 0, 1))] + assert_array_equal(score_manual, score_sl) + + # n_jobs + sl = SlidingEstimator(logreg, n_jobs=1, scoring='roc_auc') + score_1job = sl.fit(X, y).score(X, y) + sl.n_jobs = 2 + score_njobs = sl.fit(X, y).score(X, y) + assert_array_equal(score_1job, score_njobs) + sl.predict(X) + + # n_jobs > n_estimators + sl.fit(X[..., [0]], y) + sl.predict(X[..., [0]]) + + # pipeline + + class _LogRegTransformer(LogisticRegression): + # XXX needs transformer in pipeline to get first proba only + def __init__(self): + super(_LogRegTransformer, self).__init__() + self.multi_class = 'ovr' + self.random_state = 0 + self.solver = 'liblinear' + + def transform(self, X): + return super(_LogRegTransformer, self).predict_proba(X)[..., 1] + + pipe = make_pipeline(SlidingEstimator(_LogRegTransformer()), + logreg) + pipe.fit(X, y) + pipe.predict(X) + + # n-dimensional feature space + X = np.random.rand(10, 3, 4, 2) + y = np.arange(10) % 2 + y_preds = list() + for n_jobs in [1, 2]: + pipe = SlidingEstimator( + make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs) + y_preds.append(pipe.fit(X, y).predict(X)) + features_shape = pipe.estimators_[0].steps[0][1].features_shape_ + assert_array_equal(features_shape, [3, 4]) + assert_array_equal(y_preds[0], y_preds[1]) + + # Bagging classifiers + X = np.random.rand(10, 3, 4) + for n_jobs in (1, 2): + pipe = SlidingEstimator(BaggingClassifier(None, 2), n_jobs=n_jobs) + pipe.fit(X, y) + pipe.score(X, y) + assert (isinstance(pipe.estimators_[0], BaggingClassifier)) + + +@requires_sklearn +def test_generalization_light(): + """Test GeneralizingEstimator.""" + from sklearn.pipeline import make_pipeline + from sklearn.linear_model import 
LogisticRegression + from sklearn.metrics import roc_auc_score + + logreg = LogisticRegression(solver='liblinear', multi_class='ovr', + random_state=0) + + X, y = make_data() + n_epochs, _, n_time = X.shape + # fit + gl = GeneralizingEstimator(logreg) + assert_equal(repr(gl)[:23], '<GeneralizingEstimator(') + gl.fit(X, y) + assert_equal(gl.__repr__()[-28:], ', fitted with 10 estimators>') + # transforms + y_pred = gl.predict(X) + assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time]) + assert (y_pred.dtype == int) + y_proba = gl.predict_proba(X) + assert (y_proba.dtype == float) + assert_array_equal(y_proba.shape, [n_epochs, n_time, n_time, 2]) + + # transform to different datasize + y_pred = gl.predict(X[:, :, :2]) + assert_array_equal(y_pred.shape, [n_epochs, n_time, 2]) + + # score + score = gl.score(X[:, :, :3], y) + assert_array_equal(score.shape, [n_time, 3]) + assert (np.sum(np.abs(score)) != 0) + assert (score.dtype == float) + + gl = GeneralizingEstimator(logreg, scoring='roc_auc') + gl.fit(X, y) + score = gl.score(X, y) + auc = roc_auc_score(y, gl.estimators_[0].predict_proba(X[..., 0])[..., 1]) + assert_equal(score[0, 0], auc) + + for scoring in ['foo', 999]: + gl = GeneralizingEstimator(logreg, scoring=scoring) + gl.fit(X, y) + pytest.raises((ValueError, TypeError), gl.score, X, y) + + # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874 + # -- 3 class problem + gl = GeneralizingEstimator(logreg, scoring='roc_auc') + y = np.arange(len(X)) % 3 + gl.fit(X, y) + pytest.raises(ValueError, gl.score, X, y) + # -- 2 class problem not in [0, 1] + y = np.arange(len(X)) % 2 + 1 + gl.fit(X, y) + score = gl.score(X, y) + manual_score = [[roc_auc_score(y - 1, _y_pred) for _y_pred in _y_preds] + for _y_preds in gl.decision_function(X).transpose(1, 2, 0)] + assert_array_equal(score, manual_score) + + # n_jobs + gl = GeneralizingEstimator(logreg, n_jobs=2) + gl.fit(X, y) + y_pred = gl.predict(X) + assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time]) + score = gl.score(X, y) + assert_array_equal(score.shape, [n_time, n_time]) + + # n_jobs > n_estimators + gl.fit(X[..., [0]], y) + gl.predict(X[..., [0]]) + + # n-dimensional feature space + X = np.random.rand(10, 3, 4, 2) + y = np.arange(10) % 2 + y_preds = list() + for n_jobs in [1, 2]: + pipe = GeneralizingEstimator( + make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs) + y_preds.append(pipe.fit(X, y).predict(X)) + features_shape = pipe.estimators_[0].steps[0][1].features_shape_ + assert_array_equal(features_shape, [3, 4]) + assert_array_equal(y_preds[0], y_preds[1]) + + +@requires_sklearn +def test_cross_val_predict(): + """Test cross_val_predict with predict_proba.""" + from sklearn.linear_model import LinearRegression + from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + from sklearn.base import BaseEstimator, clone + from sklearn.model_selection import cross_val_predict + rng = np.random.RandomState(42) + X = rng.randn(10, 1, 3) + y = rng.randint(0, 2, 10) + + estimator = SlidingEstimator(LinearRegression()) + cross_val_predict(estimator, X, y, cv=2) + + class Classifier(BaseEstimator): + """Mock class that does not have classes_ attribute.""" + + def __init__(self): + self.base_estimator = LinearDiscriminantAnalysis() + + def fit(self, X, y): + self.estimator_ = clone(self.base_estimator).fit(X, y) + return self + + def predict_proba(self, X): + return self.estimator_.predict_proba(X) + + with pytest.raises(AttributeError, match="classes_ attribute"): + estimator = SlidingEstimator(Classifier()) + cross_val_predict(estimator, X, y, method='predict_proba', cv=2) + + estimator =
SlidingEstimator(LinearDiscriminantAnalysis()) + cross_val_predict(estimator, X, y, method='predict_proba', cv=2) diff --git a/python/libs/mne/decoding/tests/test_ssd.py b/python/libs/mne/decoding/tests/test_ssd.py new file mode 100644 index 0000000..6f671c6 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_ssd.py @@ -0,0 +1,324 @@ +# Author: Denis A. Engemann +# Victoria Peterson +# License: BSD-3-Clause + +import numpy as np +import pytest +from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from mne import io +from mne.time_frequency import psd_array_welch +from mne.decoding.ssd import SSD +from mne.utils import requires_sklearn +from mne.filter import filter_data +from mne import create_info +from mne.decoding import CSP + +freqs_sig = 9, 12 +freqs_noise = 8, 13 + + +def simulate_data(freqs_sig=[9, 12], n_trials=100, n_channels=20, + n_samples=500, samples_per_second=250, + n_components=5, SNR=0.05, random_state=42): + """Simulate data according to an instantaneous mixing model. + + Data are simulated in the statistical source space, where n=n_components + sources contain the peak of interest. + """ + rng = np.random.RandomState(random_state) + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1, + fir_design='firwin') + + # generate an orthogonal mixing matrix + mixing_mat = np.linalg.svd(rng.randn(n_channels, n_channels))[0] + # define sources + S_s = rng.randn(n_trials * n_samples, n_components) + # filter source in the specific freq. band of interest + S_s = filter_data(S_s.T, samples_per_second, **filt_params_signal).T + S_n = rng.randn(n_trials * n_samples, n_channels - n_components) + S = np.hstack((S_s, S_n)) + # mix data + X_s = np.dot(mixing_mat[:, :n_components], S_s.T).T + X_n = np.dot(mixing_mat[:, n_components:], S_n.T).T + # add noise + X_s = X_s / np.linalg.norm(X_s, 'fro') + X_n = X_n / np.linalg.norm(X_n, 'fro') + X = SNR * X_s + (1 - SNR) * X_n + X = X.T + S = S.T + return X, mixing_mat, S + + +@pytest.mark.slowtest +def test_ssd(): + """Test the SSD algorithm on raw data.""" + X, A, S = simulate_data() + sf = 250 + n_channels = X.shape[0] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + n_components_true = 5 + + # Init + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + ssd = SSD(info, filt_params_signal, filt_params_noise) + # freq is not an int + freq = 'foo' + filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + with pytest.raises(TypeError, match='must be an instance '): + ssd = SSD(info, filt_params_signal, filt_params_noise) + + # Wrongly specified noise band + freq = 2 + filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + with pytest.raises(ValueError, match='Wrongly specified '): + ssd = SSD(info, filt_params_signal, filt_params_noise) + + # filt params are not dicts + filt_params_signal = freqs_sig + filt_params_noise = freqs_noise + with pytest.raises(ValueError, match='must be defined'): + ssd = SSD(info, filt_params_signal,
filt_params_noise) + + # Data type + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + ssd = SSD(info, filt_params_signal, filt_params_noise) + raw = io.RawArray(X, info) + + pytest.raises(TypeError, ssd.fit, raw) + + # check non-boolean return_filtered + with pytest.raises(ValueError, match='return_filtered'): + ssd = SSD(info, filt_params_signal, filt_params_noise, + return_filtered=0) + + # check non-boolean sort_by_spectral_ratio + with pytest.raises(ValueError, match='sort_by_spectral_ratio'): + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=0) + + # More than 1 channel type + ch_types = np.reshape([['mag'] * 10, ['eeg'] * 10], n_channels) + info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types) + + with pytest.raises(ValueError, match='At this point SSD'): + ssd = SSD(info_2, filt_params_signal, filt_params_noise) + + # Number of channels + info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types='eeg') + ssd = SSD(info_3, filt_params_signal, filt_params_noise) + pytest.raises(ValueError, ssd.fit, X) + + # Fit + n_components = 10 + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=n_components) + + # Call transform before fit + pytest.raises(AttributeError, ssd.transform, X) + + # Check outputs + ssd.fit(X) + + assert (ssd.filters_.shape == (n_channels, n_channels)) + assert (ssd.patterns_.shape == (n_channels, n_channels)) + + # Transform + X_ssd = ssd.fit_transform(X) + assert (X_ssd.shape[0] == n_components) + # back and forward + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(X) + X_denoised = ssd.apply(X) + assert_array_almost_equal(X_denoised, X) + # denoised by low-rank factorization + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=n_components, sort_by_spectral_ratio=True) + ssd.fit(X) + X_denoised = ssd.apply(X) + assert (np.linalg.matrix_rank(X_denoised) == n_components) + + # Power ratio ordering + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(X) + spec_ratio, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X)) + # since we know that the number of true components is 5, the relative + # difference should be low for the first 5 components and then increase + index_diff = np.argmax(-np.diff(spec_ratio)) + assert index_diff == n_components_true - 1 + # Check detected peaks + # fit ssd + n_components = n_components_true + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=n_components, sort_by_spectral_ratio=False) + ssd.fit(X) + + out = ssd.transform(X) + psd_out, _ = psd_array_welch(out[0], sfreq=250, n_fft=250) + psd_S, _ = psd_array_welch(S[0], sfreq=250, n_fft=250) + corr = np.abs(np.corrcoef((psd_out, psd_S))[0, 1]) + assert np.abs(corr) > 0.95 + # Check pattern estimation + # Since there is no exact ordering of the recovered patterns + # a pair-wise greedy search will be done + error = list() + for ii in range(n_channels): + corr = np.abs(np.corrcoef(ssd.patterns_[ii, :].T, A[:, 0])[0, 1]) + error.append(1 -
corr) + min_err = np.min(error) + assert min_err < 0.3 # threshold taken from SSD original paper + + +def test_ssd_epoched_data(): + """Test the SSD algorithm on epoched data. + + Compare the outputs when raw data is used. + """ + X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500) + sf = 250 + n_channels = X.shape[0] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + n_components_true = 5 + + # Build epochs as sliding windows over the continuous raw file + + # Epoch length is 1 second + X_e = np.reshape(X, (100, 20, 500)) + + # Fit + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + + # ssd on epochs + ssd_e = SSD(info, filt_params_signal, filt_params_noise) + ssd_e.fit(X_e) + # ssd on raw + ssd = SSD(info, filt_params_signal, filt_params_noise) + ssd.fit(X) + + # Check if the first 5 components are the same for both + _, sorter_spec_e = ssd_e.get_spectral_ratio(ssd_e.transform(X_e)) + _, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X)) + assert_array_equal(sorter_spec_e[:n_components_true], + sorter_spec[:n_components_true]) + + +@requires_sklearn +def test_ssd_pipeline(): + """Test if SSD works in a pipeline.""" + from sklearn.pipeline import Pipeline + sf = 250 + X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500) + X_e = np.reshape(X, (100, 20, 500)) + # define binary random output + y = np.random.randint(2, size=100) + + info = create_info(ch_names=20, sfreq=sf, ch_types='eeg') + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + ssd = SSD(info, filt_params_signal, filt_params_noise) + csp = CSP() + pipe = Pipeline([('SSD', ssd), ('CSP', csp)]) + pipe.set_params(SSD__n_components=5) + pipe.set_params(CSP__n_components=2) + out = pipe.fit_transform(X_e, y) + assert (out.shape == (100, 2)) + assert (pipe.get_params()['SSD__n_components'] == 5) + + +def test_sorting(): + """Test that sorting is learned during training.""" + X, _, _ = simulate_data(n_trials=100, n_channels=20, n_samples=500) + # Epoch length is 1 second + X = np.reshape(X, (100, 20, 500)) + # split data + Xtr, Xte = X[:80], X[80:] + sf = 250 + n_channels = Xtr.shape[1] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + + # check sort_by_spectral_ratio set to False + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(Xtr) + _, sorter_tr = ssd.get_spectral_ratio(ssd.transform(Xtr)) + _, sorter_te = ssd.get_spectral_ratio(ssd.transform(Xte)) + assert any(sorter_tr != sorter_te) + + # check sort_by_spectral_ratio set to True + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=True) + ssd.fit(Xtr) + + # check sorters + sorter_in = ssd.sorter_spec + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(Xtr) + _, sorter_out = ssd.get_spectral_ratio(ssd.transform(Xtr)) + + assert
all(sorter_in == sorter_out) + + +def test_return_filtered(): + """Test return filtered option.""" + # Check return_filtered + # Simulate noisier data with a broader frequency band than the desired one + X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13]) + sf = 250 + n_channels = X.shape[0] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + + # return filtered to true + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=False, return_filtered=True) + ssd.fit(X) + + out = ssd.transform(X) + psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250) + freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1]) + assert (freqs_up == freqs_sig) + + # return filtered to false + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=False, return_filtered=False) + ssd.fit(X) + + out = ssd.transform(X) + psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250) + freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1]) + assert (freqs_up != freqs_sig) diff --git a/python/libs/mne/decoding/tests/test_time_frequency.py b/python/libs/mne/decoding/tests/test_time_frequency.py new file mode 100644 index 0000000..a482627 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_time_frequency.py @@ -0,0 +1,43 @@ +# Author: Jean-Remi King, +# +# License: BSD-3-Clause + + +import numpy as np +from numpy.testing import assert_array_equal +import pytest + +from mne.utils import requires_sklearn +from mne.decoding.time_frequency import TimeFrequency + + +@requires_sklearn +def test_timefrequency(): + """Test TimeFrequency.""" + from sklearn.base import clone + # Init + n_freqs = 3 + freqs = [20, 21, 22] + tf = TimeFrequency(freqs, sfreq=100) + for output in ['avg_power', 'foo', None]: + pytest.raises(ValueError, TimeFrequency, freqs, output=output) + tf = clone(tf) + + # Fit + n_epochs, n_chans, n_times = 10, 2, 100 + X = np.random.rand(n_epochs, n_chans, n_times) + tf.fit(X, None) + + # Transform + tf = TimeFrequency(freqs, sfreq=100) + tf.fit_transform(X, None) + # 3-D X + Xt = tf.transform(X) + assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times]) + # 2-D X + Xt = tf.transform(X[:, 0, :]) + assert_array_equal(Xt.shape, [n_epochs, n_freqs, n_times]) + # 3-D with decim + tf = TimeFrequency(freqs, sfreq=100, decim=2) + Xt = tf.transform(X) + assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times // 2]) diff --git a/python/libs/mne/decoding/tests/test_transformer.py b/python/libs/mne/decoding/tests/test_transformer.py new file mode 100644 index 0000000..ed92120 --- /dev/null +++ b/python/libs/mne/decoding/tests/test_transformer.py @@ -0,0 +1,242 @@ +# Author: Mainak Jas +# Romain Trachel +# +# License: BSD-3-Clause + +import os.path as op +import numpy as np + +import pytest +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_equal) + +from mne import io, read_events, Epochs, pick_types +from mne.decoding import (Scaler, FilterEstimator, PSDEstimator, Vectorizer, + UnsupervisedSpatialFilter, TemporalFilter) +from mne.defaults import DEFAULTS +from mne.utils import requires_sklearn, check_version + +tmin, tmax = -0.2, 0.5 +event_id = dict(aud_l=1, vis_l=3) +start, stop = 0, 8 + +data_dir =
op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +raw_fname = op.join(data_dir, 'test_raw.fif') +event_name = op.join(data_dir, 'test-eve.fif') + + +@pytest.mark.parametrize('info, method', [ + (True, None), + (True, dict(mag=5, grad=10, eeg=20)), + (False, 'mean'), + (False, 'median'), +]) +def test_scaler(info, method): + """Test methods of Scaler.""" + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + epochs_data = epochs.get_data() + y = epochs.events[:, -1] + + epochs_data_t = epochs_data.transpose([1, 0, 2]) + if method in ('mean', 'median'): + if not check_version('sklearn'): + with pytest.raises(ImportError, match='No module'): + Scaler(info, method) + return + + if info: + info = epochs.info + scaler = Scaler(info, method) + X = scaler.fit_transform(epochs_data, y) + assert_equal(X.shape, epochs_data.shape) + if method is None or isinstance(method, dict): + sd = DEFAULTS['scalings'] if method is None else method + stds = np.zeros(len(picks)) + for key in ('mag', 'grad'): + stds[pick_types(epochs.info, meg=key)] = 1. / sd[key] + stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg'] + means = np.zeros(len(epochs.ch_names)) + elif method == 'mean': + stds = np.array([np.std(ch_data) for ch_data in epochs_data_t]) + means = np.array([np.mean(ch_data) for ch_data in epochs_data_t]) + else: # median + percs = np.array([np.percentile(ch_data, [25, 50, 75]) + for ch_data in epochs_data_t]) + stds = percs[:, 2] - percs[:, 0] + means = percs[:, 1] + assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis], + epochs_data, rtol=1e-12, atol=1e-20, err_msg=method) + + X2 = scaler.fit(epochs_data, y).transform(epochs_data) + assert_array_equal(X, X2) + + # inverse_transform + Xi = scaler.inverse_transform(X) + assert_array_almost_equal(epochs_data, Xi) + + # Test init exception + pytest.raises(ValueError, Scaler, None, None) + pytest.raises(TypeError, scaler.fit, epochs, y) + pytest.raises(TypeError, scaler.transform, epochs) + epochs_bad = Epochs(raw, events, event_id, 0, 0.01, baseline=None, + picks=np.arange(len(raw.ch_names))) # non-data chs + scaler = Scaler(epochs_bad.info, None) + pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(), y) + + +def test_filterestimator(): + """Test methods of FilterEstimator.""" + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + epochs_data = epochs.get_data() + + # Add tests for different combinations of l_freq and h_freq + filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80) + y = epochs.events[:, -1] + X = filt.fit_transform(epochs_data, y) + assert (X.shape == epochs_data.shape) + assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X) + + filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40, + filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto') + y = epochs.events[:, -1] + X = filt.fit_transform(epochs_data, y) + + filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1) + y = epochs.events[:, -1] + with pytest.warns(RuntimeWarning, match='longer than the signal'): + 
pytest.raises(ValueError, filt.fit_transform, epochs_data, y) + + filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None, + filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto') + X = filt.fit_transform(epochs_data, y) + + # Test init exception + pytest.raises(ValueError, filt.fit, epochs, y) + pytest.raises(ValueError, filt.transform, epochs) + + +def test_psdestimator(): + """Test methods of PSDEstimator.""" + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + baseline=(None, 0), preload=True) + epochs_data = epochs.get_data() + psd = PSDEstimator(2 * np.pi, 0, np.inf) + y = epochs.events[:, -1] + X = psd.fit_transform(epochs_data, y) + + assert (X.shape[0] == epochs_data.shape[0]) + assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X) + + # Test init exception + pytest.raises(ValueError, psd.fit, epochs, y) + pytest.raises(ValueError, psd.transform, epochs) + + +def test_vectorizer(): + """Test Vectorizer.""" + data = np.random.rand(150, 18, 6) + vect = Vectorizer() + result = vect.fit_transform(data) + assert_equal(result.ndim, 2) + + # check inverse_transform + orig_data = vect.inverse_transform(result) + assert_equal(orig_data.ndim, 3) + assert_array_equal(orig_data, data) + assert_array_equal(vect.inverse_transform(result[1:]), data[1:]) + + # check with different shape + assert_equal(vect.fit_transform(np.random.rand(150, 18, 6, 3)).shape, + (150, 324)) + assert_equal(vect.fit_transform(data[1:]).shape, (149, 108)) + + # check if raised errors are working correctly + vect.fit(np.random.rand(105, 12, 3)) + pytest.raises(ValueError, vect.transform, np.random.rand(105, 12, 3, 1)) + pytest.raises(ValueError, vect.inverse_transform, + np.random.rand(102, 12, 12)) + + +@requires_sklearn +def test_unsupervised_spatial_filter(): + """Test unsupervised spatial filter.""" + from sklearn.decomposition import PCA + from sklearn.kernel_ridge import KernelRidge + raw = io.read_raw_fif(raw_fname) + events = read_events(event_name) + picks = pick_types(raw.info, meg=True, stim=False, ecg=False, + eog=False, exclude='bads') + picks = picks[1:13:3] + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, + preload=True, baseline=None, verbose=False) + + # Test estimator + pytest.raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2)) + + # Test fit + X = epochs.get_data() + n_components = 4 + usf = UnsupervisedSpatialFilter(PCA(n_components)) + usf.fit(X) + usf1 = UnsupervisedSpatialFilter(PCA(n_components)) + + # test transform + assert_equal(usf.transform(X).ndim, 3) + # test fit_transform + assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X)) + assert_equal(usf.transform(X).shape[1], n_components) + assert_array_almost_equal(usf.inverse_transform(usf.transform(X)), X) + + # Test with average param + usf = UnsupervisedSpatialFilter(PCA(4), average=True) + usf.fit_transform(X) + pytest.raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2) + + +def test_temporal_filter(): + """Test methods of TemporalFilter.""" + X = np.random.rand(5, 5, 1200) + + # Test init + values = (('10hz', None, 100., 'auto'), (5., '10hz', 100., 'auto'), + (10., 20., 5., 'auto'), (None, None, 100., '5hz')) + for low, high, sf, ltrans in values: + filt = TemporalFilter(low, high, sf, ltrans, fir_design='firwin') + pytest.raises(ValueError,
filt.fit_transform, X) + + # Add tests for different combinations of l_freq and h_freq + for low, high in ((5., 15.), (None, 15.), (5., None)): + filt = TemporalFilter(low, high, sfreq=100., fir_design='firwin') + Xt = filt.fit_transform(X) + assert_array_equal(filt.fit_transform(X), Xt) + assert (X.shape == Xt.shape) + + # Test fit and transform numpy type check + with pytest.raises(ValueError, match='Data to be filtered must be'): + filt.transform([1, 2]) + + # Test with 2 dimensional data array + X = np.random.rand(101, 500) + filt = TemporalFilter(l_freq=25., h_freq=50., sfreq=1000., + filter_length=150, fir_design='firwin2') + assert_equal(filt.fit_transform(X).shape, X.shape) diff --git a/python/libs/mne/decoding/time_delaying_ridge.py b/python/libs/mne/decoding/time_delaying_ridge.py new file mode 100644 index 0000000..c3674cc --- /dev/null +++ b/python/libs/mne/decoding/time_delaying_ridge.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- +"""TimeDelayingRidge class.""" +# Authors: Eric Larson +# Ross Maddox +# +# License: BSD-3-Clause + +import numpy as np + +from .base import BaseEstimator +from ..cuda import _setup_cuda_fft_multiply_repeated +from ..filter import next_fast_len +from ..fixes import jit +from ..parallel import check_n_jobs +from ..utils import warn, ProgressBar, logger + + +def _compute_corrs(X, y, smin, smax, n_jobs=1, fit_intercept=False, + edge_correction=True): + """Compute auto- and cross-correlations.""" + if fit_intercept: + # We could do this in the Fourier domain, too, but it should + # be a bit cleaner numerically to do it here. + X_offset = np.mean(X, axis=0) + y_offset = np.mean(y, axis=0) + if X.ndim == 3: + X_offset = X_offset.mean(axis=0) + y_offset = np.mean(y_offset, axis=0) + X = X - X_offset + y = y - y_offset + else: + X_offset = y_offset = 0. + if X.ndim == 2: + assert y.ndim == 2 + X = X[:, np.newaxis, :] + y = y[:, np.newaxis, :] + assert X.shape[:2] == y.shape[:2] + len_trf = smax - smin + len_x, n_epochs, n_ch_x = X.shape + len_y, n_epochs, n_ch_y = y.shape + assert len_x == len_y + + n_fft = next_fast_len(2 * X.shape[0] - 1) + + n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated( + n_jobs, [1.], n_fft, 'correlation calculations') + + # create our Toeplitz indexer + ij = np.empty((len_trf, len_trf), int) + for ii in range(len_trf): + ij[ii, ii:] = np.arange(len_trf - ii) + x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1) + ij[ii + 1:, ii] = x + + x_xt = np.zeros([n_ch_x * len_trf] * 2) + x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F') + n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x) + logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x)) + pb = ProgressBar(n, mesg='Sample') + count = 0 + pb.update(count) + for ei in range(n_epochs): + this_X = X[:, ei, :] + # XXX maybe this is what we should parallelize over CPUs at some point + X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0) + X_fft_conj = X_fft.conj() + y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0) + + for ch0 in range(n_ch_x): + for oi, ch1 in enumerate(range(ch0, n_ch_x)): + this_result = cuda_dict['irfft']( + X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0) + # Our autocorrelation structure is a Toeplitz matrix, but + # it's faster to create the Toeplitz ourselves than use + # linalg.toeplitz. + this_result = this_result[ij] + # However, we need to adjust for coeffs that are cut off, + # i.e. the non-zero delays should not have the same AC value + # as the zero-delay ones (because they actually have fewer + # coefficients).
+ # + # These adjustments also follow a Toeplitz structure, so we + # construct a matrix of what has been left off, compute their + # inner products, and remove them. + if edge_correction: + _edge_correct(this_result, this_X, smax, smin, ch0, ch1) + + # Store the results in our output matrix + x_xt[ch0 * len_trf:(ch0 + 1) * len_trf, + ch1 * len_trf:(ch1 + 1) * len_trf] += this_result + if ch0 != ch1: + x_xt[ch1 * len_trf:(ch1 + 1) * len_trf, + ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T + count += 1 + pb.update(count) + + # compute the crosscorrelations + cc_temp = cuda_dict['irfft']( + y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0) + if smin < 0 and smax >= 0: + x_y[:-smin, ch0] += cc_temp[smin:] + x_y[len_trf - smax:, ch0] += cc_temp[:smax] + else: + x_y[:, ch0] += cc_temp[smin:smax] + count += 1 + pb.update(count) + + x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F') + return x_xt, x_y, n_ch_x, X_offset, y_offset + + +@jit() +def _edge_correct(this_result, this_X, smax, smin, ch0, ch1): + if smax > 0: + tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0], + this_X[-1:-smax:-1, ch1]) + if smin > 0: + tail = tail[smin - 1:, smin - 1:] + this_result[max(-smin + 1, 0):, max(-smin + 1, 0):] -= tail + if smin < 0: + head = _toeplitz_dot(this_X[:-smin, ch0], + this_X[:-smin, ch1])[::-1, ::-1] + if smax < 0: + head = head[:smax, :smax] + this_result[:-smin, :-smin] -= head + + +@jit() +def _toeplitz_dot(a, b): + """Create upper triangular Toeplitz matrices & compute the dot product.""" + # This is equivalent to: + # a = linalg.toeplitz(a) + # b = linalg.toeplitz(b) + # a[np.triu_indices(len(a), 1)] = 0 + # b[np.triu_indices(len(a), 1)] = 0 + # out = np.dot(a.T, b) + assert a.shape == b.shape and a.ndim == 1 + out = np.outer(a, b) + for ii in range(1, len(a)): + out[ii, ii:] += out[ii - 1, ii - 1:-1] + out[ii + 1:, ii] += out[ii:-1, ii - 1] + return out + + +def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct', + normed=False): + """Compute regularization parameter from neighbors.""" + from scipy import linalg + from scipy.sparse.csgraph import laplacian + known_types = ('ridge', 'laplacian') + if isinstance(reg_type, str): + reg_type = (reg_type,) * 2 + if len(reg_type) != 2: + raise ValueError('reg_type must have two elements, got %s' + % (len(reg_type),)) + for r in reg_type: + if r not in known_types: + raise ValueError('reg_type entries must be one of %s, got %s' + % (known_types, r)) + reg_time = (reg_type[0] == 'laplacian' and n_delays > 1) + reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1) + if not reg_time and not reg_chs: + return np.eye(n_ch_x * n_delays) + # regularize time + if reg_time: + reg = np.eye(n_delays) + stride = n_delays + 1 + reg.flat[1::stride] += -1 + reg.flat[n_delays::stride] += -1 + reg.flat[n_delays + 1:-n_delays - 1:stride] += 1 + args = [reg] * n_ch_x + reg = linalg.block_diag(*args) + else: + reg = np.zeros((n_delays * n_ch_x,) * 2) + + # regularize features + if reg_chs: + block = n_delays * n_delays + row_offset = block * n_ch_x + stride = n_delays * n_ch_x + 1 + reg.flat[n_delays:-row_offset:stride] += -1 + reg.flat[n_delays + row_offset::stride] += 1 + reg.flat[row_offset:-n_delays:stride] += -1 + reg.flat[:-(n_delays + row_offset):stride] += 1 + assert np.array_equal(reg[::-1, ::-1], reg) + + if method == 'direct': + if normed: + norm = np.sqrt(np.diag(reg)) + reg /= norm + reg /= norm[:, np.newaxis] + return reg + else: + # Use csgraph. Note that our -1's above are really the neighbors! 
+ # If we ever want to allow arbitrary adjacency matrices, this is how + # we'd want to do it. + reg = laplacian(-reg, normed=normed) + return reg + + +def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in): + """Fit the model using correlation matrices.""" + # do the regularized solving + from scipy import linalg + n_ch_out = x_y.shape[1] + assert x_y.shape[0] % n_ch_x == 0 + n_delays = x_y.shape[0] // n_ch_x + reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type) + mat = x_xt + alpha * reg + # From sklearn + try: + # Note: we must use overwrite_a=False in order to be able to + # use the fall-back solution below in case a LinAlgError + # is raised + w = linalg.solve(mat, x_y, sym_pos=True, overwrite_a=False) + except np.linalg.LinAlgError: + warn('Singular matrix in solving dual problem. Using ' + 'least-squares solution instead.') + w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0] + w = w.T.reshape([n_ch_out, n_ch_in, n_delays]) + return w + + +class TimeDelayingRidge(BaseEstimator): + """Ridge regression of data with time delays. + + Parameters + ---------- + tmin : int | float + The starting lag, in seconds (or samples if ``sfreq`` == 1). + Negative values correspond to times in the past. + tmax : int | float + The ending lag, in seconds (or samples if ``sfreq`` == 1). + Positive values correspond to times in the future. + Must be >= tmin. + sfreq : float + The sampling frequency used to convert times into samples. + alpha : float + The ridge (or laplacian) regularization factor. + reg_type : str | list + Can be "ridge" (default) or "laplacian". + Can also be a 2-element list specifying how to regularize in time + and across adjacent features. + fit_intercept : bool + If True (default), the sample mean is removed before fitting. + n_jobs : int | str + The number of jobs to use. Can be an int (default 1) or ``'cuda'``. + + .. versionadded:: 0.18 + edge_correction : bool + If True (default), correct the autocorrelation coefficients for + non-zero delays for the fact that fewer samples are available. + Disabling this speeds up performance at the cost of accuracy + depending on the relationship between epoch length and model + duration. Only used if ``estimator`` is float or None. + + .. versionadded:: 0.18 + + See Also + -------- + mne.decoding.ReceptiveField + + Notes + ----- + This class is meant to be used with :class:`mne.decoding.ReceptiveField` + by only implicitly doing the time delaying. For reasonable receptive + field and input signal sizes, it should be more CPU and memory + efficient by using frequency-domain methods (FFTs) to compute the + auto- and cross-correlations. + """ + + _estimator_type = "regressor" + + def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge', + fit_intercept=True, n_jobs=1, edge_correction=True): + if tmin > tmax: + raise ValueError('tmin must be <= tmax, got %s and %s' + % (tmin, tmax)) + self.tmin = float(tmin) + self.tmax = float(tmax) + self.sfreq = float(sfreq) + self.alpha = float(alpha) + self.reg_type = reg_type + self.fit_intercept = fit_intercept + self.edge_correction = edge_correction + self.n_jobs = n_jobs + + @property + def _smin(self): + return int(round(self.tmin * self.sfreq)) + + @property + def _smax(self): + return int(round(self.tmax * self.sfreq)) + 1 + + def fit(self, X, y): + """Estimate the coefficients of the linear model. + + Parameters + ---------- + X : array, shape (n_samples[, n_epochs], n_features) + The training input samples to estimate the linear coefficients. 
+ y : array, shape (n_samples[, n_epochs], n_outputs) + The target values. + + Returns + ------- + self : instance of TimeDelayingRidge + Returns the modified instance. + """ + if X.ndim == 3: + assert y.ndim == 3 + assert X.shape[:2] == y.shape[:2] + else: + assert X.ndim == 2 and y.ndim == 2 + assert X.shape[0] == y.shape[0] + n_jobs = check_n_jobs(self.n_jobs, allow_cuda=True) + # These are split into two functions because it's possible that we + # might want to allow people to do them separately (e.g., to test + # different regularization parameters). + self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs( + X, y, self._smin, self._smax, n_jobs, self.fit_intercept, + self.edge_correction) + self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x, + self.reg_type, self.alpha, n_ch_x) + # This is the sklearn formula from LinearModel (will be 0. for no fit) + if self.fit_intercept: + self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T) + else: + self.intercept_ = 0. + return self + + def predict(self, X): + """Predict the output. + + Parameters + ---------- + X : array, shape (n_samples[, n_epochs], n_features) + The data. + + Returns + ------- + X : ndarray + The predicted response. + """ + from scipy.signal import fftconvolve + + if X.ndim == 2: + X = X[:, np.newaxis, :] + singleton = True + else: + singleton = False + out = np.zeros(X.shape[:2] + (self.coef_.shape[0],)) + smin = self._smin + offset = max(smin, 0) + for ei in range(X.shape[1]): + for oi in range(self.coef_.shape[0]): + for fi in range(self.coef_.shape[1]): + temp = fftconvolve(X[:, ei, fi], self.coef_[oi, fi]) + temp = temp[max(-smin, 0):][:len(out) - offset] + out[offset:len(temp) + offset, ei, oi] += temp + out += self.intercept_ + if singleton: + out = out[:, 0, :] + return out diff --git a/python/libs/mne/decoding/time_frequency.py b/python/libs/mne/decoding/time_frequency.py new file mode 100644 index 0000000..b291a51 --- /dev/null +++ b/python/libs/mne/decoding/time_frequency.py @@ -0,0 +1,146 @@ +# Author: Jean-Remi King +# +# License: BSD-3-Clause + +import numpy as np +from .mixin import TransformerMixin +from .base import BaseEstimator +from ..time_frequency.tfr import _compute_tfr, _check_tfr_param +from ..utils import fill_doc, _check_option, verbose, _VerboseDep + + +@fill_doc +class TimeFrequency(TransformerMixin, BaseEstimator, _VerboseDep): + """Time frequency transformer. + + Time-frequency transform of times series along the last axis. + + Parameters + ---------- + freqs : array-like of float, shape (n_freqs,) + The frequencies. + sfreq : float | int, default 1.0 + Sampling frequency of the data. + method : 'multitaper' | 'morlet', default 'morlet' + The time-frequency method. 'morlet' convolves a Morlet wavelet. + 'multitaper' uses Morlet wavelets windowed with multiple DPSS + multitapers. + n_cycles : float | array of float, default 7.0 + Number of cycles in the Morlet wavelet. Fixed number + or one per frequency. + time_bandwidth : float, default None + If None and method=multitaper, will be set to 4.0 (3 tapers). + Time x (Full) Bandwidth product. Only applies if + method == 'multitaper'. The number of good tapers (low-bias) is + chosen automatically based on this to equal floor(time_bandwidth - 1). + use_fft : bool, default True + Use the FFT for convolutions or not. + decim : int | slice, default 1 + To reduce memory usage, decimation factor after time-frequency + decomposition. + If `int`, returns tfr[..., ::decim]. + If `slice`, returns tfr[..., decim]. + + .. 
note:: Decimation may create aliasing artifacts, yet decimation + is done after the convolutions. + + output : str, default 'complex' + * 'complex' : single trial complex. + * 'power' : single trial power. + * 'phase' : single trial phase. + %(n_jobs)s + The number of epochs to process at the same time. The parallelization + is implemented across channels. + %(verbose)s + + See Also + -------- + mne.time_frequency.tfr_morlet + mne.time_frequency.tfr_multitaper + """ + + @verbose + def __init__(self, freqs, sfreq=1.0, method='morlet', n_cycles=7.0, + time_bandwidth=None, use_fft=True, decim=1, output='complex', + n_jobs=1, *, verbose=None): # noqa: D102 + freqs, sfreq, _, n_cycles, time_bandwidth, decim = \ + _check_tfr_param(freqs, sfreq, method, True, n_cycles, + time_bandwidth, use_fft, decim, output) + self.freqs = freqs + self.sfreq = sfreq + self.method = method + self.n_cycles = n_cycles + self.time_bandwidth = time_bandwidth + self.use_fft = use_fft + self.decim = decim + # Check that output is not an average metric (e.g. ITC) + self.output = _check_option('output', output, + ['complex', 'power', 'phase']) + self.n_jobs = n_jobs + + def fit_transform(self, X, y=None): + """Time-frequency transform of times series along the last axis. + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data samples. The channel dimension can be zero- or + 1-dimensional. + y : None + For scikit-learn compatibility purposes. + + Returns + ------- + Xt : array, shape (n_samples, n_channels, n_freqs, n_times) + The time-frequency transform of the data, where n_channels can be + zero- or 1-dimensional. + """ + return self.fit(X, y).transform(X) + + def fit(self, X, y=None): # noqa: D401 + """Do nothing (for scikit-learn compatibility purposes). + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data. + y : array | None + The target values. + + Returns + ------- + self : object + Return self. + """ + return self + + def transform(self, X): + """Time-frequency transform of times series along the last axis. + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data samples. The channel dimension can be zero- or + 1-dimensional. + + Returns + ------- + Xt : array, shape (n_samples, n_channels, n_freqs, n_times) + The time-frequency transform of the data, where n_channels can be + zero- or 1-dimensional. + """ + # Ensure 3-dimensional X + shape = X.shape[1:-1] + if not shape: + X = X[:, np.newaxis, :] + + # Compute time-frequency + Xt = _compute_tfr(X, self.freqs, self.sfreq, self.method, + self.n_cycles, True, self.time_bandwidth, + self.use_fft, self.decim, self.output, self.n_jobs) + + # Back to original shape + if not shape: + Xt = Xt[:, 0, :] + + return Xt diff --git a/python/libs/mne/decoding/transformer.py b/python/libs/mne/decoding/transformer.py new file mode 100644 index 0000000..2562cde --- /dev/null +++ b/python/libs/mne/decoding/transformer.py @@ -0,0 +1,848 @@ +# -*- coding: utf-8 -*- +# Authors: Mainak Jas +# Alexandre Gramfort +# Romain Trachel +# +# License: BSD-3-Clause + +import numpy as np + +from .mixin import TransformerMixin +from .base import BaseEstimator + +from .. 
import pick_types +from ..filter import filter_data, _triage_filter_params +from ..time_frequency.psd import psd_array_multitaper +from ..utils import (fill_doc, _check_option, _validate_type, verbose, + _VerboseDep) +from ..io.pick import (pick_info, _pick_data_channels, _picks_by_type, + _picks_to_idx) +from ..cov import _check_scalings_user + + +class _ConstantScaler(): + """Scale channel types using constant values.""" + + def __init__(self, info, scalings, do_scaling=True): + self._scalings = scalings + self._info = info + self._do_scaling = do_scaling + + def fit(self, X, y=None): + scalings = _check_scalings_user(self._scalings) + picks_by_type = _picks_by_type(pick_info( + self._info, _pick_data_channels(self._info, exclude=()))) + std = np.ones(sum(len(p[1]) for p in picks_by_type)) + if X.shape[1] != len(std): + raise ValueError('info had %d data channels but X has %d channels' + % (len(std), len(X))) + if self._do_scaling: # this is silly, but necessary for completeness + for kind, picks in picks_by_type: + std[picks] = 1. / scalings[kind] + self.std_ = std + self.mean_ = np.zeros_like(std) + return self + + def transform(self, X): + return X / self.std_ + + def inverse_transform(self, X, y=None): + return X * self.std_ + + def fit_transform(self, X, y=None): + return self.fit(X, y).transform(X) + + +def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): + """Reshape epochs and apply function.""" + if not isinstance(X, np.ndarray): + raise ValueError("data should be an np.ndarray, got %s." % type(X)) + orig_shape = X.shape + X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) + X = func(X, *args, **kwargs) + if return_result: + X.shape = (orig_shape[0], orig_shape[2], orig_shape[1]) + X = X.transpose(0, 2, 1) + return X + + +@fill_doc +class Scaler(TransformerMixin, BaseEstimator): + u"""Standardize channel data. + + This class scales data for each channel. It differs from scikit-learn + classes (e.g., :class:`sklearn.preprocessing.StandardScaler`) in that + it scales each *channel* by estimating μ and σ using data from all + time points and epochs, as opposed to standardizing each *feature* + (i.e., each time point for each channel) by estimating using μ and σ + using data from all epochs. + + Parameters + ---------- + %(info)s Only necessary if ``scalings`` is a dict or None. + scalings : dict, str, default None + Scaling method to be applied to data channel wise. + + * if scalings is None (default), scales mag by 1e15, grad by 1e13, + and eeg by 1e6. + * if scalings is :class:`dict`, keys are channel types and values + are scale factors. + * if ``scalings=='median'``, + :class:`sklearn.preprocessing.RobustScaler` + is used (requires sklearn version 0.17+). + * if ``scalings=='mean'``, + :class:`sklearn.preprocessing.StandardScaler` + is used. + + with_mean : bool, default True + If True, center the data using mean (or median) before scaling. + Ignored for channel-type scaling. + with_std : bool, default True + If True, scale the data to unit variance (``scalings='mean'``), + quantile range (``scalings='median``), or using channel type + if ``scalings`` is a dict or None). 
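+
+    Examples
+    --------
+    A minimal, illustrative sketch; the channel names, sampling rate, and
+    shapes below are hypothetical.
+
+    >>> import numpy as np
+    >>> from mne import create_info
+    >>> from mne.decoding import Scaler
+    >>> info = create_info(['EEG 001', 'EEG 002'], sfreq=100.,
+    ...                    ch_types='eeg')
+    >>> X = np.random.randn(10, 2, 50)  # n_epochs, n_channels, n_times
+    >>> Scaler(info).fit_transform(X).shape
+    (10, 2, 50)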
+ """ + + def __init__(self, info=None, scalings=None, with_mean=True, + with_std=True): # noqa: D102 + self.info = info + self.with_mean = with_mean + self.with_std = with_std + self.scalings = scalings + + if not (scalings is None or isinstance(scalings, (dict, str))): + raise ValueError('scalings type should be dict, str, or None, ' + 'got %s' % type(scalings)) + if isinstance(scalings, str): + _check_option('scalings', scalings, ['mean', 'median']) + if scalings is None or isinstance(scalings, dict): + if info is None: + raise ValueError('Need to specify "info" if scalings is' + '%s' % type(scalings)) + self._scaler = _ConstantScaler(info, scalings, self.with_std) + elif scalings == 'mean': + from sklearn.preprocessing import StandardScaler + self._scaler = StandardScaler( + with_mean=self.with_mean, with_std=self.with_std) + else: # scalings == 'median': + from sklearn.preprocessing import RobustScaler + self._scaler = RobustScaler( + with_centering=self.with_mean, with_scaling=self.with_std) + + def fit(self, epochs_data, y=None): + """Standardize data across channels. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data to concatenate channels. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of Scaler + The modified instance. + """ + _validate_type(epochs_data, np.ndarray, 'epochs_data') + if epochs_data.ndim == 2: + epochs_data = epochs_data[..., np.newaxis] + assert epochs_data.ndim == 3, epochs_data.shape + _sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y) + return self + + def transform(self, epochs_data): + """Standardize data across channels. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels[, n_times]) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. + """ + _validate_type(epochs_data, np.ndarray, 'epochs_data') + if epochs_data.ndim == 2: # can happen with SlidingEstimator + if self.info is not None: + assert len(self.info['ch_names']) == epochs_data.shape[1] + epochs_data = epochs_data[..., np.newaxis] + assert epochs_data.ndim == 3, epochs_data.shape + return _sklearn_reshape_apply(self._scaler.transform, True, + epochs_data) + + def fit_transform(self, epochs_data, y=None): + """Fit to data, then transform it. + + Fits transformer to epochs_data and y and returns a transformed version + of epochs_data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : None | array, shape (n_epochs,) + The label for each epoch. + Defaults to None. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. + """ + return self.fit(epochs_data, y).transform(epochs_data) + + def inverse_transform(self, epochs_data): + """Invert standardization of data across channels. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. 
+ """ + assert epochs_data.ndim == 3, epochs_data.shape + return _sklearn_reshape_apply(self._scaler.inverse_transform, True, + epochs_data) + + +class Vectorizer(TransformerMixin): + """Transform n-dimensional array into 2D array of n_samples by n_features. + + This class reshapes an n-dimensional array into an n_samples * n_features + array, usable by the estimators and transformers of scikit-learn. + + Attributes + ---------- + features_shape_ : tuple + Stores the original shape of data. + + Examples + -------- + clf = make_pipeline(SpatialFilter(), _XdawnTransformer(), Vectorizer(), + LogisticRegression()) + """ + + def fit(self, X, y=None): + """Store the shape of the features of X. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + self : instance of Vectorizer + Return the modified instance. + """ + X = np.asarray(X) + self.features_shape_ = X.shape[1:] + return self + + def transform(self, X): + """Convert given array into two dimensions. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + + Returns + ------- + X : array, shape (n_samples, n_features) + The transformed data. + """ + X = np.asarray(X) + if X.shape[1:] != self.features_shape_: + raise ValueError("Shape of X used in fit and transform must be " + "same") + return X.reshape(len(X), -1) + + def fit_transform(self, X, y=None): + """Fit the data, then transform in one step. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + X : array, shape (n_samples, -1) + The transformed data. + """ + return self.fit(X).transform(X) + + def inverse_transform(self, X): + """Transform 2D data back to its original feature shape. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Data to be transformed back to original shape. + + Returns + ------- + X : array + The data transformed into shape as used in fit. The first + dimension is of length n_samples. + """ + X = np.asarray(X) + if X.ndim not in (2, 3): + raise ValueError("X should be of 2 or 3 dimensions but has shape " + "%s" % (X.shape,)) + return X.reshape(X.shape[:-1] + self.features_shape_) + + +@fill_doc +class PSDEstimator(TransformerMixin, _VerboseDep): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + sfreq : float + The sampling frequency. + fmin : float + The lower frequency of interest. + fmax : float + The upper frequency of interest. + bandwidth : float + The bandwidth of the multi taper windowing function in Hz. + adaptive : bool + Use adaptive weights to combine the tapered spectra into PSD + (slow, use n_jobs >> 1 to speed up computation). + low_bias : bool + Only use tapers with more than 90%% spectral concentration within + bandwidth. 
+ n_jobs : int + Number of parallel jobs to use (only used if adaptive=True). + %(normalization)s + %(verbose)s + + See Also + -------- + mne.time_frequency.psd_multitaper + """ + + @verbose + def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None, + adaptive=False, low_bias=True, n_jobs=1, + normalization='length', *, verbose=None): # noqa: D102 + self.sfreq = sfreq + self.fmin = fmin + self.fmax = fmax + self.bandwidth = bandwidth + self.adaptive = adaptive + self.low_bias = low_bias + self.n_jobs = n_jobs + self.normalization = normalization + + def fit(self, epochs_data, y): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of PSDEstimator + The modified instance. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError("epochs_data should be of type ndarray (got %s)." + % type(epochs_data)) + + return self + + def transform(self, epochs_data): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + psd : array, shape (n_signals, n_freqs) or (n_freqs,) + The computed PSD. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError("epochs_data should be of type ndarray (got %s)." + % type(epochs_data)) + psd, _ = psd_array_multitaper( + epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax, + bandwidth=self.bandwidth, adaptive=self.adaptive, + low_bias=self.low_bias, normalization=self.normalization, + n_jobs=self.n_jobs) + return psd + + +@fill_doc +class FilterEstimator(TransformerMixin, _VerboseDep): + """Estimator to filter RtEpochs. + + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by "picks". + + l_freq and h_freq are the frequencies below which and above which, + respectively, to filter out of the data. Thus the uses are: + + - l_freq < h_freq: band-pass filter + - l_freq > h_freq: band-stop filter + - l_freq is not None, h_freq is None: low-pass filter + - l_freq is None, h_freq is not None: high-pass filter + + If n_jobs > 1, more memory is required as "len(picks) * n_times" + additional time points need to be temporarily stored in memory. + + Parameters + ---------- + %(info_not_none)s + %(l_freq)s + %(h_freq)s + %(picks_good_data)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + n_jobs : int | str + Number of jobs to run in parallel. + Can be 'cuda' if ``cupy`` is installed properly and method='fir'. + method : str + 'fir' will use overlap-add FIR filtering, 'iir' will use IIR + forward-backward filtering (via filtfilt). + iir_params : dict | None + Dictionary of parameters to use for IIR filtering. + See mne.filter.construct_iir_filter for details. If iir_params + is None and method="iir", 4th order Butterworth will be used. + %(fir_design)s + %(verbose)s + + See Also + -------- + TemporalFilter + + Notes + ----- + This is primarily meant for use in conjunction with + :class:`mne_realtime.RtEpochs`. In general it is not recommended in a + normal processing pipeline as it may result in edge artifacts. Use with + caution. 
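+
+    Examples
+    --------
+    A minimal sketch on white noise; the info, cutoffs, and shapes here
+    are hypothetical.
+
+    >>> import numpy as np
+    >>> from mne import create_info
+    >>> from mne.decoding import FilterEstimator
+    >>> info = create_info(['EEG 001'], sfreq=100., ch_types='eeg')
+    >>> filt = FilterEstimator(info, l_freq=1., h_freq=30.)
+    >>> X = np.random.randn(5, 1, 1000)  # n_epochs, n_channels, n_times
+    >>> filt.fit_transform(X, np.zeros(5)).shape
+    (5, 1, 1000)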
+ """ + + def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1, + method='fir', iir_params=None, fir_design='firwin', *, + verbose=None): # noqa: D102 + self.info = info + self.l_freq = l_freq + self.h_freq = h_freq + self.picks = _picks_to_idx(info, picks) + self.filter_length = filter_length + self.l_trans_bandwidth = l_trans_bandwidth + self.h_trans_bandwidth = h_trans_bandwidth + self.n_jobs = n_jobs + self.method = method + self.iir_params = iir_params + self.fir_design = fir_design + + def fit(self, epochs_data, y): + """Filter data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of FilterEstimator + The modified instance. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError("epochs_data should be of type ndarray (got %s)." + % type(epochs_data)) + + if self.picks is None: + self.picks = pick_types(self.info, meg=True, eeg=True, + ref_meg=False, exclude=[]) + + if self.l_freq == 0: + self.l_freq = None + if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.): + self.h_freq = None + if self.l_freq is not None and not isinstance(self.l_freq, float): + self.l_freq = float(self.l_freq) + if self.h_freq is not None and not isinstance(self.h_freq, float): + self.h_freq = float(self.h_freq) + + if self.info['lowpass'] is None or (self.h_freq is not None and + (self.l_freq is None or + self.l_freq < self.h_freq) and + self.h_freq < + self.info['lowpass']): + with self.info._unlock(): + self.info['lowpass'] = self.h_freq + + if self.info['highpass'] is None or (self.l_freq is not None and + (self.h_freq is None or + self.l_freq < self.h_freq) and + self.l_freq > + self.info['highpass']): + with self.info._unlock(): + self.info['highpass'] = self.l_freq + + return self + + def transform(self, epochs_data): + """Filter data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data after filtering. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError("epochs_data should be of type ndarray (got %s)." + % type(epochs_data)) + epochs_data = np.atleast_3d(epochs_data) + return filter_data( + epochs_data, self.info['sfreq'], self.l_freq, self.h_freq, + self.picks, self.filter_length, self.l_trans_bandwidth, + self.h_trans_bandwidth, method=self.method, + iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False, + fir_design=self.fir_design, verbose=False) + + +class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator): + """Use unsupervised spatial filtering across time and samples. + + Parameters + ---------- + estimator : instance of sklearn.base.BaseEstimator + Estimator using some decomposition algorithm. + average : bool, default False + If True, the estimator is fitted on the average across samples + (e.g. epochs). 
+ """ + + def __init__(self, estimator, average=False): # noqa: D102 + # XXX: Use _check_estimator #3381 + for attr in ('fit', 'transform', 'fit_transform'): + if not hasattr(estimator, attr): + raise ValueError('estimator must be a scikit-learn ' + 'transformer, missing %s method' % attr) + + if not isinstance(average, bool): + raise ValueError("average parameter must be of bool type, got " + "%s instead" % type(bool)) + + self.estimator = estimator + self.average = average + + def fit(self, X, y=None): + """Fit the spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + self : instance of UnsupervisedSpatialFilter + Return the modified instance. + """ + if self.average: + X = np.mean(X, axis=0).T + else: + n_epochs, n_channels, n_times = X.shape + # trial as time samples + X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs * + n_times)).T + self.estimator.fit(X) + return self + + def fit_transform(self, X, y=None): + """Transform the data to its filtered components after fitting. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self.fit(X).transform(X) + + def transform(self, X): + """Transform the data to its spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self._apply_method(X, 'transform') + + def inverse_transform(self, X): + """Inverse transform the data to its original space. + + Parameters + ---------- + X : array, shape (n_epochs, n_components, n_times) + The data to be inverted. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self._apply_method(X, 'inverse_transform') + + def _apply_method(self, X, method): + """Vectorize time samples as trials, apply method and reshape back. + + Parameters + ---------- + X : array, shape (n_epochs, n_dims, n_times) + The data to be inverted. + + Returns + ------- + X : array, shape (n_epochs, n_dims, n_times) + The transformed data. + """ + n_epochs, n_channels, n_times = X.shape + # trial as time samples + X = np.transpose(X, [1, 0, 2]) + X = np.reshape(X, [n_channels, n_epochs * n_times]).T + # apply method + method = getattr(self.estimator, method) + X = method(X) + # put it back to n_epochs, n_dimensions + X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2]) + return X + + +@fill_doc +class TemporalFilter(TransformerMixin): + """Estimator to filter data array along the last dimension. + + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels. + + l_freq and h_freq are the frequencies below which and above which, + respectively, to filter out of the data. Thus the uses are: + + - l_freq < h_freq: band-pass filter + - l_freq > h_freq: band-stop filter + - l_freq is not None, h_freq is None: low-pass filter + - l_freq is None, h_freq is not None: high-pass filter + + See :func:`mne.filter.filter_data`. + + Parameters + ---------- + l_freq : float | None + Low cut-off frequency in Hz. If None the data are only low-passed. 
+ h_freq : float | None + High cut-off frequency in Hz. If None the data are only + high-passed. + sfreq : float, default 1.0 + Sampling frequency in Hz. + filter_length : str | int, default 'auto' + Length of the FIR filter to use (if applicable): + + * int: specified length in samples. + * 'auto' (default in 0.14): the filter length is chosen based + on the size of the transition regions (7 times the reciprocal + of the shortest transition band). + * str: (default in 0.13 is "10s") a human-readable time in + units of "s" or "ms" (e.g., "10s" or "5500ms") will be + converted to that number of samples if ``phase="zero"``, or + the shortest power-of-two length at least that duration for + ``phase="zero-double"``. + + l_trans_bandwidth : float | str + Width of the transition band at the low cut-off frequency in Hz + (high pass or cutoff 1 in bandpass). Can be "auto" + (default in 0.14) to use a multiple of ``l_freq``:: + + min(max(l_freq * 0.25, 2), l_freq) + + Only used for ``method='fir'``. + h_trans_bandwidth : float | str + Width of the transition band at the high cut-off frequency in Hz + (low pass or cutoff 2 in bandpass). Can be "auto" + (default in 0.14) to use a multiple of ``h_freq``:: + + min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq) + + Only used for ``method='fir'``. + n_jobs : int | str, default 1 + Number of jobs to run in parallel. + Can be 'cuda' if ``cupy`` is installed properly and method='fir'. + method : str, default 'fir' + 'fir' will use overlap-add FIR filtering, 'iir' will use IIR + forward-backward filtering (via filtfilt). + iir_params : dict | None, default None + Dictionary of parameters to use for IIR filtering. + See mne.filter.construct_iir_filter for details. If iir_params + is None and method="iir", 4th order Butterworth will be used. + fir_window : str, default 'hamming' + The window to use in FIR design, can be "hamming", "hann", + or "blackman". + fir_design : str + Can be "firwin" (default) to use :func:`scipy.signal.firwin`, + or "firwin2" to use :func:`scipy.signal.firwin2`. "firwin" uses + a time-domain design technique that generally gives improved + attenuation using fewer samples than "firwin2". + + .. versionadded:: 0.15 + %(verbose)s + + See Also + -------- + FilterEstimator + Vectorizer + mne.filter.filter_data + """ + + @verbose + def __init__(self, l_freq=None, h_freq=None, sfreq=1.0, + filter_length='auto', l_trans_bandwidth='auto', + h_trans_bandwidth='auto', n_jobs=1, method='fir', + iir_params=None, fir_window='hamming', fir_design='firwin', + *, verbose=None): # noqa: D102 + self.l_freq = l_freq + self.h_freq = h_freq + self.sfreq = sfreq + self.filter_length = filter_length + self.l_trans_bandwidth = l_trans_bandwidth + self.h_trans_bandwidth = h_trans_bandwidth + self.n_jobs = n_jobs + self.method = method + self.iir_params = iir_params + self.fir_window = fir_window + self.fir_design = fir_design + + if not isinstance(self.n_jobs, int) and self.n_jobs == 'cuda': + raise ValueError('n_jobs must be int or "cuda", got %s instead.' + % type(self.n_jobs)) + + def fit(self, X, y=None): + """Do nothing (for scikit-learn compatibility purposes). + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) or or shape (n_channels, n_times) + The data to be filtered over the last dimension. The channels + dimension can be zero when passing a 2D array. + y : None + Not used, for scikit-learn compatibility issues. + + Returns + ------- + self : instance of TemporalFilter + The modified instance. 
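+
+        Examples
+        --------
+        ``fit`` is a no-op for this transformer, so a typical call goes
+        straight to ``fit_transform``; the cutoffs and shapes here are
+        hypothetical.
+
+        >>> import numpy as np
+        >>> from mne.decoding import TemporalFilter
+        >>> filt = TemporalFilter(l_freq=1., h_freq=30., sfreq=100.)
+        >>> X = np.random.randn(5, 2, 1000)
+        >>> filt.fit_transform(X).shape
+        (5, 2, 1000)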
+ """ # noqa: E501 + return self + + def transform(self, X): + """Filter data along the last dimension. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) + The data to be filtered over the last dimension. The channels + dimension can be zero when passing a 2D array. + + Returns + ------- + X : array + The data after filtering. + """ # noqa: E501 + X = np.atleast_2d(X) + + if X.ndim > 3: + raise ValueError("Array must be of at max 3 dimensions instead " + "got %s dimensional matrix" % (X.ndim)) + + shape = X.shape + X = X.reshape(-1, shape[-1]) + (X, self.sfreq, self.l_freq, self.h_freq, self.l_trans_bandwidth, + self.h_trans_bandwidth, self.filter_length, _, self.fir_window, + self.fir_design) = \ + _triage_filter_params(X, self.sfreq, self.l_freq, self.h_freq, + self.l_trans_bandwidth, + self.h_trans_bandwidth, self.filter_length, + self.method, phase='zero', + fir_window=self.fir_window, + fir_design=self.fir_design) + X = filter_data(X, self.sfreq, self.l_freq, self.h_freq, + filter_length=self.filter_length, + l_trans_bandwidth=self.l_trans_bandwidth, + h_trans_bandwidth=self.h_trans_bandwidth, + n_jobs=self.n_jobs, method=self.method, + iir_params=self.iir_params, copy=False, + fir_window=self.fir_window, fir_design=self.fir_design) + return X.reshape(shape) diff --git a/python/libs/mne/defaults.py b/python/libs/mne/defaults.py new file mode 100644 index 0000000..42c41de --- /dev/null +++ b/python/libs/mne/defaults.py @@ -0,0 +1,194 @@ +# Authors: Alexandre Gramfort +# Denis A. Engemann +# Eric Larson +# +# License: BSD-3-Clause + +from copy import deepcopy + +DEFAULTS = dict( + color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m', emg='k', + ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', + exci='k', ias='k', syst='k', seeg='saddlebrown', dbs='seagreen', + dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', + fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', + fnirs_fd_phase='k', fnirs_od='k', csd='k', whitened='k'), + si_units=dict(mag='T', grad='T/m', eeg='V', eog='V', ecg='V', emg='V', + misc='AU', seeg='V', dbs='V', dipole='Am', gof='GOF', + bio='V', ecog='V', hbo='M', hbr='M', ref_meg='T', + fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', + fnirs_fd_phase='rad', fnirs_od='V', csd='V/m²', + whitened='Z'), + units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', + misc='AU', seeg='mV', dbs='µV', dipole='nAm', gof='GOF', + bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', + fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', + fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²', + whitened='Z'), + # scalings for the units + scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, + misc=1.0, seeg=1e3, dbs=1e6, ecog=1e6, dipole=1e9, gof=1.0, + bio=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, + fnirs_cw_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3, whitened=1.), + # rough guess for a good plot + scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, + ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', + stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, + seeg=1e-4, dbs=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, + hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, + fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, + fnirs_od=2e-2, csd=200e-4, + dipole=1e-7, gof=1e2), + scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings + seeg=1e1, dbs=1e4, ecog=1e4, hbo=1e4, hbr=1e4), + ylim=dict(mag=(-600., 600.), 
grad=(-200., 200.), eeg=(-200., 200.), + misc=(-5., 5.), seeg=(-20., 20.), dbs=(-200., 200.), + dipole=(-100., 100.), gof=(0., 1.), bio=(-500., 500.), + ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.)), + titles=dict(mag='Magnetometers', grad='Gradiometers', eeg='EEG', eog='EOG', + ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', dbs='DBS', + bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', + ref_meg='Reference Magnetometers', + fnirs_cw_amplitude='fNIRS (CW amplitude)', + fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', + fnirs_fd_phase='fNIRS (FD phase)', + fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', + gof='Goodness of fit', csd='Current source density', + stim='Stimulus', + ), + mask_params=dict(marker='o', + markerfacecolor='w', + markeredgecolor='k', + linewidth=0, + markeredgewidth=1, + markersize=4), + coreg=dict( + mri_fid_opacity=1.0, + dig_fid_opacity=1.0, + + mri_fid_scale=5e-3, + dig_fid_scale=8e-3, + extra_scale=4e-3, + eeg_scale=4e-3, eegp_scale=20e-3, eegp_height=0.1, + ecog_scale=5e-3, + seeg_scale=5e-3, + dbs_scale=5e-3, + fnirs_scale=5e-3, + source_scale=5e-3, + detector_scale=5e-3, + hpi_scale=4e-3, + + head_color=(0.988, 0.89, 0.74), + hpi_color=(1., 0., 1.), + extra_color=(1., 1., 1.), + meg_color=(0., 0.25, 0.5), ref_meg_color=(0.5, 0.5, 0.5), + helmet_color=(0.0, 0.0, 0.6), + eeg_color=(1., 0.596, 0.588), eegp_color=(0.839, 0.15, 0.16), + ecog_color=(1., 1., 1.), + dbs_color=(0.82, 0.455, 0.659), + seeg_color=(1., 1., .3), + fnirs_color=(1., .647, 0.), + source_color=(1., .05, 0.), + detector_color=(.3, .15, .15), + lpa_color=(1., 0., 0.), + nasion_color=(0., 1., 0.), + rpa_color=(0., 0., 1.), + ), + noise_std=dict(grad=5e-13, mag=20e-15, eeg=0.2e-6), + eloreta_options=dict(eps=1e-6, max_iter=20, force_equal=False), + depth_mne=dict(exp=0.8, limit=10., limit_depth_chs=True, + combine_xyz='spectral', allow_fixed_depth=False), + depth_sparse=dict(exp=0.8, limit=None, limit_depth_chs='whiten', + combine_xyz='fro', allow_fixed_depth=True), + interpolation_method=dict(eeg='spline', meg='MNE', fnirs='nearest'), + volume_options=dict( + alpha=None, resolution=1., surface_alpha=None, blending='mip', + silhouette_alpha=None, silhouette_linewidth=2.), + prefixes={'': 1e0, 'd': 1e1, 'c': 1e2, 'm': 1e3, 'µ': 1e6, 'u': 1e6, + 'n': 1e9, 'p': 1e12, 'f': 1e15}, + transform_zooms=dict( + translation=None, rigid=None, affine=None, sdr=None), + transform_niter=dict( + translation=(10000, 1000, 100), + rigid=(10000, 1000, 100), + affine=(10000, 1000, 100), + sdr=(10, 10, 5)), + volume_label_indices=( + # Left and middle + 4, # Left-Lateral-Ventricle + 5, # Left-Inf-Lat-Vent + + 8, # Left-Cerebellum-Cortex + + 10, # Left-Thalamus-Proper + 11, # Left-Caudate + 12, # Left-Putamen + 13, # Left-Pallidum + 14, # 3rd-Ventricle + 15, # 4th-Ventricle + 16, # Brain-Stem + 17, # Left-Hippocampus + 18, # Left-Amygdala + + 26, # Left-Accumbens-area + + 28, # Left-VentralDC + + # Right + 43, # Right-Lateral-Ventricle + 44, # Right-Inf-Lat-Vent + + 47, # Right-Cerebellum-Cortex + + 49, # Right-Thalamus-Proper + 50, # Right-Caudate + 51, # Right-Putamen + 52, # Right-Pallidum + 53, # Right-Hippocampus + 54, # Right-Amygdala + + 58, # Right-Accumbens-area + + 60, # Right-VentralDC + ), + report_stc_plot_kwargs=dict( + views=('lateral', 'medial'), + hemi='split', + backend='pyvistaqt', + time_viewer=False, + show_traces=False, + size=(450, 450), + background='white', + time_label=None, + add_data_kwargs={ + 'colorbar_kwargs': { + 'label_font_size': 12, + 'n_labels': 5 + } + } 
+ ) +) + + +def _handle_default(k, v=None): + """Avoid dicts as default keyword arguments. + + Use this function instead to resolve default dict values. Example usage:: + + scalings = _handle_default('scalings', scalings) + + """ + this_mapping = deepcopy(DEFAULTS[k]) + if v is not None: + if isinstance(v, dict): + this_mapping.update(v) + else: + for key in this_mapping: + this_mapping[key] = v + return this_mapping + + +HEAD_SIZE_DEFAULT = 0.095 # in [m] +_BORDER_DEFAULT = 'mean' +_EXTRAPOLATE_DEFAULT = 'auto' diff --git a/python/libs/mne/dipole.py b/python/libs/mne/dipole.py new file mode 100644 index 0000000..1c42f45 --- /dev/null +++ b/python/libs/mne/dipole.py @@ -0,0 +1,1598 @@ +# -*- coding: utf-8 -*- +"""Single-dipole functions and classes.""" + +# Authors: Alexandre Gramfort +# Eric Larson +# +# License: Simplified BSD + +from copy import deepcopy +import functools +from functools import partial +import re + +import numpy as np + +from .cov import compute_whitener, _ensure_cov +from .io.constants import FIFF +from .io.pick import pick_types +from .io.proj import make_projector, _needs_eeg_average_ref_proj +from .bem import _fit_sphere +from .evoked import _read_evoked, _aspect_rev, _write_evokeds +from .fixes import pinvh +from ._freesurfer import read_freesurfer_lut, _get_aseg +from .transforms import _print_coord_trans, _coord_frame_name, apply_trans +from .viz.evoked import _plot_evoked +from ._freesurfer import head_to_mni, head_to_mri +from .forward._make_forward import (_get_trans, _setup_bem, + _prep_meg_channels, _prep_eeg_channels) +from .forward._compute_forward import (_compute_forwards_meeg, + _prep_field_computation) + +from .surface import (transform_surface_to, _compute_nearest, + _points_outside_surface) +from .bem import _bem_find_surface, _bem_surf_name +from .source_space import _make_volume_source_space, SourceSpaces +from .parallel import parallel_func +from .utils import (logger, verbose, _time_mask, warn, _check_fname, + check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin, + _svd_lwork, _repeated_svd, _get_blas_funcs, _validate_type, + _VerboseDep) + + +@fill_doc +class Dipole(_VerboseDep): + u"""Dipole class for sequential dipole fits. + + .. note:: This class should usually not be instantiated directly, + instead :func:`mne.read_dipole` should be used. + + Used to store positions, orientations, amplitudes, times, goodness of fit + of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit + or certain inverse solvers. Note that dipole position vectors are given in + the head coordinate frame. + + Parameters + ---------- + times : array, shape (n_dipoles,) + The time instants at which each dipole was fitted (sec). + pos : array, shape (n_dipoles, 3) + The dipoles positions (m) in head coordinates. + amplitude : array, shape (n_dipoles,) + The amplitude of the dipoles (Am). + ori : array, shape (n_dipoles, 3) + The dipole orientations (normalized to unit length). + gof : array, shape (n_dipoles,) + The goodness of fit. + name : str | None + Name of the dipole. + conf : dict + Confidence limits in dipole orientation for "vol" in m^3 (volume), + "depth" in m (along the depth axis), "long" in m (longitudinal axis), + "trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am + (currents). The current confidence limit in the depth direction is + assumed to be zero (although it can be non-zero when a BEM is used). + + .. versionadded:: 0.15 + khi2 : array, shape (n_dipoles,) + The χ^2 values for the fits. + + .. 
versionadded:: 0.15
+    nfree : array, shape (n_dipoles,)
+        The number of free parameters for each fit.
+
+        .. versionadded:: 0.15
+    %(verbose)s
+
+    See Also
+    --------
+    fit_dipole
+    DipoleFixed
+    read_dipole
+
+    Notes
+    -----
+    This class is for sequential dipole fits, where the position
+    changes as a function of time. For fixed dipole fits, where the
+    position is fixed as a function of time, use :class:`mne.DipoleFixed`.
+    """
+
+    @verbose
+    def __init__(self, times, pos, amplitude, ori, gof,
+                 name=None, conf=None, khi2=None, nfree=None,
+                 *, verbose=None):  # noqa: D102
+        self.times = np.array(times)
+        self.pos = np.array(pos)
+        self.amplitude = np.array(amplitude)
+        self.ori = np.array(ori)
+        self.gof = np.array(gof)
+        self.name = name
+        self.conf = dict()
+        if conf is not None:
+            for key, value in conf.items():
+                self.conf[key] = np.array(value)
+        self.khi2 = np.array(khi2) if khi2 is not None else None
+        self.nfree = np.array(nfree) if nfree is not None else None
+
+    def __repr__(self):  # noqa: D105
+        s = "n_times : %s" % len(self.times)
+        s += ", tmin : %0.3f" % np.min(self.times)
+        s += ", tmax : %0.3f" % np.max(self.times)
+        return "<Dipole | %s>" % s
+
+    @verbose
+    def save(self, fname, overwrite=False, *, verbose=None):
+        """Save dipole in a .dip or .bdip file.
+
+        Parameters
+        ----------
+        fname : str
+            The name of the .dip or .bdip file.
+        %(overwrite)s
+
+            .. versionadded:: 0.20
+        %(verbose)s
+
+        Notes
+        -----
+        .. versionchanged:: 0.20
+           Support for writing bdip (Xfit binary) files.
+        """
+        # obligatory fields
+        fname = _check_fname(fname, overwrite=overwrite)
+        if fname.endswith('.bdip'):
+            _write_dipole_bdip(fname, self)
+        else:
+            _write_dipole_text(fname, self)
+
+    @fill_doc
+    def crop(self, tmin=None, tmax=None, include_tmax=True):
+        """Crop data to a given time interval.
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        %(include_tmax)s
+
+        Returns
+        -------
+        self : instance of Dipole
+            The cropped instance.
+        """
+        sfreq = None
+        if len(self.times) > 1:
+            sfreq = 1. / np.median(np.diff(self.times))
+        mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
+                          include_tmax=include_tmax)
+        for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
+                     'khi2', 'nfree'):
+            if getattr(self, attr) is not None:
+                setattr(self, attr, getattr(self, attr)[mask])
+        for key in self.conf.keys():
+            self.conf[key] = self.conf[key][mask]
+        return self
+
+    def copy(self):
+        """Copy the Dipole object.
+
+        Returns
+        -------
+        dip : instance of Dipole
+            The copied dipole instance.
+        """
+        return deepcopy(self)
+
+    @verbose
+    def plot_locations(self, trans, subject, subjects_dir=None,
+                       mode='orthoview', coord_frame='mri', idx='gof',
+                       show_all=True, ax=None, block=False, show=True,
+                       scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
+                       verbose=None, title=None):
+        """Plot dipole locations in 3d.
+
+        Parameters
+        ----------
+        trans : dict
+            The mri to head trans.
+        subject : str
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT.
+        %(subjects_dir)s
+        mode : str
+            Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
+
+            .. versionadded:: 0.14.0
+        coord_frame : str
+            Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
+
+            .. versionadded:: 0.14.0
+        idx : int | 'gof' | 'amplitude'
+            Index of the initially plotted dipole. Can also be 'gof' to plot
+            the dipole with highest goodness of fit value or 'amplitude' to
+            plot the dipole with the highest amplitude.
The dipoles can also be + browsed through using up/down arrow keys or mouse scroll. Defaults + to 'gof'. Only used if mode equals 'orthoview'. + + .. versionadded:: 0.14.0 + show_all : bool + Whether to always plot all the dipoles. If True (default), the + active dipole is plotted as a red dot and it's location determines + the shown MRI slices. The the non-active dipoles are plotted as + small blue dots. If False, only the active dipole is plotted. + Only used if mode equals 'orthoview'. + + .. versionadded:: 0.14.0 + ax : instance of matplotlib Axes3D | None + Axes to plot into. If None (default), axes will be created. + Only used if mode equals 'orthoview'. + + .. versionadded:: 0.14.0 + block : bool + Whether to halt program execution until the figure is closed. + Defaults to False. Only used if mode equals 'orthoview'. + + .. versionadded:: 0.14.0 + show : bool + Show figure if True. Defaults to True. + Only used if mode equals 'orthoview'. + + scale : float + The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'. + color : tuple + The color of the dipoles if ``mode`` is 'arrow' or 'sphere'. + fig : instance of Figure3D | None + PyVista figure in which to plot the alignment. + If ``None``, creates a new 600x600 pixel figure with black + background. + + .. versionadded:: 0.14.0 + %(verbose)s + %(title_dipole_locs_fig)s + + .. versionadded:: 0.21.0 + + Returns + ------- + fig : instance of Figure3D or matplotlib.figure.Figure + The PyVista figure or matplotlib Figure. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + _check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview']) + + from .viz import plot_dipole_locations + return plot_dipole_locations( + self, trans, subject, subjects_dir, mode, coord_frame, idx, + show_all, ax, block, show, scale=scale, color=color, fig=fig, + title=title) + + @verbose + def to_mni(self, subject, trans, subjects_dir=None, + verbose=None): + """Convert dipole location from head to MNI coordinates. + + Parameters + ---------- + %(subject)s + %(trans_not_none)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + pos_mni : array, shape (n_pos, 3) + The MNI coordinates (in mm) of pos. + """ + mri_head_t, trans = _get_trans(trans) + return head_to_mni(self.pos, subject, mri_head_t, + subjects_dir=subjects_dir, verbose=verbose) + + @verbose + def to_mri(self, subject, trans, subjects_dir=None, + verbose=None): + """Convert dipole location from head to MRI surface RAS coordinates. + + Parameters + ---------- + %(subject)s + %(trans_not_none)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + pos_mri : array, shape (n_pos, 3) + The Freesurfer surface RAS coordinates (in mm) of pos. + """ + mri_head_t, trans = _get_trans(trans) + return head_to_mri(self.pos, subject, mri_head_t, + subjects_dir=subjects_dir, verbose=verbose) + + @verbose + def to_volume_labels(self, trans, subject='fsaverage', aseg='aparc+aseg', + subjects_dir=None, verbose=None): + """Find an ROI in atlas for the dipole positions. + + Parameters + ---------- + %(trans)s + %(subject)s + %(aseg)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + labels : list + List of anatomical region names from anatomical segmentation atlas. + + Notes + ----- + .. 
versionadded:: 0.24 + """ + aseg_img, aseg_data = _get_aseg(aseg, subject, subjects_dir) + mri_vox_t = np.linalg.inv(aseg_img.header.get_vox2ras_tkr()) + + # Load freesurface atlas LUT + lut_inv = read_freesurfer_lut()[0] + lut = {v: k for k, v in lut_inv.items()} + + # transform to voxel space from head space + pos = self.to_mri(subject, trans, subjects_dir=subjects_dir, + verbose=verbose) + pos = apply_trans(mri_vox_t, pos) + pos = np.rint(pos).astype(int) + + # Get voxel value and label from LUT + labels = [lut.get(aseg_data[tuple(coord)], 'Unknown') for coord in pos] + return labels + + def plot_amplitudes(self, color='k', show=True): + """Plot the dipole amplitudes as a function of time. + + Parameters + ---------- + color : matplotlib color + Color to use for the trace. + show : bool + Show figure if True. + + Returns + ------- + fig : matplotlib.figure.Figure + The figure object containing the plot. + """ + from .viz import plot_dipole_amplitudes + return plot_dipole_amplitudes([self], [color], show) + + def __getitem__(self, item): + """Get a time slice. + + Parameters + ---------- + item : array-like or slice + The slice of time points to use. + + Returns + ------- + dip : instance of Dipole + The sliced dipole. + """ + if isinstance(item, int): # make sure attributes stay 2d + item = [item] + + selected_times = self.times[item].copy() + selected_pos = self.pos[item, :].copy() + selected_amplitude = self.amplitude[item].copy() + selected_ori = self.ori[item, :].copy() + selected_gof = self.gof[item].copy() + selected_name = self.name + selected_conf = dict() + for key in self.conf.keys(): + selected_conf[key] = self.conf[key][item] + selected_khi2 = self.khi2[item] if self.khi2 is not None else None + selected_nfree = self.nfree[item] if self.nfree is not None else None + return Dipole( + selected_times, selected_pos, selected_amplitude, selected_ori, + selected_gof, selected_name, selected_conf, selected_khi2, + selected_nfree) + + def __len__(self): + """Return the number of dipoles. + + Returns + ------- + len : int + The number of dipoles. + + Examples + -------- + This can be used as:: + + >>> len(dipoles) # doctest: +SKIP + 10 + """ + return self.pos.shape[0] + + +def _read_dipole_fixed(fname): + """Read a fixed dipole FIF file.""" + logger.info('Reading %s ...' % fname) + info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname) + return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment) + + +@fill_doc +class DipoleFixed(ShiftTimeMixin, _VerboseDep): + """Dipole class for fixed-position dipole fits. + + .. note:: This class should usually not be instantiated directly, + instead :func:`mne.read_dipole` should be used. + + Parameters + ---------- + %(info_not_none)s + data : array, shape (n_channels, n_times) + The dipole data. + times : array, shape (n_times,) + The time points. + nave : int + Number of averages. + aspect_kind : int + The kind of data. + comment : str + The dipole comment. + %(verbose)s + + See Also + -------- + read_dipole + Dipole + fit_dipole + + Notes + ----- + This class is for fixed-position dipole fits, where the position + (and maybe orientation) is static over time. For sequential dipole fits, + where the position can change a function of time, use :class:`mne.Dipole`. + + .. 
versionadded:: 0.12 + """ + + @verbose + def __init__(self, info, data, times, nave, aspect_kind, + comment='', *, verbose=None): # noqa: D102 + self.info = info + self.nave = nave + self._aspect_kind = aspect_kind + self.kind = _aspect_rev.get(aspect_kind, 'unknown') + self.comment = comment + self.times = times + self.data = data + self.preload = True + self._update_first_last() + + def __repr__(self): # noqa: D105 + s = "n_times : %s" % len(self.times) + s += ", tmin : %s" % np.min(self.times) + s += ", tmax : %s" % np.max(self.times) + return "<DipoleFixed | %s>" % s + + def copy(self): + """Copy the DipoleFixed object. + + Returns + ------- + inst : instance of DipoleFixed + The copy. + + Notes + ----- + .. versionadded:: 0.16 + """ + return deepcopy(self) + + @property + def ch_names(self): + """Channel names.""" + return self.info['ch_names'] + + @verbose + def save(self, fname, verbose=None): + """Save dipole in a .fif file. + + Parameters + ---------- + fname : str + The name of the .fif file. Must end with ``'.fif'`` or + ``'.fif.gz'`` to make it explicit that the file contains + dipole information in FIF format. + %(verbose)s + """ + check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz', + '_dip.fif', '_dip.fif.gz',), + ('.fif', '.fif.gz')) + _write_evokeds(fname, self, check=False) + + def plot(self, show=True, time_unit='s'): + """Plot dipole data. + + Parameters + ---------- + show : bool + Call pyplot.show() at the end or not. + time_unit : str + The units for the time axis, can be "ms" or "s" (default). + + .. versionadded:: 0.16 + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure containing the time courses. + """ + return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show, + ylim=None, xlim='tight', proj=False, hline=None, + units=None, scalings=None, titles=None, axes=None, + gfp=False, window_title=None, spatial_colors=False, + plot_type="butterfly", selectable=False, + time_unit=time_unit) + + +# ############################################################################# +# IO +@verbose +def read_dipole(fname, verbose=None): + """Read .dip file from Neuromag/xfit or MNE. + + Parameters + ---------- + fname : str + The name of the .dip or .fif file. + %(verbose)s + + Returns + ------- + %(dipole)s + + See Also + -------- + Dipole + DipoleFixed + fit_dipole + + Notes + ----- + .. versionchanged:: 0.20 + Support for reading bdip (Xfit binary) format. 
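+ + Examples + -------- + A minimal usage sketch (the filename here is hypothetical, hence the + skipped doctests):: + + >>> dip = read_dipole('sample_set.dip') # doctest: +SKIP + >>> dip.plot_amplitudes() # doctest: +SKIP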
+ """ + fname = _check_fname(fname, overwrite='read', must_exist=True) + if fname.endswith('.fif') or fname.endswith('.fif.gz'): + return _read_dipole_fixed(fname) + elif fname.endswith('.bdip'): + return _read_dipole_bdip(fname) + else: + return _read_dipole_text(fname) + + +def _read_dipole_text(fname): + """Read a dipole text file.""" + # Figure out the special fields + need_header = True + def_line = name = None + # There is a bug in older np.loadtxt regarding skipping fields, + # so just read the data ourselves (need to get name and header anyway) + data = list() + with open(fname, 'r') as fid: + for line in fid: + if not (line.startswith('%') or line.startswith('#')): + need_header = False + data.append(line.strip().split()) + else: + if need_header: + def_line = line + if line.startswith('##') or line.startswith('%%'): + m = re.search('Name "(.*) dipoles"', line) + if m: + name = m.group(1) + del line + data = np.atleast_2d(np.array(data, float)) + if def_line is None: + raise IOError('Dipole text file is missing field definition ' + 'comment, cannot parse %s' % (fname,)) + # actually parse the fields + def_line = def_line.lstrip('%').lstrip('#').strip() + # MNE writes it out differently than Elekta, let's standardize them... + fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc. + lambda match: match.group(1).strip() + '/mm', def_line) + fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc. + lambda match: '/' + match.group(1), fields) + fields = re.sub('(begin|end) ', # "begin" and "end" with no units + lambda match: match.group(1) + '/ms', fields) + fields = fields.lower().split() + required_fields = ('begin/ms', + 'x/mm', 'y/mm', 'z/mm', + 'q/nam', 'qx/nam', 'qy/nam', 'qz/nam', + 'g/%') + optional_fields = ('khi^2', 'free', # standard ones + # now the confidence fields (up to 5!) + 'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm', + 'qlong/nam', 'qtrans/nam') + conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9] + missing_fields = sorted(set(required_fields) - set(fields)) + if len(missing_fields) > 0: + raise RuntimeError('Could not find necessary fields in header: %s' + % (missing_fields,)) + handled_fields = set(required_fields) | set(optional_fields) + assert len(handled_fields) == len(required_fields) + len(optional_fields) + ignored_fields = sorted(set(fields) - + set(handled_fields) - + {'end/ms'}) + if len(ignored_fields) > 0: + warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,)) + if len(fields) != data.shape[1]: + raise IOError('More data fields (%s) found than data columns (%s): %s' + % (len(fields), data.shape[1], fields)) + + logger.info("%d dipole(s) found" % len(data)) + + if 'end/ms' in fields: + if np.diff(data[:, [fields.index('begin/ms'), + fields.index('end/ms')]], 1, -1).any(): + warn('begin and end fields differed, but only begin will be used ' + 'to store time values') + + # Find the correct column in our data array, then scale to proper units + idx = [fields.index(field) for field in required_fields] + assert len(idx) >= 9 + times = data[:, idx[0]] / 1000. 
+ pos = 1e-3 * data[:, idx[1:4]] # put data in meters + amplitude = data[:, idx[4]] + norm = amplitude.copy() + amplitude /= 1e9 + norm[norm == 0] = 1 + ori = data[:, idx[5:8]] / norm[:, np.newaxis] + gof = data[:, idx[8]] + # Deal with optional fields + optional = [None] * 2 + for fi, field in enumerate(optional_fields[:2]): + if field in fields: + optional[fi] = data[:, fields.index(field)] + khi2, nfree = optional + conf = dict() + for field, scale in zip(optional_fields[2:], conf_scales): # confidence + if field in fields: + conf[field.split('/')[0]] = scale * data[:, fields.index(field)] + return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) + + +def _write_dipole_text(fname, dip): + fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f' + header = ('# begin end X (mm) Y (mm) Z (mm)' + ' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%') + t = dip.times[:, np.newaxis] * 1000. + gof = dip.gof[:, np.newaxis] + amp = 1e9 * dip.amplitude[:, np.newaxis] + out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof) + + # optional fields + fmts = dict(khi2=(' khi^2', ' %8.1f', 1.), + nfree=(' free', ' %5d', 1), + vol=(' vol/mm^3', ' %9.3f', 1e9), + depth=(' depth/mm', ' %9.3f', 1e3), + long=(' long/mm', ' %8.3f', 1e3), + trans=(' trans/mm', ' %9.3f', 1e3), + qlong=(' Qlong/nAm', ' %10.3f', 1e9), + qtrans=(' Qtrans/nAm', ' %11.3f', 1e9), + ) + for key in ('khi2', 'nfree'): + data = getattr(dip, key) + if data is not None: + header += fmts[key][0] + fmt += fmts[key][1] + out += (data[:, np.newaxis] * fmts[key][2],) + for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'): + data = dip.conf.get(key) + if data is not None: + header += fmts[key][0] + fmt += fmts[key][1] + out += (data[:, np.newaxis] * fmts[key][2],) + out = np.concatenate(out, axis=-1) + + # NB CoordinateSystem is hard-coded as Head here + with open(fname, 'wb') as fid: + fid.write('# CoordinateSystem "Head"\n'.encode('utf-8')) + fid.write((header + '\n').encode('utf-8')) + np.savetxt(fid, out, fmt=fmt) + if dip.name is not None: + fid.write(('## Name "%s dipoles" Style "Dipoles"' + % dip.name).encode('utf-8')) + + +_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans') + + +def _read_dipole_bdip(fname): + name = None + nfree = None + with open(fname, 'rb') as fid: + # Which dipole in a multi-dipole set + times = list() + pos = list() + amplitude = list() + ori = list() + gof = list() + conf = dict(vol=list()) + khi2 = list() + has_errors = None + while True: + num = np.frombuffer(fid.read(4), '>i4') + if len(num) == 0: + break + times.append(np.frombuffer(fid.read(4), '>f4')[0]) + fid.read(4) # end + fid.read(12) # r0 + pos.append(np.frombuffer(fid.read(12), '>f4')) + Q = np.frombuffer(fid.read(12), '>f4') + amplitude.append(np.linalg.norm(Q)) + ori.append(Q / amplitude[-1]) + gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0]) + this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0]) + if has_errors is None: + has_errors = this_has_errors + for key in _BDIP_ERROR_KEYS: + conf[key] = list() + assert has_errors == this_has_errors + fid.read(4) # Noise level used for error computations + limits = np.frombuffer(fid.read(20), '>f4') # error limits + for key, lim in zip(_BDIP_ERROR_KEYS, limits): + conf[key].append(lim) + fid.read(100) # (5, 5) fully describes the conf. 
ellipsoid + conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0]) + khi2.append(np.frombuffer(fid.read(4), '>f4')[0]) + fid.read(4) # prob + fid.read(4) # total noise estimate + return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) + + +def _write_dipole_bdip(fname, dip): + with open(fname, 'wb+') as fid: + for ti, t in enumerate(dip.times): + fid.write(np.zeros(1, '>i4').tobytes()) # int dipole + fid.write(np.array([t, 0]).astype('>f4').tobytes()) + fid.write(np.zeros(3, '>f4').tobytes()) # r0 + fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos + Q = dip.amplitude[ti] * dip.ori[ti] + fid.write(Q.astype('>f4').tobytes()) + fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes()) + has_errors = int(bool(len(dip.conf))) + fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors + fid.write(np.zeros(1, '>f4').tobytes()) # noise level + for key in _BDIP_ERROR_KEYS: + val = dip.conf[key][ti] if key in dip.conf else 0. + assert val.shape == () + fid.write(np.array(val, '>f4').tobytes()) + fid.write(np.zeros(25, '>f4').tobytes()) + conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0. + fid.write(np.array(conf, '>f4').tobytes()) + khi2 = dip.khi2[ti] if dip.khi2 is not None else 0 + fid.write(np.array(khi2, '>f4').tobytes()) + fid.write(np.zeros(1, '>f4').tobytes()) # prob + fid.write(np.zeros(1, '>f4').tobytes()) # total noise est + + +# ############################################################################# +# Fitting + +def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1): + """Compute the forward solution and do other nice stuff.""" + B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True) + B = np.concatenate(B, axis=1) + assert np.isfinite(B).all() + B_orig = B.copy() + + # Apply projection and whiten (cov has projections already) + _, _, dgemm = _get_ddot_dgemv_dgemm() + B = dgemm(1., B, whitener.T) + + # column normalization doesn't affect our fitting, so skip for now + # S = np.sum(B * B, axis=1) # across channels + # scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)), + # axis=1)), 3) + # B *= scales[:, np.newaxis] + scales = np.ones(3) + return B, B_orig, scales + + +@verbose +def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None): + """Make a guess space inside a sphere or BEM surface.""" + if 'rr' in surf: + logger.info('Guess surface (%s) is in %s coordinates' + % (_bem_surf_name[surf['id']], + _coord_frame_name(surf['coord_frame']))) + else: + logger.info('Making a spherical guess space with radius %7.1f mm...' + % (1000 * surf['R'])) + logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid)) + src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist, + do_neighbors=False, n_jobs=n_jobs)[0] + assert 'vertno' in src + # simplify the result to make things easier later + src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']], + nuse=src['nuse'], coord_frame=src['coord_frame'], + vertno=np.arange(src['nuse']), type='discrete') + return SourceSpaces([src]) + + +def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None, + lwork=None): + """Calculate the residual sum of squares.""" + if fwd_svd is None: + fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0] + uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True) + else: + uu, sing, vv = fwd_svd + gof = _dipole_gof(uu, sing, vv, B, B2)[0] + # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version + return 1. 
- gof + + +@functools.lru_cache(None) +def _get_ddot_dgemv_dgemm(): + return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm')) + + +def _dipole_gof(uu, sing, vv, B, B2): + """Calculate the goodness of fit from the forward SVD.""" + ddot, dgemv, _ = _get_ddot_dgemv_dgemm() + ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2 + one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B) + Bm2 = ddot(one, one) # np.sum(one * one) + gof = Bm2 / B2 + return gof, one + + +def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None): + """Fit the dipole moment once the location is known.""" + from scipy import linalg + if 'fwd' in fwd_data: + # should be a single precomputed "guess" (i.e., fixed position) + assert rd is None + fwd = fwd_data['fwd'] + assert fwd.shape[0] == 3 + fwd_orig = fwd_data['fwd_orig'] + assert fwd_orig.shape[0] == 3 + scales = fwd_data['scales'] + assert scales.shape == (3,) + fwd_svd = fwd_data['fwd_svd'][0] + else: + fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener, + rd[np.newaxis, :]) + fwd_svd = None + if ori is None: + if fwd_svd is None: + fwd_svd = linalg.svd(fwd, full_matrices=False) + uu, sing, vv = fwd_svd + gof, one = _dipole_gof(uu, sing, vv, B, B2) + ncomp = len(one) + one /= sing[:ncomp] + Q = np.dot(one, uu.T[:ncomp]) + else: + fwd = np.dot(ori[np.newaxis], fwd) + sing = np.linalg.norm(fwd) + one = np.dot(fwd / sing, B) + gof = (one * one)[0] / B2 + Q = ori * np.sum(one / sing) + ncomp = 3 + # Counteract the effect of column normalization + Q *= scales[0] + B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q) + return Q, gof, B_residual_noproj, ncomp + + +def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs, + guess_data, fwd_data, whitener, ori, n_jobs, rank, rhoend): + """Fit a single dipole to the given whitened, projected data.""" + from scipy.optimize import fmin_cobyla + parallel, p_fun, _ = parallel_func(fun, n_jobs) + # parallel over time points + res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs, + guess_data, fwd_data, whitener, + fmin_cobyla, ori, rank, rhoend) + for B, t in zip(data.T, times)) + pos = np.array([r[0] for r in res]) + amp = np.array([r[1] for r in res]) + ori = np.array([r[2] for r in res]) + gof = np.array([r[3] for r in res]) * 100 # convert to percentage + conf = None + if res[0][4] is not None: + conf = np.array([r[4] for r in res]) + keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'] + conf = {key: conf[:, ki] for ki, key in enumerate(keys)} + khi2 = np.array([r[5] for r in res]) + nfree = np.array([r[6] for r in res]) + residual_noproj = np.array([r[7] for r in res]).T + + return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj + + +'''Simplex code in case we ever want/need it for testing + +def _make_tetra_simplex(): + """Make the initial tetrahedron""" + # + # For this definition of a regular tetrahedron, see + # + # http://mathworld.wolfram.com/Tetrahedron.html + # + x = np.sqrt(3.0) / 3.0 + r = np.sqrt(6.0) / 12.0 + R = 3 * r + d = x / 2.0 + simplex = 1e-2 * np.array([[x, 0.0, -r], + [-d, 0.5, -r], + [-d, -0.5, -r], + [0., 0., R]]) + return simplex + + +def try_(p, y, psum, ndim, fun, ihi, neval, fac): + """Helper to try a value""" + ptry = np.empty(ndim) + fac1 = (1.0 - fac) / ndim + fac2 = fac1 - fac + ptry = psum * fac1 - p[ihi] * fac2 + ytry = fun(ptry) + neval += 1 + if ytry < y[ihi]: + y[ihi] = ytry + psum[:] += ptry - p[ihi] + p[ihi] = ptry + return ytry, neval + + +def _simplex_minimize(p, ftol, stol, fun, max_eval=1000): + """Minimization with 
the simplex algorithm + + Modified from Numerical recipes""" + y = np.array([fun(s) for s in p]) + ndim = p.shape[1] + assert p.shape[0] == ndim + 1 + mpts = ndim + 1 + neval = 0 + psum = p.sum(axis=0) + + loop = 1 + while(True): + ilo = 1 + if y[1] > y[2]: + ihi = 1 + inhi = 2 + else: + ihi = 2 + inhi = 1 + for i in range(mpts): + if y[i] < y[ilo]: + ilo = i + if y[i] > y[ihi]: + inhi = ihi + ihi = i + elif y[i] > y[inhi]: + if i != ihi: + inhi = i + + rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo])) + if rtol < ftol: + break + if neval >= max_eval: + raise RuntimeError('Maximum number of evaluations exceeded.') + if stol > 0: # Has the simplex collapsed? + dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2)) + if loop > 5 and dsum < stol: + break + + ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.) + if ytry <= y[ilo]: + ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.) + elif ytry >= y[inhi]: + ysave = y[ihi] + ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5) + if ytry >= ysave: + for i in range(mpts): + if i != ilo: + psum[:] = 0.5 * (p[i] + p[ilo]) + p[i] = psum + y[i] = fun(psum) + neval += ndim + psum = p.sum(axis=0) + loop += 1 +''' + + +def _fit_confidence(rd, Q, ori, whitener, fwd_data): + # As described in the Xfit manual, confidence intervals can be calculated + # by examining a linearization of the model at the best-fitting location, + # i.e. taking the Jacobian and using the whitener: + # + # J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz] + # C = (J.T C^-1 J)^-1 + # + # And then the confidence interval comes from the square root of the + # diagonal of C, scaled by 1.96 (for 95% confidence). + from scipy import linalg + direction = np.empty((3, 3)) + # The coordinate system has the x axis aligned with the dipole orientation, + direction[0] = ori + # the z axis through the origin of the sphere model + rvec = rd - fwd_data['inner_skull']['r0'] + direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize + direction[2] /= np.linalg.norm(direction[2]) + # and the y axis perpendicular to these, forming a right-handed system. + direction[1] = np.cross(direction[2], direction[0]) + assert np.allclose(np.dot(direction, direction.T), np.eye(3)) + # Get spatial deltas in dipole coordinate directions + deltas = (-1e-4, 1e-4) + J = np.empty((whitener.shape[0], 6)) + for ii in range(3): + fwds = [] + for delta in deltas: + this_r = rd[np.newaxis] + delta * direction[ii] + fwds.append( + np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0])) + J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0] + # Get current (Q) deltas in the dipole directions + deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q) + this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0] + for ii in range(3): + fwds = [] + for delta in deltas: + fwds.append(np.dot(Q + delta * direction[ii], this_fwd)) + J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0] + # J is already whitened, so we don't need to do np.dot(whitener, J). + # However, the units in the Jacobian are potentially quite different, + # so we need to do some normalization during inversion, then revert. 
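+ # (Added note: the positional-derivative and moment-derivative columns of + # J can differ by many orders of magnitude, so J.T @ J would be badly + # conditioned; dividing each column block by its norm before pinvh, and + # dividing C by the same norms afterwards, restores the original units.)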
+ direction_norm = np.linalg.norm(J[:, :3]) + Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z + norm = np.array([direction_norm] * 3 + [Q_norm] * 3) + J /= norm + J = np.dot(J.T, J) + C = pinvh(J, rtol=1e-14) + C /= norm + C /= norm[:, np.newaxis] + conf = 1.96 * np.sqrt(np.diag(C)) + # The confidence volume of the dipole location is obtained by + # taking the eigenvalues of the upper left submatrix and computing + # v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or: + vol_conf = 4 * np.pi / 3. * np.sqrt( + 476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True))) + conf = np.concatenate([conf, [vol_conf]]) + # Now we reorder and subselect the proper columns: + # vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero) + conf = conf[[6, 2, 0, 1, 3, 4]] + return conf + + +def _surface_constraint(rd, surf, min_dist_to_inner_skull): + """Surface fitting constraint.""" + dist = _compute_nearest(surf['rr'], rd[np.newaxis, :], + return_dists=True)[1][0] + if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]: + dist *= -1. + # Once we know the dipole is below the inner skull, + # let's check if its distance to the inner skull is at least + # min_dist_to_inner_skull. This can be enforced by adding a + # constraint proportional to its distance. + dist -= min_dist_to_inner_skull + return dist + + +def _sphere_constraint(rd, r0, R_adj): + """Sphere fitting constraint.""" + return R_adj - np.sqrt(np.sum((rd - r0) ** 2)) + + +def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs, + guess_data, fwd_data, whitener, fmin_cobyla, ori, rank, + rhoend): + """Fit a single bit of data.""" + B = np.dot(whitener, B_orig) + + # make constraint function to keep the solver within the inner skull + if 'rr' in fwd_data['inner_skull']: # bem + surf = fwd_data['inner_skull'] + constraint = partial(_surface_constraint, surf=surf, + min_dist_to_inner_skull=min_dist_to_inner_skull) + else: # sphere + surf = None + constraint = partial( + _sphere_constraint, r0=fwd_data['inner_skull']['r0'], + R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull) + + # Find a good starting point (find_best_guess in C) + B2 = np.dot(B, B) + if B2 == 0: + warn('Zero field found for time %s' % t) + return np.zeros(3), 0, np.zeros(3), 0, B + + idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd) + for fi, fwd_svd in enumerate(guess_data['fwd_svd'])]) + x0 = guess_rrs[idx] + lwork = _svd_lwork((3, B.shape[0])) + fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener, + lwork=lwork) + + # Tested minimizers: + # Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC + # Several were similar, but COBYLA won for having a handy constraint + # function we can use to ensure we stay inside the inner skull / + # smallest sphere + rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(), + rhobeg=5e-2, rhoend=rhoend, disp=False) + + # simplex = _make_tetra_simplex() + x0 + # _simplex_minimize(simplex, 1e-4, 2e-4, fun) + # rd_final = simplex[0] + + # Compute the dipole moment at the final point + Q, gof, residual_noproj, n_comp = _fit_Q( + fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori) + khi2 = (1 - gof) * B2 + nfree = rank - n_comp + amp = np.sqrt(np.dot(Q, Q)) + norm = 1. if amp == 0. else amp + ori = Q / norm + + conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data) + + msg = '---- Fitted : %7.1f ms' % (1000. 
* t) + if surf is not None: + dist_to_inner_skull = _compute_nearest( + surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0] + msg += (", distance to inner skull : %2.4f mm" + % (dist_to_inner_skull * 1000.)) + + logger.info(msg) + return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj + + +def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs, + guess_data, fwd_data, whitener, + fmin_cobyla, ori, rank, rhoend): + """Fit data using a fixed position.""" + B = np.dot(whitener, B_orig) + B2 = np.dot(B, B) + if B2 == 0: + warn('Zero field found for time %s' % t) + return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6) + # Compute the dipole moment + Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig, + rd=None, ori=ori)[:3] + if ori is None: + amp = np.sqrt(np.dot(Q, Q)) + norm = 1. if amp == 0. else amp + ori = Q / norm + else: + amp = np.dot(Q, ori) + rd_final = guess_rrs[0] + # This will be slow, and we don't use it anyway, so omit it for now: + # conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data) + conf = khi2 = nfree = None + # No corresponding 'logger' message here because it should go *very* fast + return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj + + +@verbose +def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, + pos=None, ori=None, rank=None, accuracy='normal', tol=5e-5, + verbose=None): + """Fit a dipole. + + Parameters + ---------- + evoked : instance of Evoked + The dataset to fit. + cov : str | instance of Covariance + The noise covariance. + bem : str | instance of ConductorModel + The BEM filename (str) or conductor model. + trans : str | None + The head<->MRI transform filename. Must be provided unless BEM + is a sphere model. + min_dist : float + Minimum distance (in millimeters) from the dipole to the inner skull. + Must be positive. Note that because this is a constraint passed to + a solver it is not strict but close, i.e. for a ``min_dist=5.`` the + fits could be 4.9 mm from the inner skull. + %(n_jobs)s + It is used in field computation and fitting. + pos : ndarray, shape (3,) | None + Position of the dipole to use. If None (default), sequential + fitting (different position and orientation for each time instance) + is performed. If a position (in head coords) is given as an array, + the position is fixed during fitting. + + .. versionadded:: 0.12 + ori : ndarray, shape (3,) | None + Orientation of the dipole to use. If None (default), the + orientation is free to change as a function of time. If an + orientation (in head coordinates) is given as an array, ``pos`` + must also be provided, and the routine computes the amplitude and + goodness of fit of the dipole at the given position and orientation + for each time instant. + + .. versionadded:: 0.12 + %(rank_none)s + + .. versionadded:: 0.20 + accuracy : str + Can be "normal" (default) or "accurate", which gives the most accurate + coil definition but is typically not necessary for real-world data. + + .. versionadded:: 0.24 + tol : float + Final accuracy of the optimization (see ``rhoend`` argument of + :func:`scipy.optimize.fmin_cobyla`). + + .. versionadded:: 0.24 + %(verbose)s + + Returns + ------- + dip : instance of Dipole or DipoleFixed + The dipole fits. A :class:`mne.DipoleFixed` is returned if + ``pos`` and ``ori`` are both not None, otherwise a + :class:`mne.Dipole` is returned. + residual : instance of Evoked + The M-EEG data channels with the fitted dipolar activity removed. 
+ + See Also + -------- + mne.beamformer.rap_music + Dipole + DipoleFixed + read_dipole + + Notes + ----- + .. versionadded:: 0.9.0 + """ + from scipy import linalg + # This could eventually be adapted to work with other inputs, these + # are what is needed: + + evoked = evoked.copy() + _validate_type(accuracy, str, 'accuracy') + _check_option('accuracy', accuracy, ('accurate', 'normal')) + + # Determine if a list of projectors has an average EEG ref + if _needs_eeg_average_ref_proj(evoked.info): + raise ValueError('EEG average reference is mandatory for dipole ' + 'fitting.') + if min_dist < 0: + raise ValueError('min_dist should be positive. Got %s' % min_dist) + if ori is not None and pos is None: + raise ValueError('pos must be provided if ori is not None') + + data = evoked.data + if not np.isfinite(data).all(): + raise ValueError('Evoked data must be finite') + info = evoked.info + times = evoked.times.copy() + comment = evoked.comment + + # Convert the min_dist to meters + min_dist_to_inner_skull = min_dist / 1000. + del min_dist + + # Figure out our inputs + neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False, + exclude=[])) + if isinstance(bem, str): + bem_extra = bem + else: + bem_extra = repr(bem) + logger.info('BEM : %s' % bem_extra) + mri_head_t, trans = _get_trans(trans) + logger.info('MRI transform : %s' % trans) + bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False) + if not bem['is_sphere']: + # Find the best-fitting sphere + inner_skull = _bem_find_surface(bem, 'inner_skull') + inner_skull = inner_skull.copy() + R, r0 = _fit_sphere(inner_skull['rr'], disp=False) + # r0 back to head frame for logging + r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0] + inner_skull['r0'] = r0 + logger.info('Head origin : ' + '%6.1f %6.1f %6.1f mm rad = %6.1f mm.' 
+ % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R)) + del R, r0 + else: + r0 = bem['r0'] + if len(bem.get('layers', [])) > 0: + R = bem['layers'][0]['rad'] + kind = 'rad' + else: # MEG-only + # Use the minimum distance to the MEG sensors as the radius then + R = np.dot(np.linalg.inv(info['dev_head_t']['trans']), + np.hstack([r0, [1.]]))[:3] # r0 -> device + R = R - [info['chs'][pick]['loc'][:3] + for pick in pick_types(info, meg=True, exclude=[])] + if len(R) == 0: + raise RuntimeError('No MEG channels found, but MEG-only ' + 'sphere model used') + R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors + kind = 'max_rad' + logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, ' + '%s = %6.1f mm' + % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R)) + inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame + del R, r0 + + # Deal with DipoleFixed cases here + if pos is not None: + fixed_position = True + pos = np.array(pos, float) + if pos.shape != (3,): + raise ValueError('pos must be None or a 3-element array-like,' + ' got %s' % (pos,)) + logger.info('Fixed position : %6.1f %6.1f %6.1f mm' + % tuple(1000 * pos)) + if ori is not None: + ori = np.array(ori, float) + if ori.shape != (3,): + raise ValueError('ori must be None or a 3-element array-like,' + ' got %s' % (ori,)) + norm = np.sqrt(np.sum(ori * ori)) + if not np.isclose(norm, 1): + raise ValueError('ori must be a unit vector, got length %s' + % (norm,)) + logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm' + % tuple(ori)) + else: + logger.info('Free orientation : ') + fit_n_jobs = 1 # only use 1 job to do the guess fitting + else: + fixed_position = False + # Eventually these could be parameters, but they are just used for + # the initial grid anyway + guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf + guess_mindist = max(0.005, min_dist_to_inner_skull) + guess_exclude = 0.02 + + logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,)) + if guess_mindist > 0.0: + logger.info('Guess mindist : %6.1f mm' + % (1000 * guess_mindist,)) + if guess_exclude > 0: + logger.info('Guess exclude : %6.1f mm' + % (1000 * guess_exclude,)) + logger.info(f'Using {accuracy} MEG coil definitions.') + fit_n_jobs = n_jobs + cov = _ensure_cov(cov) + logger.info('') + + _print_coord_trans(mri_head_t) + _print_coord_trans(info['dev_head_t']) + logger.info('%d bad channels total' % len(info['bads'])) + + # Forward model setup (setup_forward_model from setup.c) + ch_types = evoked.get_channel_types() + + megcoils, compcoils, megnames, meg_info = [], [], [], None + eegels, eegnames = [], [] + if 'grad' in ch_types or 'mag' in ch_types: + megcoils, compcoils, megnames, meg_info = \ + _prep_meg_channels(info, exclude='bads', + accuracy=accuracy, verbose=verbose) + if 'eeg' in ch_types: + eegels, eegnames = _prep_eeg_channels(info, exclude='bads', + verbose=verbose) + + # Ensure that MEG and/or EEG channels are present + if len(megcoils + eegels) == 0: + raise RuntimeError('No MEG or EEG channels found.') + + # Whitener for the data + logger.info('Decomposing the sensor noise covariance matrix...') + picks = pick_types(info, meg=True, eeg=True, ref_meg=False) + + # In case we want to more closely match MNE-C for debugging: + # from .io.pick import pick_info + # from .cov import prepare_noise_cov + # info_nb = pick_info(info, picks) + # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False) + # nzero = (cov['eig'] > 0) + # n_chan = len(info_nb['ch_names']) + # whitener = 
np.zeros((n_chan, n_chan), dtype=np.float64) + # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero]) + # whitener = np.dot(whitener, cov['eigvec']) + + whitener, _, rank = compute_whitener(cov, info, picks=picks, + rank=rank, return_rank=True) + + # Proceed to computing the fits (make_guess_data) + if fixed_position: + guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True])) + logger.info('Compute forward for dipole location...') + else: + logger.info('\n---- Computing the forward solution for the guesses...') + guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude, + guess_mindist, n_jobs=n_jobs)[0] + # grid coordinates go from mri to head frame + transform_surface_to(guess_src, 'head', mri_head_t) + logger.info('Go through all guess source locations...') + + # inner_skull goes from mri to head frame + if 'rr' in inner_skull: + transform_surface_to(inner_skull, 'head', mri_head_t) + if fixed_position: + if 'rr' in inner_skull: + check = _surface_constraint(pos, inner_skull, + min_dist_to_inner_skull) + else: + check = _sphere_constraint( + pos, inner_skull['r0'], + R_adj=inner_skull['R'] - min_dist_to_inner_skull) + if check <= 0: + raise ValueError('fixed position is %0.1fmm outside the inner ' + 'skull boundary' % (-1000 * check,)) + + # C code computes guesses w/sphere model for speed, don't bother here + fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None], + ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'], + inner_skull=inner_skull) + # fwd_data['inner_skull'] in head frame, bem in mri, confusing... + _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs, + verbose=False) + guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards( + fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs) + # decompose ahead of time + guess_fwd_svd = [linalg.svd(fwd, full_matrices=False) + for fwd in np.array_split(guess_fwd, + len(guess_src['rr']))] + guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd, + fwd_orig=guess_fwd_orig, scales=guess_fwd_scales) + del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed + logger.info('[done %d source%s]' % (guess_src['nuse'], + _pl(guess_src['nuse']))) + + # Do actual fits + data = data[picks] + ch_names = [info['ch_names'][p] for p in picks] + proj_op = make_projector(info['projs'], ch_names, info['bads'])[0] + fun = _fit_dipole_fixed if fixed_position else _fit_dipole + out = _fit_dipoles( + fun, min_dist_to_inner_skull, data, times, guess_src['rr'], + guess_data, fwd_data, whitener, ori, n_jobs, rank, tol) + assert len(out) == 8 + if fixed_position and ori is not None: + # DipoleFixed + data = np.array([out[1], out[3]]) + out_info = deepcopy(info) + loc = np.concatenate([pos, ori, np.zeros(6)]) + out_info._unlocked = True + out_info['chs'] = [ + dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM, + coil_type=FIFF.FIFFV_COIL_DIPOLE, + unit_mul=0, range=1, cal=1., scanno=1, logno=1), + dict(ch_name='goodness', loc=np.full(12, np.nan), + kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + coil_type=FIFF.FIFFV_COIL_NONE, + unit_mul=0, range=1., cal=1., scanno=2, logno=100)] + for key in ['hpi_meas', 'hpi_results', 'projs']: + out_info[key] = list() + for key in ['acq_pars', 'acq_stim', 'description', 'dig', + 'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name', + 'subject_info']: + out_info[key] = None + out_info._unlocked = False + out_info['bads'] = 
[] + out_info._update_redundant() + out_info._check_consistency() + dipoles = DipoleFixed(out_info, data, times, evoked.nave, + evoked._aspect_kind, comment=comment) + else: + dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment, + out[4], out[5], out[6]) + residual = evoked.copy().apply_proj() # set the projs active + residual.data[picks] = np.dot(proj_op, out[-1]) + logger.info('%d time points fitted' % len(dipoles.times)) + return dipoles, residual + + +def get_phantom_dipoles(kind='vectorview'): + """Get standard phantom dipole locations and orientations. + + Parameters + ---------- + kind : str + Get the information for the given system: + + ``vectorview`` (default) + The Neuromag VectorView phantom. + ``otaniemi`` + The older Neuromag phantom used at Otaniemi. + + Returns + ------- + pos : ndarray, shape (n_dipoles, 3) + The dipole positions. + ori : ndarray, shape (n_dipoles, 3) + The dipole orientations. + + See Also + -------- + mne.datasets.fetch_phantom + + Notes + ----- + The Elekta phantoms have a radius of 79.5mm, and HPI coil locations + in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...). + """ + _check_option('kind', kind, ['vectorview', 'otaniemi']) + if kind == 'vectorview': + # these values were pulled from a scanned image provided by + # Elekta folks + a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9]) + b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3]) + x = np.concatenate((a, [0] * 8, -b, [0] * 8)) + y = np.concatenate(([0] * 8, -a, [0] * 8, b)) + c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0] + d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9] + z = np.concatenate((c, c, d, d)) + signs = ([1, -1] * 4 + [-1, 1] * 4) * 2 + elif kind == 'otaniemi': + # these values were pulled from a Neuromag manual + # (NM20456A, 13.7.1999, p.65) + a = np.array([56.3, 47.6, 39.0, 30.3]) + b = np.array([32.5, 27.5, 22.5, 17.5]) + c = np.zeros(4) + x = np.concatenate((a, b, c, c, -a, -b, c, c)) + y = np.concatenate((c, c, -a, -b, c, c, b, a)) + z = np.concatenate((b, a, b, a, b, a, a, b)) + signs = [-1] * 8 + [1] * 16 + [-1] * 8 + pos = np.vstack((x, y, z)).T / 1000. + # Locs are always in XZ or YZ, and so are the oris. The oris are + # also in the same plane and tangential, so it's easy to determine + # the orientation. 
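+ # (Worked example: the first VectorView dipole sits at (59.7, 0, 22.9) mm + # in the XZ plane; reversing the two nonzero components and flipping one + # sign gives an orientation proportional to (22.9, 0, -59.7), which is + # orthogonal to the position vector, i.e. tangential.)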
+ ori = list() + for pi, this_pos in enumerate(pos): + this_ori = np.zeros(3) + idx = np.where(this_pos == 0)[0] + # assert len(idx) == 1 + idx = np.setdiff1d(np.arange(3), idx[0]) + this_ori[idx] = (this_pos[idx][::-1] / + np.linalg.norm(this_pos[idx])) * [1, -1] + this_ori *= signs[pi] + # Now we have this quality, which we could uncomment to + # double-check: + # np.testing.assert_allclose(np.dot(this_ori, this_pos) / + # np.linalg.norm(this_pos), 0, + # atol=1e-15) + ori.append(this_ori) + ori = np.array(ori) + return pos, ori + + +def _concatenate_dipoles(dipoles): + """Concatenate a list of dipoles.""" + times, pos, amplitude, ori, gof = [], [], [], [], [] + for dipole in dipoles: + times.append(dipole.times) + pos.append(dipole.pos) + amplitude.append(dipole.amplitude) + ori.append(dipole.ori) + gof.append(dipole.gof) + + return Dipole(np.concatenate(times), np.concatenate(pos), + np.concatenate(amplitude), np.concatenate(ori), + np.concatenate(gof), name=None) diff --git a/python/libs/mne/epochs.py b/python/libs/mne/epochs.py new file mode 100644 index 0000000..3ac4299 --- /dev/null +++ b/python/libs/mne/epochs.py @@ -0,0 +1,3856 @@ +# -*- coding: utf-8 -*- + +"""Tools for working with epoched data.""" + +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Daniel Strohmeier +# Denis Engemann +# Mainak Jas +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from functools import partial +from collections import Counter +from copy import deepcopy +import json +import operator +import os.path as op + +import numpy as np + +from .io.utils import _construct_bids_filename +from .io.write import (start_and_end_file, start_block, end_block, + write_int, write_float, write_float_matrix, + write_double_matrix, write_complex_float_matrix, + write_complex_double_matrix, write_id, write_string, + _get_split_size, _NEXT_FILE_BUFFER, INT32_MAX) +from .io.meas_info import (read_meas_info, write_meas_info, _merge_info, + _ensure_infos_match, ContainsMixin) +from .io.open import fiff_open, _get_next_fname +from .io.tree import dir_tree_find +from .io.tag import read_tag, read_tag_info +from .io.constants import FIFF +from .io.fiff.raw import _get_fname_rep +from .io.pick import (channel_indices_by_type, channel_type, + pick_channels, pick_info, _pick_data_channels, + _DATA_CH_TYPES_SPLIT, _picks_to_idx) +from .io.proj import setup_proj, ProjMixin +from .io.base import BaseRaw, TimeMixin, _get_ch_factors +from .bem import _check_origin +from .evoked import EvokedArray, _check_decim +from .baseline import rescale, _log_rescale, _check_baseline +from .channels.channels import (UpdateChannelsMixin, + SetChannelsMixin, InterpolationMixin) +from .filter import detrend, FilterMixin, _check_fun +from .parallel import parallel_func + +from .event import (_read_events_fif, make_fixed_length_events, + match_event_names) +from .fixes import rng_uniform +from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap, + plot_epochs_image, plot_topo_image_epochs, plot_drop_log) +from .utils import (_check_fname, check_fname, logger, verbose, + _time_mask, check_random_state, warn, _pl, + sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc, + _check_pandas_installed, + _check_preload, GetEpochsMixin, + _prepare_read_metadata, _prepare_write_metadata, + _check_event_id, _gen_events, _check_option, + _check_combine, ShiftTimeMixin, _build_data_frame, + _check_pandas_index_arguments, _convert_times, + _scale_dataframe_data, _check_time_format, object_size, + _on_missing, _validate_type, _ensure_events, + 
_path_like, _VerboseDep) +from .utils.docs import fill_doc +from .annotations import (_write_annotations, _read_annotations_fif, + EpochAnnotationsMixin) + + +def _pack_reject_params(epochs): + reject_params = dict() + for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'): + val = getattr(epochs, key, None) + if val is not None: + reject_params[key] = val + return reject_params + + +def _save_split(epochs, fname, part_idx, n_parts, fmt, split_naming, + overwrite): + """Split epochs. + + Anything new added to this function also needs to be added to + BaseEpochs.save to account for new file sizes. + """ + # insert index in filename + base, ext = op.splitext(fname) + if part_idx > 0: + if split_naming == 'neuromag': + fname = '%s-%d%s' % (base, part_idx, ext) + else: + assert split_naming == 'bids' + fname = _construct_bids_filename(base, ext, part_idx, + validate=False) + _check_fname(fname, overwrite=overwrite) + + next_fname = None + if part_idx < n_parts - 1: + if split_naming == 'neuromag': + next_fname = '%s-%d%s' % (base, part_idx + 1, ext) + else: + assert split_naming == 'bids' + next_fname = _construct_bids_filename(base, ext, part_idx + 1, + validate=False) + next_idx = part_idx + 1 + else: + next_idx = None + + with start_and_end_file(fname) as fid: + _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx) + + +def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): + info = epochs.info + meas_id = info['meas_id'] + + start_block(fid, FIFF.FIFFB_MEAS) + write_id(fid, FIFF.FIFF_BLOCK_ID) + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + + # Write measurement info + write_meas_info(fid, info) + + # One or more evoked data sets + start_block(fid, FIFF.FIFFB_PROCESSED_DATA) + start_block(fid, FIFF.FIFFB_MNE_EPOCHS) + + # write events out after getting data to ensure bad events are dropped + data = epochs.get_data() + + _check_option('fmt', fmt, ['single', 'double']) + + if np.iscomplexobj(data): + if fmt == 'single': + write_function = write_complex_float_matrix + elif fmt == 'double': + write_function = write_complex_double_matrix + else: + if fmt == 'single': + write_function = write_float_matrix + elif fmt == 'double': + write_function = write_double_matrix + + # Epoch annotations are written if there are any + annotations = getattr(epochs, 'annotations', []) + if annotations is not None and len(annotations): + _write_annotations(fid, annotations) + + # write Epoch event windows + start_block(fid, FIFF.FIFFB_MNE_EVENTS) + write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T) + write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id)) + end_block(fid, FIFF.FIFFB_MNE_EVENTS) + + # Metadata + if epochs.metadata is not None: + start_block(fid, FIFF.FIFFB_MNE_METADATA) + metadata = _prepare_write_metadata(epochs.metadata) + write_string(fid, FIFF.FIFF_DESCRIPTION, metadata) + end_block(fid, FIFF.FIFFB_MNE_METADATA) + + # First and last sample + first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe + last = first + len(epochs.times) - 1 + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first) + write_int(fid, FIFF.FIFF_LAST_SAMPLE, last) + + # write raw original sampling rate + write_float(fid, FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ, epochs._raw_sfreq) + + # save baseline + if epochs.baseline is not None: + bmin, bmax = epochs.baseline + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) + + # The epochs itself + decal = np.empty(info['nchan']) + 
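# decal removes each channel's calibration (cal * scale) so the epochs + # are written to disk in raw units; the scaling is undone again right + # after write_function below. +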
for k in range(info['nchan']): + decal[k] = 1.0 / (info['chs'][k]['cal'] * + info['chs'][k].get('scale', 1.0)) + + data *= decal[np.newaxis, :, np.newaxis] + + write_function(fid, FIFF.FIFF_EPOCH, data) + + # undo modifications to data + data /= decal[np.newaxis, :, np.newaxis] + + write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, + json.dumps(epochs.drop_log)) + + reject_params = _pack_reject_params(epochs) + if reject_params: + write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT, + json.dumps(reject_params)) + + write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION, + epochs.selection) + + # And now write the next file info in case epochs are split on disk + if next_fname is not None and n_parts > 1: + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname)) + if meas_id is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx) + end_block(fid, FIFF.FIFFB_REF) + + end_block(fid, FIFF.FIFFB_MNE_EPOCHS) + end_block(fid, FIFF.FIFFB_PROCESSED_DATA) + end_block(fid, FIFF.FIFFB_MEAS) + + +def _event_id_string(event_id): + return ';'.join([k + ':' + str(v) for k, v in event_id.items()]) + + +def _merge_events(events, event_id, selection): + """Merge repeated events.""" + event_id = event_id.copy() + new_events = events.copy() + event_idxs_to_delete = list() + unique_events, counts = np.unique(events[:, 0], return_counts=True) + for ev in unique_events[counts > 1]: + + # indices at which the non-unique events happened + idxs = (events[:, 0] == ev).nonzero()[0] + + # Figure out new value for events[:, 1]. Set to 0, if mixed vals exist + unique_priors = np.unique(events[idxs, 1]) + new_prior = unique_priors[0] if len(unique_priors) == 1 else 0 + + # If duplicate time samples have same event val, "merge" == "drop" + # and no new event_id key will be created + ev_vals = np.unique(events[idxs, 2]) + if len(ev_vals) <= 1: + new_event_val = ev_vals[0] + + # Else, make a new event_id for the merged event + else: + + # Find all event_id keys involved in duplicated events. These + # keys will be merged to become a new entry in "event_id" + event_id_keys = list(event_id.keys()) + event_id_vals = list(event_id.values()) + new_key_comps = [event_id_keys[event_id_vals.index(value)] + for value in ev_vals] + + # Check if we already have an entry for merged keys of duplicate + # events ... 
if yes, reuse it + for key in event_id: + if set(key.split('/')) == set(new_key_comps): + new_event_val = event_id[key] + break + + # Else, find an unused value for the new key and make an entry into + # the event_id dict + else: + ev_vals = np.unique( + np.concatenate((list(event_id.values()), + events[:, 1:].flatten()), + axis=0)) + if ev_vals[0] > 1: + new_event_val = 1 + else: + diffs = np.diff(ev_vals) + idx = np.where(diffs > 1)[0] + idx = -1 if len(idx) == 0 else idx[0] + new_event_val = ev_vals[idx] + 1 + + new_event_id_key = '/'.join(sorted(new_key_comps)) + event_id[new_event_id_key] = int(new_event_val) + + # Replace duplicate event times with merged event and remember which + # duplicate indices to delete later + new_events[idxs[0], 1] = new_prior + new_events[idxs[0], 2] = new_event_val + event_idxs_to_delete.extend(idxs[1:]) + + # Delete duplicate event idxs + new_events = np.delete(new_events, event_idxs_to_delete, 0) + new_selection = np.delete(selection, event_idxs_to_delete, 0) + + return new_events, event_id, new_selection + + +def _handle_event_repeated(events, event_id, event_repeated, selection, + drop_log): + """Handle repeated events. + + Note that drop_log will be modified inplace + """ + assert len(events) == len(selection) + selection = np.asarray(selection) + + unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True) + + # Return early if no duplicates + if len(unique_events) == len(events): + return events, event_id, selection, drop_log + + # Else, we have duplicates. Triage ... + _check_option('event_repeated', event_repeated, ['error', 'drop', 'merge']) + drop_log = list(drop_log) + if event_repeated == 'error': + raise RuntimeError('Event time samples were not unique. Consider ' + 'setting the `event_repeated` parameter.') + + elif event_repeated == 'drop': + logger.info('Multiple event values for single event times found. ' + 'Keeping the first occurrence and dropping all others.') + new_events = events[u_ev_idxs] + new_selection = selection[u_ev_idxs] + drop_ev_idxs = np.setdiff1d(selection, new_selection) + for idx in drop_ev_idxs: + drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',) + selection = new_selection + elif event_repeated == 'merge': + logger.info('Multiple event values for single event times found. ' + 'Creating new event value to reflect simultaneous events.') + new_events, event_id, new_selection = \ + _merge_events(events, event_id, selection) + drop_ev_idxs = np.setdiff1d(selection, new_selection) + for idx in drop_ev_idxs: + drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',) + selection = new_selection + drop_log = tuple(drop_log) + + # Remove obsolete kv-pairs from event_id after handling + keys = new_events[:, 1:].flatten() + event_id = {k: v for k, v in event_id.items() if v in keys} + + return new_events, event_id, selection, drop_log + + +@fill_doc +class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin, + SetChannelsMixin, InterpolationMixin, FilterMixin, + TimeMixin, SizeMixin, GetEpochsMixin, EpochAnnotationsMixin, + _VerboseDep): + """Abstract base class for `~mne.Epochs`-type classes. + + .. warning:: This class provides basic functionality and should never be + instantiated directly. + + Parameters + ---------- + %(info_not_none)s + data : ndarray | None + If ``None``, data will be read from the Raw object. If ndarray, must be + of shape (n_epochs, n_channels, n_times). + %(events_epochs)s + %(event_id)s + %(epochs_tmin_tmax)s + %(baseline_epochs)s + Defaults to ``(None, 0)``, i.e. 
beginning of the data until + time point zero. + %(raw_epochs)s + %(picks_all)s + %(reject_epochs)s + %(flat)s + %(decim)s + %(epochs_reject_tmin_tmax)s + %(detrend_epochs)s + %(proj_epochs)s + %(on_missing_epochs)s + preload_at_end : bool + %(epochs_preload)s + selection : iterable | None + Iterable of indices of selected epochs. If ``None``, will be + automatically generated, corresponding to all non-zero events. + drop_log : tuple | None + Tuple of tuple of strings indicating which epochs have been marked to + be ignored. + filename : str | None + The filename (if the epochs are read from disk). + %(metadata_epochs)s + %(event_repeated_epochs)s + %(verbose)s + raw_sfreq : float + The original Raw object sampling rate. If None, then it is set to + ``info['sfreq']``. + annotations : instance of mne.Annotations | None + Annotations to set. + + Notes + ----- + The ``BaseEpochs`` class is public to allow for stable type-checking in + user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be + used as a constructor for Epochs objects (use instead :class:`mne.Epochs`). + """ + + @verbose + def __init__(self, info, data, events, event_id=None, + tmin=-0.2, tmax=0.5, + baseline=(None, 0), raw=None, picks=None, reject=None, + flat=None, decim=1, reject_tmin=None, reject_tmax=None, + detrend=None, proj=True, on_missing='raise', + preload_at_end=False, selection=None, drop_log=None, + filename=None, metadata=None, event_repeated='error', + *, verbose=None, raw_sfreq=None, + annotations=None): # noqa: D102 + if events is not None: # RtEpochs can have events=None + events = _ensure_events(events) + events_max = events.max() + if events_max > INT32_MAX: + raise ValueError( + f'events array values must not exceed {INT32_MAX}, ' + f'got {events_max}') + event_id = _check_event_id(event_id, events) + self.event_id = event_id + del event_id + + if events is not None: # RtEpochs can have events=None + for key, val in self.event_id.items(): + if val not in events[:, 2]: + msg = ('No matching events found for %s ' + '(event id %i)' % (key, val)) + _on_missing(on_missing, msg) + + # ensure metadata matches original events size + self.selection = np.arange(len(events)) + self.events = events + + # same as self.metadata = metadata, but suppress log in favor + # of logging below (after setting self.selection) + GetEpochsMixin.metadata.fset(self, metadata, verbose=False) + del events + + values = list(self.event_id.values()) + selected = np.where(np.in1d(self.events[:, 2], values))[0] + if selection is None: + selection = selected + else: + selection = np.array(selection, int) + if selection.shape != (len(selected),): + raise ValueError('selection must be shape %s got shape %s' + % (selected.shape, selection.shape)) + self.selection = selection + if drop_log is None: + self.drop_log = tuple( + () if k in self.selection else ('IGNORED',) + for k in range(max(len(self.events), + max(self.selection) + 1))) + else: + self.drop_log = drop_log + + self.events = self.events[selected] + + self.events, self.event_id, self.selection, self.drop_log = \ + _handle_event_repeated( + self.events, self.event_id, event_repeated, + self.selection, self.drop_log) + + # then subselect + sub = np.where(np.in1d(selection, self.selection))[0] + if isinstance(metadata, list): + metadata = [metadata[s] for s in sub] + elif metadata is not None: + metadata = metadata.iloc[sub] + + # Remove temporarily set metadata from above, and set + # again to get the correct log ("adding metadata", instead of + # "replacing 
existing metadata") + GetEpochsMixin.metadata.fset(self, None, verbose=False) + self.metadata = metadata + del metadata + + n_events = len(self.events) + if n_events > 1: + if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0: + warn('The events passed to the Epochs constructor are not ' + 'chronologically ordered.', RuntimeWarning) + + if n_events > 0: + logger.info('%d matching events found' % n_events) + else: + raise ValueError('No desired events found.') + else: + self.drop_log = tuple() + self.selection = np.array([], int) + self.metadata = metadata + # do not set self.events here, let subclass do it + + if (detrend not in [None, 0, 1]) or isinstance(detrend, bool): + raise ValueError('detrend must be None, 0, or 1') + self.detrend = detrend + + self._raw = raw + info._check_consistency() + self.picks = _picks_to_idx(info, picks, none='all', exclude=(), + allow_empty=False) + self.info = pick_info(info, self.picks) + del info + self._current = 0 + + if data is None: + self.preload = False + self._data = None + self._do_baseline = True + else: + assert decim == 1 + if data.ndim != 3 or data.shape[2] != \ + round((tmax - tmin) * self.info['sfreq']) + 1: + raise RuntimeError('bad data shape') + if data.shape[0] != len(self.events): + raise ValueError( + 'The number of epochs and the number of events must match') + self.preload = True + self._data = data + self._do_baseline = False + self._offset = None + + if tmin > tmax: + raise ValueError('tmin has to be less than or equal to tmax') + + # Handle times + sfreq = float(self.info['sfreq']) + start_idx = int(round(tmin * sfreq)) + self._raw_times = np.arange(start_idx, + int(round(tmax * sfreq)) + 1) / sfreq + self._set_times(self._raw_times) + + # check reject_tmin and reject_tmax + if reject_tmin is not None: + if (np.isclose(reject_tmin, tmin)): + # adjust for potential small deviations due to sampling freq + reject_tmin = self.tmin + elif reject_tmin < tmin: + raise ValueError(f'reject_tmin needs to be None or >= tmin ' + f'(got {reject_tmin})') + + if reject_tmax is not None: + if (np.isclose(reject_tmax, tmax)): + # adjust for potential small deviations due to sampling freq + reject_tmax = self.tmax + elif reject_tmax > tmax: + raise ValueError(f'reject_tmax needs to be None or <= tmax ' + f'(got {reject_tmax})') + + if (reject_tmin is not None) and (reject_tmax is not None): + if reject_tmin >= reject_tmax: + raise ValueError(f'reject_tmin ({reject_tmin}) needs to be ' + f' < reject_tmax ({reject_tmax})') + + self.reject_tmin = reject_tmin + self.reject_tmax = reject_tmax + + # decimation + self._decim = 1 + self.decimate(decim) + + # baseline correction: replace `None` tuple elements with actual times + self.baseline = _check_baseline(baseline, times=self.times, + sfreq=self.info['sfreq']) + if self.baseline is not None and self.baseline != baseline: + logger.info(f'Setting baseline interval to ' + f'[{self.baseline[0]}, {self.baseline[1]}] sec') + + logger.info(_log_rescale(self.baseline)) + + # setup epoch rejection + self.reject = None + self.flat = None + self._reject_setup(reject, flat) + + # do the rest + valid_proj = [True, 'delayed', False] + if proj not in valid_proj: + raise ValueError('"proj" must be one of %s, not %s' + % (valid_proj, proj)) + if proj == 'delayed': + self._do_delayed_proj = True + logger.info('Entering delayed SSP mode.') + else: + self._do_delayed_proj = False + activate = False if self._do_delayed_proj else proj + self._projector, self.info = setup_proj(self.info, False, + activate=activate) + 
if preload_at_end: + assert self._data is None + assert self.preload is False + self.load_data() # this will do the projection + elif proj is True and self._projector is not None and data is not None: + # let's make sure we project if data was provided and proj + # requested + # we could do this with np.einsum, but iteration should be + # more memory safe in most instances + for ii, epoch in enumerate(self._data): + self._data[ii] = np.dot(self._projector, epoch) + self._filename = str(filename) if filename is not None else filename + if raw_sfreq is None: + raw_sfreq = self.info['sfreq'] + self._raw_sfreq = raw_sfreq + self._check_consistency() + self.set_annotations(annotations) + + def _check_consistency(self): + """Check invariants of epochs object.""" + if hasattr(self, 'events'): + assert len(self.selection) == len(self.events) + assert len(self.drop_log) >= len(self.events) + assert len(self.selection) == sum( + (len(dl) == 0 for dl in self.drop_log)) + assert hasattr(self, '_times_readonly') + assert not self.times.flags['WRITEABLE'] + assert isinstance(self.drop_log, tuple) + assert all(isinstance(log, tuple) for log in self.drop_log) + assert all(isinstance(s, str) for log in self.drop_log for s in log) + + def reset_drop_log_selection(self): + """Reset the drop_log and selection entries. + + This method will simplify ``self.drop_log`` and ``self.selection`` + so that they are meaningless (tuple of empty tuples and increasing + integers, respectively). This can be useful when concatenating + many Epochs instances, as ``drop_log`` can accumulate many entries + which can become problematic when saving. + """ + self.selection = np.arange(len(self.events)) + self.drop_log = (tuple(),) * len(self.events) + self._check_consistency() + + def load_data(self): + """Load the data if not already preloaded. + + Returns + ------- + epochs : instance of Epochs + The epochs object. + + Notes + ----- + This function operates in-place. + + .. versionadded:: 0.10.0 + """ + if self.preload: + return self + self._data = self._get_data() + self.preload = True + self._do_baseline = False + self._decim_slice = slice(None, None, None) + self._decim = 1 + self._raw_times = self.times + assert self._data.shape[-1] == len(self.times) + self._raw = None # shouldn't need it anymore + return self + + @verbose + def decimate(self, decim, offset=0, verbose=None): + """Decimate the epochs. + + Parameters + ---------- + %(decim)s + %(offset_decim)s + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The decimated Epochs object. + + See Also + -------- + mne.Evoked.decimate + mne.Epochs.resample + mne.io.Raw.resample + + Notes + ----- + %(decim_notes)s + + If ``decim`` is 1, this method does not copy the underlying data. + + .. versionadded:: 0.10.0 + + References + ---------- + .. 
footbibliography:: + """ + decim, offset, new_sfreq = _check_decim(self.info, decim, offset) + start_idx = int(round(-self._raw_times[0] * (self.info['sfreq'] * + self._decim))) + self._decim *= decim + i_start = start_idx % self._decim + offset + decim_slice = slice(i_start, None, self._decim) + with self.info._unlock(): + self.info['sfreq'] = new_sfreq + if self.preload: + if decim != 1: + self._data = self._data[:, :, decim_slice].copy() + self._raw_times = self._raw_times[decim_slice].copy() + else: + self._data = np.ascontiguousarray(self._data) + self._decim_slice = slice(None) + self._decim = 1 + else: + self._decim_slice = decim_slice + self._set_times(self._raw_times[self._decim_slice]) + return self + + @verbose + def apply_baseline(self, baseline=(None, 0), *, verbose=None): + """Baseline correct epochs. + + Parameters + ---------- + %(baseline_epochs)s + Defaults to ``(None, 0)``, i.e. beginning of the data until + time point zero. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The baseline-corrected Epochs object. + + Notes + ----- + Baseline correction can be done multiple times, but can never be + reverted once the data has been loaded. + + .. versionadded:: 0.10.0 + """ + baseline = _check_baseline(baseline, times=self.times, + sfreq=self.info['sfreq']) + + if self.preload: + if self.baseline is not None and baseline is None: + raise RuntimeError('You cannot remove baseline correction ' + 'from preloaded data once it has been ' + 'applied.') + self._do_baseline = True + picks = self._detrend_picks + rescale(self._data, self.times, baseline, copy=False, picks=picks) + self._do_baseline = False + else: # logging happens in "rescale" in "if" branch + logger.info(_log_rescale(baseline)) + # For EpochsArray and Epochs, this is already True: + # assert self._do_baseline is True + # ... but for EpochsFIF it's not, so let's set it explicitly + self._do_baseline = True + self.baseline = baseline + return self + + def _reject_setup(self, reject, flat): + """Set self._reject_time and self._channel_type_idx.""" + idx = channel_indices_by_type(self.info) + reject = deepcopy(reject) if reject is not None else dict() + flat = deepcopy(flat) if flat is not None else dict() + for rej, kind in zip((reject, flat), ('reject', 'flat')): + if not isinstance(rej, dict): + raise TypeError('reject and flat must be dict or None, not %s' + % type(rej)) + bads = set(rej.keys()) - set(idx.keys()) + if len(bads) > 0: + raise KeyError('Unknown channel types found in %s: %s' + % (kind, bads)) + + for key in idx.keys(): + # don't throw an error if rejection/flat would do nothing + if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or + flat.get(key, -1) >= 0): + # This is where we could eventually add e.g. + # self.allow_missing_reject_keys check to allow users to + # provide keys that don't exist in data + raise ValueError("No %s channel found. Cannot reject based on " + "%s." 
% (key.upper(), key.upper())) + + # check for invalid values + for rej, kind in zip((reject, flat), ('Rejection', 'Flat')): + for key, val in rej.items(): + if val is None or val < 0: + raise ValueError('%s value must be a number >= 0, not "%s"' + % (kind, val)) + + # now check to see if our rejection and flat are getting more + # restrictive + old_reject = self.reject if self.reject is not None else dict() + old_flat = self.flat if self.flat is not None else dict() + bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new ' + '{kind} values must be at least as stringent as ' + 'previous ones') + + # copy thresholds for channel types that were used previously, but not + # passed this time + for key in set(old_reject) - set(reject): + reject[key] = old_reject[key] + # make sure new thresholds are at least as stringent as the old ones + for key in reject: + if key in old_reject and reject[key] > old_reject[key]: + raise ValueError( + bad_msg.format(kind='reject', key=key, new=reject[key], + old=old_reject[key], op='>')) + + # same for flat thresholds + for key in set(old_flat) - set(flat): + flat[key] = old_flat[key] + for key in flat: + if key in old_flat and flat[key] < old_flat[key]: + raise ValueError( + bad_msg.format(kind='flat', key=key, new=flat[key], + old=old_flat[key], op='<')) + + # after validation, set parameters + self._bad_dropped = False + self._channel_type_idx = idx + self.reject = reject if len(reject) > 0 else None + self.flat = flat if len(flat) > 0 else None + + if (self.reject_tmin is None) and (self.reject_tmax is None): + self._reject_time = None + else: + if self.reject_tmin is None: + reject_imin = None + else: + idxs = np.nonzero(self.times >= self.reject_tmin)[0] + reject_imin = idxs[0] + if self.reject_tmax is None: + reject_imax = None + else: + idxs = np.nonzero(self.times <= self.reject_tmax)[0] + reject_imax = idxs[-1] + self._reject_time = slice(reject_imin, reject_imax) + + @verbose # verbose is used by mne-realtime + def _is_good_epoch(self, data, verbose=None): + """Determine if epoch is good.""" + if isinstance(data, str): + return False, (data,) + if data is None: + return False, ('NO_DATA',) + n_times = len(self.times) + if data.shape[1] < n_times: + # epoch is too short ie at the end of the data + return False, ('TOO_SHORT',) + if self.reject is None and self.flat is None: + return True, None + else: + if self._reject_time is not None: + data = data[:, self._reject_time] + + return _is_good(data, self.ch_names, self._channel_type_idx, + self.reject, self.flat, full_report=True, + ignore_chs=self.info['bads']) + + @verbose + def _detrend_offset_decim(self, epoch, picks, verbose=None): + """Aux Function: detrend, baseline correct, offset, decim. + + Note: operates inplace + """ + if (epoch is None) or isinstance(epoch, str): + return epoch + + # Detrend + if self.detrend is not None: + # We explicitly detrend just data channels (not EMG, ECG, EOG which + # are processed by baseline correction) + use_picks = _pick_data_channels(self.info, exclude=()) + epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1) + + # Baseline correct + if self._do_baseline: + rescale( + epoch, self._raw_times, self.baseline, picks=picks, copy=False, + verbose=False) + + # Decimate if necessary (i.e., epoch not preloaded) + epoch = epoch[:, self._decim_slice] + + # handle offset + if self._offset is not None: + epoch += self._offset + + return epoch + + def iter_evoked(self, copy=False): + """Iterate over epochs as a sequence of Evoked objects. 
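+
+        For instance, single-trial evoked responses can be inspected in a
+        loop (an illustrative sketch; assumes an existing ``epochs``
+        instance):
+
+        >>> for ev in epochs.iter_evoked():  # doctest:+SKIP
+        ...     print(ev.comment)  # doctest:+SKIP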
+ + The Evoked objects yielded will each contain a single epoch (i.e., no + averaging is performed). + + This method resets the object iteration state to the first epoch. + + Parameters + ---------- + copy : bool + If False copies of data and measurement info will be omitted + to save time. + """ + self.__iter__() + + while True: + try: + out = self.__next__(True) + except StopIteration: + break + data, event_id = out + tmin = self.times[0] + info = self.info + if copy: + info = deepcopy(self.info) + data = data.copy() + + yield EvokedArray(data, info, tmin, comment=str(event_id)) + + def subtract_evoked(self, evoked=None): + """Subtract an evoked response from each epoch. + + Can be used to exclude the evoked response when analyzing induced + activity, see e.g. [1]_. + + Parameters + ---------- + evoked : instance of Evoked | None + The evoked response to subtract. If None, the evoked response + is computed from Epochs itself. + + Returns + ------- + self : instance of Epochs + The modified instance (instance is also modified inplace). + + References + ---------- + .. [1] David et al. "Mechanisms of evoked and induced responses in + MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006. + """ + logger.info('Subtracting Evoked from Epochs') + if evoked is None: + picks = _pick_data_channels(self.info, exclude=[]) + evoked = self.average(picks) + + # find the indices of the channels to use + picks = pick_channels(evoked.ch_names, include=self.ch_names) + + # make sure the omitted channels are not data channels + if len(picks) < len(self.ch_names): + sel_ch = [evoked.ch_names[ii] for ii in picks] + diff_ch = list(set(self.ch_names).difference(sel_ch)) + diff_idx = [self.ch_names.index(ch) for ch in diff_ch] + diff_types = [channel_type(self.info, idx) for idx in diff_idx] + bad_idx = [diff_types.index(t) for t in diff_types if t in + _DATA_CH_TYPES_SPLIT] + if len(bad_idx) > 0: + bad_str = ', '.join([diff_ch[ii] for ii in bad_idx]) + raise ValueError('The following data channels are missing ' + 'in the evoked response: %s' % bad_str) + logger.info(' The following channels are not included in the ' + 'subtraction: %s' % ', '.join(diff_ch)) + + # make sure the times match + if (len(self.times) != len(evoked.times) or + np.max(np.abs(self.times - evoked.times)) >= 1e-7): + raise ValueError('Epochs and Evoked object do not contain ' + 'the same time points.') + + # handle SSPs + if not self.proj and evoked.proj: + warn('Evoked has SSP applied while Epochs has not.') + if self.proj and not evoked.proj: + evoked = evoked.copy().apply_proj() + + # find the indices of the channels to use in Epochs + ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks] + + # do the subtraction + if self.preload: + self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :] + else: + if self._offset is None: + self._offset = np.zeros((len(self.ch_names), len(self.times)), + dtype=np.float64) + self._offset[ep_picks] -= evoked.data[picks] + logger.info('[done]') + + return self + + @fill_doc + def average(self, picks=None, method="mean", by_event_type=False): + """Compute an average over epochs. + + Parameters + ---------- + %(picks_all_data)s + method : str | callable + How to combine the data. If "mean"/"median", the mean/median + are returned. + Otherwise, must be a callable which, when passed an array of shape + (n_epochs, n_channels, n_time) returns an array of shape + (n_channels, n_time). + Note that due to file type limitations, the kind for all + these will be "average". 
+ %(by_event_type)s + + Returns + ------- + %(evoked_by_event_type_returns)s + + Notes + ----- + Computes an average of all epochs in the instance, even if + they correspond to different conditions. To average by condition, + do ``epochs[condition].average()`` for each condition separately. + + When picks is None and epochs contain only ICA channels, no channels + are selected, resulting in an error. This is because ICA channels + are not considered data channels (they are of misc type) and only data + channels are selected when picks is None. + + The ``method`` parameter allows e.g. robust averaging. + For example, one could do: + + >>> from scipy.stats import trim_mean # doctest:+SKIP + >>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP + >>> epochs.average(method=trim) # doctest:+SKIP + + This would compute the trimmed mean. + """ + if by_event_type: + evokeds = list() + for event_type in self.event_id.keys(): + ev = self[event_type]._compute_aggregate(picks=picks, + mode=method) + ev.comment = event_type + evokeds.append(ev) + else: + evokeds = self._compute_aggregate(picks=picks, mode=method) + return evokeds + + @fill_doc + def standard_error(self, picks=None, by_event_type=False): + """Compute standard error over epochs. + + Parameters + ---------- + %(picks_all_data)s + %(by_event_type)s + + Returns + ------- + %(std_err_by_event_type_returns)s + """ + return self.average(picks=picks, method="std", + by_event_type=by_event_type) + + def _compute_aggregate(self, picks, mode='mean'): + """Compute the mean, median, or std over epochs and return Evoked.""" + # if instance contains ICA channels they won't be included unless picks + # is specified + if picks is None: + check_ICA = [x.startswith('ICA') for x in self.ch_names] + if np.all(check_ICA): + raise TypeError('picks must be specified (i.e. not None) for ' + 'ICA channel data') + elif np.any(check_ICA): + warn('ICA channels will not be included unless explicitly ' + 'selected in picks') + + n_channels = len(self.ch_names) + n_times = len(self.times) + + if self.preload: + n_events = len(self.events) + fun = _check_combine(mode, valid=('mean', 'median', 'std')) + data = fun(self._data) + assert len(self.events) == len(self._data) + if data.shape != self._data.shape[1:]: + raise RuntimeError( + 'You passed a function that resulted in data of shape {}, ' + 'but it should be {}.'.format( + data.shape, self._data.shape[1:])) + else: + if mode not in {"mean", "std"}: + raise ValueError("If data are not preloaded, can only compute " + "mean or standard deviation.") + data = np.zeros((n_channels, n_times)) + n_events = 0 + for e in self: + if np.iscomplexobj(e): + data = data.astype(np.complex128) + data += e + n_events += 1 + + if n_events > 0: + data /= n_events + else: + data.fill(np.nan) + + # convert to stderr if requested, could do in one pass but do in + # two (slower) in case there are large numbers + if mode == "std": + data_mean = data.copy() + data.fill(0.) 
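+                # Second pass below: accumulate squared deviations from the
+                # mean computed above, so that afterwards
+                #     data = sqrt(sum((e - mean) ** 2) / n_events)
+                # i.e., the standard deviation across epochs at each
+                # channel/time point (converted to standard error further
+                # down by dividing by sqrt(n_events)).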
+ for e in self: + data += (e - data_mean) ** 2 + data = np.sqrt(data / n_events) + + if mode == "std": + kind = 'standard_error' + data /= np.sqrt(n_events) + else: + kind = "average" + + return self._evoked_from_epoch_data(data, self.info, picks, n_events, + kind, self._name) + + @property + def _name(self): + """Give a nice string representation based on event ids.""" + if len(self.event_id) == 1: + comment = next(iter(self.event_id.keys())) + else: + count = Counter(self.events[:, 2]) + comments = list() + for key, value in self.event_id.items(): + comments.append('%.2f × %s' % ( + float(count[value]) / len(self.events), key)) + comment = ' + '.join(comments) + return comment + + def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, + comment): + """Create an evoked object from epoch data.""" + info = deepcopy(info) + # don't apply baseline correction; we'll set evoked.baseline manually + evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment, + nave=n_events, kind=kind, baseline=None) + evoked.baseline = self.baseline + + # the above constructor doesn't recreate the times object precisely + # due to numerical precision issues + evoked.times = self.times.copy() + + # pick channels + picks = _picks_to_idx(self.info, picks, 'data_or_ica', ()) + ch_names = [evoked.ch_names[p] for p in picks] + evoked.pick_channels(ch_names) + + if len(evoked.info['ch_names']) == 0: + raise ValueError('No data channel found when averaging.') + + if evoked.nave < 1: + warn('evoked object is empty (based on less than 1 epoch)') + + return evoked + + @property + def ch_names(self): + """Channel names.""" + return self.info['ch_names'] + + @copy_function_doc_to_method_doc(plot_epochs) + def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20, + title=None, events=None, event_color=None, + order=None, show=True, block=False, decim='auto', noise_cov=None, + butterfly=False, show_scrollbars=True, show_scalebars=True, + epoch_colors=None, event_id=None, group_by='type', + precompute=None, use_opengl=None, *, theme=None): + return plot_epochs(self, picks=picks, scalings=scalings, + n_epochs=n_epochs, n_channels=n_channels, + title=title, events=events, event_color=event_color, + order=order, show=show, block=block, decim=decim, + noise_cov=noise_cov, butterfly=butterfly, + show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, + epoch_colors=epoch_colors, event_id=event_id, + group_by=group_by, precompute=precompute, + use_opengl=use_opengl, theme=theme) + + @copy_function_doc_to_method_doc(plot_epochs_psd) + def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, + proj=False, bandwidth=None, adaptive=False, low_bias=True, + normalization='length', picks=None, ax=None, color='black', + xscale='linear', area_mode='std', area_alpha=0.33, + dB=True, estimate='auto', show=True, n_jobs=1, + average=False, line_alpha=None, spatial_colors=True, + sphere=None, exclude='bads', verbose=None): + return plot_epochs_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, + tmax=tmax, proj=proj, bandwidth=bandwidth, + adaptive=adaptive, low_bias=low_bias, + normalization=normalization, picks=picks, ax=ax, + color=color, xscale=xscale, area_mode=area_mode, + area_alpha=area_alpha, dB=dB, estimate=estimate, + show=show, n_jobs=n_jobs, average=average, + line_alpha=line_alpha, + spatial_colors=spatial_colors, sphere=sphere, + exclude=exclude, verbose=verbose) + + @copy_function_doc_to_method_doc(plot_epochs_psd_topomap) + def plot_psd_topomap(self, bands=None, tmin=None, + tmax=None, 
proj=False, bandwidth=None, adaptive=False, + low_bias=True, normalization='length', ch_type=None, + cmap=None, agg_fun=None, dB=True, + n_jobs=1, normalize=False, cbar_fmt='auto', + outlines='head', axes=None, show=True, + sphere=None, vlim=(None, None), verbose=None): + return plot_epochs_psd_topomap( + self, bands=bands, tmin=tmin, tmax=tmax, + proj=proj, bandwidth=bandwidth, adaptive=adaptive, + low_bias=low_bias, normalization=normalization, ch_type=ch_type, + cmap=cmap, agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, + normalize=normalize, cbar_fmt=cbar_fmt, outlines=outlines, + axes=axes, show=show, sphere=sphere, vlim=vlim, verbose=verbose) + + @copy_function_doc_to_method_doc(plot_topo_image_epochs) + def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None, + colorbar=None, order=None, cmap='RdBu_r', + layout_scale=.95, title=None, scalings=None, + border='none', fig_facecolor='k', fig_background=None, + font_color='w', show=True): + return plot_topo_image_epochs( + self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax, + colorbar=colorbar, order=order, cmap=cmap, + layout_scale=layout_scale, title=title, scalings=scalings, + border=border, fig_facecolor=fig_facecolor, + fig_background=fig_background, font_color=font_color, show=show) + + @verbose + def drop_bad(self, reject='existing', flat='existing', verbose=None): + """Drop bad epochs without retaining the epochs data. + + Should be used before slicing operations. + + .. warning:: This operation is slow since all epochs have to be read + from disk. To avoid reading epochs from disk multiple + times, use :meth:`mne.Epochs.load_data()`. + + .. note:: To constrain the time period used for estimation of signal + quality, set ``epochs.reject_tmin`` and + ``epochs.reject_tmax``, respectively. + + Parameters + ---------- + %(reject_drop_bad)s + %(flat_drop_bad)s + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs with bad epochs dropped. Operates in-place. + + Notes + ----- + Dropping bad epochs can be done multiple times with different + ``reject`` and ``flat`` parameters. However, once an epoch is + dropped, it is dropped forever, so if more lenient thresholds may + subsequently be applied, :meth:`epochs.copy <mne.Epochs.copy>` should be + used. + """ + if reject == 'existing': + if flat == 'existing' and self._bad_dropped: + return + reject = self.reject + if flat == 'existing': + flat = self.flat + if any(isinstance(rej, str) and rej != 'existing' for + rej in (reject, flat)): + raise ValueError('reject and flat, if strings, must be "existing"') + self._reject_setup(reject, flat) + self._get_data(out=False, verbose=verbose) + return self + + def drop_log_stats(self, ignore=('IGNORED',)): + """Compute the channel stats based on a drop_log from Epochs. + + Parameters + ---------- + ignore : list + The drop reasons to ignore. + + Returns + ------- + perc : float + Total percentage of epochs dropped. + + See Also + -------- + plot_drop_log + """ + return _drop_log_stats(self.drop_log, ignore) + + @copy_function_doc_to_method_doc(plot_drop_log) + def plot_drop_log(self, threshold=0, n_max_plot=20, subject=None, + color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',), + show=True): + if not self._bad_dropped: + raise ValueError("You cannot use plot_drop_log since bad " + "epochs have not yet been dropped. 
" + "Use epochs.drop_bad().") + return plot_drop_log(self.drop_log, threshold, n_max_plot, subject, + color=color, width=width, ignore=ignore, + show=show) + + @copy_function_doc_to_method_doc(plot_epochs_image) + def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None, + colorbar=True, order=None, show=True, units=None, + scalings=None, cmap=None, fig=None, axes=None, + overlay_times=None, combine=None, group_by=None, + evoked=True, ts_args=None, title=None, clear=False): + return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin, + vmax=vmax, colorbar=colorbar, order=order, + show=show, units=units, scalings=scalings, + cmap=cmap, fig=fig, axes=axes, + overlay_times=overlay_times, combine=combine, + group_by=group_by, evoked=evoked, + ts_args=ts_args, title=title, clear=clear) + + @verbose + def drop(self, indices, reason='USER', verbose=None): + """Drop epochs based on indices or boolean mask. + + .. note:: The indices refer to the current set of undropped epochs + rather than the complete set of dropped and undropped epochs. + They are therefore not necessarily consistent with any + external indices (e.g., behavioral logs). To drop epochs + based on external criteria, do not use the ``preload=True`` + flag when constructing an Epochs object, and call this + method before calling the :meth:`mne.Epochs.drop_bad` or + :meth:`mne.Epochs.load_data` methods. + + Parameters + ---------- + indices : array of int or bool + Set epochs to remove by specifying indices to remove or a boolean + mask to apply (where True values get removed). Events are + correspondingly modified. + reason : str + Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc). + Default: 'USER'. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs with indices dropped. Operates in-place. + """ + indices = np.atleast_1d(indices) + + if indices.ndim > 1: + raise ValueError("indices must be a scalar or a 1-d array") + + if indices.dtype == bool: + indices = np.where(indices)[0] + try_idx = np.where(indices < 0, indices + len(self.events), indices) + + out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events)) + if out_of_bounds.any(): + first = indices[out_of_bounds][0] + raise IndexError("Epoch index %d is out of bounds" % first) + keep = np.setdiff1d(np.arange(len(self.events)), try_idx) + self._getitem(keep, reason, copy=False, drop_event_id=False) + count = len(try_idx) + logger.info('Dropped %d epoch%s: %s' % + (count, _pl(count), ', '.join(map(str, np.sort(try_idx))))) + + return self + + def _get_epoch_from_raw(self, idx, verbose=None): + """Get a given epoch from disk.""" + raise NotImplementedError + + def _project_epoch(self, epoch): + """Process a raw epoch based on the delayed param.""" + # whenever requested, the first epoch is being projected. + if (epoch is None) or isinstance(epoch, str): + # can happen if t < 0 or reject based on annotations + return epoch + proj = self._do_delayed_proj or self.proj + if self._projector is not None and proj is True: + epoch = np.dot(self._projector, epoch) + return epoch + + @verbose + def _get_data(self, out=True, picks=None, item=None, *, units=None, + tmin=None, tmax=None, verbose=None): + """Load all data, dropping bad epochs along the way. + + Parameters + ---------- + out : bool + Return the data. Setting this to False is used to reject bad + epochs without caching all the data, which saves memory. + %(picks_all)s + item : slice | array-like | str | list | None + See docstring of get_data method. 
+ %(units)s + tmin : int | float | None + Start time of data to get in seconds. + tmax : int | float | None + End time of data to get in seconds. + %(verbose)s + """ + start, stop = self._handle_tmin_tmax(tmin, tmax) + + if item is None: + item = slice(None) + elif not self._bad_dropped: + raise ValueError( + 'item must be None in epochs.get_data() unless bads have been ' + 'dropped. Consider using epochs.drop_bad().') + select = self._item_to_select(item) # indices or slice + use_idx = np.arange(len(self.events))[select] + n_events = len(use_idx) + # in case there are no good events + if self.preload: + # we will store our result in our existing array + data = self._data + else: + # we start out with an empty array, allocate only if necessary + data = np.empty((0, len(self.info['ch_names']), len(self.times))) + msg = (f'for {n_events} events and {len(self._raw_times)} ' + 'original time points') + if self._decim > 1: + msg += ' (prior to decimation)' + if getattr(self._raw, "preload", False): + logger.info(f'Using data from preloaded Raw {msg} ...') + else: + logger.info(f'Loading data {msg} ...') + + orig_picks = picks + if orig_picks is None: + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + else: + picks = _picks_to_idx(self.info, picks) + + # handle units param only if we are going to return data (out==True) + if (units is not None) and out: + ch_factors = _get_ch_factors(self, units, picks) + + if self._bad_dropped: + if not out: + return + if self.preload: + data = data[select] + if orig_picks is not None: + data = data[:, picks] + if units is not None: + data *= ch_factors[:, np.newaxis] + if start != 0 or stop != self.times.size: + data = data[..., start:stop] + return data + + # we need to load from disk, drop, and return data + detrend_picks = self._detrend_picks + for ii, idx in enumerate(use_idx): + # faster to pre-allocate memory here + epoch_noproj = self._get_epoch_from_raw(idx) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) + if self._do_delayed_proj: + epoch_out = epoch_noproj + else: + epoch_out = self._project_epoch(epoch_noproj) + if ii == 0: + data = np.empty((n_events, len(self.ch_names), + len(self.times)), dtype=epoch_out.dtype) + data[ii] = epoch_out + else: + # bads need to be dropped, this might occur after a preload + # e.g., when calling drop_bad w/new params + good_idx = [] + n_out = 0 + drop_log = list(self.drop_log) + assert n_events == len(self.selection) + if not self.preload: + detrend_picks = self._detrend_picks + for idx, sel in enumerate(self.selection): + if self.preload: # from memory + if self._do_delayed_proj: + epoch_noproj = self._data[idx] + epoch = self._project_epoch(epoch_noproj) + else: + epoch_noproj = None + epoch = self._data[idx] + else: # from disk + epoch_noproj = self._get_epoch_from_raw(idx) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) + epoch = self._project_epoch(epoch_noproj) + + epoch_out = epoch_noproj if self._do_delayed_proj else epoch + is_good, bad_tuple = self._is_good_epoch( + epoch, verbose=verbose) + if not is_good: + assert isinstance(bad_tuple, tuple) + assert all(isinstance(x, str) for x in bad_tuple) + drop_log[sel] = drop_log[sel] + bad_tuple + continue + good_idx.append(idx) + + # store the epoch if there is a reason to (output or update) + if out or self.preload: + # faster to pre-allocate, then trim as necessary + if n_out == 0 and not self.preload: + data = np.empty((n_events, epoch_out.shape[0], + epoch_out.shape[1]), + 
dtype=epoch_out.dtype, order='C') + data[n_out] = epoch_out + n_out += 1 + self.drop_log = tuple(drop_log) + del drop_log + + self._bad_dropped = True + logger.info("%d bad epochs dropped" % (n_events - len(good_idx))) + + # adjust the data size if there is a reason to (output or update) + if out or self.preload: + if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']: + data.resize((n_out,) + data.shape[1:], refcheck=False) + else: + data = data[:n_out] + if self.preload: + self._data = data + + # Now update our properties (except data, which is already fixed) + self._getitem(good_idx, None, copy=False, drop_event_id=False, + select_data=False) + + if out: + if orig_picks is not None: + data = data[:, picks] + if units is not None: + data *= ch_factors[:, np.newaxis] + if start != 0 or stop != self.times.size: + data = data[..., start:stop] + return data + else: + return None + + @property + def _detrend_picks(self): + if self._do_baseline: + return _pick_data_channels( + self.info, with_ref_meg=True, with_aux=True, exclude=()) + else: + return [] + + @fill_doc + def get_data(self, picks=None, item=None, units=None, tmin=None, + tmax=None): + """Get all epochs as a 3D array. + + Parameters + ---------- + %(picks_all)s + item : slice | array-like | str | list | None + The items to get. See :meth:`mne.Epochs.__getitem__` for + a description of valid options. This can be substantially faster + for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__` + for repeated access on large Epochs objects. + None (default) is an alias for ``slice(None)``. + + .. versionadded:: 0.20 + %(units)s + + .. versionadded:: 0.24 + tmin : int | float | None + Start time of data to get in seconds. + + .. versionadded:: 0.24.0 + tmax : int | float | None + End time of data to get in seconds. + + .. versionadded:: 0.24.0 + + Returns + ------- + data : array of shape (n_epochs, n_channels, n_times) + A view on epochs data. + """ + return self._get_data(picks=picks, item=item, units=units, tmin=tmin, + tmax=tmax) + + @verbose + def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, + channel_wise=True, verbose=None, **kwargs): + """Apply a function to a subset of channels. + + %(applyfun_summary_epochs)s + + Parameters + ---------- + %(fun_applyfun)s + %(picks_all_data_noref)s + %(dtype_applyfun)s + %(n_jobs)s + %(channel_wise_applyfun_epo)s + %(verbose)s + %(kwargs_fun)s + + Returns + ------- + self : instance of Epochs + The epochs object with transformed data. 
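+
+        For example, the data could be rectified in-place (a minimal sketch;
+        assumes a preloaded ``epochs`` instance):
+
+        >>> import numpy as np  # doctest:+SKIP
+        >>> epochs.apply_function(np.abs, picks='eeg')  # doctest:+SKIP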
+ """ + _check_preload(self, 'epochs.apply_function') + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError('fun needs to be a function') + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + if channel_wise: + if n_jobs == 1: + _fun = partial(_check_fun, fun, **kwargs) + # modify data inplace to save memory + for idx in picks: + self._data[:, idx, :] = np.apply_along_axis( + _fun, -1, data_in[:, idx, :]) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel(p_fun( + fun, data_in[:, p, :], **kwargs) for p in picks) + for pp, p in enumerate(picks): + self._data[:, p, :] = data_picks_new[pp] + else: + self._data = _check_fun(fun, data_in, **kwargs) + + return self + + @property + def times(self): + """Time vector in seconds.""" + return self._times_readonly + + def _set_times(self, times): + """Set self._times_readonly (and make it read only).""" + # naming used to indicate that it shouldn't be + # changed directly, but rather via this method + self._times_readonly = times.copy() + self._times_readonly.flags['WRITEABLE'] = False + + @property + def tmin(self): + """First time point.""" + return self.times[0] + + @property + def filename(self): + """The filename.""" + return self._filename + + @property + def tmax(self): + """Last time point.""" + return self.times[-1] + + def __repr__(self): + """Build string representation.""" + s = ' %s events ' % len(self.events) + s += '(all good)' if self._bad_dropped else '(good & bad)' + s += ', %g - %g sec' % (self.tmin, self.tmax) + s += ', baseline ' + if self.baseline is None: + s += 'off' + else: + s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec' + if self.baseline != _check_baseline( + self.baseline, times=self.times, sfreq=self.info['sfreq'], + on_baseline_outside_data='adjust'): + s += ' (baseline period was cropped after baseline correction)' + + s += ', ~%s' % (sizeof_fmt(self._size),) + s += ', data%s loaded' % ('' if self.preload else ' not') + s += ', with metadata' if self.metadata is not None else '' + max_events = 10 + counts = ['%r: %i' % (k, sum(self.events[:, 2] == v)) + for k, v in list(self.event_id.items())[:max_events]] + if len(self.event_id) > 0: + s += ',' + '\n '.join([''] + counts) + if len(self.event_id) > max_events: + not_shown_events = len(self.event_id) - max_events + s += f"\n and {not_shown_events} more events ..." 
+ class_name = self.__class__.__name__ + class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name + return '<%s | %s>' % (class_name, s) + + def _repr_html_(self): + from .html_templates import repr_templates_env + if self.baseline is None: + baseline = 'off' + else: + baseline = tuple([f'{b:.3f}' for b in self.baseline]) + baseline = f'{baseline[0]} – {baseline[1]} sec' + + if isinstance(self.event_id, dict): + event_strings = [] + for k, v in sorted(self.event_id.items()): + n_events = sum(self.events[:, 2] == v) + event_strings.append(f'{k}: {n_events}') + elif isinstance(self.event_id, list): + event_strings = [] + for k in self.event_id: + n_events = sum(self.events[:, 2] == k) + event_strings.append(f'{k}: {n_events}') + elif isinstance(self.event_id, int): + n_events = len(self.events[:, 2]) + event_strings = [f'{self.event_id}: {n_events}'] + else: + event_strings = None + + t = repr_templates_env.get_template('epochs.html.jinja') + t = t.render(epochs=self, baseline=baseline, events=event_strings) + return t + + @verbose + def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): + """Crop a time interval from the epochs. + + Parameters + ---------- + tmin : float | None + Start time of selection in seconds. + tmax : float | None + End time of selection in seconds. + %(include_tmax)s + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The cropped epochs object, modified in-place. + + Notes + ----- + %(notes_tmax_included_by_default)s + """ + # XXX this could be made to work on non-preloaded data... + _check_preload(self, 'Modifying data of epochs') + + if tmin is None: + tmin = self.tmin + elif tmin < self.tmin: + warn('tmin is not in epochs time interval. tmin is set to ' + 'epochs.tmin') + tmin = self.tmin + + if tmax is None: + tmax = self.tmax + elif tmax > self.tmax: + warn('tmax is not in epochs time interval. tmax is set to ' + 'epochs.tmax') + tmax = self.tmax + include_tmax = True + + tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'], + include_tmax=include_tmax) + self._set_times(self.times[tmask]) + self._raw_times = self._raw_times[tmask] + self._data = self._data[:, :, tmask] + + # Adjust rejection period + if self.reject_tmin is not None and self.reject_tmin < self.tmin: + logger.info( + f'reject_tmin is not in epochs time interval. ' + f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)') + self.reject_tmin = self.tmin + if self.reject_tmax is not None and self.reject_tmax > self.tmax: + logger.info( + f'reject_tmax is not in epochs time interval. ' + f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)') + self.reject_tmax = self.tmax + return self + + def copy(self): + """Return copy of Epochs instance. + + Returns + ------- + epochs : instance of Epochs + A copy of the object. + """ + return deepcopy(self) + + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + cls = self.__class__ + result = cls.__new__(cls) + for k, v in self.__dict__.items(): + # drop_log is immutable and _raw is private (and problematic to + # deepcopy) + if k in ('drop_log', '_raw', '_times_readonly'): + memodict[id(v)] = v + else: + v = deepcopy(v, memodict) + result.__dict__[k] = v + return result + + @verbose + def save(self, fname, split_size='2GB', fmt='single', overwrite=False, + split_naming='neuromag', verbose=True): + """Save epochs in a fif file. + + Parameters + ---------- + fname : str + The name of the file, which should end with ``-epo.fif`` or + ``-epo.fif.gz``. 
+ split_size : str | int + Large raw files are automatically split into multiple pieces. This + parameter specifies the maximum size of each piece. If the + parameter is an integer, it specifies the size in Bytes. It is + also possible to pass a human-readable string, e.g., 100MB. + Note: Due to FIFF file limitations, the maximum split size is 2GB. + + .. versionadded:: 0.10.0 + fmt : str + Format to save data. Valid options are 'double' or + 'single' for 64- or 32-bit float, or for 128- or + 64-bit complex numbers respectively. Note: Data are processed with + double precision. Choosing single-precision, the saved data + will slightly differ due to the reduction in precision. + + .. versionadded:: 0.17 + %(overwrite)s + To overwrite original file (the same one that was loaded), + data must be preloaded upon reading. This defaults to True in 0.18 + but will change to False in 0.19. + + .. versionadded:: 0.18 + %(split_naming)s + + .. versionadded:: 0.24 + %(verbose)s + + Notes + ----- + Bad epochs will be dropped before saving the epochs to disk. + """ + check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz', + '_epo.fif', '_epo.fif.gz')) + + # check for file existence and expand `~` if present + fname = _check_fname(fname=fname, overwrite=overwrite) + + split_size_bytes = _get_split_size(split_size) + + _check_option('fmt', fmt, ['single', 'double']) + + # to know the length accurately. The get_data() call would drop + # bad epochs anyway + self.drop_bad() + # total_size tracks sizes that get split + # over_size tracks overhead (tags, things that get written to each) + if len(self) == 0: + warn('Saving epochs with no data') + total_size = 0 + else: + d = self[0].get_data() + # this should be guaranteed by subclasses + assert d.dtype in ('>f8', '<f8', '>c16', '<c16') + total_size = d.nbytes * len(self) + self._check_consistency() + over_size = 0 + if fmt == "single": + total_size //= 2 # 64bit data converted to 32bit + over_size += 32 # FIF tags + # overhead written to every split file: the full drop_log plus a + # small amount of tag bookkeeping + drop_size = len(json.dumps(self.drop_log)) + 16 + over_size += drop_size + n_epochs = len(self) + n_parts = max(int(np.ceil(total_size / + (split_size_bytes - over_size))), 1) + assert n_parts >= 1, n_parts + if n_parts > 1: + logger.info(f'Splitting into {n_parts} parts') + if n_parts > 100: # This must be an error + raise ValueError( + f'Split size {split_size} would result in writing ' + f'{n_parts} files') + + if len(self.drop_log) > 100000: + warn(f'epochs.drop_log contains {len(self.drop_log)} entries ' + f'which will incur up to a {sizeof_fmt(drop_size)} writing ' + f'overhead (per split file), consider using ' + f'epochs.reset_drop_log_selection() prior to writing') + + epoch_idxs = np.array_split(np.arange(n_epochs), n_parts) + + for part_idx, epoch_idx in enumerate(epoch_idxs): + this_epochs = self[epoch_idx] if n_parts > 1 else self + # avoid missing event_ids in splits + this_epochs.event_id = self.event_id + _save_split(this_epochs, fname, part_idx, n_parts, fmt, + split_naming, overwrite) + + @verbose + def export(self, fname, fmt='auto', *, overwrite=False, verbose=None): + """Export Epochs to external formats. + + Supported formats: EEGLAB (set, uses :mod:`eeglabio`) + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + %(fmt_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_epochs)s + %(export_eeglab_note)s + """ + from .export import export_epochs + export_epochs(fname, self, fmt, overwrite=overwrite, verbose=verbose) + + def equalize_event_counts(self, event_ids=None, method='mintime'): + """Equalize the number of trials in each condition. + + It tries to make the remaining epochs occur as close as possible in + time. 
This method works based on the idea that if there happened to be + some time-varying (like on the scale of minutes) noise characteristics + during a recording, they could be compensated for (to some extent) in + the equalization process. This method thus seeks to reduce any of + those effects by minimizing the differences in the times of the events + within a `~mne.Epochs` instance. For example, if one event type + occurred at time points ``[1, 2, 3, 4, 120, 121]`` and another one + at ``[3.5, 4.5, 120.5, 121.5]``, this method would remove the events at + times ``[1, 2]`` for the first event type – and not the events at times + ``[120, 121]``. + + Parameters + ---------- + event_ids : None | list | dict + The event types to equalize. + + If ``None`` (default), equalize the counts of **all** event types + present in the `~mne.Epochs` instance. + + If a list, each element can either be a string (event name) or a + list of strings. In the case where one of the entries is a list of + strings, event types in that list will be grouped together before + equalizing trial counts across conditions. + + If a dictionary, the keys are considered as the event names whose + counts to equalize, i.e., passing ``dict(A=1, B=2)`` will have the + same effect as passing ``['A', 'B']``. This is useful if you intend + to pass an ``event_id`` dictionary that was used when creating + `~mne.Epochs`. + + In the case where partial matching is used (using ``/`` in + the event names), the event types will be matched according to the + provided tags, that is, processing works as if the ``event_ids`` + matched by the provided tags had been supplied instead. + The ``event_ids`` must identify non-overlapping subsets of the + epochs. + method : str + If ``'truncate'``, events will be truncated from the end of each + type of events. If ``'mintime'``, timing differences between each + event type will be minimized. + + Returns + ------- + epochs : instance of Epochs + The modified instance. It is modified in-place. + indices : array of int + Indices from the original events list that were dropped. + + Notes + ----- + For example (if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2, + 'Nonspatial': 3}``):: + + epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial']) + + would equalize the number of trials in the ``'Nonspatial'`` condition + with the total number of trials in the ``'Left'`` and ``'Right'`` + conditions combined. + + If multiple indices are provided (e.g. ``'Left'`` and ``'Right'`` in + the example above), it is not guaranteed that after equalization the + conditions will contribute equally. E.g., it is possible to end up + with 70 ``'Nonspatial'`` epochs, 69 ``'Left'`` and 1 ``'Right'``. + + .. versionchanged:: 0.23 + Default to equalizing all events in the passed instance if no + event names were specified explicitly. 
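+
+        A short usage sketch (``'auditory'`` and ``'visual'`` are
+        illustrative condition names, not fixed API values):
+
+        >>> epochs, dropped = epochs.equalize_event_counts(
+        ...     ['auditory', 'visual'])  # doctest:+SKIP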
+ """ + from collections.abc import Iterable + _validate_type(event_ids, types=(Iterable, None), + item_name='event_ids', type_name='list-like or None') + if isinstance(event_ids, str): + raise TypeError(f'event_ids must be list-like or None, but ' + f'received a string: {event_ids}') + + if event_ids is None: + event_ids = list(self.event_id) + elif not event_ids: + raise ValueError('event_ids must have at least one element') + + if not self._bad_dropped: + self.drop_bad() + # figure out how to equalize + eq_inds = list() + + # deal with hierarchical tags + ids = self.event_id + orig_ids = list(event_ids) + tagging = False + if "/" in "".join(ids): + # make string inputs a list of length 1 + event_ids = [[x] if isinstance(x, str) else x + for x in event_ids] + for ids_ in event_ids: # check if tagging is attempted + if any([id_ not in ids for id_ in ids_]): + tagging = True + # 1. treat everything that's not in event_id as a tag + # 2a. for tags, find all the event_ids matched by the tags + # 2b. for non-tag ids, just pass them directly + # 3. do this for every input + event_ids = [[k for k in ids + if all((tag in k.split("/") + for tag in id_))] # ids matching all tags + if all(id__ not in ids for id__ in id_) + else id_ # straight pass for non-tag inputs + for id_ in event_ids] + for ii, id_ in enumerate(event_ids): + if len(id_) == 0: + raise KeyError(f"{orig_ids[ii]} not found in the epoch " + "object's event_id.") + elif len({sub_id in ids for sub_id in id_}) != 1: + err = ("Don't mix hierarchical and regular event_ids" + " like in \'%s\'." % ", ".join(id_)) + raise ValueError(err) + + # raise for non-orthogonal tags + if tagging is True: + events_ = [set(self[x].events[:, 0]) for x in event_ids] + doubles = events_[0].intersection(events_[1]) + if len(doubles): + raise ValueError("The two sets of epochs are " + "overlapping. Provide an " + "orthogonal selection.") + + for eq in event_ids: + eq_inds.append(self._keys_to_idx(eq)) + + event_times = [self.events[e, 0] for e in eq_inds] + indices = _get_drop_indices(event_times, method) + # need to re-index indices + indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)]) + self.drop(indices, reason='EQUALIZED_COUNT') + # actually remove the indices + return self, indices + + @verbose + def to_data_frame(self, picks=None, index=None, + scalings=None, copy=True, long_format=False, + time_format='ms', *, verbose=None): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, + additional columns "time", "epoch" (epoch number), and "condition" + (epoch event description) are added, unless ``index`` is not ``None`` + (in which case the columns specified in ``index`` will be used to form + the DataFrame's index instead). + + Parameters + ---------- + %(picks_all)s + %(index_df_epo)s + Valid string values are 'time', 'epoch', and 'condition'. + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + %(long_format_df_epo)s + %(time_format_df)s + + .. 
versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ['time', 'epoch', 'condition'] + valid_time_formats = ['ms', 'timedelta'] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats) + # get data + picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + data = self.get_data()[:, picks, :] + times = self.times + n_epochs, n_picks, n_times = data.shape + data = np.hstack(data).T # (time*epochs) x signals + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = np.tile(times, n_epochs) + times = _convert_times(self, times, time_format) + mindex.append(('time', times)) + rev_event_id = {v: k for k, v in self.event_id.items()} + conditions = [rev_event_id[k] for k in self.events[:, 2]] + mindex.append(('condition', np.repeat(conditions, n_times))) + mindex.append(('epoch', np.repeat(self.selection, n_times))) + assert all(len(mdx) == len(mindex[0]) for mdx in mindex) + # build DataFrame + df = _build_data_frame(self, data, picks, long_format, mindex, index, + default_index=['condition', 'epoch', 'time']) + return df + + def as_type(self, ch_type='grad', mode='fast'): + """Compute virtual epochs using interpolated fields. + + .. Warning:: Using virtual epochs to compute inverse can yield + unexpected results. The virtual channels have ``'_v'`` appended + at the end of the names to emphasize that the data contained in + them are interpolated. + + Parameters + ---------- + ch_type : str + The destination channel type. It can be 'mag' or 'grad'. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications. + + Returns + ------- + epochs : instance of mne.EpochsArray + The transformed epochs object containing only virtual channels. + + Notes + ----- + This method returns a copy and does not modify the data it + operates on. It also returns an EpochsArray instance. + + .. versionadded:: 0.20.0 + """ + from .forward import _as_meg_type_inst + return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) + + +def _drop_log_stats(drop_log, ignore=('IGNORED',)): + """Compute drop log stats. + + Parameters + ---------- + drop_log : list of list + Epoch drop log from Epochs.drop_log. + ignore : list + The drop reasons to ignore. + + Returns + ------- + perc : float + Total percentage of epochs dropped. + """ + if not isinstance(drop_log, tuple) or \ + not all(isinstance(d, tuple) for d in drop_log) or \ + not all(isinstance(s, str) for d in drop_log for s in d): + raise TypeError('drop_log must be a tuple of tuple of str') + perc = 100 * np.mean([len(d) > 0 for d in drop_log + if not any(r in ignore for r in d)]) + return perc + + +def make_metadata(events, event_id, tmin, tmax, sfreq, + row_events=None, keep_first=None, keep_last=None): + """Generate metadata from events for use with `mne.Epochs`. + + This function mimics the epoching process (it constructs time windows + around time-locked "events of interest") and collates information about + any other events that occurred within those time windows. 
The information + is returned as a :class:`pandas.DataFrame` suitable for use as + `~mne.Epochs` metadata: one row per time-locked event, and columns + indicating presence/absence and latency of each ancillary event type. + + The function will also return a new ``events`` array and ``event_id`` + dictionary that correspond to the generated metadata. + + Parameters + ---------- + events : array, shape (m, 3) + The :term:`events array <events>`. By default, the returned metadata + :class:`~pandas.DataFrame` will have as many rows as the events array. + To create rows for only a subset of events, pass the ``row_events`` + parameter. + event_id : dict + A mapping from event names (keys) to event IDs (values). The event + names will be incorporated as columns of the returned metadata + :class:`~pandas.DataFrame`. + tmin, tmax : float + Start and end of the time interval for metadata generation in seconds, + relative to the time-locked event of the respective time window. + + .. note:: + If you are planning to attach the generated metadata to + `~mne.Epochs` and intend to include only events that fall inside + your epochs time interval, pass the same ``tmin`` and ``tmax`` + values here as you use for your epochs. + + sfreq : float + The sampling frequency of the data from which the events array was + extracted. + row_events : list of str | str | None + Event types around which to create the time windows / for which to + create **rows** in the returned metadata :class:`pandas.DataFrame`. If + provided, the string(s) must be keys of ``event_id``. If ``None`` + (default), rows are created for **all** event types present in + ``event_id``. + keep_first : str | list of str | None + Specify subsets of :term:`hierarchical event descriptors` (HEDs, + inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which + the **first occurrence** within each time window shall be stored in + addition to the original events. + + .. note:: + There is currently no way to retain **all** occurrences of a + repeated event. The ``keep_first`` parameter can be used to specify + subsets of HEDs, effectively creating a new event type that is the + union of all event types described by the matching HED pattern. + Only the very first event of this set will be kept. + + For example, you might have two response event types, + ``response/left`` and ``response/right``; and in trials with both + responses occurring, you want to keep only the first response. In this + case, you can pass ``keep_first='response'``. This will add two new + columns to the metadata: ``response``, indicating at what **time** the + event occurred, relative to the time-locked event; and + ``first_response``, stating which **type** (``'left'`` or ``'right'``) + of event occurred. + To match specific subsets of HEDs describing different sets of events, + pass a list of these subsets, e.g. + ``keep_first=['response', 'stimulus']``. If ``None`` (default), no + event aggregation will take place and no new columns will be created. + + .. note:: + By default, this function will always retain the first instance + of any event in each time window. For example, if a time window + contains two ``'response'`` events, the generated ``response`` + column will automatically refer to the first of the two events. In + this specific case, it is therefore **not** necessary to make use of + the ``keep_first`` parameter – unless you need to differentiate + between two types of responses, like in the example above. 
+ + keep_last : list of str | None + Same as ``keep_first``, but for keeping only the **last** occurrence + of matching events. The column indicating the **type** of an event + ``myevent`` will be named ``last_myevent``. + + Returns + ------- + metadata : pandas.DataFrame + Metadata for each row event, with the following columns: + + - ``event_name``, with strings indicating the name of the time-locked + event ("row event") for that specific time window + + - one column per event type in ``event_id``, with the same name; floats + indicating the latency of the event in seconds, relative to the + time-locked event + + - if applicable, additional columns named after the ``keep_first`` and + ``keep_last`` event types; floats indicating the latency of the + event in seconds, relative to the time-locked event + + - if applicable, additional columns ``first_{event_type}`` and + ``last_{event_type}`` for ``keep_first`` and ``keep_last`` event + types, respectively; the values will be strings indicating which event + types were matched by the provided HED patterns + + events : array, shape (n, 3) + The events corresponding to the generated metadata, i.e. one + time-locked event per row. + event_id : dict + The event dictionary corresponding to the new events array. This will + be identical to the input dictionary unless ``row_events`` is supplied, + in which case it will only contain the events provided there. + + Notes + ----- + The time window used for metadata generation need not correspond to the + time window used to create the `~mne.Epochs`, to which the metadata will + be attached; it may well be much shorter or longer, or not overlap at all, + if desired. This can be useful, for example, to include events that occurred + before or after an epoch, e.g. during the inter-trial interval. + + .. versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + pd = _check_pandas_installed() + + _validate_type(event_id, types=(dict,), item_name='event_id') + _validate_type(row_events, types=(None, str, list, tuple), + item_name='row_events') + _validate_type(keep_first, types=(None, str, list, tuple), + item_name='keep_first') + _validate_type(keep_last, types=(None, str, list, tuple), + item_name='keep_last') + + if not event_id: + raise ValueError('event_id dictionary must contain at least one entry') + + def _ensure_list(x): + if x is None: + return [] + elif isinstance(x, str): + return [x] + else: + return list(x) + + row_events = _ensure_list(row_events) + keep_first = _ensure_list(keep_first) + keep_last = _ensure_list(keep_last) + + keep_first_and_last = set(keep_first) & set(keep_last) + if keep_first_and_last: + raise ValueError(f'The event names in keep_first and keep_last must ' + f'be mutually exclusive. 
+                         f'be mutually exclusive. Specified in both: '
+                         f'{", ".join(sorted(keep_first_and_last))}')
+    del keep_first_and_last
+
+    for param_name, values in dict(keep_first=keep_first,
+                                   keep_last=keep_last).items():
+        for first_last_event_name in values:
+            try:
+                match_event_names(event_id, [first_last_event_name])
+            except KeyError:
+                raise ValueError(
+                    f'Event "{first_last_event_name}", specified in '
+                    f'{param_name}, cannot be found in event_id dictionary')
+
+    event_name_diff = sorted(set(row_events) - set(event_id.keys()))
+    if event_name_diff:
+        raise ValueError(
+            f'Present in row_events, but missing from event_id: '
+            f'{", ".join(event_name_diff)}')
+    del event_name_diff
+
+    # First and last sample of each epoch, relative to the time-locked event
+    # This follows the approach taken in mne.Epochs
+    start_sample = int(round(tmin * sfreq))
+    stop_sample = int(round(tmax * sfreq)) + 1
+
+    # Make indexing easier
+    # We create the DataFrame before subsetting the events so we end up with
+    # indices corresponding to the original event indices. Not used for now,
+    # but might come in handy sometime later
+    events_df = pd.DataFrame(events, columns=('sample', 'prev_id', 'id'))
+    id_to_name_map = {v: k for k, v in event_id.items()}
+
+    # Only keep events that are of interest
+    events = events[np.in1d(events[:, 2], list(event_id.values()))]
+    events_df = events_df.loc[events_df['id'].isin(event_id.values()), :]
+
+    # Prepare & condition the metadata DataFrame
+
+    # Avoid column name duplications if the exact same event name appears in
+    # event_id.keys() and keep_first / keep_last simultaneously
+    keep_first_cols = [col for col in keep_first if col not in event_id]
+    keep_last_cols = [col for col in keep_last if col not in event_id]
+    first_cols = [f'first_{col}' for col in keep_first_cols]
+    last_cols = [f'last_{col}' for col in keep_last_cols]
+
+    columns = ['event_name',
+               *event_id.keys(),
+               *keep_first_cols,
+               *keep_last_cols,
+               *first_cols,
+               *last_cols]
+
+    data = np.empty((len(events_df), len(columns)))
+    metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index)
+
+    # Event names
+    metadata.iloc[:, 0] = ''
+
+    # Event times
+    start_idx = 1
+    stop_idx = (start_idx + len(event_id.keys()) +
+                len(keep_first_cols + keep_last_cols))
+    metadata.iloc[:, start_idx:stop_idx] = np.nan
+
+    # keep_first and keep_last names
+    start_idx = stop_idx
+    metadata.iloc[:, start_idx:] = None
+
+    # We're all set, let's iterate over all events and fill in the
+    # respective cells in the metadata. We will subset this to include only
+    # `row_events` later
+    for row_event in events_df.itertuples(name='RowEvent'):
+        row_idx = row_event.Index
+        metadata.loc[row_idx, 'event_name'] = \
+            id_to_name_map[row_event.id]
+
+        # Determine which events fall into the current epoch
+        window_start_sample = row_event.sample + start_sample
+        window_stop_sample = row_event.sample + stop_sample
+        events_in_window = events_df.loc[
+            (events_df['sample'] >= window_start_sample) &
+            (events_df['sample'] <= window_stop_sample), :]
+
+        assert not events_in_window.empty
+
+        # Store the metadata
+        for event in events_in_window.itertuples(name='Event'):
+            event_sample = event.sample - row_event.sample
+            event_time = event_sample / sfreq
+            event_time = 0 if np.isclose(event_time, 0) else event_time
+            event_name = id_to_name_map[event.id]
+
+            if not np.isnan(metadata.loc[row_idx, event_name]):
+                # Event already exists in current time window!
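+                # Events are visited in ascending sample order, so any latency
+                # already stored here belongs to an earlier (or simultaneous)
+                # occurrence; the assertion below double-checks this. Unless
+                # this event type was requested via keep_last, the first
+                # occurrence wins and the later one is skipped.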
+                assert metadata.loc[row_idx, event_name] <= event_time
+
+                if event_name not in keep_last:
+                    continue
+
+            metadata.loc[row_idx, event_name] = event_time
+
+            # Handle keep_first and keep_last event aggregation
+            for event_group_name in keep_first + keep_last:
+                if event_name not in match_event_names(
+                    event_id, [event_group_name]
+                ):
+                    continue
+
+                if event_group_name in keep_first:
+                    first_last_col = f'first_{event_group_name}'
+                else:
+                    first_last_col = f'last_{event_group_name}'
+
+                old_time = metadata.loc[row_idx, event_group_name]
+                if not np.isnan(old_time):
+                    if ((event_group_name in keep_first and
+                            old_time <= event_time) or
+                            (event_group_name in keep_last and
+                             old_time >= event_time)):
+                        continue
+
+                if event_group_name not in event_id:
+                    # This is an HED. Strip redundant information from the
+                    # event name
+                    name = (event_name
+                            .replace(event_group_name, '')
+                            .replace('//', '/')
+                            .strip('/'))
+                    metadata.loc[row_idx, first_last_col] = name
+                    del name
+
+                metadata.loc[row_idx, event_group_name] = event_time
+
+    # Only keep rows of interest
+    if row_events:
+        event_id_timelocked = {name: val for name, val in event_id.items()
+                               if name in row_events}
+        events = events[np.in1d(events[:, 2],
+                                list(event_id_timelocked.values()))]
+        metadata = metadata.loc[
+            metadata['event_name'].isin(event_id_timelocked)]
+        assert len(events) == len(metadata)
+        event_id = event_id_timelocked
+
+    return metadata, events, event_id
+
+
+@fill_doc
+class Epochs(BaseEpochs):
+    """Epochs extracted from a Raw instance.
+
+    Parameters
+    ----------
+    %(raw_epochs)s
+    %(events_epochs)s
+    %(event_id)s
+    %(epochs_tmin_tmax)s
+    %(baseline_epochs)s
+        Defaults to ``(None, 0)``, i.e. beginning of the data until
+        time point zero.
+    %(picks_all)s
+    preload : bool
+        %(epochs_preload)s
+    %(reject_epochs)s
+    %(flat)s
+    %(proj_epochs)s
+    %(decim)s
+    %(epochs_reject_tmin_tmax)s
+    %(detrend_epochs)s
+    %(on_missing_epochs)s
+    %(reject_by_annotation_epochs)s
+    %(metadata_epochs)s
+    %(event_repeated_epochs)s
+    %(verbose)s
+
+    Attributes
+    ----------
+    %(info_not_none)s
+    event_id : dict
+        Names of conditions corresponding to event_ids.
+    ch_names : list of str
+        List of channel names.
+    selection : array
+        List of indices of selected events (not dropped or ignored etc.). For
+        example, if the original event array had 4 events and the second event
+        has been dropped, this attribute would be np.array([0, 2, 3]).
+    preload : bool
+        Indicates whether epochs are in memory.
+    drop_log : tuple of tuple
+        A tuple of the same length as the event array used to initialize the
+        Epochs object. If the i-th original event is still part of the
+        selection, drop_log[i] will be an empty tuple; otherwise it will be
+        a tuple of the reasons the event is no longer in the selection, e.g.:
+
+        - 'IGNORED'
+            If it isn't part of the current subset defined by the user
+        - 'NO_DATA' or 'TOO_SHORT'
+            If the epoch didn't contain enough data
+        - names of channels
+            If those channels exceeded the amplitude rejection threshold
+        - 'EQUALIZED_COUNTS'
+            See :meth:`~mne.Epochs.equalize_event_counts`
+        - 'USER'
+            For user-defined reasons (see :meth:`~mne.Epochs.drop`).
+    filename : str
+        The filename of the object.
+    times : ndarray
+        Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval
+        between consecutive time samples is equal to the inverse of the
+        sampling frequency.
+ + See Also + -------- + mne.epochs.combine_event_ids + mne.Epochs.equalize_event_counts + + Notes + ----- + When accessing data, Epochs are detrended, baseline-corrected, and + decimated, then projectors are (optionally) applied. + + For indexing and slicing using ``epochs[...]``, see + :meth:`mne.Epochs.__getitem__`. + + All methods for iteration over objects (using :meth:`mne.Epochs.__iter__`, + :meth:`mne.Epochs.iter_evoked` or :meth:`mne.Epochs.next`) use the same + internal state. + + If ``event_repeated`` is set to ``'merge'``, the coinciding events + (duplicates) will be merged into a single event_id and assigned a new + id_number as:: + + event_id['{event_id_1}/{event_id_2}/...'] = new_id_number + + For example with the event_id ``{'aud': 1, 'vis': 2}`` and the events + ``[[0, 0, 1], [0, 0, 2]]``, the "merge" behavior will update both event_id + and events to be: ``{'aud/vis': 3}`` and ``[[0, 0, 3]]`` respectively. + + There is limited support for :class:`~mne.Annotations` in the + :class:`~mne.Epochs` class. Currently annotations that are present in the + :class:`~mne.io.Raw` object will be preserved in the resulting + :class:`~mne.Epochs` object, but: + + 1. It is not yet possible to add annotations + to the Epochs object programmatically (via code) or interactively + (through the plot window) + 2. Concatenating :class:`~mne.Epochs` objects + that contain annotations is not supported, and any annotations will + be dropped when concatenating. + 3. Annotations will be lost on save. + """ + + @verbose + def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5, + baseline=(None, 0), picks=None, preload=False, reject=None, + flat=None, proj=True, decim=1, reject_tmin=None, + reject_tmax=None, detrend=None, on_missing='raise', + reject_by_annotation=True, metadata=None, + event_repeated='error', verbose=None): # noqa: D102 + if not isinstance(raw, BaseRaw): + raise ValueError('The first argument to `Epochs` must be an ' + 'instance of mne.io.BaseRaw') + info = deepcopy(raw.info) + + # proj is on when applied in Raw + proj = proj or raw.proj + + self.reject_by_annotation = reject_by_annotation + + # keep track of original sfreq (needed for annotations) + raw_sfreq = raw.info['sfreq'] + + # call BaseEpochs constructor + super(Epochs, self).__init__( + info, None, events, event_id, tmin, tmax, + metadata=metadata, baseline=baseline, raw=raw, picks=picks, + reject=reject, flat=flat, decim=decim, reject_tmin=reject_tmin, + reject_tmax=reject_tmax, detrend=detrend, + proj=proj, on_missing=on_missing, preload_at_end=preload, + event_repeated=event_repeated, verbose=verbose, + raw_sfreq=raw_sfreq, annotations=raw.annotations) + + @verbose + def _get_epoch_from_raw(self, idx, verbose=None): + """Load one epoch from disk. + + Returns + ------- + data : array | str | None + If string, it's details on rejection reason. + If array, it's the data in the desired range (good segment) + If None, it means no data is available. + """ + if self._raw is None: + # This should never happen, as raw=None only if preload=True + raise ValueError('An error has occurred, no valid raw file found. 
'
+                             'Please report this to the mne-python '
+                             'developers.')
+        sfreq = self._raw.info['sfreq']
+        event_samp = self.events[idx, 0]
+        # Read a data segment from "start" to "stop" in samples
+        first_samp = self._raw.first_samp
+        start = int(round(event_samp + self._raw_times[0] * sfreq))
+        start -= first_samp
+        stop = start + len(self._raw_times)
+
+        # reject_tmin and reject_tmax need to be converted to samples to
+        # check the reject_by_annotation boundaries: reject_start, reject_stop
+        reject_tmin = self.reject_tmin
+        if reject_tmin is None:
+            reject_tmin = self._raw_times[0]
+        reject_start = int(round(event_samp + reject_tmin * sfreq))
+        reject_start -= first_samp
+
+        reject_tmax = self.reject_tmax
+        if reject_tmax is None:
+            reject_tmax = self._raw_times[-1]
+        diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq))
+        reject_stop = stop - diff
+
+        logger.debug('    Getting epoch for %d-%d' % (start, stop))
+        data = self._raw._check_bad_segment(start, stop, self.picks,
+                                            reject_start, reject_stop,
+                                            self.reject_by_annotation)
+        return data
+
+
+@fill_doc
+class EpochsArray(BaseEpochs):
+    """Epochs object from numpy array.
+
+    Parameters
+    ----------
+    data : array, shape (n_epochs, n_channels, n_times)
+        The channels' time series for each epoch. See notes for proper units
+        of measure.
+    %(info_not_none)s Consider using :func:`mne.create_info` to populate this
+        structure.
+    events : None | array of int, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+        If None (default), all event values are set to 1 and event
+        time-samples are set to range(n_epochs).
+    tmin : float
+        Start time before event. If nothing provided, defaults to 0.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a dict
+        is created with string integer names corresponding to the event
+        id integers.
+    %(reject_epochs)s
+    %(flat)s
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    %(baseline_epochs)s
+        Defaults to ``None``, i.e. no baseline correction.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. See :class:`mne.Epochs` for details.
+    on_missing : str
+        See :class:`mne.Epochs` docstring for details.
+    metadata : instance of pandas.DataFrame | None
+        See :class:`mne.Epochs` docstring for details.
+
+        .. versionadded:: 0.16
+    selection : ndarray | None
+        The selection compared to the original set of epochs.
+        Can be None to use ``np.arange(len(events))``.
+
+        .. versionadded:: 0.16
+    %(verbose)s
+
+    See Also
+    --------
+    create_info
+    EvokedArray
+    io.RawArray
+
+    Notes
+    -----
+    Proper units of measure:
+
+    * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
+    * T: mag
+    * T/m: grad
+    * M: hbo, hbr
+    * Am: dipole
+    * AU: misc
+
+    EpochsArray does not set `Annotations`.
+    If you would like to create
+    simulated data with Annotations that are then preserved in the Epochs
+    object, you would use `mne.io.RawArray` first and then create an
+    `mne.Epochs` object.
+    """
+
+    @verbose
+    def __init__(self, data, info, events=None, tmin=0, event_id=None,
+                 reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, baseline=None, proj=True,
+                 on_missing='raise', metadata=None, selection=None,
+                 verbose=None):  # noqa: D102
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+        if data.ndim != 3:
+            raise ValueError('Data must be a 3D array of shape (n_epochs, '
+                             'n_channels, n_samples)')
+
+        if len(info['ch_names']) != data.shape[1]:
+            raise ValueError('Info and data must have same number of '
+                             'channels.')
+        if events is None:
+            n_epochs = len(data)
+            events = _gen_events(n_epochs)
+        info = info.copy()  # do not modify original info
+        tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
+
+        super(EpochsArray, self).__init__(
+            info, data, events, event_id, tmin, tmax, baseline,
+            reject=reject, flat=flat, reject_tmin=reject_tmin,
+            reject_tmax=reject_tmax, decim=1, metadata=metadata,
+            selection=selection, proj=proj, on_missing=on_missing,
+            verbose=verbose)
+        if self.baseline is not None:
+            self._do_baseline = True
+        if len(events) != np.in1d(self.events[:, 2],
+                                  list(self.event_id.values())).sum():
+            raise ValueError('The events must only contain event numbers '
+                             'from event_id')
+        detrend_picks = self._detrend_picks
+        for e in self._data:
+            # This is safe without assignment b/c there is no decim
+            self._detrend_offset_decim(e, detrend_picks)
+        self.drop_bad()
+
+
+def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
+    """Collapse event_ids from an epochs instance into a new event_id.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs to operate on.
+    old_event_ids : str | list
+        Conditions to collapse together.
+    new_event_id : dict | int
+        A one-element dict (or a single integer) for the new
+        condition. Note that for safety, this cannot be any
+        existing id (in epochs.event_id.values()).
+    copy : bool
+        Whether to return a new instance or modify in place.
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The modified epochs.
+
+    Notes
+    -----
+    For example, if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2}``::
+
+        combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
+
+    would create a 'Directional' entry in epochs.event_id replacing
+    'Left' and 'Right' (combining their trials).
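+
+    A minimal usage sketch (assuming an ``epochs`` object with these two
+    event types exists)::
+
+        >>> epochs = combine_event_ids(epochs, ['Left', 'Right'],
+        ...                            {'Directional': 12})  # doctest: +SKIP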
+    """
+    epochs = epochs.copy() if copy else epochs
+    old_event_ids = np.asanyarray(old_event_ids)
+    if isinstance(new_event_id, int):
+        new_event_id = {str(new_event_id): new_event_id}
+    else:
+        if not isinstance(new_event_id, dict):
+            raise ValueError('new_event_id must be a dict or int')
+        if not len(list(new_event_id.keys())) == 1:
+            raise ValueError('new_event_id dict must have one entry')
+    new_event_num = list(new_event_id.values())[0]
+    new_event_num = operator.index(new_event_num)
+    if new_event_num in epochs.event_id.values():
+        raise ValueError('new_event_id value must not already exist')
+    # could use .pop() here, but if a later one doesn't exist, we're
+    # in trouble, so run them all here and pop() later
+    old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
+    # find the ones to replace
+    inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
+                  old_event_nums[np.newaxis, :], axis=1)
+    # replace the event numbers in the events list
+    epochs.events[inds, 2] = new_event_num
+    # delete old entries
+    for key in old_event_ids:
+        epochs.event_id.pop(key)
+    # add the new entry
+    epochs.event_id.update(new_event_id)
+    return epochs
+
+
+def equalize_epoch_counts(epochs_list, method='mintime'):
+    """Equalize the number of trials in multiple Epoch instances.
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs instances
+        The Epochs instances to equalize trial counts for.
+    method : str
+        If 'truncate', events will be truncated from the end of each event
+        list. If 'mintime', timing differences between each event list will be
+        minimized.
+
+    Notes
+    -----
+    This tries to make the remaining epochs occur as close as possible in
+    time. This method works based on the idea that if there happened to be
+    some time-varying (like on the scale of minutes) noise characteristics
+    during a recording, they could be compensated for (to some extent) in the
+    equalization process. This method thus seeks to reduce any of those
+    effects by minimizing the differences in the times of the events in the
+    two sets of epochs. For example, if one had event times
+    [1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5], it
+    would remove events at times [1, 2] in the first epochs and not
+    [120, 121].
+ + Examples + -------- + >>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP + """ + if not all(isinstance(e, BaseEpochs) for e in epochs_list): + raise ValueError('All inputs must be Epochs instances') + + # make sure bad epochs are dropped + for e in epochs_list: + if not e._bad_dropped: + e.drop_bad() + event_times = [e.events[:, 0] for e in epochs_list] + indices = _get_drop_indices(event_times, method) + for e, inds in zip(epochs_list, indices): + e.drop(inds, reason='EQUALIZED_COUNT') + + +def _get_drop_indices(event_times, method): + """Get indices to drop from multiple event timing lists.""" + small_idx = np.argmin([e.shape[0] for e in event_times]) + small_e_times = event_times[small_idx] + _check_option('method', method, ['mintime', 'truncate']) + indices = list() + for e in event_times: + if method == 'mintime': + mask = _minimize_time_diff(small_e_times, e) + else: + mask = np.ones(e.shape[0], dtype=bool) + mask[small_e_times.shape[0]:] = False + indices.append(np.where(np.logical_not(mask))[0]) + + return indices + + +def _minimize_time_diff(t_shorter, t_longer): + """Find a boolean mask to minimize timing differences.""" + from scipy.interpolate import interp1d + keep = np.ones((len(t_longer)), dtype=bool) + # special case: length zero or one + if len(t_shorter) < 2: # interp1d won't work + keep.fill(False) + if len(t_shorter) == 1: + idx = np.argmin(np.abs(t_longer - t_shorter)) + keep[idx] = True + return keep + scores = np.ones((len(t_longer))) + x1 = np.arange(len(t_shorter)) + # The first set of keep masks to test + kwargs = dict(copy=False, bounds_error=False, assume_sorted=True) + shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1], + **kwargs) + for ii in range(len(t_longer) - len(t_shorter)): + scores.fill(np.inf) + # set up the keep masks to test, eliminating any rows that are already + # gone + keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep] + keep_mask[:, ~keep] = False + # Check every possible removal to see if it minimizes + x2 = np.arange(len(t_longer) - ii - 1) + t_keeps = np.array([t_longer[km] for km in keep_mask]) + longer_interp = interp1d(x2, t_keeps, axis=1, + fill_value=t_keeps[:, -1], + **kwargs) + d1 = longer_interp(x1) - t_shorter + d2 = shorter_interp(x2) - t_keeps + scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1) + keep[np.argmin(scores)] = False + return keep + + +@verbose +def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False, + ignore_chs=[], verbose=None): + """Test if data segment e is good according to reject and flat. + + If full_report=True, it will give True/False as well as a list of all + offending channels. 
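+
+    Returns
+    -------
+    bool | tuple
+        With ``full_report=False``, a single boolean indicating whether the
+        segment is good. With ``full_report=True``, a tuple of that boolean
+        and a tuple of the offending channel names (``None`` if the segment
+        is good).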
+ """ + bad_tuple = tuple() + has_printed = False + checkable = np.ones(len(ch_names), dtype=bool) + checkable[np.array([c in ignore_chs + for c in ch_names], dtype=bool)] = False + for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']): + if refl is not None: + for key, thresh in refl.items(): + idx = channel_type_idx[key] + name = key.upper() + if len(idx) > 0: + e_idx = e[idx] + deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1) + checkable_idx = checkable[idx] + idx_deltas = np.where(np.logical_and(f(deltas, thresh), + checkable_idx))[0] + + if len(idx_deltas) > 0: + bad_names = [ch_names[idx[i]] for i in idx_deltas] + if (not has_printed): + logger.info(' Rejecting %s epoch based on %s : ' + '%s' % (t, name, bad_names)) + has_printed = True + if not full_report: + return False + else: + bad_tuple += tuple(bad_names) + + if not full_report: + return True + else: + if bad_tuple == (): + return True, None + else: + return False, bad_tuple + + +def _read_one_epoch_file(f, tree, preload): + """Read a single FIF file.""" + with f as fid: + # Read the measurement info + info, meas = read_meas_info(fid, tree, clean_bads=True) + + # read in the Annotations if they exist + annotations = _read_annotations_fif(fid, tree) + events, mappings = _read_events_fif(fid, tree) + + # Metadata + metadata = None + metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA) + if len(metadata_tree) > 0: + for dd in metadata_tree[0]['directory']: + kind = dd.kind + pos = dd.pos + if kind == FIFF.FIFF_DESCRIPTION: + metadata = read_tag(fid, pos).data + metadata = _prepare_read_metadata(metadata) + break + + # Locate the data of interest + processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) + del meas + if len(processed) == 0: + raise ValueError('Could not find processed data') + + epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS) + if len(epochs_node) == 0: + # before version 0.11 we errantly saved with this tag instead of + # an MNE tag + epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS) + if len(epochs_node) == 0: + epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11 + if len(epochs_node) == 0: + raise ValueError('Could not find epochs data') + + my_epochs = epochs_node[0] + + # Now find the data in the block + data = None + data_tag = None + bmin, bmax = None, None + baseline = None + selection = None + drop_log = None + raw_sfreq = None + reject_params = {} + for k in range(my_epochs['nent']): + kind = my_epochs['directory'][k].kind + pos = my_epochs['directory'][k].pos + if kind == FIFF.FIFF_FIRST_SAMPLE: + tag = read_tag(fid, pos) + first = int(tag.data) + elif kind == FIFF.FIFF_LAST_SAMPLE: + tag = read_tag(fid, pos) + last = int(tag.data) + elif kind == FIFF.FIFF_EPOCH: + # delay reading until later + fid.seek(pos, 0) + data_tag = read_tag_info(fid) + data_tag.pos = pos + data_tag.type = data_tag.type ^ (1 << 30) + elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]: + # Constant 304 was used before v0.11 + tag = read_tag(fid, pos) + bmin = float(tag.data) + elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]: + # Constant 305 was used before v0.11 + tag = read_tag(fid, pos) + bmax = float(tag.data) + elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION: + tag = read_tag(fid, pos) + selection = np.array(tag.data) + elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: + tag = read_tag(fid, pos) + drop_log = tag.data + drop_log = json.loads(drop_log) + drop_log = tuple(tuple(x) for x in drop_log) + elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT: + tag = read_tag(fid, pos) + 
reject_params = json.loads(tag.data) + elif kind == FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ: + tag = read_tag(fid, pos) + raw_sfreq = tag.data + + if bmin is not None or bmax is not None: + baseline = (bmin, bmax) + + n_samp = last - first + 1 + logger.info(' Found the data of interest:') + logger.info(' t = %10.2f ... %10.2f ms' + % (1000 * first / info['sfreq'], + 1000 * last / info['sfreq'])) + if info['comps'] is not None: + logger.info(' %d CTF compensation matrices available' + % len(info['comps'])) + + # Inspect the data + if data_tag is None: + raise ValueError('Epochs data not found') + epoch_shape = (len(info['ch_names']), n_samp) + size_expected = len(events) * np.prod(epoch_shape) + # on read double-precision is always used + if data_tag.type == FIFF.FIFFT_FLOAT: + datatype = np.float64 + fmt = '>f4' + elif data_tag.type == FIFF.FIFFT_DOUBLE: + datatype = np.float64 + fmt = '>f8' + elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT: + datatype = np.complex128 + fmt = '>c8' + elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE: + datatype = np.complex128 + fmt = '>c16' + fmt_itemsize = np.dtype(fmt).itemsize + assert fmt_itemsize in (4, 8, 16) + size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize + + if not size_actual == size_expected: + raise ValueError('Incorrect number of samples (%d instead of %d)' + % (size_actual, size_expected)) + + # Calibration factors + cals = np.array([[info['chs'][k]['cal'] * + info['chs'][k].get('scale', 1.0)] + for k in range(info['nchan'])], np.float64) + + # Read the data + if preload: + data = read_tag(fid, data_tag.pos).data.astype(datatype) + data *= cals + + # Put it all together + tmin = first / info['sfreq'] + tmax = last / info['sfreq'] + event_id = ({str(e): e for e in np.unique(events[:, 2])} + if mappings is None else mappings) + # In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag + # (version < 0.8): + if selection is None: + selection = np.arange(len(events)) + if drop_log is None: + drop_log = ((),) * len(events) + + return (info, data, data_tag, events, event_id, metadata, tmin, tmax, + baseline, selection, drop_log, epoch_shape, cals, reject_params, + fmt, annotations, raw_sfreq) + + +@verbose +def read_epochs(fname, proj=True, preload=True, verbose=None): + """Read epochs from a fif file. + + Parameters + ---------- + %(fname_epochs)s + %(proj_epochs)s + preload : bool + If True, read all epochs from disk immediately. If ``False``, epochs + will be read on demand. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs. + """ + return EpochsFIF(fname, proj, preload, verbose) + + +class _RawContainer(object): + """Helper for a raw data container.""" + + def __init__(self, fid, data_tag, event_samps, epoch_shape, + cals, fmt): # noqa: D102 + self.fid = fid + self.data_tag = data_tag + self.event_samps = event_samps + self.epoch_shape = epoch_shape + self.cals = cals + self.proj = False + self.fmt = fmt + + def __del__(self): # noqa: D105 + self.fid.close() + + +@fill_doc +class EpochsFIF(BaseEpochs): + """Epochs read from disk. + + Parameters + ---------- + %(fname_epochs)s + %(proj_epochs)s + preload : bool + If True, read all epochs from disk immediately. If False, epochs will + be read on demand. 
+ %(verbose)s + + See Also + -------- + mne.Epochs + mne.epochs.combine_event_ids + mne.Epochs.equalize_event_counts + """ + + @verbose + def __init__(self, fname, proj=True, preload=True, + verbose=None): # noqa: D102 + if _path_like(fname): + check_fname( + fname=fname, filetype='epochs', + endings=('-epo.fif', '-epo.fif.gz', '_epo.fif', '_epo.fif.gz') + ) + fname = _check_fname(fname=fname, must_exist=True, + overwrite='read') + elif not preload: + raise ValueError('preload must be used with file-like objects') + + fnames = [fname] + ep_list = list() + raw = list() + for fname in fnames: + fname_rep = _get_fname_rep(fname) + logger.info('Reading %s ...' % fname_rep) + fid, tree, _ = fiff_open(fname, preload=preload) + next_fname = _get_next_fname(fid, fname, tree) + (info, data, data_tag, events, event_id, metadata, tmin, tmax, + baseline, selection, drop_log, epoch_shape, cals, + reject_params, fmt, annotations, raw_sfreq) = \ + _read_one_epoch_file(fid, tree, preload) + + if (events[:, 0] < 0).any(): + events = events.copy() + warn('Incorrect events detected on disk, setting event ' + 'numbers to consecutive increasing integers') + events[:, 0] = np.arange(1, len(events) + 1) + # here we ignore missing events, since users should already be + # aware of missing events if they have saved data that way + # we also retain original baseline without re-applying baseline + # correction (data is being baseline-corrected when written to + # disk) + epoch = BaseEpochs( + info, data, events, event_id, tmin, tmax, + baseline=None, + metadata=metadata, on_missing='ignore', + selection=selection, drop_log=drop_log, + proj=False, verbose=False, raw_sfreq=raw_sfreq) + epoch.baseline = baseline + epoch._do_baseline = False # might be superfluous but won't hurt + ep_list.append(epoch) + + if not preload: + # store everything we need to index back to the original data + raw.append(_RawContainer(fiff_open(fname)[0], data_tag, + events[:, 0].copy(), epoch_shape, + cals, fmt)) + + if next_fname is not None: + fnames.append(next_fname) + + unsafe_annot_add = raw_sfreq is None + (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata, + baseline, selection, drop_log) = \ + _concatenate_epochs(ep_list, with_data=preload, add_offset=False) + # we need this uniqueness for non-preloaded data to work properly + if len(np.unique(events[:, 0])) != len(events): + raise RuntimeError('Event time samples were not unique') + + # correct the drop log + assert len(drop_log) % len(fnames) == 0 + step = len(drop_log) // len(fnames) + offsets = np.arange(step, len(drop_log) + 1, step) + drop_log = list(drop_log) + for i1, i2 in zip(offsets[:-1], offsets[1:]): + other_log = drop_log[i1:i2] + for k, (a, b) in enumerate(zip(drop_log, other_log)): + if a == ('IGNORED',) and b != ('IGNORED',): + drop_log[k] = b + drop_log = tuple(drop_log[:step]) + + # call BaseEpochs constructor + # again, ensure we're retaining the baseline period originally loaded + # from disk without trying to re-apply baseline correction + super(EpochsFIF, self).__init__( + info, data, events, event_id, tmin, tmax, + baseline=None, raw=raw, + proj=proj, preload_at_end=False, on_missing='ignore', + selection=selection, drop_log=drop_log, filename=fname_rep, + metadata=metadata, verbose=verbose, raw_sfreq=raw_sfreq, + annotations=annotations, **reject_params) + self.baseline = baseline + self._do_baseline = False + # use the private property instead of drop_bad so that epochs + # are not all read from disk for preload=False + self._bad_dropped = True + 
# private property to suggest that people re-save epochs if they add
+        # annotations
+        self._unsafe_annot_add = unsafe_annot_add
+
+    @verbose
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Load one epoch from disk."""
+        # Find the right file and offset to use
+        event_samp = self.events[idx, 0]
+        for raw in self._raw:
+            idx = np.where(raw.event_samps == event_samp)[0]
+            if len(idx) == 1:
+                fmt = raw.fmt
+                idx = idx[0]
+                size = np.prod(raw.epoch_shape) * np.dtype(fmt).itemsize
+                offset = idx * size + 16  # 16 = Tag header
+                break
+        else:
+            # read the correct subset of the data
+            raise RuntimeError('Correct epoch could not be found, please '
+                               'contact mne-python developers')
+        # the following is equivalent to this, but faster:
+        #
+        # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
+        # >>> data *= raw.cals[np.newaxis, :, :]
+        # >>> data = data[idx]
+        #
+        # Eventually this could be refactored in io/tag.py if other functions
+        # could make use of it
+        raw.fid.seek(raw.data_tag.pos + offset, 0)
+        if fmt == '>c8':
+            read_fmt = '>f4'
+        elif fmt == '>c16':
+            read_fmt = '>f8'
+        else:
+            read_fmt = fmt
+        data = np.frombuffer(raw.fid.read(size), read_fmt)
+        if read_fmt != fmt:
+            data = data.view(fmt)
+            data = data.astype(np.complex128)
+        else:
+            data = data.astype(np.float64)
+
+        data.shape = raw.epoch_shape
+        data *= raw.cals
+        return data
+
+
+@fill_doc
+def bootstrap(epochs, random_state=None):
+    """Compute epochs selected by bootstrapping.
+
+    Parameters
+    ----------
+    epochs : Epochs instance
+        The epochs data to be bootstrapped.
+    %(random_state)s
+
+    Returns
+    -------
+    epochs : Epochs instance
+        The bootstrap samples.
+    """
+    if not epochs.preload:
+        raise RuntimeError('Modifying data of epochs is only supported '
+                           'when preloading is used. Use preload=True '
+                           'in the constructor.')
+
+    rng = check_random_state(random_state)
+    epochs_bootstrap = epochs.copy()
+    n_events = len(epochs_bootstrap.events)
+    idx = rng_uniform(rng)(0, n_events, n_events)
+    epochs_bootstrap = epochs_bootstrap[idx]
+    return epochs_bootstrap
+
+
+def _check_merge_epochs(epochs_list):
+    """Aux function."""
+    if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1:
+        raise NotImplementedError("Epochs with unequal values for event_id")
+    if len({epochs.tmin for epochs in epochs_list}) != 1:
+        raise NotImplementedError("Epochs with unequal values for tmin")
+    if len({epochs.tmax for epochs in epochs_list}) != 1:
+        raise NotImplementedError("Epochs with unequal values for tmax")
+    if len({epochs.baseline for epochs in epochs_list}) != 1:
+        raise NotImplementedError("Epochs with unequal values for baseline")
+
+
+@verbose
+def add_channels_epochs(epochs_list, verbose=None):
+    """Concatenate channels, info and data from two Epochs objects.
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs
+        Epochs objects to concatenate.
+    %(verbose)s Defaults to True if any of the input epochs have
+        verbose=True.
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        Concatenated epochs.
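+
+    A minimal usage sketch (assuming two preloaded Epochs objects,
+    ``epochs_meg`` and ``epochs_eeg``, recorded simultaneously and sharing
+    the same events)::
+
+        >>> epochs = add_channels_epochs(
+        ...     [epochs_meg, epochs_eeg])  # doctest: +SKIP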
+ """ + if not all(e.preload for e in epochs_list): + raise ValueError('All epochs must be preloaded.') + + info = _merge_info([epochs.info for epochs in epochs_list]) + data = [epochs._data for epochs in epochs_list] + _check_merge_epochs(epochs_list) + for d in data: + if len(d) != len(data[0]): + raise ValueError('all epochs must be of the same length') + + data = np.concatenate(data, axis=1) + + if len(info['chs']) != data.shape[1]: + err = "Data shape does not match channel number in measurement info" + raise RuntimeError(err) + + events = epochs_list[0].events.copy() + all_same = all(np.array_equal(events, epochs.events) + for epochs in epochs_list[1:]) + if not all_same: + raise ValueError('Events must be the same.') + + proj = any(e.proj for e in epochs_list) + + epochs = epochs_list[0].copy() + epochs.info = info + epochs.picks = None + epochs.events = events + epochs.preload = True + epochs._bad_dropped = True + epochs._data = data + epochs._projector, epochs.info = setup_proj(epochs.info, False, + activate=proj) + return epochs + + +def _concatenate_epochs(epochs_list, with_data=True, add_offset=True, *, + on_mismatch='raise'): + """Auxiliary function for concatenating epochs.""" + if not isinstance(epochs_list, (list, tuple)): + raise TypeError('epochs_list must be a list or tuple, got %s' + % (type(epochs_list),)) + + # to make warning messages only occur once during concatenation + warned = False + + for ei, epochs in enumerate(epochs_list): + if not isinstance(epochs, BaseEpochs): + raise TypeError('epochs_list[%d] must be an instance of Epochs, ' + 'got %s' % (ei, type(epochs))) + + if (getattr(epochs, 'annotations', None) is not None and + len(epochs.annotations) > 0 and + not warned): + warned = True + warn('Concatenation of Annotations within Epochs is not supported ' + 'yet. All annotations will be dropped.') + + # create a copy, so that the Annotations are not modified in place + # from the original object + epochs = epochs.copy() + epochs.set_annotations(None) + out = epochs_list[0] + offsets = [0] + if with_data: + out.drop_bad() + offsets.append(len(out)) + events = [out.events] + metadata = [out.metadata] + baseline, tmin, tmax = out.baseline, out.tmin, out.tmax + raw_sfreq = out._raw_sfreq + info = deepcopy(out.info) + drop_log = out.drop_log + event_id = deepcopy(out.event_id) + selection = out.selection + # offset is the last epoch + tmax + 10 second + shift = int((10 + tmax) * out.info['sfreq']) + events_offset = int(np.max(events[0][:, 0])) + shift + events_overflow = False + warned = False + for ii, epochs in enumerate(epochs_list[1:], 1): + _ensure_infos_match(epochs.info, info, f'epochs[{ii}]', + on_mismatch=on_mismatch) + if not np.allclose(epochs.times, epochs_list[0].times): + raise ValueError('Epochs must have same times') + + if epochs.baseline != baseline: + raise ValueError('Baseline must be same for all epochs') + + if epochs._raw_sfreq != raw_sfreq and not warned: + warned = True + warn('The original raw sampling rate of the Epochs does not ' + 'match for all Epochs. Please proceed cautiously.') + + # compare event_id + common_keys = list(set(event_id).intersection(set(epochs.event_id))) + for key in common_keys: + if not event_id[key] == epochs.event_id[key]: + msg = ('event_id values must be the same for identical keys ' + 'for all concatenated epochs. 
Key "{}" maps to {} in ' + 'some epochs and to {} in others.') + raise ValueError(msg.format(key, event_id[key], + epochs.event_id[key])) + + if with_data: + epochs.drop_bad() + offsets.append(len(epochs)) + evs = epochs.events.copy() + if len(epochs.events) == 0: + warn('One of the Epochs objects to concatenate was empty.') + elif add_offset: + # We need to cast to a native Python int here to detect an + # overflow of a numpy int32 (which is the default on windows) + max_timestamp = int(np.max(evs[:, 0])) + evs[:, 0] += events_offset + events_offset += max_timestamp + shift + if events_offset > INT32_MAX: + warn(f'Event number greater than {INT32_MAX} created, ' + 'events[:, 0] will be assigned consecutive increasing ' + 'integer values') + events_overflow = True + add_offset = False # we no longer need to add offset + events.append(evs) + selection = np.concatenate((selection, epochs.selection)) + drop_log = drop_log + epochs.drop_log + event_id.update(epochs.event_id) + metadata.append(epochs.metadata) + events = np.concatenate(events, axis=0) + # check to see if we exceeded our maximum event offset + if events_overflow: + events[:, 0] = np.arange(1, len(events) + 1) + + # Create metadata object (or make it None) + n_have = sum(this_meta is not None for this_meta in metadata) + if n_have == 0: + metadata = None + elif n_have != len(metadata): + raise ValueError('%d of %d epochs instances have metadata, either ' + 'all or none must have metadata' + % (n_have, len(metadata))) + else: + pd = _check_pandas_installed(strict=False) + if pd is not False: + metadata = pd.concat(metadata) + else: # dict of dicts + metadata = sum(metadata, list()) + assert len(offsets) == (len(epochs_list) if with_data else 0) + 1 + data = None + if with_data: + offsets = np.cumsum(offsets) + for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list): + this_data = epochs.get_data() + if data is None: + data = np.empty( + (offsets[-1], len(out.ch_names), len(out.times)), + dtype=this_data.dtype) + data[start:stop] = this_data + return (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata, + baseline, selection, drop_log) + + +def _finish_concat(info, data, raw_sfreq, events, event_id, tmin, tmax, + metadata, baseline, selection, drop_log): + """Finish concatenation for epochs not read from disk.""" + selection = np.where([len(d) == 0 for d in drop_log])[0] + out = BaseEpochs( + info, data, events, event_id, tmin, tmax, baseline=baseline, + selection=selection, drop_log=drop_log, proj=False, + on_missing='ignore', metadata=metadata, raw_sfreq=raw_sfreq) + out.drop_bad() + return out + + +@verbose +def concatenate_epochs(epochs_list, add_offset=True, *, on_mismatch='raise', + verbose=None): + """Concatenate a list of `~mne.Epochs` into one `~mne.Epochs` object. + + .. note:: Unlike `~mne.concatenate_raws`, this function does **not** + modify any of the input data. + + Parameters + ---------- + epochs_list : list + List of `~mne.Epochs` instances to concatenate (in that order). + add_offset : bool + If True, a fixed offset is added to the event times from different + Epochs sets, such that they are easy to distinguish after the + concatenation. + If False, the event times are unaltered during the concatenation. + %(on_mismatch_info)s + %(verbose)s + + .. versionadded:: 0.24 + + Returns + ------- + epochs : instance of Epochs + The result of the concatenation. + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + return _finish_concat(*_concatenate_epochs(epochs_list, + add_offset=add_offset, + on_mismatch=on_mismatch)) + + +@verbose +def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, + origin='auto', weight_all=True, int_order=8, ext_order=3, + destination=None, ignore_ref=False, return_mapping=False, + mag_scale=100., verbose=None): + """Average data using Maxwell filtering, transforming using head positions. + + Parameters + ---------- + epochs : instance of Epochs + The epochs to operate on. + %(head_pos_maxwell)s + orig_sfreq : float | None + The original sample frequency of the data (that matches the + event sample numbers in ``epochs.events``). Can be ``None`` + if data have not been decimated or resampled. + %(picks_all_data)s + %(origin_maxwell)s + weight_all : bool + If True, all channels are weighted by the SSS basis weights. + If False, only MEG channels are weighted, other channels + receive uniform weight per epoch. + %(int_order_maxwell)s + %(ext_order_maxwell)s + %(destination_maxwell_dest)s + %(ignore_ref_maxwell)s + return_mapping : bool + If True, return the mapping matrix. + %(mag_scale_maxwell)s + + .. versionadded:: 0.13 + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The averaged epochs. + + See Also + -------- + mne.preprocessing.maxwell_filter + mne.chpi.read_head_pos + + Notes + ----- + The Maxwell filtering version of this algorithm is described in [1]_, + in section V.B "Virtual signals and movement correction", equations + 40-44. For additional validation, see [2]_. + + Regularization has not been added because in testing it appears to + decrease dipole localization accuracy relative to using all components. + Fine calibration and cross-talk cancellation, however, could be added + to this algorithm based on user demand. + + .. versionadded:: 0.11 + + References + ---------- + .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic + multichannel data: The signal space separation method," + Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005. + .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements + of children in MEG: Quantification, effects on source + estimation, and compensation. NeuroImage 40:541–550, 2008. 
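+
+    Examples
+    --------
+    A minimal sketch; ``epochs`` is assumed to exist and the head-position
+    file name is hypothetical::
+
+        >>> from mne.chpi import read_head_pos
+        >>> head_pos = read_head_pos('run01_head_pos.pos')  # doctest: +SKIP
+        >>> evoked = average_movements(epochs, head_pos=head_pos)  # doctest: +SKIP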
+ """ # noqa: E501 + from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads, + _check_usable, _col_norm_pinv, + _get_n_moments, _get_mf_picks_fix_mags, + _prep_mf_coils, _check_destination, + _remove_meg_projs, _get_coil_scale) + if head_pos is None: + raise TypeError('head_pos must be provided and cannot be None') + from .chpi import head_pos_to_trans_rot_t + if not isinstance(epochs, BaseEpochs): + raise TypeError('epochs must be an instance of Epochs, not %s' + % (type(epochs),)) + orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq + orig_sfreq = float(orig_sfreq) + if isinstance(head_pos, np.ndarray): + head_pos = head_pos_to_trans_rot_t(head_pos) + trn, rot, t = head_pos + del head_pos + _check_usable(epochs) + origin = _check_origin(origin, epochs.info, 'head') + recon_trans = _check_destination(destination, epochs.info, True) + + logger.info('Aligning and averaging up to %s epochs' + % (len(epochs.events))) + if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])): + raise RuntimeError('Epochs must have monotonically increasing events') + info_to = epochs.info.copy() + meg_picks, mag_picks, grad_picks, good_mask, _ = \ + _get_mf_picks_fix_mags(info_to, int_order, ext_order, ignore_ref) + coil_scale, mag_scale = _get_coil_scale( + meg_picks, mag_picks, grad_picks, mag_scale, info_to) + n_channels, n_times = len(epochs.ch_names), len(epochs.times) + other_picks = np.setdiff1d(np.arange(n_channels), meg_picks) + data = np.zeros((n_channels, n_times)) + count = 0 + # keep only MEG w/bad channels marked in "info_from" + info_from = pick_info(info_to, meg_picks[good_mask], copy=True) + all_coils_recon = _prep_mf_coils(info_to, ignore_ref=ignore_ref) + all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref) + # remove MEG bads in "to" info + _reset_meg_bads(info_to) + # set up variables + w_sum = 0. + n_in, n_out = _get_n_moments([int_order, ext_order]) + S_decomp = 0. # this will end up being a weighted average + last_trans = None + decomp_coil_scale = coil_scale[good_mask] + exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True, + origin=origin) + n_in = _get_n_moments(int_order) + for ei, epoch in enumerate(epochs): + event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq + use_idx = np.where(t <= event_time)[0] + if len(use_idx) == 0: + trans = info_to['dev_head_t']['trans'] + else: + use_idx = use_idx[-1] + trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]), + [[0., 0., 0., 1.]]]) + loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000)) + if last_trans is None or not np.allclose(last_trans, trans): + logger.info(' Processing epoch %s (device location: %s mm)' + % (ei + 1, loc_str)) + reuse = False + last_trans = trans + else: + logger.info(' Processing epoch %s (device location: same)' + % (ei + 1,)) + reuse = True + epoch = epoch.copy() # because we operate inplace + if not reuse: + S = _trans_sss_basis(exp, all_coils, trans, + coil_scale=decomp_coil_scale) + # Get the weight from the un-regularized version (eq. 44) + weight = np.linalg.norm(S[:, :n_in]) + # XXX Eventually we could do cross-talk and fine-cal here + S *= weight + S_decomp += S # eq. 41 + epoch[slice(None) if weight_all else meg_picks] *= weight + data += epoch # eq. 
42 + w_sum += weight + count += 1 + del info_from + mapping = None + if count == 0: + data.fill(np.nan) + else: + data[meg_picks] /= w_sum + data[other_picks] /= w_sum if weight_all else count + # Finalize weighted average decomp matrix + S_decomp /= w_sum + # Get recon matrix + # (We would need to include external here for regularization to work) + exp['ext_order'] = 0 + S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans) + exp['ext_order'] = ext_order + # We could determine regularization on basis of destination basis + # matrix, restricted to good channels, as regularizing individual + # matrices within the loop above does not seem to work. But in + # testing this seemed to decrease localization quality in most cases, + # so we do not provide the option here. + S_recon /= coil_scale + # Invert + pS_ave = _col_norm_pinv(S_decomp)[0][:n_in] + pS_ave *= decomp_coil_scale.T + # Get mapping matrix + mapping = np.dot(S_recon, pS_ave) + # Apply mapping + data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]]) + info_to['dev_head_t'] = recon_trans # set the reconstruction transform + evoked = epochs._evoked_from_epoch_data(data, info_to, picks, + n_events=count, kind='average', + comment=epochs._name) + _remove_meg_projs(evoked) # remove MEG projectors, they won't apply now + logger.info('Created Evoked dataset from %s epochs' % (count,)) + return (evoked, mapping) if return_mapping else evoked + + +@verbose +def make_fixed_length_epochs(raw, duration=1., preload=False, + reject_by_annotation=True, proj=True, overlap=0., + id=1, verbose=None): + """Divide continuous raw data into equal-sized consecutive epochs. + + Parameters + ---------- + raw : instance of Raw + Raw data to divide into segments. + duration : float + Duration of each epoch in seconds. Defaults to 1. + %(preload)s + %(reject_by_annotation_epochs)s + + .. versionadded:: 0.21.0 + %(proj_epochs)s + + .. versionadded:: 0.22.0 + overlap : float + The overlap between epochs, in seconds. Must be + ``0 <= overlap < duration``. Default is 0, i.e., no overlap. + + .. versionadded:: 0.23.0 + id : int + The id to use (default 1). + + .. versionadded:: 0.24.0 + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + Segmented data. + + Notes + ----- + .. versionadded:: 0.20 + """ + events = make_fixed_length_events(raw, id=id, duration=duration, + overlap=overlap) + delta = 1. / raw.info['sfreq'] + return Epochs(raw, events, event_id=[id], tmin=0, tmax=duration - delta, + baseline=None, preload=preload, + reject_by_annotation=reject_by_annotation, proj=proj, + verbose=verbose) diff --git a/python/libs/mne/event.py b/python/libs/mne/event.py new file mode 100644 index 0000000..5aee3ba --- /dev/null +++ b/python/libs/mne/event.py @@ -0,0 +1,1510 @@ +"""IO with fif files containing events.""" + +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Teon Brooks +# Clement Moutard +# +# License: BSD-3-Clause + +import os.path as op +from collections.abc import Sequence + +import numpy as np + +from .utils import (check_fname, logger, verbose, _get_stim_channel, warn, + _validate_type, _check_option, fill_doc, _check_fname, + _on_missing, _check_on_missing) +from .io.constants import FIFF +from .io.tree import dir_tree_find +from .io.tag import read_tag +from .io.open import fiff_open +from .io.write import write_int, start_block, start_and_end_file, end_block +from .io.pick import pick_channels + + +@fill_doc +def pick_events(events, include=None, exclude=None, step=False): + """Select some :term:`events`. 
+
+    Parameters
+    ----------
+    %(events)s
+    include : int | list | None
+        An event ID to include or a list of them.
+        If None, all events are included.
+    exclude : int | list | None
+        An event ID to exclude or a list of them.
+        If None, no event is excluded. If include is not None,
+        the exclude parameter is ignored.
+    step : bool
+        If True (default is False), events have a step format according
+        to the argument output='step' in the function find_events().
+        In this case, the last two columns are considered in inclusion/
+        exclusion criteria.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        The list of events.
+    """
+    if include is not None:
+        if not isinstance(include, list):
+            include = [include]
+        mask = np.zeros(len(events), dtype=bool)
+        for e in include:
+            mask = np.logical_or(mask, events[:, 2] == e)
+            if step:
+                mask = np.logical_or(mask, events[:, 1] == e)
+        events = events[mask]
+    elif exclude is not None:
+        if not isinstance(exclude, list):
+            exclude = [exclude]
+        mask = np.ones(len(events), dtype=bool)
+        for e in exclude:
+            mask = np.logical_and(mask, events[:, 2] != e)
+            if step:
+                mask = np.logical_and(mask, events[:, 1] != e)
+        events = events[mask]
+    else:
+        events = np.copy(events)
+
+    if len(events) == 0:
+        raise RuntimeError("No events found")
+
+    return events
+
+
+def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
+                         new_id=None, fill_na=None):
+    """Define new events by co-occurrence of existing events.
+
+    This function can be used to evaluate events depending on the
+    temporal lag to another event. For example, this can be used to
+    analyze evoked responses which were followed by a button press within
+    a defined time window.
+
+    Parameters
+    ----------
+    events : ndarray
+        Array as returned by mne.find_events.
+    reference_id : int
+        The reference event. The event defining the epoch of interest.
+    target_id : int
+        The target event. The event co-occurring within a certain time
+        window around the reference event.
+    sfreq : float
+        The sampling frequency of the data.
+    tmin : float
+        The lower limit in seconds from the target event.
+    tmax : float
+        The upper limit in seconds from the target event.
+    new_id : int
+        New ID for the new event.
+    fill_na : int | None
+        Event ID to insert if the target is not available within the
+        specified time window. If None, the 'null' events will be dropped.
+
+    Returns
+    -------
+    new_events : ndarray
+        The newly defined events.
+    lag : ndarray
+        Time lag between reference and target in milliseconds.
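+
+    Examples
+    --------
+    For example, to keep only those reference events (hypothetical ID 1)
+    that were followed by a target event (ID 2) within 0.1-0.7 s, relabeling
+    them to a new ID 42::
+
+        >>> new_events, lag = define_target_events(
+        ...     events, reference_id=1, target_id=2, sfreq=raw.info['sfreq'],
+        ...     tmin=0.1, tmax=0.7, new_id=42)  # doctest: +SKIP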
+ """ + if new_id is None: + new_id = reference_id + + tsample = 1e3 / sfreq + imin = int(tmin * sfreq) + imax = int(tmax * sfreq) + + new_events = [] + lag = [] + for event in events.copy().astype(int): + if event[2] == reference_id: + lower = event[0] + imin + upper = event[0] + imax + res = events[(events[:, 0] > lower) & + (events[:, 0] < upper) & (events[:, 2] == target_id)] + if res.any(): + lag += [event[0] - res[0][0]] + event[2] = new_id + new_events += [event] + elif fill_na is not None: + event[2] = fill_na + new_events += [event] + lag.append(np.nan) + + new_events = np.array(new_events) + + with np.errstate(invalid='ignore'): # casting nans + lag = np.abs(lag, dtype='f8') + if lag.any(): + lag *= tsample + else: + lag = np.array([]) + + return new_events if new_events.any() else np.array([]), lag + + +def _read_events_fif(fid, tree): + """Aux function.""" + # Find the desired block + events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS) + + if len(events) == 0: + fid.close() + raise ValueError('Could not find event data') + + events = events[0] + event_list = None + event_id = None + for d in events['directory']: + kind = d.kind + pos = d.pos + if kind == FIFF.FIFF_MNE_EVENT_LIST: + tag = read_tag(fid, pos) + event_list = tag.data + event_list.shape = (-1, 3) + break + if event_list is None: + raise ValueError('Could not find any events') + for d in events['directory']: + kind = d.kind + pos = d.pos + if kind == FIFF.FIFF_DESCRIPTION: + tag = read_tag(fid, pos) + event_id = tag.data + m_ = [[s[::-1] for s in m[::-1].split(':', 1)] + for m in event_id.split(';')] + event_id = {k: int(v) for v, k in m_} + break + elif kind == FIFF.FIFF_MNE_EVENT_COMMENTS: + tag = read_tag(fid, pos) + event_id = tag.data + event_id = event_id.tobytes().decode('latin-1').split('\x00')[:-1] + assert len(event_id) == len(event_list) + event_id = {k: v[2] for k, v in zip(event_id, event_list)} + break + return event_list, event_id + + +@verbose +def read_events(filename, include=None, exclude=None, mask=None, + mask_type='and', return_event_id=False, verbose=None): + """Read :term:`events` from fif or text file. + + See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` + for more information about events. + + Parameters + ---------- + filename : str + Name of the input file. + If the extension is .fif, events are read assuming + the file is in FIF format, otherwise (e.g., .eve, + .lst, .txt) events are read as coming from text. + Note that new format event files do not contain + the "time" column (used to be the second column). + include : int | list | None + A event id to include or a list of them. + If None all events are included. + exclude : int | list | None + A event id to exclude or a list of them. + If None no event is excluded. If include is not None + the exclude parameter is ignored. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. + mask_type : 'and' | 'not_and' + The type of operation between the mask and the trigger. + Choose 'and' (default) for MNE-C masking behavior. + + .. versionadded:: 0.13 + return_event_id : bool + If True, ``event_id`` will be returned. This is only possible for + ``-annot.fif`` files produced with MNE-C ``mne_browse_raw``. + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(events)s + event_id : dict + Dictionary of ``{str: int}`` mappings of event IDs. 
+ + See Also + -------- + find_events, write_events + + Notes + ----- + This function will discard the offset line (i.e., first line with zero + event number) if it is present in a text file. + + For more information on ``mask`` and ``mask_type``, see + :func:`mne.find_events`. + """ + check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz', + '-eve.lst', '-eve.txt', '_eve.fif', + '_eve.fif.gz', '_eve.lst', '_eve.txt', + '-annot.fif', # MNE-C annot + )) + + ext = op.splitext(filename)[1].lower() + if ext == '.fif' or ext == '.gz': + fid, tree, _ = fiff_open(filename) + with fid as f: + event_list, event_id = _read_events_fif(f, tree) + # hack fix for windows to avoid bincount problems + event_list = event_list.astype(int) + else: + # Have to read this in as float64 then convert because old style + # eve/lst files had a second float column that will raise errors + lines = np.loadtxt(filename, dtype=np.float64).astype(int) + if len(lines) == 0: + raise ValueError('No text lines found') + + if lines.ndim == 1: # Special case for only one event + lines = lines[np.newaxis, :] + + if len(lines[0]) == 4: # Old format eve/lst + goods = [0, 2, 3] # Omit "time" variable + elif len(lines[0]) == 3: + goods = [0, 1, 2] + else: + raise ValueError('Unknown number of columns in event text file') + + event_list = lines[:, goods] + if (mask is not None and event_list.shape[0] > 0 and + event_list[0, 2] == 0): + event_list = event_list[1:] + warn('first row of event file discarded (zero-valued)') + event_id = None + + event_list = pick_events(event_list, include, exclude) + unmasked_len = event_list.shape[0] + if mask is not None: + event_list = _mask_trigs(event_list, mask, mask_type) + masked_len = event_list.shape[0] + if masked_len < unmasked_len: + warn('{} of {} events masked'.format(unmasked_len - masked_len, + unmasked_len)) + out = event_list + if return_event_id: + if event_id is None: + raise RuntimeError('No event_id found in the file') + out = (out, event_id) + return out + + +@verbose +def write_events(filename, events, *, overwrite=False, event_list=None, + verbose=None): + """Write :term:`events` to file. + + Parameters + ---------- + filename : str + Name of the output file. + If the extension is .fif, events are written in + binary FIF format, otherwise (e.g., .eve, .lst, + .txt) events are written as plain text. + Note that new format event files do not contain + the "time" column (used to be the second column). + %(events)s + %(overwrite)s + event_list : array, shape (n_events, 3) + Deprecated, use argument events instead. + %(verbose)s + + See Also + -------- + read_events + """ + if event_list is not None: + warn('Argument "event_list" is deprecated, use "events" instead.', + DeprecationWarning) + events = event_list + del event_list + + filename = _check_fname(filename, overwrite=overwrite) + check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz', + '-eve.lst', '-eve.txt', '_eve.fif', + '_eve.fif.gz', '_eve.lst', '_eve.txt')) + ext = op.splitext(filename)[1].lower() + if ext in ('.fif', '.gz'): + # Start writing... 
+ with start_and_end_file(filename) as fid: + start_block(fid, FIFF.FIFFB_MNE_EVENTS) + write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, events.T) + end_block(fid, FIFF.FIFFB_MNE_EVENTS) + else: + with open(filename, 'w') as f: + for e in events: + f.write('%6d %6d %3d\n' % tuple(e)) + + +def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0): + changed = np.diff(data, axis=1) != 0 + idx = np.where(np.all(changed, axis=0))[0] + if len(idx) == 0: + return np.empty((0, 3), dtype='int32') + + pre_step = data[0, idx] + idx += 1 + post_step = data[0, idx] + idx += first_samp + steps = np.c_[idx, pre_step, post_step] + + if pad_start is not None: + v = steps[0, 1] + if v != pad_start: + steps = np.insert(steps, 0, [0, pad_start, v], axis=0) + + if pad_stop is not None: + v = steps[-1, 2] + if v != pad_stop: + last_idx = len(data[0]) + first_samp + steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0) + + if merge != 0: + diff = np.diff(steps[:, 0]) + idx = (diff <= abs(merge)) + if np.any(idx): + where = np.where(idx)[0] + keep = np.logical_not(idx) + if merge > 0: + # drop the earlier event + steps[where + 1, 1] = steps[where, 1] + keep = np.append(keep, True) + else: + # drop the later event + steps[where, 2] = steps[where + 1, 2] + keep = np.insert(keep, 0, True) + + is_step = (steps[:, 1] != steps[:, 2]) + keep = np.logical_and(keep, is_step) + steps = steps[keep] + + return steps + + +def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, + stim_channel=None): + """Find all steps in data from a stim channel. + + Parameters + ---------- + raw : Raw object + The raw data. + pad_start : None | int + Values to assume outside of the stim channel (e.g., if pad_start=0 and + the stim channel starts with value 5, an event of [0, 0, 5] will be + inserted at the beginning). With None, no steps will be inserted. + pad_stop : None | int + Values to assume outside of the stim channel, see ``pad_start``. + merge : int + Merge steps occurring in neighboring samples. The integer value + indicates over how many samples events should be merged, and the sign + indicates in which direction they should be merged (negative means + towards the earlier event, positive towards the later event). + stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by the trigger. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will default to + 'STI101' or 'STI 014', whichever is present. + + Returns + ------- + steps : array, shape = (n_samples, 3) + For each step in the stim channel the values [sample, v_from, v_to]. + The first column contains the event time in samples (the first sample + with the new value). The second column contains the stim channel value + before the step, and the third column contains value after the step. + + See Also + -------- + find_events : More sophisticated options for finding events in a Raw file. 
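+
+    Examples
+    --------
+    A minimal sketch, assuming ``raw`` is any loaded Raw instance with a
+    stim channel::
+
+        steps = find_stim_steps(raw, pad_start=0, pad_stop=0)
+        # each row is [sample, value_before_step, value_after_step]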
+ """ + # pull stim channel from config if necessary + stim_channel = _get_stim_channel(stim_channel, raw.info) + + picks = pick_channels(raw.info['ch_names'], include=stim_channel) + if len(picks) == 0: + raise ValueError('No stim channel found to extract event triggers.') + data, _ = raw[picks, :] + if np.any(data < 0): + warn('Trigger channel contains negative values, using absolute value.') + data = np.abs(data) # make sure trig channel is positive + data = data.astype(np.int64) + + return _find_stim_steps(data, raw.first_samp, pad_start=pad_start, + pad_stop=pad_stop, merge=merge) + + +@verbose +def _find_events(data, first_samp, verbose=None, output='onset', + consecutive='increasing', min_samples=0, mask=None, + uint_cast=False, mask_type='and', initial_event=False): + """Help find events.""" + assert data.shape[0] == 1 # data should be only a row vector + + if min_samples > 0: + merge = int(min_samples // 1) + if merge == min_samples: + merge -= 1 + else: + merge = 0 + + data = data.astype(np.int64) + if uint_cast: + data = data.astype(np.uint16).astype(np.int64) + if data.min() < 0: + warn('Trigger channel contains negative values, using absolute ' + 'value. If data were acquired on a Neuromag system with ' + 'STI016 active, consider using uint_cast=True to work around ' + 'an acquisition bug') + data = np.abs(data) # make sure trig channel is positive + + events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge) + initial_value = data[0, 0] + if initial_value != 0: + if initial_event: + events = np.insert( + events, 0, [first_samp, 0, initial_value], axis=0) + else: + logger.info('Trigger channel has a non-zero initial value of {} ' + '(consider using initial_event=True to detect this ' + 'event)'.format(initial_value)) + + events = _mask_trigs(events, mask, mask_type) + + # Determine event onsets and offsets + if consecutive == 'increasing': + onsets = (events[:, 2] > events[:, 1]) + offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)), + (events[:, 1] > 0)) + elif consecutive: + onsets = (events[:, 2] > 0) + offsets = (events[:, 1] > 0) + else: + onsets = (events[:, 1] == 0) + offsets = (events[:, 2] == 0) + + onset_idx = np.where(onsets)[0] + offset_idx = np.where(offsets)[0] + + if len(onset_idx) == 0 or len(offset_idx) == 0: + return np.empty((0, 3), dtype='int32') + + # delete orphaned onsets/offsets + if onset_idx[0] > offset_idx[0]: + logger.info("Removing orphaned offset at the beginning of the file.") + offset_idx = np.delete(offset_idx, 0) + + if onset_idx[-1] > offset_idx[-1]: + logger.info("Removing orphaned onset at the end of the file.") + onset_idx = np.delete(onset_idx, -1) + + if output == 'onset': + events = events[onset_idx] + elif output == 'step': + idx = np.union1d(onset_idx, offset_idx) + events = events[idx] + elif output == 'offset': + event_id = events[onset_idx, 2] + events = events[offset_idx] + events[:, 1] = events[:, 2] + events[:, 2] = event_id + events[:, 0] -= 1 + else: + raise ValueError("Invalid output parameter %r" % output) + + logger.info("%s events found" % len(events)) + logger.info("Event IDs: %s" % np.unique(events[:, 2])) + + return events + + +def _find_unique_events(events): + """Uniquify events (ie remove duplicated rows.""" + e = np.ascontiguousarray(events).view( + np.dtype((np.void, events.dtype.itemsize * events.shape[1]))) + _, idx = np.unique(e, return_index=True) + n_dupes = len(events) - len(idx) + if n_dupes > 0: + warn("Some events are duplicated in your different stim channels." 
+ " %d events were ignored during deduplication." % n_dupes) + return events[idx] + + +@verbose +def find_events(raw, stim_channel=None, output='onset', + consecutive='increasing', min_duration=0, + shortest_event=2, mask=None, uint_cast=False, + mask_type='and', initial_event=False, verbose=None): + """Find :term:`events` from raw file. + + See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` + for more information about events. + + Parameters + ---------- + raw : Raw object + The raw data. + stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by triggers. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will fall back to + 'STI 014' if present, then fall back to the first channel of type + 'stim', if present. If multiple channels are provided + then the returned events are the union of all the events + extracted from individual stim channels. + output : 'onset' | 'offset' | 'step' + Whether to report when events start, when events end, or both. + consecutive : bool | 'increasing' + If True, consider instances where the value of the events + channel changes without first returning to zero as multiple + events. If False, report only instances where the value of the + events channel changes from/to zero. If 'increasing', report + adjacent events only when the second event code is greater than + the first. + min_duration : float + The minimum duration of a change in the events channel required + to consider it as an event (in seconds). + shortest_event : int + Minimum number of samples an event must last (default is 2). If the + duration is less than this an exception will be raised. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. + uint_cast : bool + If True (default False), do a cast to ``uint16`` on the channel + data. This can be used to fix a bug with STI101 and STI014 in + Neuromag acquisition setups that use channel STI016 (channel 16 + turns data into e.g. -32768), similar to ``mne_fix_stim14 --32`` + in MNE-C. + + .. versionadded:: 0.12 + mask_type : 'and' | 'not_and' + The type of operation between the mask and the trigger. + Choose 'and' (default) for MNE-C masking behavior. + + .. versionadded:: 0.13 + initial_event : bool + If True (default False), an event is created if the stim channel has a + value different from 0 as its first sample. This is useful if an event + at t=0s is present. + + .. versionadded:: 0.16 + %(verbose)s + + Returns + ------- + %(events)s + + See Also + -------- + find_stim_steps : Find all the steps in the stim channel. + read_events : Read events from disk. + write_events : Write events to disk. + + Notes + ----- + .. warning:: If you are working with downsampled data, events computed + before decimation are no longer valid. Please recompute + your events after decimation, but note this reduces the + precision of event timing. 
+ + Examples + -------- + Consider data with a stim channel that looks like:: + + [0, 32, 32, 33, 32, 0] + + By default, find_events returns all samples at which the value of the + stim channel increases:: + + >>> print(find_events(raw)) # doctest: +SKIP + [[ 1 0 32] + [ 3 32 33]] + + If consecutive is False, find_events only returns the samples at which + the stim channel changes from zero to a non-zero value:: + + >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP + [[ 1 0 32]] + + If consecutive is True, find_events returns samples at which the + event changes, regardless of whether it first returns to zero:: + + >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP + [[ 1 0 32] + [ 3 32 33] + [ 4 33 32]] + + If output is 'offset', find_events returns the last sample of each event + instead of the first one:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... output='offset')) + [[ 2 33 32] + [ 3 32 33] + [ 4 0 32]] + + If output is 'step', find_events returns the samples at which an event + starts or ends:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... output='step')) + [[ 1 0 32] + [ 3 32 33] + [ 4 33 32] + [ 5 32 0]] + + To ignore spurious events, it is also possible to specify a minimum + event duration. Assuming our events channel has a sample rate of + 1000 Hz:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... min_duration=0.002)) + [[ 1 0 32]] + + For the digital mask, if mask_type is set to 'and' it will take the + binary representation of the digital mask, e.g. 5 -> '00000101', and will + allow the values to pass where mask is one, e.g.:: + + 7 '0000111' <- trigger value + 37 '0100101' <- mask + ---------------- + 5 '0000101' + + For the digital mask, if mask_type is set to 'not_and' it will take the + binary representation of the digital mask, e.g. 5 -> '00000101', and will + block the values where mask is one, e.g.:: + + 7 '0000111' <- trigger value + 37 '0100101' <- mask + ---------------- + 2 '0000010' + """ + min_samples = min_duration * raw.info['sfreq'] + + # pull stim channel from config if necessary + try: + stim_channel = _get_stim_channel(stim_channel, raw.info) + except ValueError: + if len(raw.annotations) > 0: + raise ValueError("No stim channels found, but the raw object has " + "annotations. Consider using " + "mne.events_from_annotations to convert these to " + "events.") + else: + raise + + picks = pick_channels(raw.info['ch_names'], include=stim_channel) + if len(picks) == 0: + raise ValueError('No stim channel found to extract event triggers.') + data, _ = raw[picks, :] + + events_list = [] + for d in data: + events = _find_events(d[np.newaxis, :], raw.first_samp, + verbose=verbose, output=output, + consecutive=consecutive, min_samples=min_samples, + mask=mask, uint_cast=uint_cast, + mask_type=mask_type, initial_event=initial_event) + # add safety check for spurious events (for ex. from neuromag syst.) by + # checking the number of low sample events + n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event) + if n_short_events > 0: + raise ValueError("You have %i events shorter than the " + "shortest_event. These are very unusual and you " + "may want to set min_duration to a larger value " + "e.g. x / raw.info['sfreq']. Where x = 1 sample " + "shorter than the shortest event " + "length." 
+                             % (n_short_events))
+
+        events_list.append(events)
+
+    events = np.concatenate(events_list, axis=0)
+    events = _find_unique_events(events)
+    events = events[np.argsort(events[:, 0])]
+    return events
+
+
+def _mask_trigs(events, mask, mask_type):
+    """Mask digital trigger values."""
+    _check_option('mask_type', mask_type, ['not_and', 'and'])
+    if mask is not None:
+        _validate_type(mask, "int", "mask", "int or None")
+    n_events = len(events)
+    if n_events == 0:
+        return events.copy()
+
+    if mask is not None:
+        if mask_type == 'not_and':
+            mask = np.bitwise_not(mask)
+        elif mask_type != 'and':
+            raise ValueError("'mask_type' should be either 'and'"
+                             " or 'not_and', instead of '%s'" % mask_type)
+        events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
+    events = events[events[:, 1] != events[:, 2]]
+
+    return events
+
+
+def merge_events(events, ids, new_id, replace_events=True):
+    """Merge a set of :term:`events`.
+
+    Parameters
+    ----------
+    events : array, shape (n_events_in, 3)
+        Events.
+    ids : array of int
+        The ids of events to merge.
+    new_id : int
+        The new id.
+    replace_events : bool
+        If True (default), old event ids are replaced. Otherwise,
+        new events will be added to the old event list.
+
+    Returns
+    -------
+    new_events : array, shape (n_events_out, 3)
+        The new events.
+
+    Notes
+    -----
+    Rather than merging events you can use hierarchical event_id
+    in Epochs. For example, here::
+
+        >>> event_id = {'auditory/left': 1, 'auditory/right': 2}
+
+    And the condition 'auditory' would correspond to either 1 or 2.
+
+    Examples
+    --------
+    Here is a quick example of the behavior::
+
+        >>> events = [[134, 0, 1], [341, 0, 2], [502, 0, 3]]
+        >>> merge_events(events, [1, 2], 12, replace_events=True)
+        array([[134,   0,  12],
+               [341,   0,  12],
+               [502,   0,   3]])
+        >>> merge_events(events, [1, 2], 12, replace_events=False)
+        array([[134,   0,   1],
+               [134,   0,  12],
+               [341,   0,   2],
+               [341,   0,  12],
+               [502,   0,   3]])
+    """
+    events = np.asarray(events)
+    events_out = events.copy()
+    idx_touched = []  # to keep track of the original events we can keep
+    for col in [1, 2]:
+        for i in ids:
+            mask = events[:, col] == i
+            events_out[mask, col] = new_id
+            idx_touched.append(np.where(mask)[0])
+    if not replace_events:
+        idx_touched = np.unique(np.concatenate(idx_touched))
+        events_out = np.concatenate((events_out, events[idx_touched]), axis=0)
+    # Now sort in lexical order
+    events_out = events_out[np.lexsort(events_out.T[::-1])]
+    return events_out
+
+
+@fill_doc
+def shift_time_events(events, ids, tshift, sfreq):
+    """Shift a set of :term:`events`.
+
+    Parameters
+    ----------
+    %(events)s
+    ids : ndarray of int | None
+        The ids of events to shift.
+    tshift : float
+        Time shift to apply (in seconds). Use a positive value to shift
+        events forward, and a negative value to shift them backward.
+    sfreq : float
+        The sampling frequency of the data.
+
+    Returns
+    -------
+    new_events : array of int, shape (n_new_events, 3)
+        The new events.
+    """
+    events = events.copy()
+    if ids is None:
+        mask = slice(None)
+    else:
+        mask = np.in1d(events[:, 2], ids)
+    events[mask, 0] += int(tshift * sfreq)
+
+    return events
+
+
+@fill_doc
+def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1.,
+                             first_samp=True, overlap=0.):
+    """Make a set of :term:`events` separated by a fixed duration.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object to use the data from.
+    id : int
+        The id to use (default 1).
+    start : float
+        Time of first event (in seconds).
+    stop : float | None
+        Maximum time of last event (in seconds).
If None, events extend to the + end of the recording. + duration : float + The duration to separate events by (in seconds). + first_samp : bool + If True (default), times will have :term:`first_samp` added to them, as + in :func:`mne.find_events`. This behavior is not desirable if the + returned events will be combined with event times that already + have :term:`first_samp` added to them, e.g. event times that come + from :func:`mne.find_events`. + overlap : float + The overlap between events (in seconds). + Must be ``0 <= overlap < duration``. + + .. versionadded:: 0.18 + + Returns + ------- + %(events)s + """ + from .io.base import BaseRaw + _validate_type(raw, BaseRaw, "raw") + _validate_type(id, int, "id") + _validate_type(duration, "numeric", "duration") + _validate_type(overlap, "numeric", "overlap") + duration, overlap = float(duration), float(overlap) + if not 0 <= overlap < duration: + raise ValueError('overlap must be >=0 but < duration (%s), got %s' + % (duration, overlap)) + + start = raw.time_as_index(start, use_rounding=True)[0] + if stop is not None: + stop = raw.time_as_index(stop, use_rounding=True)[0] + else: + stop = raw.last_samp + 1 + if first_samp: + start = start + raw.first_samp + stop = min([stop + raw.first_samp, raw.last_samp + 1]) + else: + stop = min([stop, len(raw.times)]) + # Make sure we don't go out the end of the file: + stop -= int(np.round(raw.info['sfreq'] * duration)) + # This should be inclusive due to how we generally use start and stop... + ts = np.arange(start, stop + 1, + raw.info['sfreq'] * (duration - overlap)).astype(int) + n_events = len(ts) + if n_events == 0: + raise ValueError('No events produced, check the values of start, ' + 'stop, and duration') + events = np.c_[ts, np.zeros(n_events, dtype=int), + id * np.ones(n_events, dtype=int)] + return events + + +def concatenate_events(events, first_samps, last_samps): + """Concatenate event lists to be compatible with concatenate_raws. + + This is useful, for example, if you processed and/or changed + events in raw files separately before combining them using + :func:`mne.concatenate_raws`. + + Parameters + ---------- + events : list of array + List of :term:`events` arrays, typically each extracted from a + corresponding raw file that is being concatenated. + first_samps : list or array of int + First sample numbers of the raw files concatenated. + last_samps : list or array of int + Last sample numbers of the raw files concatenated. + + Returns + ------- + events : array + The concatenated events. + + See Also + -------- + mne.concatenate_raws + """ + _validate_type(events, list, "events") + if not (len(events) == len(last_samps) and + len(events) == len(first_samps)): + raise ValueError('events, first_samps, and last_samps must all have ' + 'the same lengths') + first_samps = np.array(first_samps) + last_samps = np.array(last_samps) + n_samps = np.cumsum(last_samps - first_samps + 1) + events_out = events[0] + for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]): + # remove any skip since it doesn't exist in concatenated files + e2 = e.copy() + e2[:, 0] -= f + # add offset due to previous files, plus original file offset + e2[:, 0] += n + first_samps[0] + events_out = np.concatenate((events_out, e2), axis=0) + + return events_out + + +@fill_doc +class AcqParserFIF(object): + """Parser for Elekta data acquisition settings. + + This class parses parameters (e.g. 
events and averaging categories) that + are defined in the Elekta TRIUX/VectorView data acquisition software (DACQ) + and stored in ``info['acq_pars']``. It can be used to reaverage raw data + according to DACQ settings and modify original averaging settings if + necessary. + + Parameters + ---------- + %(info_not_none)s This is where the DACQ parameters will be taken from. + + Attributes + ---------- + categories : list + List of averaging categories marked active in DACQ. + events : list + List of events that are in use (referenced by some averaging category). + reject : dict + Rejection criteria from DACQ that can be used with mne.Epochs. + Note that mne does not support all DACQ rejection criteria + (e.g. spike, slope). + flat : dict + Flatness rejection criteria from DACQ that can be used with mne.Epochs. + acq_dict : dict + All DACQ parameters. + + See Also + -------- + mne.io.Raw.acqparser : Access the parser through a Raw attribute. + + Notes + ----- + Any averaging category (also non-active ones) can be accessed by indexing + as ``acqparserfif['category_name']``. + """ + + # DACQ variables always start with one of these + _acq_var_magic = ['ERF', 'DEF', 'ACQ', 'TCP'] + + # averager related DACQ variable names (without preceding 'ERF') + # old versions (DACQ < 3.4) + _dacq_vars_compat = ('megMax', 'megMin', 'megNoise', 'megSlope', + 'megSpike', 'eegMax', 'eegMin', 'eegNoise', + 'eegSlope', 'eegSpike', 'eogMax', 'ecgMax', 'ncateg', + 'nevent', 'stimSource', 'triggerMap', 'update', + 'artefIgnore', 'averUpdate') + + _event_vars_compat = ('Comment', 'Delay') + + _cat_vars = ('Comment', 'Display', 'Start', 'State', 'End', 'Event', + 'Nave', 'ReqEvent', 'ReqWhen', 'ReqWithin', 'SubAve') + + # new versions only (DACQ >= 3.4) + _dacq_vars = _dacq_vars_compat + ('magMax', 'magMin', 'magNoise', + 'magSlope', 'magSpike', 'version') + + _event_vars = _event_vars_compat + ('Name', 'Channel', 'NewBits', + 'OldBits', 'NewMask', 'OldMask') + + def __init__(self, info): # noqa: D102 + acq_pars = info['acq_pars'] + if not acq_pars: + raise ValueError('No acquisition parameters') + self.acq_dict = dict(self._acqpars_gen(acq_pars)) + if 'ERFversion' in self.acq_dict: + self.compat = False # DACQ ver >= 3.4 + elif 'ERFncateg' in self.acq_dict: # probably DACQ < 3.4 + self.compat = True + else: + raise ValueError('Cannot parse acquisition parameters') + dacq_vars = self._dacq_vars_compat if self.compat else self._dacq_vars + # set instance variables + for var in dacq_vars: + val = self.acq_dict['ERF' + var] + if var[:3] in ['mag', 'meg', 'eeg', 'eog', 'ecg']: + val = float(val) + elif var in ['ncateg', 'nevent']: + val = int(val) + setattr(self, var.lower(), val) + self.stimsource = ( + 'Internal' if self.stimsource == '1' else 'External') + # collect all events and categories + self._events = self._events_from_acq_pars() + self._categories = self._categories_from_acq_pars() + # mark events that are used by a category + for cat in self._categories.values(): + if cat['event']: + self._events[cat['event']]['in_use'] = True + if cat['reqevent']: + self._events[cat['reqevent']]['in_use'] = True + # make mne rejection dicts based on the averager parameters + self.reject = {'grad': self.megmax, 'eeg': self.eegmax, + 'eog': self.eogmax, 'ecg': self.ecgmax} + if not self.compat: + self.reject['mag'] = self.magmax + self.reject = {k: float(v) for k, v in self.reject.items() + if float(v) > 0} + self.flat = {'grad': self.megmin, 'eeg': self.eegmin} + if not self.compat: + self.flat['mag'] = self.magmin + 
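+        # Usage sketch (hypothetical variable names): the reject/flat
+        # dicts assembled in this constructor can be passed directly to
+        # mne.Epochs, e.g.:
+        #
+        #     ap = raw.acqparser
+        #     epochs = mne.Epochs(raw, events, reject=ap.reject,
+        #                         flat=ap.flat)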
+        self.flat = {k: float(v) for k, v in self.flat.items()
+                     if float(v) > 0}
+
+    def __repr__(self):  # noqa: D105
+        s = '<AcqParserFIF | '
+        s += 'categories: %d (%d in use), ' % (self.ncateg,
+                                               len(self._categories_in_use))
+        s += 'events: %d (%d in use)>' % (self.nevent,
+                                          len(self._events_in_use))
+        return s
+
+    def __getitem__(self, item):
+        """Return an averaging category by its DACQ comment (name)."""
+        return self._categories[item]
+
+    def _events_from_acq_pars(self):
+        """Collect DACQ events into a dict.
+
+        Events are keyed by number starting from 1 (the DACQ index of the
+        event). Each event is itself represented by a dict containing the
+        event parameters.
+        """
+        # lookup table for event number -> bits for old DACQ versions
+        _compat_event_lookup = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 3,
+                                8: 5, 9: 6, 10: 7, 11: 9, 12: 10, 13: 11,
+                                14: 12, 15: 13, 16: 14, 17: 15}
+        events = dict()
+        for evnum in range(1, self.nevent + 1):
+            evnum_s = str(evnum).zfill(2)  # '01', '02' etc.
+            evdi = dict()
+            event_vars = (self._event_vars_compat if self.compat
+                          else self._event_vars)
+            for var in event_vars:
+                # name of DACQ variable, e.g. 'ERFeventNewBits01'
+                acq_key = 'ERFevent' + var + evnum_s
+                # corresponding dict key, e.g. 'newbits'
+                dict_key = var.lower()
+                val = self.acq_dict[acq_key]
+                # type convert numeric values
+                if dict_key in ['newbits', 'oldbits', 'newmask', 'oldmask']:
+                    val = int(val)
+                elif dict_key in ['delay']:
+                    val = float(val)
+                evdi[dict_key] = val
+            evdi['in_use'] = False  # __init__() will set this
+            evdi['index'] = evnum
+            if self.compat:
+                evdi['name'] = str(evnum)
+                evdi['oldmask'] = 63
+                evdi['newmask'] = 63
+                evdi['oldbits'] = 0
+                evdi['newbits'] = _compat_event_lookup[evnum]
+            events[evnum] = evdi
+        return events
+
+    def _acqpars_gen(self, acq_pars):
+        """Yield key/value pairs from ``info['acq_pars']``."""
+        key, val = '', ''
+        for line in acq_pars.split():
+            if any([line.startswith(x) for x in self._acq_var_magic]):
+                key = line
+                val = ''
+            else:
+                if not key:
+                    raise ValueError('Cannot parse acquisition parameters')
+                # DACQ splits items with spaces into multiple lines
+                val += ' ' + line if val else line
+            yield key, val
+
+    def _categories_from_acq_pars(self):
+        """Collect DACQ averaging categories into a dict.
+
+        Categories are keyed by the comment field in DACQ. Each category is
+        itself represented by a dict containing the category parameters.
+        """
+        cats = dict()
+        for catnum in [str(x).zfill(2) for x in range(1, self.ncateg + 1)]:
+            catdi = dict()
+            # read all category variables
+            for var in self._cat_vars:
+                acq_key = 'ERFcat' + var + catnum
+                class_key = var.lower()
+                val = self.acq_dict[acq_key]
+                catdi[class_key] = val
+            # some type conversions
+            catdi['display'] = (catdi['display'] == '1')
+            catdi['state'] = (catdi['state'] == '1')
+            for key in ['start', 'end', 'reqwithin']:
+                catdi[key] = float(catdi[key])
+            for key in ['nave', 'event', 'reqevent', 'reqwhen', 'subave']:
+                catdi[key] = int(catdi[key])
+            # some convenient extra (non-DACQ) vars
+            catdi['index'] = int(catnum)  # index of category in DACQ list
+            cats[catdi['comment']] = catdi
+        return cats
+
+    def _events_mne_to_dacq(self, mne_events):
+        """Create list of DACQ events based on mne trigger transitions list.
+
+        mne_events is typically given by mne.find_events (use
+        consecutive=True to get all transitions). Output consists of rows
+        in the form [t, 0, event_codes] where t is time in samples and
+        event_codes is all DACQ events compatible with the transition,
+        bitwise ORed together: e.g. [t1, 0, 5] means that events 1 and 3
+        occurred at time t1, as 2**(1 - 1) + 2**(3 - 1) = 5.
+        """
+        events_ = mne_events.copy()
+        events_[:, 1:3] = 0
+        for n, ev in self._events.items():
+            if ev['in_use']:
+                pre_ok = (
+                    np.bitwise_and(ev['oldmask'],
+                                   mne_events[:, 1]) == ev['oldbits'])
+                post_ok = (
+                    np.bitwise_and(ev['newmask'],
+                                   mne_events[:, 2]) == ev['newbits'])
+                ok_ind = np.where(pre_ok & post_ok)
+                events_[ok_ind, 2] |= 1 << (n - 1)
+        return events_
+
+    def _mne_events_to_category_t0(self, cat, mne_events, sfreq):
+        """Translate mne_events to epoch zero times (t0).
+ + First mne events (trigger transitions) are converted into DACQ events. + Then the zero times for the epochs are obtained by considering the + reference and conditional (required) events and the delay to stimulus. + """ + cat_ev = cat['event'] + cat_reqev = cat['reqevent'] + # first convert mne events to dacq event list + events = self._events_mne_to_dacq(mne_events) + # next, take req. events and delays into account + times = events[:, 0] + # indices of times where ref. event occurs + refEvents_inds = np.where(events[:, 2] & (1 << cat_ev - 1))[0] + refEvents_t = times[refEvents_inds] + if cat_reqev: + # indices of times where req. event occurs + reqEvents_inds = np.where(events[:, 2] & ( + 1 << cat_reqev - 1))[0] + reqEvents_t = times[reqEvents_inds] + # relative (to refevent) time window where req. event + # must occur (e.g. [0 .2]) + twin = [0, (-1)**(cat['reqwhen']) * cat['reqwithin']] + win = np.round(np.array(sorted(twin)) * sfreq) # to samples + refEvents_wins = refEvents_t[:, None] + win + req_acc = np.zeros(refEvents_inds.shape, dtype=bool) + for t in reqEvents_t: + # mark time windows where req. condition is satisfied + reqEvent_in_win = np.logical_and( + t >= refEvents_wins[:, 0], t <= refEvents_wins[:, 1]) + req_acc |= reqEvent_in_win + # drop ref. events where req. event condition is not satisfied + refEvents_inds = refEvents_inds[np.where(req_acc)] + refEvents_t = times[refEvents_inds] + # adjust for trigger-stimulus delay by delaying the ref. event + refEvents_t += int(np.round(self._events[cat_ev]['delay'] * sfreq)) + return refEvents_t + + @property + def categories(self): + """Return list of averaging categories ordered by DACQ index. + + Only returns categories marked active in DACQ. + """ + cats = sorted(self._categories_in_use.values(), + key=lambda cat: cat['index']) + return cats + + @property + def events(self): + """Return events ordered by DACQ index. + + Only returns events that are in use (referred to by a category). + """ + evs = sorted(self._events_in_use.values(), key=lambda ev: ev['index']) + return evs + + @property + def _categories_in_use(self): + return {k: v for k, v in self._categories.items() if v['state']} + + @property + def _events_in_use(self): + return {k: v for k, v in self._events.items() if v['in_use']} + + def get_condition(self, raw, condition=None, stim_channel=None, mask=None, + uint_cast=None, mask_type='and', delayed_lookup=True): + """Get averaging parameters for a condition (averaging category). + + Output is designed to be used with the Epochs class to extract the + corresponding epochs. + + Parameters + ---------- + raw : Raw object + An instance of Raw. + condition : None | str | dict | list of dict + Condition or a list of conditions. Conditions can be strings + (DACQ comment field, e.g. 'Auditory left') or category dicts + (e.g. acqp['Auditory left'], where acqp is an instance of + AcqParserFIF). If None, get all conditions marked active in + DACQ. + stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by the trigger. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will fall back to + 'STI101' or 'STI 014' if present, then fall back to the first + channel of type 'stim', if present. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. 
+ uint_cast : bool + If True (default False), do a cast to ``uint16`` on the channel + data. This can be used to fix a bug with STI101 and STI014 in + Neuromag acquisition setups that use channel STI016 (channel 16 + turns data into e.g. -32768), similar to ``mne_fix_stim14 --32`` + in MNE-C. + mask_type : 'and' | 'not_and' + The type of operation between the mask and the trigger. + Choose 'and' for MNE-C masking behavior. + delayed_lookup : bool + If True, use the 'delayed lookup' procedure implemented in Elekta + software. When a trigger transition occurs, the lookup of + the new trigger value will not happen immediately at the following + sample, but with a 1-sample delay. This allows a slight + asynchrony between trigger onsets, when they are intended to be + synchronous. If you have accurate hardware and want to detect + transitions with a resolution of one sample, use + delayed_lookup=False. + + Returns + ------- + conds_data : dict or list of dict + Each dict has the following keys: + + events : array, shape (n_epochs_out, 3) + List of zero time points (t0) for the epochs matching the + condition. Use as the ``events`` parameter to Epochs. Note + that these are not (necessarily) actual events. + event_id : dict + Name of condition and index compatible with ``events``. + Should be passed as the ``event_id`` parameter to Epochs. + tmin : float + Epoch starting time relative to t0. Use as the ``tmin`` + parameter to Epochs. + tmax : float + Epoch ending time relative to t0. Use as the ``tmax`` + parameter to Epochs. + """ + if condition is None: + condition = self.categories # get all + if not isinstance(condition, list): + condition = [condition] # single cond -> listify + conds_data = list() + for cat in condition: + if isinstance(cat, str): + cat = self[cat] + mne_events = find_events(raw, stim_channel=stim_channel, mask=mask, + mask_type=mask_type, output='step', + uint_cast=uint_cast, consecutive=True, + verbose=False, shortest_event=1) + if delayed_lookup: + ind = np.where(np.diff(mne_events[:, 0]) == 1)[0] + if 1 in np.diff(ind): + raise ValueError('There are several subsequent ' + 'transitions on the trigger channel. ' + 'This will not work well with ' + 'delayed_lookup=True. You may want to ' + 'check your trigger data and ' + 'set delayed_lookup=False.') + mne_events[ind, 2] = mne_events[ind + 1, 2] + mne_events = np.delete(mne_events, ind + 1, axis=0) + sfreq = raw.info['sfreq'] + cat_t0_ = self._mne_events_to_category_t0(cat, mne_events, sfreq) + # make it compatible with the usual events array + cat_t0 = np.c_[cat_t0_, np.zeros(cat_t0_.shape), + cat['index'] * np.ones(cat_t0_.shape) + ].astype(np.uint32) + cat_id = {cat['comment']: cat['index']} + tmin, tmax = cat['start'], cat['end'] + conds_data.append(dict(events=cat_t0, event_id=cat_id, + tmin=tmin, tmax=tmax)) + return conds_data[0] if len(conds_data) == 1 else conds_data + + +def match_event_names(event_names, keys, *, on_missing='raise'): + """Search a collection of event names for matching (sub-)groups of events. + + This function is particularly helpful when using grouped event names + (i.e., event names containing forward slashes ``/``). Please see the + Examples section below for a working example. + + Parameters + ---------- + event_names : array-like of str | dict + Either a collection of event names, or the ``event_id`` dictionary + mapping event names to event codes. + keys : array-like of str | str + One or multiple event names or groups to search for in ``event_names``. 
+ on_missing : 'raise' | 'warn' | 'ignore' + How to handle situations when none of the ``keys`` can be found in + ``event_names``. If ``'warn'`` or ``'ignore'``, an empty list will be + returned. + + Returns + ------- + matches : list of str + All event names that match any of the ``keys`` provided. + + Notes + ----- + .. versionadded:: 1.0 + + Examples + -------- + Assuming the following grouped event names in the data, you could easily + query for all ``auditory`` and ``left`` event names:: + + >>> event_names = [ + ... 'auditory/left', + ... 'auditory/right', + ... 'visual/left', + ... 'visual/right' + ... ] + >>> match_event_names( + ... event_names=event_names, + ... keys=['auditory', 'left'] + ... ) + ['auditory/left', 'auditory/right', 'visual/left'] + """ + _check_on_missing(on_missing) + + if isinstance(event_names, dict): + event_names = list(event_names) + + # ensure we have a list of `keys` + if ( + isinstance(keys, (Sequence, np.ndarray)) and + not isinstance(keys, str) + ): + keys = list(keys) + else: + keys = [keys] + + matches = [] + + # form the hierarchical event name mapping + for key in keys: + if not isinstance(key, str): + raise ValueError(f'keys must be strings, got {type(key)} ({key})') + + matches.extend( + name for name in event_names + if set(key.split('/')).issubset(name.split('/')) + ) + + if not matches: + _on_missing( + on_missing=on_missing, + msg=f'Event name "{key}" could not be found. The following events ' + f'are present in the data: {", ".join(event_names)}', + error_klass=KeyError + ) + + matches = sorted(set(matches)) # deduplicate if necessary + return matches diff --git a/python/libs/mne/evoked.py b/python/libs/mne/evoked.py new file mode 100644 index 0000000..b6daa3d --- /dev/null +++ b/python/libs/mne/evoked.py @@ -0,0 +1,1562 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Denis Engemann +# Andrew Dykstra +# Mads Jensen +# Jona Sassenhagen +# +# License: BSD-3-Clause + +from copy import deepcopy +import numpy as np + +from .baseline import rescale, _log_rescale, _check_baseline +from .channels.channels import (UpdateChannelsMixin, + SetChannelsMixin, InterpolationMixin) +from .channels.layout import _merge_ch_data, _pair_grad_sensors +from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT +from .filter import detrend, FilterMixin, _check_fun +from .utils import (check_fname, logger, verbose, _time_mask, warn, sizeof_fmt, + SizeMixin, copy_function_doc_to_method_doc, _validate_type, + fill_doc, _check_option, ShiftTimeMixin, _build_data_frame, + _check_pandas_installed, _check_pandas_index_arguments, + _convert_times, _scale_dataframe_data, _check_time_format, + _check_preload, _check_fname, _VerboseDep) +from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field, + plot_evoked_image, plot_evoked_topo) +from .viz.evoked import plot_evoked_white, plot_evoked_joint +from .viz.topomap import _topomap_animation + +from .io.constants import FIFF +from .io.open import fiff_open +from .io.tag import read_tag +from .io.tree import dir_tree_find +from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT +from .io.meas_info import (ContainsMixin, read_meas_info, write_meas_info, + _read_extended_ch_info, _rename_list, + _ensure_infos_match) +from .io.proj import ProjMixin +from .io.write import (start_and_end_file, start_block, end_block, + write_int, write_string, write_float_matrix, + write_id, write_float, write_complex_float_matrix) +from .io.base import TimeMixin, _check_maxshield, 
_get_ch_factors +from .parallel import parallel_func + +_aspect_dict = { + 'average': FIFF.FIFFV_ASPECT_AVERAGE, + 'standard_error': FIFF.FIFFV_ASPECT_STD_ERR, + 'single_epoch': FIFF.FIFFV_ASPECT_SINGLE, + 'partial_average': FIFF.FIFFV_ASPECT_SUBAVERAGE, + 'alternating_subaverage': FIFF.FIFFV_ASPECT_ALTAVERAGE, + 'sample_cut_out_by_graph': FIFF.FIFFV_ASPECT_SAMPLE, + 'power_density_spectrum': FIFF.FIFFV_ASPECT_POWER_DENSITY, + 'dipole_amplitude_cuvre': FIFF.FIFFV_ASPECT_DIPOLE_WAVE, + 'squid_modulation_lower_bound': FIFF.FIFFV_ASPECT_IFII_LOW, + 'squid_modulation_upper_bound': FIFF.FIFFV_ASPECT_IFII_HIGH, + 'squid_gate_setting': FIFF.FIFFV_ASPECT_GATE, +} +_aspect_rev = {val: key for key, val in _aspect_dict.items()} + + +@fill_doc +class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, + InterpolationMixin, FilterMixin, TimeMixin, SizeMixin, + ShiftTimeMixin, _VerboseDep): + """Evoked data. + + Parameters + ---------- + fname : str + Name of evoked/average FIF file to load. + If None no data is loaded. + condition : int, or str + Dataset ID number (int) or comment/name (str). Optional if there is + only one data set in file. + proj : bool, optional + Apply SSP projection vectors. + kind : str + Either 'average' or 'standard_error'. The type of data to read. + Only used if 'condition' is a str. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(verbose)s + + Attributes + ---------- + %(info_not_none)s + ch_names : list of str + List of channels' names. + nave : int + Number of averaged epochs. + kind : str + Type of data, either average or standard_error. + comment : str + Comment on dataset. Can be the condition. + data : array of shape (n_channels, n_times) + Evoked response. + first : int + First time sample. + last : int + Last time sample. + tmin : float + The first time point in seconds. + tmax : float + The last time point in seconds. + times : array + Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval + between consecutive time samples is equal to the inverse of the + sampling frequency. + baseline : None | tuple of length 2 + This attribute reflects whether the data has been baseline-corrected + (it will be a ``tuple`` then) or not (it will be ``None``). + + Notes + ----- + Evoked objects can only contain the average of a single set of conditions. 
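+
+    A minimal loading sketch (the file name and condition are
+    hypothetical)::
+
+        evoked = Evoked('sample-ave.fif', condition='Left Auditory')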
+ """ + + @verbose + def __init__(self, fname, condition=None, proj=True, + kind='average', allow_maxshield=False, *, + verbose=None): # noqa: D102 + _validate_type(proj, bool, "'proj'") + # Read the requested data + fname = _check_fname(fname=fname, must_exist=True, overwrite='read') + self.info, self.nave, self._aspect_kind, self.comment, self.times, \ + self.data, self.baseline = _read_evoked(fname, condition, kind, + allow_maxshield) + self._update_first_last() + self.preload = True + # project and baseline correct + if proj: + self.apply_proj() + self.filename = fname + + @property + def kind(self): + """The data kind.""" + return _aspect_rev[self._aspect_kind] + + @kind.setter + def kind(self, kind): + _check_option('kind', kind, list(_aspect_dict.keys())) + self._aspect_kind = _aspect_dict[kind] + + @property + def data(self): + """The data matrix.""" + return self._data + + @data.setter + def data(self, data): + """Set the data matrix.""" + self._data = data + + @fill_doc + def get_data(self, picks=None, units=None, tmin=None, tmax=None): + """Get evoked data as 2D array. + + Parameters + ---------- + %(picks_all)s + %(units)s + tmin : float | None + Start time of data to get in seconds. + tmax : float | None + End time of data to get in seconds. + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + A view on evoked data. + + Notes + ----- + .. versionadded:: 0.24 + """ + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + + start, stop = self._handle_tmin_tmax(tmin, tmax) + + data = self.data[picks, start:stop] + + if units is not None: + ch_factors = _get_ch_factors(self, units, picks) + data *= ch_factors[:, np.newaxis] + + return data + + @verbose + def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, + verbose=None, **kwargs): + """Apply a function to a subset of channels. + + %(applyfun_summary_evoked)s + + Parameters + ---------- + %(fun_applyfun_evoked)s + %(picks_all_data_noref)s + %(dtype_applyfun)s + %(n_jobs)s + %(verbose)s + %(kwargs_fun)s + + Returns + ------- + self : instance of Evoked + The evoked object with transformed data. + """ + _check_preload(self, 'evoked.apply_function') + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError('fun needs to be a function') + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + # check the dimension of the incoming evoked data + _check_option('evoked.ndim', self._data.ndim, [2]) + + if n_jobs == 1: + # modify data inplace to save memory + for idx in picks: + self._data[idx, :] = _check_fun(fun, data_in[idx, :], **kwargs) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel(p_fun( + fun, data_in[p, :], **kwargs) for p in picks) + for pp, p in enumerate(picks): + self._data[p, :] = data_picks_new[pp] + + return self + + @verbose + def apply_baseline(self, baseline=(None, 0), *, verbose=None): + """Baseline correct evoked data. + + Parameters + ---------- + %(baseline_evoked)s + Defaults to ``(None, 0)``, i.e. beginning of the the data until + time point zero. + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The baseline-corrected Evoked object. + + Notes + ----- + Baseline correction can be done multiple times. + + .. 
versionadded:: 0.13.0 + """ + baseline = _check_baseline(baseline, times=self.times, + sfreq=self.info['sfreq']) + if self.baseline is not None and baseline is None: + raise ValueError('The data has already been baseline-corrected. ' + 'Cannot remove existing basline correction.') + elif baseline is None: + # Do not rescale + logger.info(_log_rescale(None)) + else: + # Actually baseline correct the data. Logging happens in rescale(). + self.data = rescale(self.data, self.times, baseline, copy=False) + self.baseline = baseline + + return self + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save evoked data to a file. + + Parameters + ---------- + fname : str + The name of the file, which should end with ``-ave.fif(.gz)`` or + ``_ave.fif(.gz)``. + %(overwrite)s + %(verbose)s + + Notes + ----- + To write multiple conditions into a single file, use + `mne.write_evokeds`. + + .. versionchanged:: 0.23 + Information on baseline correction will be stored with the data, + and will be restored when reading again via `mne.read_evokeds`. + """ + write_evokeds(fname, self, overwrite=overwrite) + + def __repr__(self): # noqa: D105 + max_comment_length = 1000 + if len(self.comment) > max_comment_length: + comment = self.comment[:max_comment_length] + comment += "..." + else: + comment = self.comment + s = "'%s' (%s, N=%s)" % (comment, self.kind, self.nave) + s += ", %0.5g – %0.5g sec" % (self.times[0], self.times[-1]) + s += ', baseline ' + if self.baseline is None: + s += 'off' + else: + s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec' + if self.baseline != _check_baseline( + self.baseline, times=self.times, sfreq=self.info['sfreq'], + on_baseline_outside_data='adjust'): + s += ' (baseline period was cropped after baseline correction)' + s += ", %s ch" % self.data.shape[0] + s += ", ~%s" % (sizeof_fmt(self._size),) + return "" % s + + @property + def ch_names(self): + """Channel names.""" + return self.info['ch_names'] + + @property + def tmin(self): + """First time point. + + .. versionadded:: 0.21 + """ + return self.times[0] + + @property + def tmax(self): + """Last time point. + + .. versionadded:: 0.21 + """ + return self.times[-1] + + @fill_doc + def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): + """Crop data to a given time interval. + + Parameters + ---------- + tmin : float | None + Start time of selection in seconds. + tmax : float | None + End time of selection in seconds. + %(include_tmax)s + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The cropped Evoked object, modified in-place. + + Notes + ----- + %(notes_tmax_included_by_default)s + """ + if tmin is None: + tmin = self.tmin + elif tmin < self.tmin: + warn(f'tmin is not in Evoked time interval. tmin is set to ' + f'evoked.tmin ({self.tmin:g} sec)') + tmin = self.tmin + + if tmax is None: + tmax = self.tmax + elif tmax > self.tmax: + warn(f'tmax is not in Evoked time interval. tmax is set to ' + f'evoked.tmax ({self.tmax:g} sec)') + tmax = self.tmax + include_tmax = True + + mask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'], + include_tmax=include_tmax) + self.times = self.times[mask] + self._update_first_last() + self.data = self.data[:, mask] + + return self + + @verbose + def decimate(self, decim, offset=0, verbose=None): + """Decimate the evoked data. + + Parameters + ---------- + %(decim)s + %(offset_decim)s + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The decimated Evoked object. 
+ + See Also + -------- + Epochs.decimate + Epochs.resample + mne.io.Raw.resample + + Notes + ----- + %(decim_notes)s + + .. versionadded:: 0.13.0 + """ + decim, offset, new_sfreq = _check_decim(self.info, decim, offset) + start_idx = int(round(self.times[0] * (self.info['sfreq'] * decim))) + i_start = start_idx % decim + offset + decim_slice = slice(i_start, None, decim) + with self.info._unlock(): + self.info['sfreq'] = new_sfreq + self.data = self.data[:, decim_slice].copy() + self.times = self.times[decim_slice].copy() + self._update_first_last() + return self + + @copy_function_doc_to_method_doc(plot_evoked) + def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None, + xlim='tight', proj=False, hline=None, units=None, scalings=None, + titles=None, axes=None, gfp=False, window_title=None, + spatial_colors=False, zorder='unsorted', selectable=True, + noise_cov=None, time_unit='s', sphere=None, verbose=None): + return plot_evoked( + self, picks=picks, exclude=exclude, unit=unit, show=show, + ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units, + scalings=scalings, titles=titles, axes=axes, gfp=gfp, + window_title=window_title, spatial_colors=spatial_colors, + zorder=zorder, selectable=selectable, noise_cov=noise_cov, + time_unit=time_unit, sphere=sphere, verbose=verbose) + + @copy_function_doc_to_method_doc(plot_evoked_image) + def plot_image(self, picks=None, exclude='bads', unit=True, show=True, + clim=None, xlim='tight', proj=False, units=None, + scalings=None, titles=None, axes=None, cmap='RdBu_r', + colorbar=True, mask=None, mask_style=None, + mask_cmap='Greys', mask_alpha=.25, time_unit='s', + show_names=None, group_by=None, sphere=None): + return plot_evoked_image( + self, picks=picks, exclude=exclude, unit=unit, show=show, + clim=clim, xlim=xlim, proj=proj, units=units, scalings=scalings, + titles=titles, axes=axes, cmap=cmap, colorbar=colorbar, mask=mask, + mask_style=mask_style, mask_cmap=mask_cmap, mask_alpha=mask_alpha, + time_unit=time_unit, show_names=show_names, group_by=group_by, + sphere=sphere) + + @copy_function_doc_to_method_doc(plot_evoked_topo) + def plot_topo(self, layout=None, layout_scale=0.945, color=None, + border='none', ylim=None, scalings=None, title=None, + proj=False, vline=[0.0], fig_background=None, + merge_grads=False, legend=True, axes=None, + background_color='w', noise_cov=None, exclude='bads', + show=True): + """ + Notes + ----- + .. 
versionadded:: 0.10.0 + """ + return plot_evoked_topo( + self, layout=layout, layout_scale=layout_scale, + color=color, border=border, ylim=ylim, scalings=scalings, + title=title, proj=proj, vline=vline, fig_background=fig_background, + merge_grads=merge_grads, legend=legend, axes=axes, + background_color=background_color, noise_cov=noise_cov, + exclude=exclude, show=show) + + @copy_function_doc_to_method_doc(plot_evoked_topomap) + def plot_topomap(self, times="auto", ch_type=None, vmin=None, + vmax=None, cmap=None, sensors=True, colorbar=True, + scalings=None, units=None, res=64, + size=1, cbar_fmt="%3.1f", + time_unit='s', time_format=None, + proj=False, show=True, show_names=False, title=None, + mask=None, mask_params=None, outlines='head', + contours=6, image_interp='bilinear', average=None, + axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None, + border=_BORDER_DEFAULT, nrows=1, ncols='auto'): + return plot_evoked_topomap( + self, times=times, ch_type=ch_type, vmin=vmin, + vmax=vmax, cmap=cmap, sensors=sensors, colorbar=colorbar, + scalings=scalings, units=units, res=res, + size=size, cbar_fmt=cbar_fmt, time_unit=time_unit, + time_format=time_format, proj=proj, show=show, + show_names=show_names, title=title, mask=mask, + mask_params=mask_params, outlines=outlines, contours=contours, + image_interp=image_interp, average=average, + axes=axes, extrapolate=extrapolate, sphere=sphere, border=border, + nrows=nrows, ncols=ncols) + + @copy_function_doc_to_method_doc(plot_evoked_field) + def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms', + n_jobs=1, fig=None, vmax=None, n_contours=21, verbose=None): + return plot_evoked_field(self, surf_maps, time=time, + time_label=time_label, n_jobs=n_jobs, + fig=fig, vmax=vmax, n_contours=n_contours, + verbose=verbose) + + @copy_function_doc_to_method_doc(plot_evoked_white) + def plot_white(self, noise_cov, show=True, rank=None, time_unit='s', + sphere=None, axes=None, verbose=None): + return plot_evoked_white( + self, noise_cov=noise_cov, rank=rank, show=show, + time_unit=time_unit, sphere=sphere, axes=axes, verbose=verbose) + + @copy_function_doc_to_method_doc(plot_evoked_joint) + def plot_joint(self, times="peaks", title='', picks=None, + exclude='bads', show=True, ts_args=None, + topomap_args=None): + return plot_evoked_joint(self, times=times, title=title, picks=picks, + exclude=exclude, show=show, ts_args=ts_args, + topomap_args=topomap_args) + + @fill_doc + def animate_topomap(self, ch_type=None, times=None, frame_rate=None, + butterfly=False, blit=True, show=True, time_unit='s', + sphere=None, *, extrapolate=_EXTRAPOLATE_DEFAULT, + verbose=None): + """Make animation of evoked data as topomap timeseries. + + The animation can be paused/resumed with left mouse button. + Left and right arrow keys can be used to move backward or forward + in time. + + Parameters + ---------- + ch_type : str | None + Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', + 'hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. + If None, first available channel type from the above list is used. + Defaults to None. + times : array of float | None + The time points to plot. If None, 10 evenly spaced samples are + calculated over the evoked time series. Defaults to None. + frame_rate : int | None + Frame rate for the animation in Hz. If None, + frame rate = sfreq / 10. Defaults to None. + butterfly : bool + Whether to plot the data as butterfly plot under the topomap. + Defaults to False. 
+ blit : bool + Whether to use blit to optimize drawing. In general, it is + recommended to use blit in combination with ``show=True``. If you + intend to save the animation it is better to disable blit. + Defaults to True. + show : bool + Whether to show the animation. Defaults to True. + time_unit : str + The units for the time axis, can be "ms" (default in 0.16) + or "s" (will become the default in 0.17). + + .. versionadded:: 0.16 + %(sphere_topomap_auto)s + %(extrapolate_topomap)s + + .. versionadded:: 0.22 + %(verbose)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. + anim : instance of matplotlib.animation.FuncAnimation + Animation of the topomap. + + Notes + ----- + .. versionadded:: 0.12.0 + """ + return _topomap_animation( + self, ch_type=ch_type, times=times, frame_rate=frame_rate, + butterfly=butterfly, blit=blit, show=show, time_unit=time_unit, + sphere=sphere, extrapolate=extrapolate, verbose=verbose) + + def as_type(self, ch_type='grad', mode='fast'): + """Compute virtual evoked using interpolated fields. + + .. Warning:: Using virtual evoked to compute inverse can yield + unexpected results. The virtual channels have ``'_v'`` appended + at the end of the names to emphasize that the data contained in + them are interpolated. + + Parameters + ---------- + ch_type : str + The destination channel type. It can be 'mag' or 'grad'. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications. + + Returns + ------- + evoked : instance of mne.Evoked + The transformed evoked object containing only virtual channels. + + Notes + ----- + This method returns a copy and does not modify the data it + operates on. It also returns an EvokedArray instance. + + .. versionadded:: 0.9.0 + """ + from .forward import _as_meg_type_inst + return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) + + @fill_doc + def detrend(self, order=1, picks=None): + """Detrend data. + + This function operates in-place. + + Parameters + ---------- + order : int + Either 0 or 1, the order of the detrending. 0 is a constant + (DC) detrend, 1 is a linear detrend. + %(picks_good_data)s + + Returns + ------- + evoked : instance of Evoked + The detrended evoked object. + """ + picks = _picks_to_idx(self.info, picks) + self.data[picks] = detrend(self.data[picks], order, axis=-1) + return self + + def copy(self): + """Copy the instance of evoked. + + Returns + ------- + evoked : instance of Evoked + A copy of the object. + """ + evoked = deepcopy(self) + return evoked + + def __neg__(self): + """Negate channel responses. + + Returns + ------- + evoked_neg : instance of Evoked + The Evoked instance with channel data negated and '-' + prepended to the comment. + """ + out = self.copy() + out.data *= -1 + + if out.comment is not None and ' + ' in out.comment: + out.comment = f'({out.comment})' # multiple conditions in evoked + out.comment = f'- {out.comment or "unknown"}' + return out + + def get_peak(self, ch_type=None, tmin=None, tmax=None, + mode='abs', time_as_index=False, merge_grads=False, + return_amplitude=False): + """Get location and latency of peak amplitude. + + Parameters + ---------- + ch_type : str | None + The channel type to use. Defaults to None. If more than one sensor + Type is present in the data the channel type has to be explicitly + set. + tmin : float | None + The minimum point in time to be considered for peak getting. 
+ If None (default), the beginning of the data is used. + tmax : float | None + The maximum point in time to be considered for peak getting. + If None (default), the end of the data is used. + mode : {'pos', 'neg', 'abs'} + How to deal with the sign of the data. If 'pos' only positive + values will be considered. If 'neg' only negative values will + be considered. If 'abs' absolute values will be considered. + Defaults to 'abs'. + time_as_index : bool + Whether to return the time index instead of the latency in seconds. + merge_grads : bool + If True, compute peak from merged gradiometer data. + return_amplitude : bool + If True, return also the amplitude at the maximum response. + + .. versionadded:: 0.16 + + Returns + ------- + ch_name : str + The channel exhibiting the maximum response. + latency : float | int + The time point of the maximum response, either latency in seconds + or index. + amplitude : float + The amplitude of the maximum response. Only returned if + return_amplitude is True. + + .. versionadded:: 0.16 + """ # noqa: E501 + supported = ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'misc', + 'None') + _FNIRS_CH_TYPES_SPLIT + types_used = self.get_channel_types(unique=True, only_data_chs=True) + + _check_option('ch_type', str(ch_type), supported) + + if ch_type is not None and ch_type not in types_used: + raise ValueError('Channel type `{ch_type}` not found in this ' + 'evoked object.'.format(ch_type=ch_type)) + + elif len(types_used) > 1 and ch_type is None: + raise RuntimeError('More than one sensor type found. `ch_type` ' + 'must not be `None`, pass a sensor type ' + 'value instead') + + if merge_grads: + if ch_type != 'grad': + raise ValueError('Channel type must be grad for merge_grads') + elif mode == 'neg': + raise ValueError('Negative mode (mode=neg) does not make ' + 'sense with merge_grads=True') + + meg = eeg = misc = seeg = dbs = ecog = fnirs = False + picks = None + if ch_type in ('mag', 'grad'): + meg = ch_type + elif ch_type == 'eeg': + eeg = True + elif ch_type == 'misc': + misc = True + elif ch_type == 'seeg': + seeg = True + elif ch_type == 'dbs': + dbs = True + elif ch_type == 'ecog': + ecog = True + elif ch_type in _FNIRS_CH_TYPES_SPLIT: + fnirs = ch_type + + if ch_type is not None: + if merge_grads: + picks = _pair_grad_sensors(self.info, topomap_coords=False) + else: + picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc, + seeg=seeg, ecog=ecog, ref_meg=False, + fnirs=fnirs, dbs=dbs) + data = self.data + ch_names = self.ch_names + + if picks is not None: + data = data[picks] + ch_names = [ch_names[k] for k in picks] + + if merge_grads: + data, _ = _merge_ch_data(data, ch_type, []) + ch_names = [ch_name[:-1] + 'X' for ch_name in ch_names[::2]] + + ch_idx, time_idx, max_amp = _get_peak(data, self.times, tmin, + tmax, mode) + + out = (ch_names[ch_idx], time_idx if time_as_index else + self.times[time_idx]) + + if return_amplitude: + out += (max_amp,) + + return out + + @verbose + def to_data_frame(self, picks=None, index=None, + scalings=None, copy=True, long_format=False, + time_format='ms', *, verbose=None): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, + an additional column "time" is added, unless ``index='time'`` + (in which case time values form the DataFrame's index). + + Parameters + ---------- + %(picks_all)s + %(index_df_evk)s + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + %(long_format_df_raw)s + %(time_format_df)s + + .. 
versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ['time'] + valid_time_formats = ['ms', 'timedelta'] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats) + # get data + picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + data = self.data[picks, :] + times = self.times + data = data.T + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = _convert_times(self, times, time_format) + mindex.append(('time', times)) + # build DataFrame + df = _build_data_frame(self, data, picks, long_format, mindex, index, + default_index=['time']) + return df + + +def _check_decim(info, decim, offset): + """Check decimation parameters.""" + if decim < 1 or decim != int(decim): + raise ValueError('decim must be an integer > 0') + decim = int(decim) + new_sfreq = info['sfreq'] / float(decim) + lowpass = info['lowpass'] + if decim > 1 and lowpass is None: + warn('The measurement information indicates data is not low-pass ' + 'filtered. The decim=%i parameter will result in a sampling ' + 'frequency of %g Hz, which can cause aliasing artifacts.' + % (decim, new_sfreq)) + elif decim > 1 and new_sfreq < 3 * lowpass: + warn('The measurement information indicates a low-pass frequency ' + 'of %g Hz. The decim=%i parameter will result in a sampling ' + 'frequency of %g Hz, which can cause aliasing artifacts.' + % (lowpass, decim, new_sfreq)) # > 50% nyquist lim + offset = int(offset) + if not 0 <= offset < decim: + raise ValueError('decim must be at least 0 and less than %s, got ' + '%s' % (decim, offset)) + return decim, offset, new_sfreq + + +@fill_doc +class EvokedArray(Evoked): + """Evoked object from numpy array. + + Parameters + ---------- + data : array of shape (n_channels, n_times) + The channels' evoked response. See notes for proper units of measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate this + structure. + tmin : float + Start time before event. Defaults to 0. + comment : str + Comment on dataset. Can be the condition. Defaults to ''. + nave : int + Number of averaged epochs. Defaults to 1. + kind : str + Type of data, either average or standard_error. Defaults to 'average'. + %(baseline_evoked)s + Defaults to ``None``, i.e. no baseline correction. + + .. versionadded:: 0.23 + %(verbose)s + + See Also + -------- + EpochsArray, io.RawArray, create_info + + Notes + ----- + Proper units of measure: + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + + @verbose + def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', + baseline=None, *, verbose=None): # noqa: D102 + dtype = np.complex128 if np.iscomplexobj(data) else np.float64 + data = np.asanyarray(data, dtype=dtype) + + if data.ndim != 2: + raise ValueError('Data must be a 2D array of shape (n_channels, ' + 'n_samples), got shape %s' % (data.shape,)) + + if len(info['ch_names']) != np.shape(data)[0]: + raise ValueError('Info (%s) and data (%s) must have same number ' + 'of channels.' 
% (len(info['ch_names']), + np.shape(data)[0])) + + self.data = data + + self.first = int(round(tmin * info['sfreq'])) + self.last = self.first + np.shape(data)[-1] - 1 + self.times = np.arange(self.first, self.last + 1, + dtype=np.float64) / info['sfreq'] + self.info = info.copy() # do not modify original info + self.nave = nave + self.kind = kind + self.comment = comment + self.picks = None + self.preload = True + self._projector = None + _validate_type(self.kind, "str", "kind") + if self.kind not in _aspect_dict: + raise ValueError('unknown kind "%s", should be "average" or ' + '"standard_error"' % (self.kind,)) + self._aspect_kind = _aspect_dict[self.kind] + + self.baseline = baseline + if self.baseline is not None: # omit log msg if not baselining + self.apply_baseline(self.baseline) + + +def _get_entries(fid, evoked_node, allow_maxshield=False): + """Get all evoked entries.""" + comments = list() + aspect_kinds = list() + for ev in evoked_node: + for k in range(ev['nent']): + my_kind = ev['directory'][k].kind + pos = ev['directory'][k].pos + if my_kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comments.append(tag.data) + my_aspect = _get_aspect(ev, allow_maxshield)[0] + for k in range(my_aspect['nent']): + my_kind = my_aspect['directory'][k].kind + pos = my_aspect['directory'][k].pos + if my_kind == FIFF.FIFF_ASPECT_KIND: + tag = read_tag(fid, pos) + aspect_kinds.append(int(tag.data)) + comments = np.atleast_1d(comments) + aspect_kinds = np.atleast_1d(aspect_kinds) + if len(comments) != len(aspect_kinds) or len(comments) == 0: + fid.close() + raise ValueError('Dataset names in FIF file ' + 'could not be found.') + t = [_aspect_rev[a] for a in aspect_kinds] + t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)] + t = '\n'.join(t) + return comments, aspect_kinds, t + + +def _get_aspect(evoked, allow_maxshield): + """Get Evoked data aspect.""" + is_maxshield = False + aspect = dir_tree_find(evoked, FIFF.FIFFB_ASPECT) + if len(aspect) == 0: + _check_maxshield(allow_maxshield) + aspect = dir_tree_find(evoked, FIFF.FIFFB_IAS_ASPECT) + is_maxshield = True + if len(aspect) > 1: + logger.info('Multiple data aspects found. Taking first one.') + return aspect[0], is_maxshield + + +def _get_evoked_node(fname): + """Get info in evoked file.""" + f, tree, _ = fiff_open(fname) + with f as fid: + _, meas = read_meas_info(fid, tree, verbose=False) + evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED) + return evoked_node + + +def _check_evokeds_ch_names_times(all_evoked): + evoked = all_evoked[0] + ch_names = evoked.ch_names + for ii, ev in enumerate(all_evoked[1:]): + if ev.ch_names != ch_names: + if set(ev.ch_names) != set(ch_names): + raise ValueError( + "%s and %s do not contain the same channels." % (evoked, + ev)) + else: + warn("Order of channels differs, reordering channels ...") + ev = ev.copy() + ev.reorder_channels(ch_names) + all_evoked[ii + 1] = ev + if not np.max(np.abs(ev.times - evoked.times)) < 1e-7: + raise ValueError("%s and %s do not contain the same time instants" + % (evoked, ev)) + return all_evoked + + +def combine_evoked(all_evoked, weights): + """Merge evoked data by weighted addition or subtraction. + + Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the + same time instants. Subtraction can be performed by passing + ``weights=[1, -1]``. + + .. 
Warning:: + Other than cases like simple subtraction mentioned above (where all + weights are -1 or 1), if you provide numeric weights instead of using + ``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's + ``.nave`` attribute (which is used to scale noise covariance when + applying the inverse operator) may not be suitable for inverse imaging. + + Parameters + ---------- + all_evoked : list of Evoked + The evoked datasets. + weights : list of float | 'equal' | 'nave' + The weights to apply to the data of each evoked instance, or a string + describing the weighting strategy to apply: ``'nave'`` computes + sum-to-one weights proportional to each object's ``nave`` attribute; + ``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``. + + Returns + ------- + evoked : Evoked + The new evoked data. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + naves = np.array([evk.nave for evk in all_evoked], float) + if isinstance(weights, str): + _check_option('weights', weights, ['nave', 'equal']) + if weights == 'nave': + weights = naves / naves.sum() + else: + weights = np.ones_like(naves) / len(naves) + else: + weights = np.array(weights, float) + + if weights.ndim != 1 or weights.size != len(all_evoked): + raise ValueError('weights must be the same size as all_evoked') + + # cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on + # "weighted sample variance". The variance of a weighted sample mean is: + # + # σ² = w₁² σ₁² + w₂² σ₂² + ... + wₙ² σₙ² + # + # We estimate the variance of each evoked instance as 1 / nave to get: + # + # σ² = w₁² / nave₁ + w₂² / nave₂ + ... + wₙ² / naveₙ + # + # And our resulting nave is the reciprocal of this: + new_nave = 1. / np.sum(weights ** 2 / naves) + # This general formula is equivalent to formulae in Matti's manual + # (pp 128-129), where: + # new_nave = sum(naves) when weights='nave' and + # new_nave = 1. / sum(1. / naves) when weights are all 1. + + all_evoked = _check_evokeds_ch_names_times(all_evoked) + evoked = all_evoked[0].copy() + + # use union of bad channels + bads = list(set(b for e in all_evoked for b in e.info['bads'])) + evoked.info['bads'] = bads + evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked)) + evoked.nave = new_nave + + comment = '' + for idx, (w, e) in enumerate(zip(weights, all_evoked)): + # pick sign + sign = '' if w >= 0 else '-' + # format weight + weight = '' if np.isclose(abs(w), 1.) else f'{abs(w):0.3f}' + # format multiplier + multiplier = ' × ' if weight else '' + # format comment + if e.comment is not None and ' + ' in e.comment: # multiple conditions + this_comment = f'({e.comment})' + else: + this_comment = f'{e.comment or "unknown"}' + # assemble everything + if idx == 0: + comment += f'{sign}{weight}{multiplier}{this_comment}' + else: + comment += f' {sign or "+"} {weight}{multiplier}{this_comment}' + # special-case: combine_evoked([e1, -e2], [1, -1]) + evoked.comment = comment.replace(' - - ', ' + ') + return evoked + + +@verbose +def read_evokeds(fname, condition=None, baseline=None, kind='average', + proj=True, allow_maxshield=False, verbose=None): + """Read evoked dataset(s). + + Parameters + ---------- + fname : str + The file name, which should end with -ave.fif or -ave.fif.gz. + condition : int or str | list of int or str | None + The index or list of indices of the evoked dataset to read. FIF files + can contain multiple datasets. If None, all datasets are returned as a + list. + %(baseline_evoked)s + If ``None`` (default), do not apply baseline correction. 
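+        For example (a hypothetical illustration), passing ``baseline=(None, 0)``
+        here applies an additional correction using all samples from the start
+        of the data up to time zero.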
+ + .. note:: Note that if the read `~mne.Evoked` objects have already + been baseline-corrected, the data retrieved from disk will + **always** be baseline-corrected (in fact, only the + baseline-corrected version of the data will be saved, so + there is no way to undo this procedure). Only **after** the + data has been loaded, a custom (additional) baseline + correction **may** be optionally applied by passing a tuple + here. Passing ``None`` will **not** remove an existing + baseline correction, but merely omit the optional, additional + baseline correction. + kind : str + Either 'average' or 'standard_error', the type of data to read. + proj : bool + If False, available projectors won't be applied to the data. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(verbose)s + + Returns + ------- + evoked : Evoked or list of Evoked + The evoked dataset(s); one `~mne.Evoked` if ``condition`` is an + integer or string; or a list of `~mne.Evoked` if ``condition`` is + ``None`` or a list. + + See Also + -------- + write_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + If the read `~mne.Evoked` objects had been baseline-corrected before + saving, this will be reflected in their ``baseline`` attribute after + reading. + """ + check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', + '_ave.fif', '_ave.fif.gz')) + logger.info('Reading %s ...' % fname) + return_list = True + if condition is None: + evoked_node = _get_evoked_node(fname) + condition = range(len(evoked_node)) + elif not isinstance(condition, list): + condition = [condition] + return_list = False + + out = [] + for c in condition: + evoked = Evoked(fname, c, kind=kind, proj=proj, + allow_maxshield=allow_maxshield, + verbose=verbose) + if baseline is None and evoked.baseline is None: + logger.info(_log_rescale(None)) + elif baseline is None and evoked.baseline is not None: + # Don't touch an existing baseline + bmin, bmax = evoked.baseline + logger.info(f'Loaded Evoked data is baseline-corrected ' + f'(baseline: [{bmin:g}, {bmax:g}] sec)') + else: + evoked.apply_baseline(baseline) + out.append(evoked) + + return out if return_list else out[0] + + +def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): + """Read evoked data from a FIF file.""" + if fname is None: + raise ValueError('No evoked filename specified') + + f, tree, _ = fiff_open(fname) + with f as fid: + # Read the measurement info + info, meas = read_meas_info(fid, tree, clean_bads=True) + + # Locate the data of interest + processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) + if len(processed) == 0: + raise ValueError('Could not find processed data') + + evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED) + if len(evoked_node) == 0: + raise ValueError('Could not find evoked data') + + # find string-based entry + if isinstance(condition, str): + if kind not in _aspect_dict.keys(): + raise ValueError('kind must be "average" or ' + '"standard_error"') + + comments, aspect_kinds, t = _get_entries(fid, evoked_node, + allow_maxshield) + goods = (np.in1d(comments, [condition]) & + np.in1d(aspect_kinds, [_aspect_dict[kind]])) + found_cond = np.where(goods)[0] + if len(found_cond) != 1: + raise 
ValueError('condition "%s" (%s) not found, out of ' + 'found datasets:\n%s' + % (condition, kind, t)) + condition = found_cond[0] + elif condition is None: + if len(evoked_node) > 1: + _, _, conditions = _get_entries(fid, evoked_node, + allow_maxshield) + raise TypeError("Evoked file has more than one " + "condition, the condition parameters " + "must be specified from:\n%s" % conditions) + else: + condition = 0 + + if condition >= len(evoked_node) or condition < 0: + raise ValueError('Data set selector out of range') + + my_evoked = evoked_node[condition] + + # Identify the aspects + with info._unlock(): + my_aspect, info['maxshield'] = _get_aspect(my_evoked, + allow_maxshield) + + # Now find the data in the evoked block + nchan = 0 + sfreq = -1 + chs = [] + baseline = bmin = bmax = None + comment = last = first = first_time = nsamp = None + for k in range(my_evoked['nent']): + my_kind = my_evoked['directory'][k].kind + pos = my_evoked['directory'][k].pos + if my_kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comment = tag.data + elif my_kind == FIFF.FIFF_FIRST_SAMPLE: + tag = read_tag(fid, pos) + first = int(tag.data) + elif my_kind == FIFF.FIFF_LAST_SAMPLE: + tag = read_tag(fid, pos) + last = int(tag.data) + elif my_kind == FIFF.FIFF_NCHAN: + tag = read_tag(fid, pos) + nchan = int(tag.data) + elif my_kind == FIFF.FIFF_SFREQ: + tag = read_tag(fid, pos) + sfreq = float(tag.data) + elif my_kind == FIFF.FIFF_CH_INFO: + tag = read_tag(fid, pos) + chs.append(tag.data) + elif my_kind == FIFF.FIFF_FIRST_TIME: + tag = read_tag(fid, pos) + first_time = float(tag.data) + elif my_kind == FIFF.FIFF_NO_SAMPLES: + tag = read_tag(fid, pos) + nsamp = int(tag.data) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MIN: + tag = read_tag(fid, pos) + bmin = float(tag.data) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MAX: + tag = read_tag(fid, pos) + bmax = float(tag.data) + + if comment is None: + comment = 'No comment' + + if bmin is not None or bmax is not None: + # None's should've been replaced with floats + assert bmin is not None and bmax is not None + baseline = (bmin, bmax) + + # Local channel information? + if nchan > 0: + if chs is None: + raise ValueError('Local channel information was not found ' + 'when it was expected.') + + if len(chs) != nchan: + raise ValueError('Number of channels and number of ' + 'channel definitions are different') + + ch_names_mapping = _read_extended_ch_info(chs, my_evoked, fid) + info['chs'] = chs + info['bads'][:] = _rename_list(info['bads'], ch_names_mapping) + logger.info(' Found channel information in evoked data. 
' + 'nchan = %d' % nchan) + if sfreq > 0: + info['sfreq'] = sfreq + + # Read the data in the aspect block + nave = 1 + epoch = [] + for k in range(my_aspect['nent']): + kind = my_aspect['directory'][k].kind + pos = my_aspect['directory'][k].pos + if kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comment = tag.data + elif kind == FIFF.FIFF_ASPECT_KIND: + tag = read_tag(fid, pos) + aspect_kind = int(tag.data) + elif kind == FIFF.FIFF_NAVE: + tag = read_tag(fid, pos) + nave = int(tag.data) + elif kind == FIFF.FIFF_EPOCH: + tag = read_tag(fid, pos) + epoch.append(tag) + + nepoch = len(epoch) + if nepoch != 1 and nepoch != info['nchan']: + raise ValueError('Number of epoch tags is unreasonable ' + '(nepoch = %d nchan = %d)' + % (nepoch, info['nchan'])) + + if nepoch == 1: + # Only one epoch + data = epoch[0].data + # May need a transpose if the number of channels is one + if data.shape[1] == 1 and info['nchan'] == 1: + data = data.T + else: + # Put the old style epochs together + data = np.concatenate([e.data[None, :] for e in epoch], axis=0) + if np.isrealobj(data): + data = data.astype(np.float64) + else: + data = data.astype(np.complex128) + + if first_time is not None and nsamp is not None: + times = first_time + np.arange(nsamp) / info['sfreq'] + elif first is not None: + nsamp = last - first + 1 + times = np.arange(first, last + 1) / info['sfreq'] + else: + raise RuntimeError('Could not read time parameters') + del first, last + if nsamp is not None and data.shape[1] != nsamp: + raise ValueError('Incorrect number of samples (%d instead of ' + ' %d)' % (data.shape[1], nsamp)) + logger.info(' Found the data of interest:') + logger.info(' t = %10.2f ... %10.2f ms (%s)' + % (1000 * times[0], 1000 * times[-1], comment)) + if info['comps'] is not None: + logger.info(' %d CTF compensation matrices available' + % len(info['comps'])) + logger.info(' nave = %d - aspect type = %d' + % (nave, aspect_kind)) + + # Calibrate + cals = np.array([info['chs'][k]['cal'] * + info['chs'][k].get('scale', 1.0) + for k in range(info['nchan'])]) + data *= cals[:, np.newaxis] + + return info, nave, aspect_kind, comment, times, data, baseline + + +@verbose +def write_evokeds(fname, evoked, *, on_mismatch='raise', overwrite=False, + verbose=None): + """Write an evoked dataset to a file. + + Parameters + ---------- + fname : str + The file name, which should end with -ave.fif or -ave.fif.gz. + evoked : Evoked instance, or list of Evoked instances + The evoked dataset, or list of evoked datasets, to save in one file. + Note that the measurement info from the first evoked instance is used, + so be sure that information matches. + %(on_mismatch_info)s + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + .. versionadded:: 0.24 + + See Also + -------- + read_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + Information on baseline correction will be stored with each individual + `~mne.Evoked` object, and will be restored when reading the data again + via `mne.read_evokeds`. 
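+
+    A minimal usage sketch (the file name and evoked objects are
+    hypothetical)::
+
+        write_evokeds('sample-ave.fif', [evoked_left, evoked_right],
+                      overwrite=True)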
+ """ + _write_evokeds(fname, evoked, on_mismatch=on_mismatch, overwrite=overwrite) + + +def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', + overwrite=False): + """Write evoked data.""" + from .dipole import DipoleFixed # avoid circular import + + fname = _check_fname(fname=fname, overwrite=overwrite) + if check: + check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', + '_ave.fif', '_ave.fif.gz')) + + if not isinstance(evoked, (list, tuple)): + evoked = [evoked] + + warned = False + # Create the file and save the essentials + with start_and_end_file(fname) as fid: + + start_block(fid, FIFF.FIFFB_MEAS) + write_id(fid, FIFF.FIFF_BLOCK_ID) + if evoked[0].info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id']) + + # Write measurement info + write_meas_info(fid, evoked[0].info) + + # One or more evoked data sets + start_block(fid, FIFF.FIFFB_PROCESSED_DATA) + for ei, e in enumerate(evoked): + if ei: + _ensure_infos_match(info1=evoked[0].info, info2=e.info, + name=f'evoked[{ei}]', + on_mismatch=on_mismatch) + start_block(fid, FIFF.FIFFB_EVOKED) + + # Comment is optional + if e.comment is not None and len(e.comment) > 0: + write_string(fid, FIFF.FIFF_COMMENT, e.comment) + + # First time, num. samples, first and last sample + write_float(fid, FIFF.FIFF_FIRST_TIME, e.times[0]) + write_int(fid, FIFF.FIFF_NO_SAMPLES, len(e.times)) + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first) + write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last) + + # Baseline + if not isinstance(e, DipoleFixed) and e.baseline is not None: + bmin, bmax = e.baseline + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) + + # The evoked data itself + if e.info.get('maxshield'): + aspect = FIFF.FIFFB_IAS_ASPECT + else: + aspect = FIFF.FIFFB_ASPECT + start_block(fid, aspect) + + write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind) + # convert nave to integer to comply with FIFF spec + nave_int = int(round(e.nave)) + if nave_int != e.nave and not warned: + warn('converting "nave" to integer before saving evoked; this ' + 'can have a minor effect on the scale of source ' + 'estimates that are computed using "nave".') + warned = True + write_int(fid, FIFF.FIFF_NAVE, nave_int) + del nave_int + + decal = np.zeros((e.info['nchan'], 1)) + for k in range(e.info['nchan']): + decal[k] = 1.0 / (e.info['chs'][k]['cal'] * + e.info['chs'][k].get('scale', 1.0)) + + if np.iscomplexobj(e.data): + write_function = write_complex_float_matrix + else: + write_function = write_float_matrix + + write_function(fid, FIFF.FIFF_EPOCH, decal * e.data) + end_block(fid, aspect) + end_block(fid, FIFF.FIFFB_EVOKED) + + end_block(fid, FIFF.FIFFB_PROCESSED_DATA) + end_block(fid, FIFF.FIFFB_MEAS) + + +def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): + """Get feature-index and time of maximum signal from 2D array. + + Note. This is a 'getter', not a 'finder'. For non-evoked type + data and continuous signals, please use proper peak detection algorithms. + + Parameters + ---------- + data : instance of numpy.ndarray (n_locations, n_times) + The data, either evoked in sensor or source space. + times : instance of numpy.ndarray (n_times) + The times in seconds. + tmin : float | None + The minimum point in time to be considered for peak getting. + tmax : float | None + The maximum point in time to be considered for peak getting. + mode : {'pos', 'neg', 'abs'} + How to deal with the sign of the data. 
If 'pos' only positive + values will be considered. If 'neg' only negative values will + be considered. If 'abs' absolute values will be considered. + Defaults to 'abs'. + + Returns + ------- + max_loc : int + The index of the feature with the maximum value. + max_time : int + The time point of the maximum response, index. + max_amp : float + Amplitude of the maximum response. + """ + _check_option('mode', mode, ['abs', 'neg', 'pos']) + + if tmin is None: + tmin = times[0] + if tmax is None: + tmax = times[-1] + + if tmin < times.min(): + raise ValueError('The tmin value is out of bounds. It must be ' + 'within {} and {}'.format(times.min(), times.max())) + if tmax > times.max(): + raise ValueError('The tmax value is out of bounds. It must be ' + 'within {} and {}'.format(times.min(), times.max())) + if tmin > tmax: + raise ValueError('The tmin must be smaller or equal to tmax') + + time_win = (times >= tmin) & (times <= tmax) + mask = np.ones_like(data).astype(bool) + mask[:, time_win] = False + + maxfun = np.argmax + if mode == 'pos': + if not np.any(data > 0): + raise ValueError('No positive values encountered. Cannot ' + 'operate in pos mode.') + elif mode == 'neg': + if not np.any(data < 0): + raise ValueError('No negative values encountered. Cannot ' + 'operate in neg mode.') + maxfun = np.argmin + + masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data, + mask=mask) + + max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape) + + return max_loc, max_time, data[max_loc, max_time] diff --git a/python/libs/mne/export/__init__.py b/python/libs/mne/export/__init__.py new file mode 100644 index 0000000..9d7abae --- /dev/null +++ b/python/libs/mne/export/__init__.py @@ -0,0 +1,2 @@ +from ._export import export_raw, export_epochs, export_evokeds +from ._egimff import export_evokeds_mff diff --git a/python/libs/mne/export/_edf.py b/python/libs/mne/export/_edf.py new file mode 100644 index 0000000..b2ed4f3 --- /dev/null +++ b/python/libs/mne/export/_edf.py @@ -0,0 +1,281 @@ +# -*- coding: utf-8 -*- +# Authors: MNE Developers +# +# License: BSD-3-Clause + +from contextlib import contextmanager +import numpy as np + +from ..utils import _check_edflib_installed, warn +_check_edflib_installed() +from EDFlib.edfwriter import EDFwriter # noqa: E402 + + +def _try_to_set_value(header, key, value, channel_index=None): + """Set key/value pairs in EDF header.""" + # all EDFLib set functions are set + # for example "setPatientName()" + func_name = f'set{key}' + func = getattr(header, func_name) + + # some setter functions are indexed by channels + if channel_index is None: + return_val = func(value) + else: + return_val = func(channel_index, value) + + # a nonzero return value indicates an error + if return_val != 0: + raise RuntimeError(f"Setting {key} with {value} " + f"returned an error value " + f"{return_val}.") + + +@contextmanager +def _auto_close(fid): + # try to close the handle no matter what + try: + yield fid + finally: + try: + fid.close() + except Exception: + pass # we did our best + + +def _export_raw(fname, raw, physical_range, add_ch_type): + """Export Raw objects to EDF files. + + TODO: if in future the Info object supports transducer or + technician information, allow writing those here. 
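+
+    Data are written one record per second of signal (at the rounded output
+    rate), so the final record is zero-padded whenever the recording length
+    is not a whole number of seconds (see the block-writing loop below).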
+ """ + # scale to save data in EDF + phys_dims = 'uV' + + # get EEG-related data in uV + units = dict(eeg='uV', ecog='uV', seeg='uV', eog='uV', ecg='uV', emg='uV', + bio='uV', dbs='uV') + + digital_min = -32767 + digital_max = 32767 + file_type = EDFwriter.EDFLIB_FILETYPE_EDFPLUS + + # load data first + raw.load_data() + + # remove extra STI channels + orig_ch_types = raw.get_channel_types() + drop_chs = [] + if 'stim' in orig_ch_types: + stim_index = np.argwhere(np.array(orig_ch_types) == 'stim') + stim_index = np.atleast_1d(stim_index.squeeze()).tolist() + drop_chs.extend([raw.ch_names[idx] for idx in stim_index]) + + # Add warning if any channel types are not voltage based. + # Users are expected to only export data that is voltage based, + # such as EEG, ECoG, sEEG, etc. + # Non-voltage channels are dropped by the export function. + # Note: we can write these other channels, such as 'misc' + # but these are simply a "catch all" for unknown or undesired + # channels. + voltage_types = list(units) + ['stim', 'misc'] + non_voltage_ch = [ch not in voltage_types for ch in orig_ch_types] + if any(non_voltage_ch): + warn(f"Non-voltage channels detected: {non_voltage_ch}. MNE-Python's " + 'EDF exporter only supports voltage-based channels, because the ' + 'EDF format cannot accommodate much of the accompanying data ' + 'necessary for channel types like MEG and fNIRS (channel ' + 'orientations, coordinate frame transforms, etc). You can ' + 'override this restriction by setting those channel types to ' + '"misc" but no guarantees are made of the fidelity of that ' + 'approach.') + + ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] + ch_types = np.array(raw.get_channel_types(picks=ch_names)) + n_channels = len(ch_names) + n_times = raw.n_times + + # Sampling frequency in EDF only supports integers, so to allow for + # float sampling rates from Raw, we adjust the output sampling rate + # for all channels and the data record duration. + sfreq = raw.info['sfreq'] + if float(sfreq).is_integer(): + out_sfreq = int(sfreq) + data_record_duration = None + else: + out_sfreq = np.floor(sfreq).astype(int) + data_record_duration = int(np.around( + out_sfreq / sfreq, decimals=6) * 1e6) + + warn(f'Data has a non-integer sampling rate of {sfreq}; writing to ' + 'EDF format may cause a small change to sample times.') + + # get any filter information applied to the data + lowpass = raw.info['lowpass'] + highpass = raw.info['highpass'] + linefreq = raw.info['line_freq'] + filter_str_info = f"HP:{highpass}Hz LP:{lowpass}Hz N:{linefreq}Hz" + + # get the entire dataset in uV + data = raw.get_data(units=units, picks=ch_names) + + if physical_range == 'auto': + # get max and min for each channel type data + ch_types_phys_max = dict() + ch_types_phys_min = dict() + + for _type in np.unique(ch_types): + _picks = np.nonzero(ch_types == _type)[0] + _data = raw.get_data(units=units, picks=_picks) + ch_types_phys_max[_type] = _data.max() + ch_types_phys_min[_type] = _data.min() + else: + # get the physical min and max of the data in uV + # Physical ranges of the data in uV is usually set by the manufacturer + # and properties of the electrode. In general, physical max and min + # should be the clipping levels of the ADC input and they should be + # the same for all channels. For example, Nihon Kohden uses +3200 uV + # and -3200 uV for all EEG channels (which are the actual clipping + # levels of their input amplifiers & ADC). 
+ # For full discussion, see: https://github.com/sccn/eeglab/issues/246 + pmin, pmax = physical_range[0], physical_range[1] + + # check that physical min and max is not exceeded + if data.max() > pmax: + raise RuntimeError(f'The maximum μV of the data {data.max()} is ' + f'more than the physical max passed in {pmax}.') + if data.min() < pmin: + raise RuntimeError(f'The minimum μV of the data {data.min()} is ' + f'less than the physical min passed in {pmin}.') + + # create instance of EDF Writer + with _auto_close(EDFwriter(fname, file_type, n_channels)) as hdl: + # set channel data + for idx, ch in enumerate(ch_names): + ch_type = ch_types[idx] + signal_label = f'{ch_type.upper()} {ch}' if add_ch_type else ch + if len(signal_label) > 16: + raise RuntimeError(f'Signal label for {ch} ({ch_type}) is ' + f'longer than 16 characters, which is not ' + f'supported in EDF. Please shorten the ' + f'channel name before exporting to EDF.') + + if physical_range == 'auto': + # take the channel type minimum and maximum + pmin = ch_types_phys_min[ch_type] + pmax = ch_types_phys_max[ch_type] + for key, val in [('PhysicalMaximum', pmax), + ('PhysicalMinimum', pmin), + ('DigitalMaximum', digital_max), + ('DigitalMinimum', digital_min), + ('PhysicalDimension', phys_dims), + ('SampleFrequency', out_sfreq), + ('SignalLabel', signal_label), + ('PreFilter', filter_str_info)]: + _try_to_set_value(hdl, key, val, channel_index=idx) + + # set patient info + subj_info = raw.info.get('subject_info') + if subj_info is not None: + birthday = subj_info.get('birthday') + + # get the full name of subject if available + first_name = subj_info.get('first_name') + last_name = subj_info.get('last_name') + first_name = first_name or '' + last_name = last_name or '' + joiner = '' + if len(first_name) and len(last_name): + joiner = ' ' + name = joiner.join([first_name, last_name]) + + hand = subj_info.get('hand') + sex = subj_info.get('sex') + + if birthday is not None: + if hdl.setPatientBirthDate(birthday[0], birthday[1], + birthday[2]) != 0: + raise RuntimeError( + f"Setting patient birth date to {birthday} " + f"returned an error") + for key, val in [('PatientName', name), + ('PatientGender', sex), + ('AdditionalPatientInfo', f'hand={hand}')]: + _try_to_set_value(hdl, key, val) + + # set measurement date + meas_date = raw.info['meas_date'] + if meas_date: + subsecond = int(meas_date.microsecond / 100) + if hdl.setStartDateTime(year=meas_date.year, month=meas_date.month, + day=meas_date.day, hour=meas_date.hour, + minute=meas_date.minute, + second=meas_date.second, + subsecond=subsecond) != 0: + raise RuntimeError(f"Setting start date time {meas_date} " + f"returned an error") + + device_info = raw.info.get('device_info') + if device_info is not None: + device_type = device_info.get('type') + _try_to_set_value(hdl, 'Equipment', device_type) + + # set data record duration + if data_record_duration is not None: + _try_to_set_value(hdl, 'DataRecordDuration', data_record_duration) + + # compute number of data records to loop over + n_blocks = np.ceil(n_times / out_sfreq).astype(int) + + # increase the number of annotation signals if necessary + annots = raw.annotations + if annots is not None: + n_annotations = len(raw.annotations) + n_annot_chans = int(n_annotations / n_blocks) + if np.mod(n_annotations, n_blocks): + n_annot_chans += 1 + if n_annot_chans > 1: + hdl.setNumberOfAnnotationSignals(n_annot_chans) + + # Write each data record sequentially + for idx in range(n_blocks): + end_samp = (idx + 1) * out_sfreq + if end_samp > 
n_times: + end_samp = n_times + start_samp = idx * out_sfreq + + # then for each datarecord write each channel + for jdx in range(n_channels): + # create a buffer with sampling rate + buf = np.zeros(out_sfreq, np.float64, "C") + + # get channel data for this block + ch_data = data[jdx, start_samp:end_samp] + + # assign channel data to the buffer and write to EDF + buf[:len(ch_data)] = ch_data + err = hdl.writeSamples(buf) + if err != 0: + raise RuntimeError( + f"writeSamples() for channel{ch_names[jdx]} " + f"returned error: {err}") + + # there was an incomplete datarecord + if len(ch_data) != len(buf): + warn(f'EDF format requires equal-length data blocks, ' + f'so {(len(buf) - len(ch_data)) / sfreq} seconds of ' + 'zeros were appended to all channels when writing the ' + 'final block.') + + # write annotations + if annots is not None: + for desc, onset, duration in zip(raw.annotations.description, + raw.annotations.onset, + raw.annotations.duration): + # annotations are written in terms of 100 microseconds + onset = onset * 10000 + duration = duration * 10000 + if hdl.writeAnnotation(onset, duration, desc) != 0: + raise RuntimeError(f'writeAnnotation() returned an error ' + f'trying to write {desc} at {onset} ' + f'for {duration} seconds.') diff --git a/python/libs/mne/export/_eeglab.py b/python/libs/mne/export/_eeglab.py new file mode 100644 index 0000000..09e58bd --- /dev/null +++ b/python/libs/mne/export/_eeglab.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Authors: MNE Developers +# +# License: BSD-3-Clause + +import numpy as np + +from ..utils import _check_eeglabio_installed +_check_eeglabio_installed() +import eeglabio.raw # noqa: E402 +import eeglabio.epochs # noqa: E402 + + +def _export_raw(fname, raw): + # load data first + raw.load_data() + + # remove extra epoc and STI channels + drop_chs = ['epoc'] + if not (raw.filenames[0].endswith('.fif')): + drop_chs.append('STI 014') + + ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(raw.info['chs'], drop_chs) + + annotations = [raw.annotations.description, + raw.annotations.onset, + raw.annotations.duration] + eeglabio.raw.export_set( + fname, data=raw.get_data(picks=ch_names), sfreq=raw.info['sfreq'], + ch_names=ch_names, ch_locs=cart_coords, annotations=annotations) + + +def _export_epochs(fname, epochs): + _check_eeglabio_installed() + # load data first + epochs.load_data() + + # remove extra epoc and STI channels + drop_chs = ['epoc', 'STI 014'] + ch_names = [ch for ch in epochs.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(epochs.info['chs'], drop_chs) + + eeglabio.epochs.export_set( + fname, data=epochs.get_data(picks=ch_names), + sfreq=epochs.info['sfreq'], events=epochs.events, + tmin=epochs.tmin, tmax=epochs.tmax, ch_names=ch_names, + event_id=epochs.event_id, ch_locs=cart_coords) + + +def _get_als_coords_from_chs(chs, drop_chs=None): + """Extract channel locations in ALS format (x, y, z) from a chs instance. 
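+
+    MNE head coordinates are RAS-like (+x right, +y anterior, +z up), while
+    ALS is (+x anterior, +y left, +z up); the conversion implemented below is
+    therefore ``(x, y, z) -> (y, -x, z)``.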
+ + Returns + ------- + None if no valid coordinates are found (all zeros) + """ + if drop_chs is None: + drop_chs = [] + cart_coords = np.array([d['loc'][:3] for d in chs + if d['ch_name'] not in drop_chs]) + if cart_coords.any(): # has coordinates + # (-y x z) to (x y z) + cart_coords[:, 0] = -cart_coords[:, 0] # -y to y + # swap x (1) and y (0) + cart_coords[:, [0, 1]] = cart_coords[:, [1, 0]] + else: + cart_coords = None + return cart_coords diff --git a/python/libs/mne/export/_egimff.py b/python/libs/mne/export/_egimff.py new file mode 100644 index 0000000..7bcaf0e --- /dev/null +++ b/python/libs/mne/export/_egimff.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +# Authors: MNE Developers +# +# License: BSD-3-Clause + +import os +import shutil +import datetime +import os.path as op + +import numpy as np + +from ..io.egi.egimff import _import_mffpy +from ..io.pick import pick_types, pick_channels +from ..utils import verbose, warn, _check_fname + + +@verbose +def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, + verbose=None): + """Export evoked dataset to MFF. + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + evoked : list of Evoked instances + List of evoked datasets to export to one file. Note that the + measurement info from the first evoked instance is used, so be sure + that information matches. + history : None (default) | list of dict + Optional list of history entries (dictionaries) to be written to + history.xml. This must adhere to the format described in + mffpy.xml_files.History.content. If None, no history.xml will be + written. + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_evoked)s + + Only EEG channels are written to the output file. + ``info['device_info']['type']`` must be a valid MFF recording device + (e.g. 'HydroCel GSN 256 1.0'). This field is automatically populated when + using MFF read functions. + """ + mffpy = _import_mffpy('Export evokeds to MFF.') + import pytz + info = evoked[0].info + if np.round(info['sfreq']) != info['sfreq']: + raise ValueError('Sampling frequency must be a whole number. ' + f'sfreq: {info["sfreq"]}') + sampling_rate = int(info['sfreq']) + + # check for unapplied projectors + if any(not proj['active'] for proj in evoked[0].info['projs']): + warn('Evoked instance has unapplied projectors. Consider applying ' + 'them before exporting with evoked.apply_proj().') + + # Initialize writer + # Future changes: conditions based on version or mffpy requirement if + # https://github.com/BEL-Public/mffpy/pull/92 is merged and released. + fname = _check_fname(fname, overwrite=overwrite) + if op.exists(fname): + os.remove(fname) if op.isfile(fname) else shutil.rmtree(fname) + writer = mffpy.Writer(fname) + current_time = pytz.utc.localize(datetime.datetime.utcnow()) + writer.addxml('fileInfo', recordTime=current_time) + try: + device = info['device_info']['type'] + except (TypeError, KeyError): + raise ValueError('No device type. 
Cannot determine sensor layout.') + writer.add_coordinates_and_sensor_layout(device) + + # Add EEG data + eeg_channels = pick_types(info, eeg=True, exclude=[]) + eeg_bin = mffpy.bin_writer.BinWriter(sampling_rate) + for ave in evoked: + # Signals are converted to µV + block = (ave.data[eeg_channels] * 1e6).astype(np.float32) + eeg_bin.add_block(block, offset_us=0) + writer.addbin(eeg_bin) + + # Add categories + categories_content = _categories_content_from_evokeds(evoked) + writer.addxml('categories', categories=categories_content) + + # Add history + if history: + writer.addxml('historyEntries', entries=history) + + writer.write() + + +def _categories_content_from_evokeds(evoked): + """Return categories.xml content for evoked dataset.""" + content = dict() + begin_time = 0 + for ave in evoked: + # Times are converted to microseconds + sfreq = ave.info['sfreq'] + duration = np.round(len(ave.times) / sfreq * 1e6).astype(int) + end_time = begin_time + duration + event_time = begin_time - np.round(ave.tmin * 1e6).astype(int) + eeg_bads = _get_bad_eeg_channels(ave.info) + content[ave.comment] = [ + _build_segment_content(begin_time, end_time, event_time, eeg_bads, + name='Average', nsegs=ave.nave) + ] + begin_time += duration + return content + + +def _get_bad_eeg_channels(info): + """Return a list of bad EEG channels formatted for categories.xml. + + Given a list of only the EEG channels in file, return the indices of this + list (starting at 1) that correspond to bad channels. + """ + if len(info['bads']) == 0: + return [] + eeg_channels = pick_types(info, eeg=True, exclude=[]) + bad_channels = pick_channels(info['ch_names'], info['bads']) + bads_elementwise = np.isin(eeg_channels, bad_channels) + return list(np.flatnonzero(bads_elementwise) + 1) + + +def _build_segment_content(begin_time, end_time, event_time, eeg_bads, + status='unedited', name=None, pns_bads=None, + nsegs=None): + """Build content for a single segment in categories.xml. + + Segments are sorted into categories in categories.xml. In a segmented MFF + each category can contain multiple segments, but in an averaged MFF each + category only contains one segment (the average). + """ + channel_status = [{ + 'signalBin': 1, + 'exclusion': 'badChannels', + 'channels': eeg_bads + }] + if pns_bads: + channel_status.append({ + 'signalBin': 2, + 'exclusion': 'badChannels', + 'channels': pns_bads + }) + content = { + 'status': status, + 'beginTime': begin_time, + 'endTime': end_time, + 'evtBegin': event_time, + 'evtEnd': event_time, + 'channelStatus': channel_status, + } + if name: + content['name'] = name + if nsegs: + content['keys'] = { + '#seg': { + 'type': 'long', + 'data': nsegs + } + } + return content diff --git a/python/libs/mne/export/_export.py b/python/libs/mne/export/_export.py new file mode 100644 index 0000000..1d3f0b4 --- /dev/null +++ b/python/libs/mne/export/_export.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Authors: MNE Developers +# +# License: BSD-3-Clause + +import os.path as op + +from ._egimff import export_evokeds_mff +from ..utils import logger, verbose, warn, _check_fname, _validate_type + + +@verbose +def export_raw(fname, raw, fmt='auto', physical_range='auto', + add_ch_type=False, *, overwrite=False, verbose=None): + """Export Raw to external formats. + + Supported formats: + - EEGLAB (.set, uses :mod:`eeglabio`) + - EDF (.edf, uses ``EDFlib-Python``) + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + raw : instance of Raw + The raw instance to export. 
+ %(fmt_export_params)s + %(physical_range_export_params)s + %(add_ch_type_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_raw)s + %(export_eeglab_note)s + %(export_edf_note)s + """ + fname = _check_fname(fname, overwrite=overwrite) + supported_export_formats = { # format : extensions + 'eeglab': ('set',), + 'edf': ('edf',), + 'brainvision': ('eeg', 'vmrk', 'vhdr',) + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + # check for unapplied projectors + if any(not proj['active'] for proj in raw.info['projs']): + warn('Raw instance has unapplied projectors. Consider applying ' + 'them before exporting with raw.apply_proj().') + + if fmt == 'eeglab': + from ._eeglab import _export_raw + _export_raw(fname, raw) + elif fmt == 'edf': + from ._edf import _export_raw + _export_raw(fname, raw, physical_range, add_ch_type) + elif fmt == 'brainvision': + raise NotImplementedError('Export to BrainVision not implemented.') + + +@verbose +def export_epochs(fname, epochs, fmt='auto', *, overwrite=False, verbose=None): + """Export Epochs to external formats. + + Supported formats: EEGLAB (set, uses :mod:`eeglabio`) + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + epochs : instance of Epochs + The epochs to export. + %(fmt_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_epochs)s + %(export_eeglab_note)s + """ + fname = _check_fname(fname, overwrite=overwrite) + supported_export_formats = { + 'eeglab': ('set',), + 'edf': ('edf',), + 'brainvision': ('eeg', 'vmrk', 'vhdr',) + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + # check for unapplied projectors + if any(not proj['active'] for proj in epochs.info['projs']): + warn('Epochs instance has unapplied projectors. Consider applying ' + 'them before exporting with epochs.apply_proj().') + + if fmt == 'eeglab': + from ._eeglab import _export_epochs + _export_epochs(fname, epochs) + elif fmt == 'edf': + raise NotImplementedError('Export to EDF format not implemented.') + elif fmt == 'brainvision': + raise NotImplementedError('Export to BrainVision not implemented.') + + +@verbose +def export_evokeds(fname, evoked, fmt='auto', *, overwrite=False, + verbose=None): + """Export evoked dataset to external formats. + + This function is a wrapper for format-specific export functions. The export + function is selected based on the inferred file format. For additional + options, use the format-specific functions. + + Supported formats + MFF (mff, uses :func:`mne.export.export_evokeds_mff`) + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + evoked : Evoked instance, or list of Evoked instances + The evoked dataset, or list of evoked datasets, to export to one file. + Note that the measurement info from the first evoked instance is used, + so be sure that information matches. + fmt : 'auto' | 'mff' + Format of the export. Defaults to ``'auto'``, which will infer the + format from the filename extension. See supported formats above for + more information. + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + See Also + -------- + mne.write_evokeds + mne.export.export_evokeds_mff + + Notes + ----- + .. 
versionadded:: 0.24 + + %(export_warning_note_evoked)s + """ + fname = _check_fname(fname, overwrite=overwrite) + supported_export_formats = { + 'mff': ('mff',), + 'eeglab': ('set',), + 'edf': ('edf',), + 'brainvision': ('eeg', 'vmrk', 'vhdr',) + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + if not isinstance(evoked, list): + evoked = [evoked] + + logger.info(f'Exporting evoked dataset to {fname}...') + + if fmt == 'mff': + export_evokeds_mff(fname, evoked, overwrite=overwrite) + elif fmt == 'eeglab': + raise NotImplementedError('Export to EEGLAB not implemented.') + elif fmt == 'edf': + raise NotImplementedError('Export to EDF not implemented.') + elif fmt == 'brainvision': + raise NotImplementedError('Export to BrainVision not implemented.') + + +def _infer_check_export_fmt(fmt, fname, supported_formats): + """Infer export format from filename extension if auto. + + Raises error if fmt is auto and no file extension found, + then checks format against supported formats, raises error if format is not + supported. + + Parameters + ---------- + fmt : str + Format of the export, will only infer the format from filename if fmt + is auto. + fname : str + Name of the target export file, only used when fmt is auto. + supported_formats : dict of str : tuple/list + Dictionary containing supported formats (as keys) and each format's + corresponding file extensions in a tuple/list (e.g. 'eeglab': ('set',)) + """ + _validate_type(fmt, str, 'fmt') + fmt = fmt.lower() + if fmt == "auto": + fmt = op.splitext(fname)[1] + if fmt: + fmt = fmt[1:].lower() + # find fmt in supported formats dict's tuples + fmt = next((k for k, v in supported_formats.items() if fmt in v), + fmt) # default to original fmt for raising error later + else: + raise ValueError(f"Couldn't infer format from filename {fname}" + " (no extension found)") + + if fmt not in supported_formats: + supported = [] + for format, extensions in supported_formats.items(): + ext_str = ', '.join(f'*.{ext}' for ext in extensions) + supported.append(f'{format} ({ext_str})') + + supported_str = ', '.join(supported) + raise ValueError(f"Format '{fmt}' is not supported. 
" + f"Supported formats are {supported_str}.") + return fmt diff --git a/python/libs/mne/export/tests/test_export.py b/python/libs/mne/export/tests/test_export.py new file mode 100644 index 0000000..2cca928 --- /dev/null +++ b/python/libs/mne/export/tests/test_export.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- +"""Test exporting functions.""" +# Authors: MNE Developers +# +# License: BSD-3-Clause + +from datetime import datetime, timezone +from mne.io import RawArray +from mne.io.meas_info import create_info +from pathlib import Path +import os.path as op + +import pytest +import numpy as np +from numpy.testing import (assert_allclose, assert_array_almost_equal, + assert_array_equal) + +from mne import (read_epochs_eeglab, Epochs, read_evokeds, read_evokeds_mff, + Annotations) +from mne.datasets import testing, misc +from mne.export import export_evokeds, export_evokeds_mff +from mne.io import read_raw_fif, read_raw_eeglab, read_raw_edf +from mne.utils import (_check_eeglabio_installed, requires_version, + object_diff, _check_edflib_installed, _resource_path) +from mne.tests.test_epochs import _get_data + +base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +fname_evoked = op.join(base_dir, 'test-ave.fif') + +data_path = testing.data_path(download=False) +egi_evoked_fname = op.join(data_path, 'EGI', 'test_egi_evoked.mff') +misc_path = misc.data_path(download=False) + + +@requires_version('pymatreader') +@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), + reason='eeglabio not installed') +def test_export_raw_eeglab(tmp_path): + """Test saving a Raw instance to EEGLAB's set format.""" + fname = (Path(__file__).parent.parent.parent / + "io" / "tests" / "data" / "test_raw.fif") + raw = read_raw_fif(fname, preload=True) + raw.apply_proj() + temp_fname = op.join(str(tmp_path), 'test.set') + raw.export(temp_fname) + raw.drop_channels([ch for ch in ['epoc'] + if ch in raw.ch_names]) + raw_read = read_raw_eeglab(temp_fname, preload=True) + assert raw.ch_names == raw_read.ch_names + cart_coords = np.array([d['loc'][:3] for d in raw.info['chs']]) # just xyz + cart_coords_read = np.array([d['loc'][:3] for d in raw_read.info['chs']]) + assert_allclose(cart_coords, cart_coords_read) + assert_allclose(raw.times, raw_read.times) + assert_allclose(raw.get_data(), raw_read.get_data()) + + # test overwrite + with pytest.raises(FileExistsError, match='Destination file exists'): + raw.export(temp_fname, overwrite=False) + raw.export(temp_fname, overwrite=True) + + # test pathlib.Path files + raw.export(Path(temp_fname), overwrite=True) + + # test warning with unapplied projectors + raw = read_raw_fif(fname, preload=True) + with pytest.warns(RuntimeWarning, + match='Raw instance has unapplied projectors.'): + raw.export(temp_fname, overwrite=True) + + +@pytest.mark.skipif(not _check_edflib_installed(strict=False), + reason='edflib-python not installed') +def test_double_export_edf(tmp_path): + """Test exporting an EDF file multiple times.""" + rng = np.random.RandomState(123456) + format = 'edf' + ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', 'eog', 'ecg', + 'emg', 'dbs', 'bio'] + info = create_info(len(ch_types), sfreq=1000, ch_types=ch_types) + data = rng.random(size=(len(ch_types), 1000)) * 1e-5 + + # include subject info and measurement date + info['subject_info'] = dict(first_name='mne', last_name='python', + birthday=(1992, 1, 20), sex=1, hand=3) + raw = RawArray(data, info) + + # export once + temp_fname = tmp_path / f'test.{format}' + 
raw.export(temp_fname, add_ch_type=True) + raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) + + # export again + raw_read.load_data() + raw_read.export(temp_fname, add_ch_type=True, overwrite=True) + raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) + + # stim channel should be dropped + raw.drop_channels('2') + + assert raw.ch_names == raw_read.ch_names + # only compare the original length, since extra zeros are appended + orig_raw_len = len(raw) + assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) + assert_allclose( + raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + + # check channel types except for 'bio', which loses its type + orig_ch_types = raw.get_channel_types() + read_ch_types = raw_read.get_channel_types() + assert_array_equal(orig_ch_types, read_ch_types) + + +@pytest.mark.skipif(not _check_edflib_installed(strict=False), + reason='edflib-python not installed') +def test_export_edf_annotations(tmp_path): + """Test that exporting EDF preserves annotations.""" + rng = np.random.RandomState(123456) + format = 'edf' + ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', + 'eog', 'ecg', 'emg', 'dbs', 'bio'] + ch_names = np.arange(len(ch_types)).astype(str).tolist() + info = create_info(ch_names, sfreq=1000, + ch_types=ch_types) + data = rng.random(size=(len(ch_names), 2000)) * 1.e-5 + raw = RawArray(data, info) + + annotations = Annotations( + onset=[0.01, 0.05, 0.90, 1.05], duration=[0, 1, 0, 0], + description=['test1', 'test2', 'test3', 'test4']) + raw.set_annotations(annotations) + + # export + temp_fname = op.join(str(tmp_path), f'test.{format}') + raw.export(temp_fname) + + # read in the file + raw_read = read_raw_edf(temp_fname, preload=True) + assert_array_equal(raw.annotations.onset, raw_read.annotations.onset) + assert_array_equal(raw.annotations.duration, raw_read.annotations.duration) + assert_array_equal(raw.annotations.description, + raw_read.annotations.description) + + +@pytest.mark.skipif(not _check_edflib_installed(strict=False), + reason='edflib-python not installed') +def test_rawarray_edf(tmp_path): + """Test saving a Raw array with integer sfreq to EDF.""" + rng = np.random.RandomState(12345) + format = 'edf' + ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'seeg', 'eog', 'ecg', 'emg', + 'dbs', 'bio'] + ch_names = np.arange(len(ch_types)).astype(str).tolist() + info = create_info(ch_names, sfreq=1000, + ch_types=ch_types) + data = rng.random(size=(len(ch_names), 1000)) * 1e-5 + + # include subject info and measurement date + subject_info = dict(first_name='mne', last_name='python', + birthday=(1992, 1, 20), sex=1, hand=3) + info['subject_info'] = subject_info + raw = RawArray(data, info) + time_now = datetime.now() + meas_date = datetime(year=time_now.year, month=time_now.month, + day=time_now.day, hour=time_now.hour, + minute=time_now.minute, second=time_now.second, + tzinfo=timezone.utc) + raw.set_meas_date(meas_date) + temp_fname = op.join(str(tmp_path), f'test.{format}') + + raw.export(temp_fname, add_ch_type=True) + raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) + + # stim channel should be dropped + raw.drop_channels('2') + + assert raw.ch_names == raw_read.ch_names + # only compare the original length, since extra zeros are appended + orig_raw_len = len(raw) + assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) + assert_allclose( + raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + + # 
check channel types except for 'bio', which loses its type + orig_ch_types = raw.get_channel_types() + read_ch_types = raw_read.get_channel_types() + assert_array_equal(orig_ch_types, read_ch_types) + assert raw.info['meas_date'] == raw_read.info['meas_date'] + + # channel name can't be longer than 16 characters with the type added + raw_bad = raw.copy() + raw_bad.rename_channels({'1': 'abcdefghijklmnopqrstuvwxyz'}) + with pytest.raises(RuntimeError, match='Signal label'), \ + pytest.warns(RuntimeWarning, match='Data has a non-integer'): + raw_bad.export(temp_fname, overwrite=True) + + # include bad birthday that is non-EDF compliant + bad_info = info.copy() + bad_info['subject_info']['birthday'] = (1700, 1, 20) + raw = RawArray(data, bad_info) + with pytest.raises(RuntimeError, match='Setting patient birth date'): + raw.export(temp_fname, overwrite=True) + + # include bad measurement date that is non-EDF compliant + raw = RawArray(data, info) + meas_date = datetime(year=1984, month=1, day=1, tzinfo=timezone.utc) + raw.set_meas_date(meas_date) + with pytest.raises(RuntimeError, match='Setting start date time'): + raw.export(temp_fname, overwrite=True) + + # test that warning is raised if there are non-voltage based channels + raw = RawArray(data, info) + with pytest.warns(RuntimeWarning, match='The unit'): + raw.set_channel_types({'9': 'hbr'}) + with pytest.warns(RuntimeWarning, match='Non-voltage channels'): + raw.export(temp_fname, overwrite=True) + + # data should match up to the non-accepted channel + raw_read = read_raw_edf(temp_fname, preload=True) + orig_raw_len = len(raw) + assert_array_almost_equal( + raw.get_data()[:-1, :], raw_read.get_data()[:, :orig_raw_len], + decimal=4) + assert_allclose( + raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + + # the data should still match though + raw_read = read_raw_edf(temp_fname, preload=True) + raw.drop_channels('2') + assert raw.ch_names == raw_read.ch_names + orig_raw_len = len(raw) + assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) + assert_allclose( + raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + + +@pytest.mark.skipif(not _check_edflib_installed(strict=False), + reason='edflib-python not installed') +@pytest.mark.parametrize( + ['dataset', 'format'], [ + ['test', 'edf'], + pytest.param('misc', 'edf', marks=[pytest.mark.slowtest, + misc._pytest_mark()]), + ]) +def test_export_raw_edf(tmp_path, dataset, format): + """Test saving a Raw instance to EDF format.""" + if dataset == 'test': + fname = _resource_path('mne.io.tests.data', 'test_raw.fif') + raw = read_raw_fif(fname) + elif dataset == 'misc': + fname = op.join(misc_path, 'ecog', 'sample_ecog_ieeg.fif') + raw = read_raw_fif(fname) + + # only test with EEG channels + raw.pick_types(eeg=True, ecog=True, seeg=True) + raw.load_data() + orig_ch_names = raw.ch_names + temp_fname = op.join(str(tmp_path), f'test.{format}') + + # test runtime errors + with pytest.raises(RuntimeError, match='The maximum'), \ + pytest.warns(RuntimeWarning, match='Data has a non-integer'): + raw.export(temp_fname, physical_range=(-1e6, 0)) + with pytest.raises(RuntimeError, match='The minimum'), \ + pytest.warns(RuntimeWarning, match='Data has a non-integer'): + raw.export(temp_fname, physical_range=(0, 1e6)) + + if dataset == 'test': + with pytest.warns(RuntimeWarning, match='Data has a non-integer'): + raw.export(temp_fname) + elif dataset == 'misc': + with pytest.warns(RuntimeWarning, match='EDF format requires'): + 
raw.export(temp_fname) + + if 'epoc' in raw.ch_names: + raw.drop_channels(['epoc']) + + raw_read = read_raw_edf(temp_fname, preload=True) + assert orig_ch_names == raw_read.ch_names + # only compare the original length, since extra zeros are appended + orig_raw_len = len(raw) + + # assert data and times are not different + # Due to the physical range of the data, reading and writing is + # not lossless. For example, a physical min/max of -/+ 3200 uV + # will result in a resolution of 0.09 uV. This resolution + # though is acceptable for most EEG manufacturers. + assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) + + # Due to the data record duration limitations of EDF files, one + # cannot store arbitrary float sampling rate exactly. Usually this + # results in two sampling rates that are off by very low number of + # decimal points. This for practical purposes does not matter + # but will result in an error when say the number of time points + # is very very large. + assert_allclose( + raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + + +@requires_version('pymatreader') +@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), + reason='eeglabio not installed') +@pytest.mark.parametrize('preload', (True, False)) +def test_export_epochs_eeglab(tmp_path, preload): + """Test saving an Epochs instance to EEGLAB's set format.""" + raw, events = _get_data()[:2] + raw.load_data() + epochs = Epochs(raw, events, preload=preload) + temp_fname = op.join(str(tmp_path), 'test.set') + epochs.export(temp_fname) + epochs.drop_channels([ch for ch in ['epoc', 'STI 014'] + if ch in epochs.ch_names]) + epochs_read = read_epochs_eeglab(temp_fname) + assert epochs.ch_names == epochs_read.ch_names + cart_coords = np.array([d['loc'][:3] + for d in epochs.info['chs']]) # just xyz + cart_coords_read = np.array([d['loc'][:3] + for d in epochs_read.info['chs']]) + assert_allclose(cart_coords, cart_coords_read) + assert_array_equal(epochs.events[:, 0], + epochs_read.events[:, 0]) # latency + assert epochs.event_id.keys() == epochs_read.event_id.keys() # just keys + assert_allclose(epochs.times, epochs_read.times) + assert_allclose(epochs.get_data(), epochs_read.get_data()) + + # test overwrite + with pytest.raises(FileExistsError, match='Destination file exists'): + epochs.export(temp_fname, overwrite=False) + epochs.export(temp_fname, overwrite=True) + + # test pathlib.Path files + epochs.export(Path(temp_fname), overwrite=True) + + # test warning with unapplied projectors + epochs = Epochs(raw, events, preload=preload, proj=False) + with pytest.warns(RuntimeWarning, + match='Epochs instance has unapplied projectors.'): + epochs.export(Path(temp_fname), overwrite=True) + + +@requires_version('mffpy', '0.5.7') +@testing.requires_testing_data +@pytest.mark.parametrize('fmt', ('auto', 'mff')) +@pytest.mark.parametrize('do_history', (True, False)) +def test_export_evokeds_to_mff(tmp_path, fmt, do_history): + """Test exporting evoked dataset to MFF.""" + evoked = read_evokeds_mff(egi_evoked_fname) + export_fname = op.join(str(tmp_path), 'evoked.mff') + history = [ + { + 'name': 'Test Segmentation', + 'method': 'Segmentation', + 'settings': ['Setting 1', 'Setting 2'], + 'results': ['Result 1', 'Result 2'] + }, + { + 'name': 'Test Averaging', + 'method': 'Averaging', + 'settings': ['Setting 1', 'Setting 2'], + 'results': ['Result 1', 'Result 2'] + } + ] + if do_history: + export_evokeds_mff(export_fname, evoked, history=history) + else: + 
export_evokeds(export_fname, evoked) + # Drop non-EEG channels + evoked = [ave.drop_channels(['ECG', 'EMG']) for ave in evoked] + evoked_exported = read_evokeds_mff(export_fname) + assert len(evoked) == len(evoked_exported) + for ave, ave_exported in zip(evoked, evoked_exported): + # Compare infos + assert object_diff(ave_exported.info, ave.info) == '' + # Compare data + assert_allclose(ave_exported.data, ave.data) + # Compare properties + assert ave_exported.nave == ave.nave + assert ave_exported.kind == ave.kind + assert ave_exported.comment == ave.comment + assert_allclose(ave_exported.times, ave.times) + + # test overwrite + with pytest.raises(FileExistsError, match='Destination file exists'): + if do_history: + export_evokeds_mff(export_fname, evoked, history=history, + overwrite=False) + else: + export_evokeds(export_fname, evoked, overwrite=False) + + if do_history: + export_evokeds_mff(export_fname, evoked, history=history, + overwrite=True) + else: + export_evokeds(export_fname, evoked, overwrite=True) + + +@requires_version('mffpy', '0.5.7') +@testing.requires_testing_data +def test_export_to_mff_no_device(): + """Test no device type throws ValueError.""" + evoked = read_evokeds_mff(egi_evoked_fname, condition='Category 1') + evoked.info['device_info'] = None + with pytest.raises(ValueError, match='No device type.'): + export_evokeds('output.mff', evoked) + + +@requires_version('mffpy', '0.5.7') +def test_export_to_mff_incompatible_sfreq(): + """Test non-whole number sampling frequency throws ValueError.""" + evoked = read_evokeds(fname_evoked) + with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info["sfreq"]}'): + export_evokeds('output.mff', evoked) + + +@pytest.mark.parametrize('fmt,ext', [ + ('EEGLAB', 'set'), + ('EDF', 'edf'), + ('BrainVision', 'eeg') +]) +def test_export_evokeds_unsupported_format(fmt, ext): + """Test exporting evoked dataset to non-supported formats.""" + evoked = read_evokeds(fname_evoked) + with pytest.raises(NotImplementedError, match=f'Export to {fmt} not imp'): + export_evokeds(f'output.{ext}', evoked) diff --git a/python/libs/mne/filter.py b/python/libs/mne/filter.py new file mode 100644 index 0000000..150192f --- /dev/null +++ b/python/libs/mne/filter.py @@ -0,0 +1,2352 @@ +"""IIR and FIR filtering and resampling functions.""" + +from collections import Counter +from copy import deepcopy +from functools import partial + +import numpy as np + +from .annotations import _annotations_starts_stops +from .io.pick import _picks_to_idx +from .cuda import (_setup_cuda_fft_multiply_repeated, _fft_multiply_repeated, + _setup_cuda_fft_resample, _fft_resample, _smart_pad) +from .parallel import parallel_func, check_n_jobs +from .time_frequency.multitaper import _mt_spectra, _compute_mt_params +from .utils import (logger, verbose, sum_squared, warn, _pl, + _check_preload, _validate_type, _check_option, _ensure_int) +from ._ola import _COLA + +# These values from Ifeachor and Jervis. +_length_factors = dict(hann=3.1, hamming=3.3, blackman=5.0) + + +def is_power2(num): + """Test if number is a power of 2. + + Parameters + ---------- + num : int + Number. + + Returns + ------- + b : bool + True if is power of 2. + + Examples + -------- + >>> is_power2(2 ** 3) + True + >>> is_power2(5) + False + """ + num = int(num) + return num != 0 and ((num & (num - 1)) == 0) + + +def next_fast_len(target): + """Find the next fast size of input data to `fft`, for zero-padding, etc. 
+ + SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this + returns the next composite of the prime factors 2, 3, and 5 which is + greater than or equal to `target`. (These are also known as 5-smooth + numbers, regular numbers, or Hamming numbers.) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + + Returns + ------- + out : int + The first 5-smooth number greater than or equal to `target`. + + Notes + ----- + Copied from SciPy with minor modifications. + """ + from bisect import bisect_left + hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, + 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, + 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, + 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, + 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, + 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, + 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, + 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, + 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, + 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, + 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, + 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, + 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, + 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000) + + if target <= 6: + return target + + # Quickly check if it's already a power of 2 + if not (target & (target - 1)): + return target + + # Get result quickly for small sizes, since FFT itself is similarly fast. + if target <= hams[-1]: + return hams[bisect_left(hams, target)] + + match = float('inf') # Anything found will be smaller + p5 = 1 + while p5 < target: + p35 = p5 + while p35 < target: + # Ceiling integer division, avoiding conversion to float + # (quotient = ceil(target / p35)) + quotient = -(-target // p35) + + p2 = 2 ** int(quotient - 1).bit_length() + + N = p2 * p35 + if N == target: + return N + elif N < match: + match = N + p35 *= 3 + if p35 == target: + return p35 + if p35 < match: + match = p35 + p5 *= 5 + if p5 == target: + return p5 + if p5 < match: + match = p5 + return match + + +def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, + n_jobs=1, copy=True, pad='reflect_limited'): + """Filter the signal x using h with overlap-add FFTs. + + Parameters + ---------- + x : array, shape (n_signals, n_times) + Signals to filter. + h : 1d array + Filter impulse response (FIR filter coefficients). Must be odd length + if phase == 'linear'. + n_fft : int + Length of the FFT. If None, the best size is determined automatically. + phase : str + If 'zero', the delay for the filter is compensated (and it must be + an odd-length symmetric filter). If 'linear', the response is + uncompensated. If 'zero-double', the filter is applied in the + forward and reverse directions. If 'minimum', a minimum-phase + filter will be used. + picks : list | None + See calling functions. + n_jobs : int | str + Number of jobs to run in parallel. Can be 'cuda' if ``cupy`` + is installed properly. + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. + pad : str + Padding type for ``_smart_pad``. + + Returns + ------- + x : array, shape (n_signals, n_times) + x filtered. 
+ """ + n_jobs = check_n_jobs(n_jobs, allow_cuda=True) + # set up array for filtering, reshape to 2D, operate on last axis + x, orig_shape, picks = _prep_for_filtering(x, copy, picks) + # Extend the signal by mirroring the edges to reduce transient filter + # response + _check_zero_phase_length(len(h), phase) + if len(h) == 1: + return x * h ** 2 if phase == 'zero-double' else x * h + n_edge = max(min(len(h), x.shape[1]) - 1, 0) + logger.debug('Smart-padding with: %s samples on each edge' % n_edge) + n_x = x.shape[1] + 2 * n_edge + + if phase == 'zero-double': + h = np.convolve(h, h[::-1]) + + # Determine FFT length to use + min_fft = 2 * len(h) - 1 + if n_fft is None: + max_fft = n_x + if max_fft >= min_fft: + # cost function based on number of multiplications + N = 2 ** np.arange(np.ceil(np.log2(min_fft)), + np.ceil(np.log2(max_fft)) + 1, dtype=int) + cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) * + N * (np.log2(N) + 1)) + + # add a heuristic term to prevent too-long FFT's which are slow + # (not predicted by mult. cost alone, 4e-5 exp. determined) + cost += 4e-5 * N * n_x + + n_fft = N[np.argmin(cost)] + else: + # Use only a single block + n_fft = next_fast_len(min_fft) + logger.debug('FFT block length: %s' % n_fft) + if n_fft < min_fft: + raise ValueError('n_fft is too short, has to be at least ' + '2 * len(h) - 1 (%s), got %s' % (min_fft, n_fft)) + + # Figure out if we should use CUDA + n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated( + n_jobs, h, n_fft) + + # Process each row separately + picks = _picks_to_idx(len(x), picks) + if n_jobs == 1: + for p in picks: + x[p] = _1d_overlap_filter(x[p], len(h), n_edge, phase, + cuda_dict, pad, n_fft) + else: + parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs) + data_new = parallel(p_fun(x[p], len(h), n_edge, phase, + cuda_dict, pad, n_fft) for p in picks) + for pp, p in enumerate(picks): + x[p] = data_new[pp] + + x.shape = orig_shape + return x + + +def _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft): + """Do one-dimensional overlap-add FFT FIR filtering.""" + # pad to reduce ringing + x_ext = _smart_pad(x, (n_edge, n_edge), pad) + n_x = len(x_ext) + x_filtered = np.zeros_like(x_ext) + + n_seg = n_fft - n_h + 1 + n_segments = int(np.ceil(n_x / float(n_seg))) + shift = ((n_h - 1) // 2 if phase.startswith('zero') else 0) + n_edge + + # Now the actual filtering step is identical for zero-phase (filtfilt-like) + # or single-pass + for seg_idx in range(n_segments): + start = seg_idx * n_seg + stop = (seg_idx + 1) * n_seg + seg = x_ext[start:stop] + seg = np.concatenate([seg, np.zeros(n_fft - len(seg))]) + + prod = _fft_multiply_repeated(seg, cuda_dict) + + start_filt = max(0, start - shift) + stop_filt = min(start - shift + n_fft, n_x) + start_prod = max(0, shift - start) + stop_prod = start_prod + stop_filt - start_filt + x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod] + + # Remove mirrored edges that we added and cast (n_edge can be zero) + x_filtered = x_filtered[:n_x - 2 * n_edge].astype(x.dtype) + return x_filtered + + +def _filter_attenuation(h, freq, gain): + """Compute minimum attenuation at stop frequency.""" + from scipy.signal import freqz + _, filt_resp = freqz(h.ravel(), worN=np.pi * freq) + filt_resp = np.abs(filt_resp) # use amplitude response + filt_resp[np.where(gain == 1)] = 0 + idx = np.argmax(filt_resp) + att_db = -20 * np.log10(np.maximum(filt_resp[idx], 1e-20)) + att_freq = freq[idx] + return att_db, att_freq + + +def _prep_for_filtering(x, copy, picks=None): + 
"""Set up array as 2D for filtering ease.""" + x = _check_filterable(x) + if copy is True: + x = x.copy() + orig_shape = x.shape + x = np.atleast_2d(x) + picks = _picks_to_idx(x.shape[-2], picks) + x.shape = (np.prod(x.shape[:-1]), x.shape[-1]) + if len(orig_shape) == 3: + n_epochs, n_channels, n_times = orig_shape + offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), + len(picks)) + picks = np.tile(picks, n_epochs) + offset + elif len(orig_shape) > 3: + raise ValueError('picks argument is not supported for data with more' + ' than three dimensions') + assert all(0 <= pick < x.shape[0] for pick in picks) # guaranteed by above + + return x, orig_shape, picks + + +def _firwin_design(N, freq, gain, window, sfreq): + """Construct a FIR filter using firwin.""" + from scipy.signal import firwin + assert freq[0] == 0 + assert len(freq) > 1 + assert len(freq) == len(gain) + assert N % 2 == 1 + h = np.zeros(N) + prev_freq = freq[-1] + prev_gain = gain[-1] + if gain[-1] == 1: + h[N // 2] = 1 # start with "all up" + assert prev_gain in (0, 1) + for this_freq, this_gain in zip(freq[::-1][1:], gain[::-1][1:]): + assert this_gain in (0, 1) + if this_gain != prev_gain: + # Get the correct N to satistify the requested transition bandwidth + transition = (prev_freq - this_freq) / 2. + this_N = int(round(_length_factors[window] / transition)) + this_N += (1 - this_N % 2) # make it odd + if this_N > N: + raise ValueError('The requested filter length %s is too short ' + 'for the requested %0.2f Hz transition band, ' + 'which requires %s samples' + % (N, transition * sfreq / 2., this_N)) + # Construct a lowpass + this_h = firwin(this_N, (prev_freq + this_freq) / 2., + window=window, pass_zero=True, nyq=freq[-1]) + assert this_h.shape == (this_N,) + offset = (N - this_N) // 2 + if this_gain == 0: + h[offset:N - offset] -= this_h + else: + h[offset:N - offset] += this_h + prev_gain = this_gain + prev_freq = this_freq + return h + + +def _construct_fir_filter(sfreq, freq, gain, filter_length, phase, fir_window, + fir_design): + """Filter signal using gain control points in the frequency domain. + + The filter impulse response is constructed from a Hann window (window + used in "firwin2" function) to avoid ripples in the frequency response + (windowing is a smoothing in frequency domain). + + If x is multi-dimensional, this operates along the last dimension. + + Parameters + ---------- + sfreq : float + Sampling rate in Hz. + freq : 1d array + Frequency sampling points in Hz. + gain : 1d array + Filter gain at frequency sampling points. + Must be all 0 and 1 for fir_design=="firwin". + filter_length : int + Length of the filter to use. Must be odd length if phase == "zero". + phase : str + If 'zero', the delay for the filter is compensated (and it must be + an odd-length symmetric filter). If 'linear', the response is + uncompensated. If 'zero-double', the filter is applied in the + forward and reverse directions. If 'minimum', a minimum-phase + filter will be used. + fir_window : str + The window to use in FIR design, can be "hamming" (default), + "hann", or "blackman". + fir_design : str + Can be "firwin2" or "firwin". + + Returns + ------- + h : array + Filter coefficients. 
+ """ + assert freq[0] == 0 + if fir_design == 'firwin2': + from scipy.signal import firwin2 as fir_design + else: + assert fir_design == 'firwin' + fir_design = partial(_firwin_design, sfreq=sfreq) + from scipy.signal import minimum_phase + + # issue a warning if attenuation is less than this + min_att_db = 12 if phase == 'minimum' else 20 + + # normalize frequencies + freq = np.array(freq) / (sfreq / 2.) + if freq[0] != 0 or freq[-1] != 1: + raise ValueError('freq must start at 0 and end an Nyquist (%s), got %s' + % (sfreq / 2., freq)) + gain = np.array(gain) + + # Use overlap-add filter with a fixed length + N = _check_zero_phase_length(filter_length, phase, gain[-1]) + # construct symmetric (linear phase) filter + if phase == 'minimum': + h = fir_design(N * 2 - 1, freq, gain, window=fir_window) + h = minimum_phase(h) + else: + h = fir_design(N, freq, gain, window=fir_window) + assert h.size == N + att_db, att_freq = _filter_attenuation(h, freq, gain) + if phase == 'zero-double': + att_db += 6 + if att_db < min_att_db: + att_freq *= sfreq / 2. + warn('Attenuation at stop frequency %0.2f Hz is only %0.2f dB. ' + 'Increase filter_length for higher attenuation.' + % (att_freq, att_db)) + return h + + +def _check_zero_phase_length(N, phase, gain_nyq=0): + N = int(N) + if N % 2 == 0: + if phase == 'zero': + raise RuntimeError('filter_length must be odd if phase="zero", ' + 'got %s' % N) + elif phase == 'zero-double' and gain_nyq == 1: + N += 1 + return N + + +def _check_coefficients(system): + """Check for filter stability.""" + if isinstance(system, tuple): + from scipy.signal import tf2zpk + z, p, k = tf2zpk(*system) + else: # sos + from scipy.signal import sos2zpk + z, p, k = sos2zpk(system) + if np.any(np.abs(p) > 1.0): + raise RuntimeError('Filter poles outside unit circle, filter will be ' + 'unstable. Consider using different filter ' + 'coefficients.') + + +def _filtfilt(x, iir_params, picks, n_jobs, copy): + """Call filtfilt.""" + # set up array for filtering, reshape to 2D, operate on last axis + from scipy.signal import filtfilt, sosfiltfilt + padlen = min(iir_params['padlen'], x.shape[-1] - 1) + n_jobs = check_n_jobs(n_jobs) + x, orig_shape, picks = _prep_for_filtering(x, copy, picks) + if 'sos' in iir_params: + fun = partial(sosfiltfilt, sos=iir_params['sos'], padlen=padlen, + axis=-1) + _check_coefficients(iir_params['sos']) + else: + fun = partial(filtfilt, b=iir_params['b'], a=iir_params['a'], + padlen=padlen, axis=-1) + _check_coefficients((iir_params['b'], iir_params['a'])) + if n_jobs == 1: + for p in picks: + x[p] = fun(x=x[p]) + else: + parallel, p_fun, _ = parallel_func(fun, n_jobs) + data_new = parallel(p_fun(x=x[p]) for p in picks) + for pp, p in enumerate(picks): + x[p] = data_new[pp] + x.shape = orig_shape + return x + + +def estimate_ringing_samples(system, max_try=100000): + """Estimate filter ringing. + + Parameters + ---------- + system : tuple | ndarray + A tuple of (b, a) or ndarray of second-order sections coefficients. + max_try : int + Approximate maximum number of samples to try. + This will be changed to a multiple of 1000. + + Returns + ------- + n : int + The approximate ringing. + """ + from scipy import signal + if isinstance(system, tuple): # TF + kind = 'ba' + b, a = system + zi = [0.] * (len(a) - 1) + else: + kind = 'sos' + sos = system + zi = [[0.] 
* 2] * len(sos) + n_per_chunk = 1000 + n_chunks_max = int(np.ceil(max_try / float(n_per_chunk))) + x = np.zeros(n_per_chunk) + x[0] = 1 + last_good = n_per_chunk + thresh_val = 0 + for ii in range(n_chunks_max): + if kind == 'ba': + h, zi = signal.lfilter(b, a, x, zi=zi) + else: + h, zi = signal.sosfilt(sos, x, zi=zi) + x[0] = 0 # for subsequent iterations we want zero input + h = np.abs(h) + thresh_val = max(0.001 * np.max(h), thresh_val) + idx = np.where(np.abs(h) > thresh_val)[0] + if len(idx) > 0: + last_good = idx[-1] + else: # this iteration had no sufficiently lange values + idx = (ii - 1) * n_per_chunk + last_good + break + else: + warn('Could not properly estimate ringing for the filter') + idx = n_per_chunk * n_chunks_max + return idx + + +_ftype_dict = { + 'butter': 'Butterworth', + 'cheby1': 'Chebyshev I', + 'cheby2': 'Chebyshev II', + 'ellip': 'Cauer/elliptic', + 'bessel': 'Bessel/Thomson', +} + + +@verbose +def construct_iir_filter(iir_params, f_pass=None, f_stop=None, sfreq=None, + btype=None, return_copy=True, verbose=None): + """Use IIR parameters to get filtering coefficients. + + This function works like a wrapper for iirdesign and iirfilter in + scipy.signal to make filter coefficients for IIR filtering. It also + estimates the number of padding samples based on the filter ringing. + It creates a new iir_params dict (or updates the one passed to the + function) with the filter coefficients ('b' and 'a') and an estimate + of the padding necessary ('padlen') so IIR filtering can be performed. + + Parameters + ---------- + iir_params : dict + Dictionary of parameters to use for IIR filtering. + + * If ``iir_params['sos']`` exists, it will be used as + second-order sections to perform IIR filtering. + + .. versionadded:: 0.13 + + * Otherwise, if ``iir_params['b']`` and ``iir_params['a']`` + exist, these will be used as coefficients to perform IIR + filtering. + * Otherwise, if ``iir_params['order']`` and + ``iir_params['ftype']`` exist, these will be used with + `scipy.signal.iirfilter` to make a filter. + You should also supply ``iir_params['rs']`` and + ``iir_params['rp']`` if using elliptic or Chebychev filters. + * Otherwise, if ``iir_params['gpass']`` and + ``iir_params['gstop']`` exist, these will be used with + `scipy.signal.iirdesign` to design a filter. + * ``iir_params['padlen']`` defines the number of samples to pad + (and an estimate will be calculated if it is not given). + See Notes for more details. + * ``iir_params['output']`` defines the system output kind when + designing filters, either "sos" or "ba". For 0.13 the + default is 'ba' but will change to 'sos' in 0.14. + + f_pass : float or list of float + Frequency for the pass-band. Low-pass and high-pass filters should + be a float, band-pass should be a 2-element list of float. + f_stop : float or list of float + Stop-band frequency (same size as f_pass). Not used if 'order' is + specified in iir_params. + sfreq : float | None + The sample rate. + btype : str + Type of filter. Should be 'lowpass', 'highpass', or 'bandpass' + (or analogous string representations known to + :func:`scipy.signal.iirfilter`). + return_copy : bool + If False, the 'sos', 'b', 'a', and 'padlen' entries in + ``iir_params`` will be set inplace (if they weren't already). + Otherwise, a new ``iir_params`` instance will be created and + returned with these entries. 
+ %(verbose)s + + Returns + ------- + iir_params : dict + Updated iir_params dict, with the entries (set only if they didn't + exist before) for 'sos' (or 'b', 'a'), and 'padlen' for + IIR filtering. + + See Also + -------- + mne.filter.filter_data + mne.io.Raw.filter + + Notes + ----- + This function triages calls to :func:`scipy.signal.iirfilter` and + :func:`scipy.signal.iirdesign` based on the input arguments (see + linked functions for more details). + + .. versionchanged:: 0.14 + Second-order sections are used in filter design by default (replacing + ``output='ba'`` by ``output='sos'``) to help ensure filter stability + and reduce numerical error. + + Examples + -------- + iir_params can have several forms. Consider constructing a low-pass + filter at 40 Hz with 1000 Hz sampling rate. + + In the most basic (2-parameter) form of iir_params, the order of the + filter 'N' and the type of filtering 'ftype' are specified. To get + coefficients for a 4th-order Butterworth filter, this would be: + + >>> iir_params = dict(order=4, ftype='butter', output='sos') # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False) # doctest:+SKIP + >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP + (4, 82) + + Filters can also be constructed using filter design methods. To get a + 40 Hz Chebyshev type 1 lowpass with specific gain characteristics in the + pass and stop bands (assuming the desired stop band is at 45 Hz), this + would be a filter with much longer ringing: + + >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20, output='sos') # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low') # doctest:+SKIP + >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP + (6, 439) + + Padding and/or filter coefficients can also be manually specified. For + a 10-sample moving window with no padding during filtering, for example, + one can just do: + + >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0) # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, return_copy=False) # doctest:+SKIP + >>> print((iir_params['b'], iir_params['a'], iir_params['padlen'])) # doctest:+SKIP + (array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]), [1, 0], 0) + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample`. + """ # noqa: E501 + from scipy.signal import iirfilter, iirdesign, freqz, sosfreqz + known_filters = ('bessel', 'butter', 'butterworth', 'cauer', 'cheby1', + 'cheby2', 'chebyshev1', 'chebyshev2', 'chebyshevi', + 'chebyshevii', 'ellip', 'elliptic') + if not isinstance(iir_params, dict): + raise TypeError('iir_params must be a dict, got %s' % type(iir_params)) + # if the filter has been designed, we're good to go + Wp = None + if 'sos' in iir_params: + system = iir_params['sos'] + output = 'sos' + elif 'a' in iir_params and 'b' in iir_params: + system = (iir_params['b'], iir_params['a']) + output = 'ba' + else: + output = iir_params.get('output', 'sos') + _check_option('output', output, ('ba', 'sos')) + # ensure we have a valid ftype + if 'ftype' not in iir_params: + raise RuntimeError('ftype must be an entry in iir_params if ''b'' ' + 'and ''a'' are not specified') + ftype = iir_params['ftype'] + if ftype not in known_filters: + raise RuntimeError('ftype must be in filter_dict from ' + 'scipy.signal (e.g., butter, cheby1, etc.) 
not '
+                               '%s' % ftype)
+
+        # use order-based design
+        f_pass = np.atleast_1d(f_pass)
+        if f_pass.ndim > 1:
+            raise ValueError('frequencies must be 1D, got %dD' % f_pass.ndim)
+        edge_freqs = ', '.join('%0.2f' % (f,) for f in f_pass)
+        Wp = f_pass / (float(sfreq) / 2)
+        # It will be designed
+        ftype_nice = _ftype_dict.get(ftype, ftype)
+        logger.info('')
+        logger.info('IIR filter parameters')
+        logger.info('---------------------')
+        logger.info('%s %s zero-phase (two-pass forward and reverse) '
+                    'non-causal filter:' % (ftype_nice, btype))
+        # SciPy designs for -3dB but we do forward-backward, so this is -6dB
+        if 'order' in iir_params:
+            kwargs = dict(N=iir_params['order'], Wn=Wp, btype=btype,
+                          ftype=ftype, output=output)
+            for key in ('rp', 'rs'):
+                if key in iir_params:
+                    kwargs[key] = iir_params[key]
+            system = iirfilter(**kwargs)
+            logger.info('- Filter order %d (effective, after forward-backward)'
+                        % (2 * iir_params['order'] * len(Wp),))
+        else:
+            # use gpass / gstop design
+            Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)
+            if 'gpass' not in iir_params or 'gstop' not in iir_params:
+                raise ValueError('iir_params must have at least ''gstop'' and'
+                                 ' ''gpass'' (or ''N'') entries')
+            system = iirdesign(Wp, Ws, iir_params['gpass'],
+                               iir_params['gstop'], ftype=ftype, output=output)
+
+    if system is None:
+        raise RuntimeError('coefficients could not be created from iir_params')
+    # do some sanity checks
+    _check_coefficients(system)
+
+    # get the gains at the cutoff frequencies
+    if Wp is not None:
+        if output == 'sos':
+            cutoffs = sosfreqz(system, worN=Wp * np.pi)[1]
+        else:
+            cutoffs = freqz(system[0], system[1], worN=Wp * np.pi)[1]
+        # 2 * 20 here because we do forward-backward filtering
+        cutoffs = 40 * np.log10(np.abs(cutoffs))
+        cutoffs = ', '.join(['%0.2f' % (c,) for c in cutoffs])
+        logger.info('- Cutoff%s at %s Hz: %s dB'
+                    % (_pl(f_pass), edge_freqs, cutoffs))
+    # now deal with padding
+    if 'padlen' not in iir_params:
+        padlen = estimate_ringing_samples(system)
+    else:
+        padlen = iir_params['padlen']
+
+    if return_copy:
+        iir_params = deepcopy(iir_params)
+
+    iir_params.update(dict(padlen=padlen))
+    if output == 'sos':
+        iir_params.update(sos=system)
+    else:
+        iir_params.update(b=system[0], a=system[1])
+    logger.info('')
+    return iir_params
+
+
+def _check_method(method, iir_params, extra_types=()):
+    """Parse method arguments."""
+    allowed_types = ['iir', 'fir', 'fft'] + list(extra_types)
+    _validate_type(method, 'str', 'method')
+    _check_option('method', method, allowed_types)
+    if method == 'fft':
+        method = 'fir'  # use the better name
+    if method == 'iir':
+        if iir_params is None:
+            iir_params = dict()
+        if len(iir_params) == 0 or (len(iir_params) == 1 and
+                                    'output' in iir_params):
+            iir_params = dict(order=4, ftype='butter',
+                              output=iir_params.get('output', 'sos'))
+    elif iir_params is not None:
+        raise ValueError('iir_params must be None if method != "iir"')
+    return iir_params, method
+
+
+@verbose
+def filter_data(data, sfreq, l_freq, h_freq, picks=None, filter_length='auto',
+                l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,
+                method='fir', iir_params=None, copy=True, phase='zero',
+                fir_window='hamming', fir_design='firwin',
+                pad='reflect_limited', verbose=None):
+    """Filter a subset of channels.
+
+    Parameters
+    ----------
+    data : ndarray, shape (..., n_times)
+        The data to filter.
+    sfreq : float
+        The sample frequency in Hz.
+ %(l_freq)s + %(h_freq)s + %(picks_nostr)s + Currently this is only supported for 2D (n_channels, n_times) and + 3D (n_epochs, n_channels, n_times) arrays. + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + %(verbose)s + + Returns + ------- + data : ndarray, shape (..., n_times) + The filtered data. + + See Also + -------- + construct_iir_filter + create_filter + mne.io.Raw.filter + notch_filter + resample + + Notes + ----- + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by ``picks``. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporaily stored in memory. + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample` and + :func:`mne.filter.create_filter`. + """ + data = _check_filterable(data) + iir_params, method = _check_method(method, iir_params) + filt = create_filter( + data, sfreq, l_freq, h_freq, filter_length, l_trans_bandwidth, + h_trans_bandwidth, method, iir_params, phase, fir_window, fir_design) + if method in ('fir', 'fft'): + data = _overlap_add_filter(data, filt, None, phase, picks, n_jobs, + copy, pad) + else: + data = _filtfilt(data, filt, picks, n_jobs, copy) + return data + + +@verbose +def create_filter(data, sfreq, l_freq, h_freq, filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto', + method='fir', iir_params=None, phase='zero', + fir_window='hamming', fir_design='firwin', verbose=None): + r"""Create a FIR or IIR filter. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + Parameters + ---------- + data : ndarray, shape (..., n_times) | None + The data that will be filtered. This is used for sanity checking + only. If None, no sanity checking related to the length of the signal + relative to the filter order will be performed. + sfreq : float + The sample frequency in Hz. + %(l_freq)s + %(h_freq)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(method_fir)s + %(iir_params)s + %(phase)s + %(fir_window)s + %(fir_design)s + %(verbose)s + + Returns + ------- + filt : array or dict + Will be an array of FIR coefficients for method='fir', and dict + with IIR parameters for method='iir'. + + See Also + -------- + filter_data + + Notes + ----- + .. note:: For FIR filters, the *cutoff frequency*, i.e. the -6 dB point, + is in the middle of the transition band (when using phase='zero' + and fir_design='firwin'). 
For IIR filters, the cutoff frequency + is given by ``l_freq`` or ``h_freq`` directly, and + ``l_trans_bandwidth`` and ``h_trans_bandwidth`` are ignored. + + **Band-pass filter** + + The frequency response is (approximately) given by:: + + 1-| ---------- + | /| | \ + |H| | / | | \ + | / | | \ + | / | | \ + 0-|---------- | | -------------- + | | | | | | + 0 Fs1 Fp1 Fp2 Fs2 Nyq + + Where: + + * Fs1 = Fp1 - l_trans_bandwidth in Hz + * Fs2 = Fp2 + h_trans_bandwidth in Hz + + **Band-stop filter** + + The frequency response is (approximately) given by:: + + 1-|--------- ---------- + | \ / + |H| | \ / + | \ / + | \ / + 0-| ----------- + | | | | | | + 0 Fp1 Fs1 Fs2 Fp2 Nyq + + Where ``Fs1 = Fp1 + l_trans_bandwidth`` and + ``Fs2 = Fp2 - h_trans_bandwidth``. + + Multiple stop bands can be specified using arrays. + + **Low-pass filter** + + The frequency response is (approximately) given by:: + + 1-|------------------------ + | \ + |H| | \ + | \ + | \ + 0-| ---------------- + | | | | + 0 Fp Fstop Nyq + + Where ``Fstop = Fp + trans_bandwidth``. + + **High-pass filter** + + The frequency response is (approximately) given by:: + + 1-| ----------------------- + | / + |H| | / + | / + | / + 0-|--------- + | | | | + 0 Fstop Fp Nyq + + Where ``Fstop = Fp - trans_bandwidth``. + + .. versionadded:: 0.14 + """ + sfreq = float(sfreq) + if sfreq < 0: + raise ValueError('sfreq must be positive') + # If no data specified, sanity checking will be skipped + if data is None: + logger.info('No data specified. Sanity checks related to the length of' + ' the signal relative to the filter order will be' + ' skipped.') + if h_freq is not None: + h_freq = np.array(h_freq, float).ravel() + if (h_freq > (sfreq / 2.)).any(): + raise ValueError('h_freq (%s) must be less than the Nyquist ' + 'frequency %s' % (h_freq, sfreq / 2.)) + if l_freq is not None: + l_freq = np.array(l_freq, float).ravel() + if (l_freq == 0).all(): + l_freq = None + iir_params, method = _check_method(method, iir_params) + if l_freq is None and h_freq is None: + data, sfreq, _, _, _, _, filter_length, phase, fir_window, \ + fir_design = _triage_filter_params( + data, sfreq, None, None, None, None, + filter_length, method, phase, fir_window, fir_design) + if method == 'iir': + out = dict() if iir_params is None else deepcopy(iir_params) + out.update(b=np.array([1.]), a=np.array([1.])) + else: + freq = [0, sfreq / 2.] + gain = [1., 1.] + if l_freq is None and h_freq is not None: + logger.info('Setting up low-pass filter at %0.2g Hz' % (h_freq,)) + data, sfreq, _, f_p, _, f_s, filter_length, phase, fir_window, \ + fir_design = _triage_filter_params( + data, sfreq, None, h_freq, None, h_trans_bandwidth, + filter_length, method, phase, fir_window, fir_design) + if method == 'iir': + out = construct_iir_filter(iir_params, f_p, f_s, sfreq, 'lowpass') + else: # 'fir' + freq = [0, f_p, f_s] + gain = [1, 1, 0] + if f_s != sfreq / 2.: + freq += [sfreq / 2.] + gain += [0] + elif l_freq is not None and h_freq is None: + logger.info('Setting up high-pass filter at %0.2g Hz' % (l_freq,)) + data, sfreq, pass_, _, stop, _, filter_length, phase, fir_window, \ + fir_design = _triage_filter_params( + data, sfreq, l_freq, None, l_trans_bandwidth, None, + filter_length, method, phase, fir_window, fir_design) + if method == 'iir': + out = construct_iir_filter(iir_params, pass_, stop, sfreq, + 'highpass') + else: # 'fir' + freq = [stop, pass_, sfreq / 2.] 
+ gain = [0, 1, 1] + if stop != 0: + freq = [0] + freq + gain = [0] + gain + elif l_freq is not None and h_freq is not None: + if (l_freq < h_freq).any(): + logger.info('Setting up band-pass filter from %0.2g - %0.2g Hz' + % (l_freq, h_freq)) + data, sfreq, f_p1, f_p2, f_s1, f_s2, filter_length, phase, \ + fir_window, fir_design = _triage_filter_params( + data, sfreq, l_freq, h_freq, l_trans_bandwidth, + h_trans_bandwidth, filter_length, method, phase, + fir_window, fir_design) + if method == 'iir': + out = construct_iir_filter(iir_params, [f_p1, f_p2], + [f_s1, f_s2], sfreq, 'bandpass') + else: # 'fir' + freq = [f_s1, f_p1, f_p2, f_s2] + gain = [0, 1, 1, 0] + if f_s2 != sfreq / 2.: + freq += [sfreq / 2.] + gain += [0] + if f_s1 != 0: + freq = [0] + freq + gain = [0] + gain + else: + # This could possibly be removed after 0.14 release, but might + # as well leave it in to sanity check notch_filter + if len(l_freq) != len(h_freq): + raise ValueError('l_freq and h_freq must be the same length') + msg = 'Setting up band-stop filter' + if len(l_freq) == 1: + msg += ' from %0.2g - %0.2g Hz' % (h_freq, l_freq) + logger.info(msg) + # Note: order of outputs is intentionally switched here! + data, sfreq, f_s1, f_s2, f_p1, f_p2, filter_length, phase, \ + fir_window, fir_design = _triage_filter_params( + data, sfreq, h_freq, l_freq, h_trans_bandwidth, + l_trans_bandwidth, filter_length, method, phase, + fir_window, fir_design, bands='arr', reverse=True) + if method == 'iir': + if len(f_p1) != 1: + raise ValueError('Multiple stop-bands can only be used ' + 'with FIR filtering') + out = construct_iir_filter(iir_params, [f_p1[0], f_p2[0]], + [f_s1[0], f_s2[0]], sfreq, + 'bandstop') + else: # 'fir' + freq = np.r_[f_p1, f_s1, f_s2, f_p2] + gain = np.r_[np.ones_like(f_p1), np.zeros_like(f_s1), + np.zeros_like(f_s2), np.ones_like(f_p2)] + order = np.argsort(freq) + freq = freq[order] + gain = gain[order] + if freq[0] != 0: + freq = np.r_[[0.], freq] + gain = np.r_[[1.], gain] + if freq[-1] != sfreq / 2.: + freq = np.r_[freq, [sfreq / 2.]] + gain = np.r_[gain, [1.]] + if np.any(np.abs(np.diff(gain, 2)) > 1): + raise ValueError('Stop bands are not sufficiently ' + 'separated.') + if method == 'fir': + out = _construct_fir_filter(sfreq, freq, gain, filter_length, phase, + fir_window, fir_design) + return out + + +@verbose +def notch_filter(x, Fs, freqs, filter_length='auto', notch_widths=None, + trans_bandwidth=1, method='fir', iir_params=None, + mt_bandwidth=None, p_value=0.05, picks=None, n_jobs=1, + copy=True, phase='zero', fir_window='hamming', + fir_design='firwin', pad='reflect_limited', + verbose=None): + r"""Notch filter for the signal x. + + Applies a zero-phase notch filter to the signal x, operating on the last + dimension. + + Parameters + ---------- + x : array + Signal to filter. + Fs : float + Sampling rate in Hz. + freqs : float | array of float | None + Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60). + None can only be used with the mode 'spectrum_fit', where an F + test is used to find sinusoidal components. + %(filter_length_notch)s + notch_widths : float | array of float | None + Width of the stop band (centred at each freq in freqs) in Hz. + If None, freqs / 200 is used. + trans_bandwidth : float + Width of the transition band in Hz. + Only used for ``method='fir'``. + %(method_fir)s + 'spectrum_fit' will use multi-taper estimation of sinusoidal + components. 
If freqs=None and method='spectrum_fit', significant + sinusoidal components are detected using an F test, and noted by + logging. + %(iir_params)s + mt_bandwidth : float | None + The bandwidth of the multitaper windowing function in Hz. + Only used in 'spectrum_fit' mode. + p_value : float + P-value to use in F-test thresholding to determine significant + sinusoidal components to remove when method='spectrum_fit' and + freqs=None. Note that this will be Bonferroni corrected for the + number of frequencies, so large p-values may be justified. + %(picks_nostr)s + Only supported for 2D (n_channels, n_times) and 3D + (n_epochs, n_channels, n_times) data. + %(n_jobs_fir)s + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + %(verbose)s + + Returns + ------- + xf : array + The x array filtered. + + See Also + -------- + filter_data + resample + + Notes + ----- + The frequency response is (approximately) given by:: + + 1-|---------- ----------- + | \ / + |H| | \ / + | \ / + | \ / + 0-| - + | | | | | + 0 Fp1 freq Fp2 Nyq + + For each freq in freqs, where ``Fp1 = freq - trans_bandwidth / 2`` and + ``Fs2 = freq + trans_bandwidth / 2``. + + References + ---------- + Multi-taper removal is inspired by code from the Chronux toolbox, see + www.chronux.org and the book "Observed Brain Dynamics" by Partha Mitra + & Hemant Bokil, Oxford University Press, New York, 2008. Please + cite this in publications if method 'spectrum_fit' is used. + """ + x = _check_filterable(x, 'notch filtered', 'notch_filter') + iir_params, method = _check_method(method, iir_params, ['spectrum_fit']) + + if freqs is not None: + freqs = np.atleast_1d(freqs) + elif method != 'spectrum_fit': + raise ValueError('freqs=None can only be used with method ' + 'spectrum_fit') + + # Only have to deal with notch_widths for non-autodetect + if freqs is not None: + if notch_widths is None: + notch_widths = freqs / 200.0 + elif np.any(notch_widths < 0): + raise ValueError('notch_widths must be >= 0') + else: + notch_widths = np.atleast_1d(notch_widths) + if len(notch_widths) == 1: + notch_widths = notch_widths[0] * np.ones_like(freqs) + elif len(notch_widths) != len(freqs): + raise ValueError('notch_widths must be None, scalar, or the ' + 'same length as freqs') + + if method in ('fir', 'iir'): + # Speed this up by computing the fourier coefficients once + tb_2 = trans_bandwidth / 2.0 + lows = [freq - nw / 2.0 - tb_2 + for freq, nw in zip(freqs, notch_widths)] + highs = [freq + nw / 2.0 + tb_2 + for freq, nw in zip(freqs, notch_widths)] + xf = filter_data(x, Fs, highs, lows, picks, filter_length, tb_2, tb_2, + n_jobs, method, iir_params, copy, phase, fir_window, + fir_design, pad=pad) + elif method == 'spectrum_fit': + xf = _mt_spectrum_proc(x, Fs, freqs, notch_widths, mt_bandwidth, + p_value, picks, n_jobs, copy, filter_length) + + return xf + + +def _get_window_thresh(n_times, sfreq, mt_bandwidth, p_value): + # max taper size chosen because it has an max error < 1e-3: + # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0])) + # 0.00099972447657578449 + # so we use 1000 because it's the first "nice" number bigger than 953. 
+ # but if we have a new enough scipy, + # it's only ~0.175 sec for 8 tapers even with 100000 samples + from scipy import stats + dpss_n_times_max = 100000 + + # figure out what tapers to use + window_fun, _, _ = _compute_mt_params( + n_times, sfreq, mt_bandwidth, False, False, + interp_from=min(n_times, dpss_n_times_max), verbose=False) + + # F-stat of 1-p point + threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2) + return window_fun, threshold + + +def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth, + p_value, picks, n_jobs, copy, filter_length): + """Call _mt_spectrum_remove.""" + # set up array for filtering, reshape to 2D, operate on last axis + n_jobs = check_n_jobs(n_jobs) + x, orig_shape, picks = _prep_for_filtering(x, copy, picks) + if isinstance(filter_length, str) and filter_length == 'auto': + filter_length = '10s' + if filter_length is None: + filter_length = x.shape[-1] + filter_length = min(_to_samples(filter_length, sfreq, '', ''), x.shape[-1]) + get_wt = partial( + _get_window_thresh, sfreq=sfreq, mt_bandwidth=mt_bandwidth, + p_value=p_value) + window_fun, threshold = get_wt(filter_length) + if n_jobs == 1: + freq_list = list() + for ii, x_ in enumerate(x): + if ii in picks: + x[ii], f = _mt_spectrum_remove_win( + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, + get_wt) + freq_list.append(f) + else: + parallel, p_fun, _ = parallel_func(_mt_spectrum_remove_win, n_jobs) + data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths, + window_fun, threshold, get_wt) + for xi, x_ in enumerate(x) + if xi in picks) + freq_list = [d[1] for d in data_new] + data_new = np.array([d[0] for d in data_new]) + x[picks, :] = data_new + + # report found frequencies, but do some sanitizing first by binning into + # 1 Hz bins + counts = Counter(sum((np.unique(np.round(ff)).tolist() + for f in freq_list for ff in f), list())) + kind = 'Detected' if line_freqs is None else 'Removed' + found_freqs = '\n'.join(f' {freq:6.2f} : ' + f'{counts[freq]:4d} window{_pl(counts[freq])}' + for freq in sorted(counts)) or ' None' + logger.info(f'{kind} notch frequencies (Hz):\n{found_freqs}') + + x.shape = orig_shape + return x + + +def _mt_spectrum_remove_win(x, sfreq, line_freqs, notch_widths, + window_fun, threshold, get_thresh): + n_times = x.shape[-1] + n_samples = window_fun.shape[1] + n_overlap = (n_samples + 1) // 2 + x_out = np.zeros_like(x) + rm_freqs = list() + idx = [0] + + # Define how to process a chunk of data + def process(x_): + out = _mt_spectrum_remove( + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, + get_thresh) + rm_freqs.append(out[1]) + return (out[0],) # must return a tuple + + # Define how to store a chunk of fully processed data (it's trivial) + def store(x_): + stop = idx[0] + x_.shape[-1] + x_out[..., idx[0]:stop] += x_ + idx[0] = stop + + _COLA(process, store, n_times, n_samples, n_overlap, sfreq, + verbose=False).feed(x) + assert idx[0] == n_times + return x_out, rm_freqs + + +def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, + window_fun, threshold, get_thresh): + """Use MT-spectrum to remove line frequencies. + + Based on Chronux. If line_freqs is specified, all freqs within notch_width + of each line_freq is set to zero. 
+ """ + assert x.ndim == 1 + if x.shape[-1] != window_fun.shape[-1]: + window_fun, threshold = get_thresh(x.shape[-1]) + # drop the even tapers + n_tapers = len(window_fun) + tapers_odd = np.arange(0, n_tapers, 2) + tapers_even = np.arange(1, n_tapers, 2) + tapers_use = window_fun[tapers_odd] + + # sum tapers for (used) odd prolates across time (n_tapers, 1) + H0 = np.sum(tapers_use, axis=1) + + # sum of squares across tapers (1, ) + H0_sq = sum_squared(H0) + + # make "time" vector + rads = 2 * np.pi * (np.arange(x.size) / float(sfreq)) + + # compute mt_spectrum (returning n_ch, n_tapers, n_freq) + x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq) + + # sum of the product of x_p and H0 across tapers (1, n_freqs) + x_p_H0 = np.sum(x_p[:, tapers_odd, :] * + H0[np.newaxis, :, np.newaxis], axis=1) + + # resulting calculated amplitudes for all freqs + A = x_p_H0 / H0_sq + + if line_freqs is None: + # figure out which freqs to remove using F stat + + # estimated coefficient + x_hat = A * H0[:, np.newaxis] + + # numerator for F-statistic + num = (n_tapers - 1) * (A * A.conj()).real * H0_sq + # denominator for F-statistic + den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) + + np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1)) + den[den == 0] = np.inf + f_stat = num / den + + # find frequencies to remove + indices = np.where(f_stat > threshold)[1] + rm_freqs = freqs[indices] + else: + # specify frequencies + indices_1 = np.unique([np.argmin(np.abs(freqs - lf)) + for lf in line_freqs]) + indices_2 = [np.logical_and(freqs > lf - nw / 2., freqs < lf + nw / 2.) + for lf, nw in zip(line_freqs, notch_widths)] + indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0] + indices = np.unique(np.r_[indices_1, indices_2]) + rm_freqs = freqs[indices] + + fits = list() + for ind in indices: + c = 2 * A[0, ind] + fit = np.abs(c) * np.cos(freqs[ind] * rads + np.angle(c)) + fits.append(fit) + + if len(fits) == 0: + datafit = 0.0 + else: + # fitted sinusoids are summed, and subtracted from data + datafit = np.sum(fits, axis=0) + + return x - datafit, rm_freqs + + +def _check_filterable(x, kind='filtered', alternative='filter'): + # Let's be fairly strict about this -- users can easily coerce to ndarray + # at their end, and we already should do it internally any time we are + # using these low-level functions. At the same time, let's + # help people who might accidentally use low-level functions that they + # shouldn't use by pushing them in the right direction + from .io.base import BaseRaw + from .epochs import BaseEpochs + from .evoked import Evoked + if isinstance(x, (BaseRaw, BaseEpochs, Evoked)): + try: + name = x.__class__.__name__ + except Exception: + pass + else: + raise TypeError( + 'This low-level function only operates on np.ndarray ' + f'instances. To get a {kind} {name} instance, use a method ' + f'like `inst_new = inst.copy().{alternative}(...)` ' + 'instead.') + _validate_type(x, (np.ndarray, list, tuple), f'Data to be {kind}') + x = np.asanyarray(x) + if x.dtype != np.float64: + raise ValueError('Data to be %s must be real floating, got %s' + % (kind, x.dtype,)) + return x + + +def _resamp_ratio_len(up, down, n): + ratio = float(up) / down + return ratio, max(int(round(ratio * n)), 1) + + +@verbose +def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', n_jobs=1, + pad='reflect_limited', verbose=None): + """Resample an array. + + Operates along the last dimension of the array. + + Parameters + ---------- + x : ndarray + Signal to resample. 
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+    %(npad)s
+    axis : int
+        Axis along which to resample (default is the last axis).
+    %(window_resample)s
+    %(n_jobs_cuda)s
+    %(pad)s
+        The default is ``'reflect_limited'``.
+
+        .. versionadded:: 0.15
+    %(verbose)s
+
+    Returns
+    -------
+    y : array
+        The x array resampled.
+
+    Notes
+    -----
+    This uses (hopefully) intelligent edge padding and frequency-domain
+    windowing to improve scipy.signal.resample's resampling method, which
+    we have adapted for our use here. Choices of npad and window have
+    important consequences, and the default choices should work well
+    for most natural signals.
+
+    Resampling arguments are broken into "up" and "down" components for future
+    compatibility in case we decide to use an upfirdn implementation. The
+    current implementation is functionally equivalent to passing
+    up=up/down and down=1.
+    """
+    from scipy.signal import get_window
+    from scipy.fft import ifftshift, fftfreq
+    # check explicitly for backwards compatibility
+    if not isinstance(axis, int):
+        err = ("The axis parameter needs to be an integer (got %s). "
+               "The axis parameter was missing from this function for a "
+               "period of time, you might be intending to specify the "
+               "subsequent window parameter." % repr(axis))
+        raise TypeError(err)
+
+    # make sure our arithmetic will work
+    x = _check_filterable(x, 'resampled', 'resample')
+    ratio, final_len = _resamp_ratio_len(up, down, x.shape[axis])
+    del up, down
+    if axis < 0:
+        axis = x.ndim + axis
+    orig_last_axis = x.ndim - 1
+    if axis != orig_last_axis:
+        x = x.swapaxes(axis, orig_last_axis)
+    orig_shape = x.shape
+    x_len = orig_shape[-1]
+    if x_len == 0:
+        warn('x has zero length along last axis, returning a copy of x')
+        return x.copy()
+    bad_msg = 'npad must be "auto" or an integer'
+    if isinstance(npad, str):
+        if npad != 'auto':
+            raise ValueError(bad_msg)
+        # Figure out reasonable pad that gets us to a power of 2
+        min_add = min(x_len // 8, 100) * 2
+        npad = 2 ** int(np.ceil(np.log2(x_len + min_add))) - x_len
+        npad, extra = divmod(npad, 2)
+        npads = np.array([npad, npad + extra], int)
+    else:
+        if npad != int(npad):
+            raise ValueError(bad_msg)
+        npads = np.array([npad, npad], int)
+    del npad
+
+    # prep for resampling now
+    x_flat = x.reshape((-1, x_len))
+    orig_len = x_len + npads.sum()  # length after padding
+    new_len = max(int(round(ratio * orig_len)), 1)  # length after resampling
+    to_removes = [int(round(ratio * npads[0]))]
+    to_removes.append(new_len - final_len - to_removes[0])
+    to_removes = np.array(to_removes)
+    # This should hold:
+    # assert np.abs(to_removes[1] - to_removes[0]) <= int(np.ceil(ratio))
+
+    # figure out windowing function
+    if window is not None:
+        if callable(window):
+            W = window(fftfreq(orig_len))
+        elif isinstance(window, np.ndarray) and \
+                window.shape == (orig_len,):
+            W = window
+        else:
+            W = ifftshift(get_window(window, orig_len))
+    else:
+        W = np.ones(orig_len)
+    W *= (float(new_len) / float(orig_len))
+
+    # figure out if we should use CUDA
+    n_jobs, cuda_dict = _setup_cuda_fft_resample(n_jobs, W, new_len)
+
+    # do the resampling using an adaptation of scipy's FFT-based resample()
+    # use of the 'flat' window is recommended for minimal ringing
+    if n_jobs == 1:
+        y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x.dtype)
+        for xi, x_ in enumerate(x_flat):
+            y[xi] = _fft_resample(x_, new_len, npads, to_removes,
+                                  cuda_dict, pad)
+    else:
+        parallel, p_fun, _ = parallel_func(_fft_resample, n_jobs)
+        y = parallel(p_fun(x_, new_len, npads, to_removes, cuda_dict, pad)
+                     for x_ in x_flat)
+        y = np.array(y)
+
+    # Restore the original array shape (modified for resampling)
+    y.shape = orig_shape[:-1] + (y.shape[1],)
+    if axis != orig_last_axis:
+        y = y.swapaxes(axis, orig_last_axis)
+    assert y.shape[axis] == final_len
+
+    return y
+
+
+def _resample_stim_channels(stim_data, up, down):
+    """Resample stim channels, carefully.
+
+    Parameters
+    ----------
+    stim_data : array, shape (n_samples,) or (n_stim_channels, n_samples)
+        Stim channels to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+
+    Returns
+    -------
+    stim_resampled : array, shape (n_stim_channels, n_samples_resampled)
+        The resampled stim channels.
+
+    Note
+    ----
+    The approach taken here is equivalent to the approach in the C-code.
+    See the decimate_stimch function in MNE/mne_browse_raw/save.c
+    """
+    stim_data = np.atleast_2d(stim_data)
+    n_stim_channels, n_samples = stim_data.shape
+
+    ratio = float(up) / down
+    resampled_n_samples = int(round(n_samples * ratio))
+
+    stim_resampled = np.zeros((n_stim_channels, resampled_n_samples))
+
+    # Figure out which points in old data to subsample; protect against
+    # out-of-bounds, which can happen (having one sample more than
+    # expected) due to padding
+    sample_picks = np.minimum(
+        (np.arange(resampled_n_samples) / ratio).astype(int),
+        n_samples - 1
+    )
+
+    # Create windows starting from sample_picks[i], ending at sample_picks[i+1]
+    windows = zip(sample_picks, np.r_[sample_picks[1:], n_samples])
+
+    # Use the first non-zero value in each window
+    for window_i, window in enumerate(windows):
+        for stim_num, stim in enumerate(stim_data):
+            nonzero = stim[window[0]:window[1]].nonzero()[0]
+            if len(nonzero) > 0:
+                val = stim[window[0] + nonzero[0]]
+            else:
+                val = stim[window[0]]
+            stim_resampled[stim_num, window_i] = val
+
+    return stim_resampled
+
+
+def detrend(x, order=1, axis=-1):
+    """Detrend the array x.
+
+    Parameters
+    ----------
+    x : n-d array
+        Signal to detrend.
+    order : int
+        Fit order. Currently must be '0' or '1'.
+    axis : int
+        Axis of the array to operate on.
+
+    Returns
+    -------
+    y : array
+        The x array detrended.
+
+    Examples
+    --------
+    As in :func:`scipy.signal.detrend`::
+
+        >>> randgen = np.random.RandomState(9)
+        >>> npoints = int(1e3)
+        >>> noise = randgen.randn(npoints)
+        >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
+        >>> (detrend(x) - noise).max() < 0.01
+        True
+    """
+    from scipy.signal import detrend
+    if axis > len(x.shape):
+        raise ValueError('x does not have %d axes' % axis)
+    if order == 0:
+        fit = 'constant'
+    elif order == 1:
+        fit = 'linear'
+    else:
+        raise ValueError('order must be 0 or 1')
+
+    y = detrend(x, axis=axis, type=fit)
+
+    return y
+
+
+# Taken from Ifeachor and Jervis p. 356.
+# Note that here the passband ripple and stopband attenuation are
+# redundant. The scalar passband ripple δp is expressed in dB as
+# 20 * log10(1+δp), but the scalar stopband ripple δs is expressed in dB as
+# -20 * log10(δs). So if we know that our stopband attenuation is 53 dB
+# (Hamming) then δs = 10 ** (53 / -20.), which means that the passband
+# deviation should be 20 * np.log10(1 + 10 ** (53 / -20.)) == 0.0194.
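As a quick standalone check of the ripple/attenuation arithmetic in the comment above, the figures in the window table that follows can be reproduced with a few lines of NumPy (a minimal sketch, not part of the diff itself)::

    import numpy as np

    # Stopband attenuation (dB) per window, per the Ifeachor & Jervis values
    for name, att_db in [('hann', 44), ('hamming', 53), ('blackman', 74)]:
        delta_s = 10 ** (att_db / -20.)         # scalar stopband ripple
        ripple_db = 20 * np.log10(1 + delta_s)  # passband deviation in dB
        print(f'{name}: {ripple_db:.4f}')
    # prints: hann: 0.0546, hamming: 0.0194, blackman: 0.0017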
+_fir_window_dict = { + 'hann': dict(name='Hann', ripple=0.0546, attenuation=44), + 'hamming': dict(name='Hamming', ripple=0.0194, attenuation=53), + 'blackman': dict(name='Blackman', ripple=0.0017, attenuation=74), +} +_known_fir_windows = tuple(sorted(_fir_window_dict.keys())) +_known_phases = ('linear', 'zero', 'zero-double', 'minimum') +_known_fir_designs = ('firwin', 'firwin2') +_fir_design_dict = { + 'firwin': 'Windowed time-domain', + 'firwin2': 'Windowed frequency-domain', +} + + +def _to_samples(filter_length, sfreq, phase, fir_design): + _validate_type(filter_length, (str, 'int-like'), 'filter_length') + if isinstance(filter_length, str): + filter_length = filter_length.lower() + err_msg = ('filter_length, if a string, must be a ' + 'human-readable time, e.g. "10s", or "auto", not ' + '"%s"' % filter_length) + if filter_length.lower().endswith('ms'): + mult_fact = 1e-3 + filter_length = filter_length[:-2] + elif filter_length[-1].lower() == 's': + mult_fact = 1 + filter_length = filter_length[:-1] + else: + raise ValueError(err_msg) + # now get the number + try: + filter_length = float(filter_length) + except ValueError: + raise ValueError(err_msg) + filter_length = max(int(np.ceil(filter_length * mult_fact * + sfreq)), 1) + if fir_design == 'firwin': + filter_length += (filter_length - 1) % 2 + filter_length = _ensure_int(filter_length, 'filter_length') + return filter_length + + +def _triage_filter_params(x, sfreq, l_freq, h_freq, + l_trans_bandwidth, h_trans_bandwidth, + filter_length, method, phase, fir_window, + fir_design, bands='scalar', reverse=False): + """Validate and automate filter parameter selection.""" + _validate_type(phase, 'str', 'phase') + _check_option('phase', phase, _known_phases) + _validate_type(fir_window, 'str', 'fir_window') + _check_option('fir_window', fir_window, _known_fir_windows) + _validate_type(fir_design, 'str', 'fir_design') + _check_option('fir_design', fir_design, _known_fir_designs) + + # Helpers for reporting + report_phase = 'non-linear phase' if phase == 'minimum' else 'zero-phase' + causality = 'causal' if phase == 'minimum' else 'non-causal' + if phase == 'zero-double': + report_pass = 'two-pass forward and reverse' + else: + report_pass = 'one-pass' + if l_freq is not None: + if h_freq is not None: + kind = 'bandstop' if reverse else 'bandpass' + else: + kind = 'highpass' + assert not reverse + elif h_freq is not None: + kind = 'lowpass' + assert not reverse + else: + kind = 'allpass' + + def float_array(c): + return np.array(c, float).ravel() + + if bands == 'arr': + cast = float_array + else: + cast = float + sfreq = float(sfreq) + if l_freq is not None: + l_freq = cast(l_freq) + if np.any(l_freq <= 0): + raise ValueError('highpass frequency %s must be greater than zero' + % (l_freq,)) + if h_freq is not None: + h_freq = cast(h_freq) + if np.any(h_freq >= sfreq / 2.): + raise ValueError('lowpass frequency %s must be less than Nyquist ' + '(%s)' % (h_freq, sfreq / 2.)) + + dB_cutoff = False # meaning, don't try to compute or report + if bands == 'scalar' or (len(h_freq) == 1 and len(l_freq) == 1): + if phase == 'zero': + dB_cutoff = '-6 dB' + elif phase == 'zero-double': + dB_cutoff = '-12 dB' + + # we go to the next power of two when in FIR and zero-double mode + if method == 'iir': + # Ignore these parameters, effectively + l_stop, h_stop = l_freq, h_freq + else: # method == 'fir' + l_stop = h_stop = None + logger.info('') + logger.info('FIR filter parameters') + logger.info('---------------------') + logger.info('Designing a %s, 
%s, %s %s filter:' + % (report_pass, report_phase, causality, kind)) + logger.info('- %s design (%s) method' + % (_fir_design_dict[fir_design], fir_design)) + this_dict = _fir_window_dict[fir_window] + if fir_design == 'firwin': + logger.info('- {name:s} window with {ripple:0.4f} passband ripple ' + 'and {attenuation:d} dB stopband attenuation' + .format(**this_dict)) + else: + logger.info('- {name:s} window'.format(**this_dict)) + + if l_freq is not None: # high-pass component + if isinstance(l_trans_bandwidth, str): + if l_trans_bandwidth != 'auto': + raise ValueError('l_trans_bandwidth must be "auto" if ' + 'string, got "%s"' % l_trans_bandwidth) + l_trans_bandwidth = np.minimum(np.maximum(0.25 * l_freq, 2.), + l_freq) + msg = ('- Lower transition bandwidth: %0.2f Hz' + % (l_trans_bandwidth)) + if dB_cutoff: + logger.info('- Lower passband edge: %0.2f' % (l_freq,)) + msg += ' (%s cutoff frequency: %0.2f Hz)' % ( + dB_cutoff, l_freq - l_trans_bandwidth / 2.) + logger.info(msg) + l_trans_bandwidth = cast(l_trans_bandwidth) + if np.any(l_trans_bandwidth <= 0): + raise ValueError('l_trans_bandwidth must be positive, got %s' + % (l_trans_bandwidth,)) + l_stop = l_freq - l_trans_bandwidth + if reverse: # band-stop style + l_stop += l_trans_bandwidth + l_freq += l_trans_bandwidth + if np.any(l_stop < 0): + raise ValueError('Filter specification invalid: Lower stop ' + 'frequency negative (%0.2f Hz). Increase pass' + ' frequency or reduce the transition ' + 'bandwidth (l_trans_bandwidth)' % l_stop) + if h_freq is not None: # low-pass component + if isinstance(h_trans_bandwidth, str): + if h_trans_bandwidth != 'auto': + raise ValueError('h_trans_bandwidth must be "auto" if ' + 'string, got "%s"' % h_trans_bandwidth) + h_trans_bandwidth = np.minimum(np.maximum(0.25 * h_freq, 2.), + sfreq / 2. - h_freq) + msg = ('- Upper transition bandwidth: %0.2f Hz' + % (h_trans_bandwidth)) + if dB_cutoff: + logger.info('- Upper passband edge: %0.2f Hz' % (h_freq,)) + msg += ' (%s cutoff frequency: %0.2f Hz)' % ( + dB_cutoff, h_freq + h_trans_bandwidth / 2.) + logger.info(msg) + h_trans_bandwidth = cast(h_trans_bandwidth) + if np.any(h_trans_bandwidth <= 0): + raise ValueError('h_trans_bandwidth must be positive, got %s' + % (h_trans_bandwidth,)) + h_stop = h_freq + h_trans_bandwidth + if reverse: # band-stop style + h_stop -= h_trans_bandwidth + h_freq -= h_trans_bandwidth + if np.any(h_stop > sfreq / 2): + raise ValueError('Effective band-stop frequency (%s) is too ' + 'high (maximum based on Nyquist is %s)' + % (h_stop, sfreq / 2.)) + + if isinstance(filter_length, str) and filter_length.lower() == 'auto': + filter_length = filter_length.lower() + h_check = h_trans_bandwidth if h_freq is not None else np.inf + l_check = l_trans_bandwidth if l_freq is not None else np.inf + mult_fact = 2. if fir_design == 'firwin2' else 1. 
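+        # The 'auto' length below scales a window-specific factor
+        # (_length_factors) by the narrowest transition band; e.g. a Hamming
+        # window (factor ~3.3) with a 2 Hz transition bandwidth gives
+        # 3.3 / 2 = 1.65 s, which _to_samples then converts to samples.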
+ filter_length = '%ss' % (_length_factors[fir_window] * mult_fact / + float(min(h_check, l_check)),) + next_pow_2 = False # disable old behavior + else: + next_pow_2 = ( + isinstance(filter_length, str) and phase == 'zero-double') + + filter_length = _to_samples(filter_length, sfreq, phase, fir_design) + + # use correct type of filter (must be odd length for firwin and for + # zero phase) + if fir_design == 'firwin' or phase == 'zero': + filter_length += (filter_length - 1) % 2 + + logger.info('- Filter length: %s samples (%0.3f sec)' + % (filter_length, filter_length / sfreq)) + logger.info('') + + if filter_length <= 0: + raise ValueError('filter_length must be positive, got %s' + % (filter_length,)) + + if next_pow_2: + filter_length = 2 ** int(np.ceil(np.log2(filter_length))) + if fir_design == 'firwin': + filter_length += (filter_length - 1) % 2 + + # If we have data supplied, do a sanity check + if x is not None: + x = _check_filterable(x) + len_x = x.shape[-1] + if method != 'fir': + filter_length = len_x + if filter_length > len_x and not (l_freq is None and h_freq is None): + warn('filter_length (%s) is longer than the signal (%s), ' + 'distortion is likely. Reduce filter length or filter a ' + 'longer signal.' % (filter_length, len_x)) + + logger.debug('Using filter length: %s' % filter_length) + return (x, sfreq, l_freq, h_freq, l_stop, h_stop, filter_length, phase, + fir_window, fir_design) + + +class FilterMixin(object): + """Object for Epoch/Evoked filtering.""" + + @verbose + def savgol_filter(self, h_freq, verbose=None): + """Filter the data using Savitzky-Golay polynomial method. + + Parameters + ---------- + h_freq : float + Approximate high cut-off frequency in Hz. Note that this + is not an exact cutoff, since Savitzky-Golay filtering + :footcite:`SavitzkyGolay1964` is done using polynomial fits + instead of FIR/IIR filtering. This parameter is thus used to + determine the length of the window over which a 5th-order + polynomial smoothing is used. + %(verbose)s + + Returns + ------- + inst : instance of Epochs or Evoked + The object with the filtering applied. + + See Also + -------- + mne.io.Raw.filter + + Notes + ----- + For Savitzky-Golay low-pass approximation, see: + + https://gist.github.com/larsoner/bbac101d50176611136b + + .. versionadded:: 0.9.0 + + References + ---------- + .. footbibliography:: + + Examples + -------- + >>> import mne + >>> from os import path as op + >>> evoked_fname = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis-ave.fif') # doctest:+SKIP + >>> evoked = mne.read_evokeds(evoked_fname, baseline=(None, 0))[0] # doctest:+SKIP + >>> evoked.savgol_filter(10.) 
# low-pass at around 10 Hz # doctest:+SKIP + >>> evoked.plot() # doctest:+SKIP + """ # noqa: E501 + from scipy.signal import savgol_filter + _check_preload(self, 'inst.savgol_filter') + h_freq = float(h_freq) + if h_freq >= self.info['sfreq'] / 2.: + raise ValueError('h_freq must be less than half the sample rate') + + # savitzky-golay filtering + window_length = (int(np.round(self.info['sfreq'] / + h_freq)) // 2) * 2 + 1 + logger.info('Using savgol length %d' % window_length) + self._data[:] = savgol_filter(self._data, axis=-1, polyorder=5, + window_length=window_length) + return self + + @verbose + def filter(self, l_freq, h_freq, picks=None, filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1, + method='fir', iir_params=None, phase='zero', + fir_window='hamming', fir_design='firwin', + skip_by_annotation=('edge', 'bad_acq_skip'), pad='edge', + verbose=None): + """Filter a subset of channels. + + Parameters + ---------- + %(l_freq)s + %(h_freq)s + %(picks_all_data)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + %(phase)s + %(fir_window)s + %(fir_design)s + skip_by_annotation : str | list of str + If a string (or list of str), any annotation segment that begins + with the given string will not be included in filtering, and + segments on either side of the given excluded annotated segment + will be filtered separately (i.e., as independent signals). + The default (``('edge', 'bad_acq_skip')`` will separately filter + any segments that were concatenated by :func:`mne.concatenate_raws` + or :meth:`mne.io.Raw.append`, or separated during acquisition. + To disable, provide an empty list. Only used if ``inst`` is raw. + + .. versionadded:: 0.16. + %(pad_fir)s + %(verbose)s + + Returns + ------- + inst : instance of Epochs, Evoked, or Raw + The filtered data. + + See Also + -------- + mne.filter.create_filter + mne.Evoked.savgol_filter + mne.io.Raw.notch_filter + mne.io.Raw.resample + mne.filter.create_filter + mne.filter.filter_data + mne.filter.construct_iir_filter + + Notes + ----- + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by ``picks``. + The data are modified inplace. + + The object has to have the data loaded e.g. with ``preload=True`` + or ``self.load_data()``. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + ``self.info['lowpass']`` and ``self.info['highpass']`` are only + updated with picks=None. + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporaily stored in memory. + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample` and + :func:`mne.filter.create_filter`. + + .. 
versionadded:: 0.15 + """ + from .io.base import BaseRaw + _check_preload(self, 'inst.filter') + if pad is None and method != 'iir': + pad = 'edge' + update_info, picks = _filt_check_picks(self.info, picks, + l_freq, h_freq) + if isinstance(self, BaseRaw): + # Deal with annotations + onsets, ends = _annotations_starts_stops( + self, skip_by_annotation, invert=True) + logger.info('Filtering raw data in %d contiguous segment%s' + % (len(onsets), _pl(onsets))) + else: + onsets, ends = np.array([0]), np.array([self._data.shape[1]]) + max_idx = (ends - onsets).argmax() + for si, (start, stop) in enumerate(zip(onsets, ends)): + # Only output filter params once (for info level), and only warn + # once about the length criterion (longest segment is too short) + use_verbose = verbose if si == max_idx else 'error' + filter_data( + self._data[:, start:stop], self.info['sfreq'], l_freq, h_freq, + picks, filter_length, l_trans_bandwidth, h_trans_bandwidth, + n_jobs, method, iir_params, copy=False, phase=phase, + fir_window=fir_window, fir_design=fir_design, pad=pad, + verbose=use_verbose) + # update info if filter is applied to all data channels, + # and it's not a band-stop filter + _filt_update_info(self.info, update_info, l_freq, h_freq) + return self + + @verbose + def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1, + pad='edge', verbose=None): # lgtm + """Resample data. + + If appropriate, an anti-aliasing filter is applied before resampling. + See :ref:`resampling-and-decimating` for more information. + + .. note:: Data must be loaded. + + Parameters + ---------- + sfreq : float + New sample rate to use. + %(npad)s + %(window_resample)s + %(n_jobs_cuda)s + %(pad)s + The default is ``'edge'``, which pads with the edge values of each + vector. + + .. versionadded:: 0.15 + %(verbose)s + + Returns + ------- + inst : instance of Epochs or Evoked + The resampled object. + + See Also + -------- + mne.io.Raw.resample + + Notes + ----- + For some data, it may be more accurate to use npad=0 to reduce + artifacts. This is dataset dependent -- check your data! + """ + from .epochs import BaseEpochs + from .evoked import Evoked + # Should be guaranteed by our inheritance, and the fact that + # mne.io.base.BaseRaw overrides this method + assert isinstance(self, (BaseEpochs, Evoked)) + + _check_preload(self, 'inst.resample') + + sfreq = float(sfreq) + o_sfreq = self.info['sfreq'] + self._data = resample(self._data, sfreq, o_sfreq, npad, window=window, + n_jobs=n_jobs, pad=pad) + lowpass = self.info.get('lowpass') + lowpass = np.inf if lowpass is None else lowpass + with self.info._unlock(): + self.info['lowpass'] = min(lowpass, sfreq / 2.) + self.info['sfreq'] = float(sfreq) + new_times = (np.arange(self._data.shape[-1], dtype=np.float64) / + sfreq + self.times[0]) + # adjust indirectly affected variables + if isinstance(self, BaseEpochs): + self._set_times(new_times) + self._raw_times = self.times + else: # isinstance(self, Evoked) + self.times = new_times + self._update_first_last() + return self + + @verbose + def apply_hilbert(self, picks=None, envelope=False, n_jobs=1, n_fft='auto', + verbose=None): + """Compute analytic signal or envelope for a subset of channels. + + Parameters + ---------- + %(picks_all_data_noref)s + envelope : bool + Compute the envelope signal of each channel. Default False. + See Notes. + %(n_jobs)s + n_fft : int | None | str + Points to use in the FFT for Hilbert transformation. 
The signal
+            will be padded with zeros before computing Hilbert, then cut back
+            to original length. If None, n == self.n_times. If 'auto',
+            the next highest fast FFT length will be used.
+        %(verbose)s
+
+        Returns
+        -------
+        self : instance of Raw, Epochs, or Evoked
+            The raw object with transformed data.
+
+        Notes
+        -----
+        **Parameters**
+
+        If ``envelope=False``, the analytic signal for the channels defined in
+        ``picks`` is computed and the data of the Raw object is converted to
+        a complex representation (the analytic signal is complex valued).
+
+        If ``envelope=True``, the absolute value of the analytic signal for
+        the channels defined in ``picks`` is computed, resulting in the
+        envelope signal.
+
+        .. warning:: Do not use ``envelope=True`` if you intend to compute
+                     an inverse solution from the raw data. If you want to
+                     compute the envelope in source space, use
+                     ``envelope=False`` and compute the envelope after the
+                     inverse solution has been obtained.
+
+        If envelope=False, more memory is required since the original raw
+        data as well as the analytic signal have to be stored temporarily in
+        memory. If n_jobs > 1, more memory is required as
+        ``len(picks) * n_times`` additional time points need to be stored
+        temporarily in memory.
+
+        Also note that the ``n_fft`` parameter will allow you to pad the
+        signal with zeros before performing the Hilbert transform. This
+        padding is cut off, but it may result in a slightly different result
+        (particularly around the edges). Use at your own risk.
+
+        **Analytic signal**
+
+        The analytic signal "x_a(t)" of "x(t)" is::
+
+            x_a = F^{-1}(F(x) 2U) = x + i y
+
+        where "F" is the Fourier transform, "U" the unit step function,
+        and "y" the Hilbert transform of "x". One usage of the analytic
+        signal is the computation of the envelope signal, which is given by
+        "e(t) = abs(x_a(t))". Due to the linearity of the Hilbert transform
+        and the MNE inverse solution, the envelope in source space can be
+        obtained by computing the analytic signal in sensor space, applying
+        the MNE inverse, and computing the envelope in source space.
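+
+        For example, a minimal sketch, assuming ``raw`` is a preloaded
+        :class:`mne.io.Raw` instance (the variable name is illustrative)::
+
+            >>> raw.apply_hilbert(envelope=True)  # doctest: +SKIP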
+ """ + _check_preload(self, 'inst.apply_hilbert') + if n_fft is None: + n_fft = len(self.times) + elif isinstance(n_fft, str): + if n_fft != 'auto': + raise ValueError('n_fft must be an integer, string, or None, ' + 'got %s' % (type(n_fft),)) + n_fft = next_fast_len(len(self.times)) + n_fft = int(n_fft) + if n_fft < len(self.times): + raise ValueError("n_fft (%d) must be at least the number of time " + "points (%d)" % (n_fft, len(self.times))) + dtype = None if envelope else np.complex128 + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + args, kwargs = (), dict(n_fft=n_fft, envelope=envelope) + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + if n_jobs == 1: + # modify data inplace to save memory + for idx in picks: + self._data[..., idx, :] = _check_fun( + _my_hilbert, data_in[..., idx, :], *args, **kwargs) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel( + p_fun(_my_hilbert, data_in[..., p, :], *args, **kwargs) + for p in picks) + for pp, p in enumerate(picks): + self._data[..., p, :] = data_picks_new[pp] + return self + + +def _check_fun(fun, d, *args, **kwargs): + """Check shapes.""" + want_shape = d.shape + d = fun(d, *args, **kwargs) + if not isinstance(d, np.ndarray): + raise TypeError('Return value must be an ndarray') + if d.shape != want_shape: + raise ValueError('Return data must have shape %s not %s' + % (want_shape, d.shape)) + return d + + +def _my_hilbert(x, n_fft=None, envelope=False): + """Compute Hilbert transform of signals w/ zero padding. + + Parameters + ---------- + x : array, shape (n_times) + The signal to convert + n_fft : int + Size of the FFT to perform, must be at least ``len(x)``. + The signal will be cut back to original length. + envelope : bool + Whether to compute amplitude of the hilbert transform in order + to return the signal envelope. + + Returns + ------- + out : array, shape (n_times) + The hilbert transform of the signal, or the envelope. + """ + from scipy.signal import hilbert + n_x = x.shape[-1] + out = hilbert(x, N=n_fft, axis=-1)[..., :n_x] + if envelope: + out = np.abs(out) + return out + + +@verbose +def design_mne_c_filter(sfreq, l_freq=None, h_freq=40., + l_trans_bandwidth=None, h_trans_bandwidth=5., + verbose=None): + """Create a FIR filter like that used by MNE-C. + + Parameters + ---------- + sfreq : float + The sample frequency. + l_freq : float | None + The low filter frequency in Hz, default None. + Can be None to avoid high-passing. + h_freq : float + The high filter frequency in Hz, default 40. + Can be None to avoid low-passing. + l_trans_bandwidth : float | None + Low transition bandwidthin Hz. Can be None (default) to use 3 samples. + h_trans_bandwidth : float + High transition bandwidth in Hz. + %(verbose)s + + Returns + ------- + h : ndarray, shape (8193,) + The linear-phase (symmetric) FIR filter coefficients. + + Notes + ----- + This function is provided mostly for reference purposes. + + MNE-C uses a frequency-domain filter design technique by creating a + linear-phase filter of length 8193. In the frequency domain, the + 4197 frequencies are directly constructed, with zeroes in the stop-band + and ones in the passband, with squared cosine ramps in between. 
+ """ + from scipy.fft import irfft + n_freqs = (4096 + 2 * 2048) // 2 + 1 + freq_resp = np.ones(n_freqs) + l_freq = 0 if l_freq is None else float(l_freq) + if l_trans_bandwidth is None: + l_width = 3 + else: + l_width = (int(((n_freqs - 1) * l_trans_bandwidth) / + (0.5 * sfreq)) + 1) // 2 + l_start = int(((n_freqs - 1) * l_freq) / (0.5 * sfreq)) + h_freq = sfreq / 2. if h_freq is None else float(h_freq) + h_width = (int(((n_freqs - 1) * h_trans_bandwidth) / + (0.5 * sfreq)) + 1) // 2 + h_start = int(((n_freqs - 1) * h_freq) / (0.5 * sfreq)) + logger.info('filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d ' + 'hpw : %d lpw : %d' % (l_freq, h_freq, l_start, h_start, + n_freqs, l_width, h_width)) + if l_freq > 0: + start = l_start - l_width + 1 + stop = start + 2 * l_width - 1 + if start < 0 or stop >= n_freqs: + raise RuntimeError('l_freq too low or l_trans_bandwidth too large') + freq_resp[:start] = 0. + k = np.arange(-l_width + 1, l_width) / float(l_width) + 3. + freq_resp[start:stop] = np.cos(np.pi / 4. * k) ** 2 + + if h_freq < sfreq / 2.: + start = h_start - h_width + 1 + stop = start + 2 * h_width - 1 + if start < 0 or stop >= n_freqs: + raise RuntimeError('h_freq too high or h_trans_bandwidth too ' + 'large') + k = np.arange(-h_width + 1, h_width) / float(h_width) + 1. + freq_resp[start:stop] *= np.cos(np.pi / 4. * k) ** 2 + freq_resp[stop:] = 0.0 + # Get the time-domain version of this signal + h = irfft(freq_resp, n=2 * len(freq_resp) - 1) + h = np.roll(h, n_freqs - 1) # center the impulse like a linear-phase filt + return h + + +def _filt_check_picks(info, picks, h_freq, l_freq): + from .io.pick import _picks_to_idx + update_info = False + # This will pick *all* data channels + picks = _picks_to_idx(info, picks, 'data_or_ica', exclude=()) + if h_freq is not None or l_freq is not None: + data_picks = _picks_to_idx(info, None, 'data_or_ica', exclude=(), + allow_empty=True) + if len(data_picks) == 0: + logger.info('No data channels found. The highpass and ' + 'lowpass values in the measurement info will not ' + 'be updated.') + elif np.in1d(data_picks, picks).all(): + update_info = True + else: + logger.info('Filtering a subset of channels. The highpass and ' + 'lowpass values in the measurement info will not ' + 'be updated.') + return update_info, picks + + +def _filt_update_info(info, update_info, l_freq, h_freq): + if update_info: + if h_freq is not None and (l_freq is None or l_freq < h_freq) and \ + (info["lowpass"] is None or h_freq < info['lowpass']): + with info._unlock(): + info['lowpass'] = float(h_freq) + if l_freq is not None and (h_freq is None or l_freq < h_freq) and \ + (info["highpass"] is None or l_freq > info['highpass']): + with info._unlock(): + info['highpass'] = float(l_freq) diff --git a/python/libs/mne/fixes.py b/python/libs/mne/fixes.py new file mode 100644 index 0000000..0cb743f --- /dev/null +++ b/python/libs/mne/fixes.py @@ -0,0 +1,1084 @@ +"""Compatibility fixes for older versions of libraries + +If you add content to this file, please give the version of the package +at which the fix is no longer needed. 
+ +# originally copied from scikit-learn + +""" +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# Fabian Pedregosa +# Lars Buitinck +# License: BSD + +import functools +import inspect +from math import log +import os +from pathlib import Path +import warnings + +import numpy as np + + +############################################################################### +# distutils + +# distutils has been deprecated since Python 3.10 and is scheduled for removal +# from the standard library with the release of Python 3.12. For version +# comparisons, we use setuptools's `parse_version` if available. + +def _compare_version(version_a, operator, version_b): + """Compare two version strings via a user-specified operator. + + Parameters + ---------- + version_a : str + First version string. + operator : '==' | '>' | '<' | '>=' | '<=' + Operator to compare ``version_a`` and ``version_b`` in the form of + ``version_a operator version_b``. + version_b : str + Second version string. + + Returns + ------- + bool + The result of the version comparison. + """ + from packaging.version import parse + with warnings.catch_warnings(record=True): + warnings.simplefilter('ignore') + return eval(f'parse("{version_a}") {operator} parse("{version_b}")') + + +############################################################################### +# Misc + +def _median_complex(data, axis): + """Compute marginal median on complex data safely. + + Can be removed when numpy introduces a fix. + See: https://github.com/scipy/scipy/pull/12676/. + """ + # np.median must be passed real arrays for the desired result + if np.iscomplexobj(data): + data = (np.median(np.real(data), axis=axis) + + 1j * np.median(np.imag(data), axis=axis)) + else: + data = np.median(data, axis=axis) + return data + + +# helpers to get function arguments +def _get_args(function, varargs=False): + params = inspect.signature(function).parameters + args = [key for key, param in params.items() + if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)] + if varargs: + varargs = [param.name for param in params.values() + if param.kind == param.VAR_POSITIONAL] + if len(varargs) == 0: + varargs = None + return args, varargs + else: + return args + + +def _safe_svd(A, **kwargs): + """Wrapper to get around the SVD did not converge error of death""" + # Intel has a bug with their GESVD driver: + # https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501 + # For SciPy 0.18 and up, we can work around it by using + # lapack_driver='gesvd' instead. 
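+    # In practice that failure shows up as np.linalg.LinAlgError
+    # ("SVD did not converge") from the default GESDD path; the except
+    # clause below retries with the slower but more robust GESVD driver.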
+ from scipy import linalg + if kwargs.get('overwrite_a', False): + raise ValueError('Cannot set overwrite_a=True with this function') + try: + return linalg.svd(A, **kwargs) + except np.linalg.LinAlgError as exp: + from .utils import warn + warn('SVD error (%s), attempting to use GESVD instead of GESDD' + % (exp,)) + return linalg.svd(A, lapack_driver='gesvd', **kwargs) + + +def _csc_matrix_cast(x): + from scipy.sparse import csc_matrix + return csc_matrix(x) + + +############################################################################### +# Backporting nibabel's read_geometry + +def _get_read_geometry(): + """Get the geometry reading function.""" + try: + import nibabel as nib + has_nibabel = True + except ImportError: + has_nibabel = False + if has_nibabel: + from nibabel.freesurfer import read_geometry + else: + read_geometry = _read_geometry + return read_geometry + + +def _read_geometry(filepath, read_metadata=False, read_stamp=False): + """Backport from nibabel.""" + from .surface import _fread3, _fread3_many + volume_info = dict() + + TRIANGLE_MAGIC = 16777214 + QUAD_MAGIC = 16777215 + NEW_QUAD_MAGIC = 16777213 + with open(filepath, "rb") as fobj: + magic = _fread3(fobj) + if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file + nvert = _fread3(fobj) + nquad = _fread3(fobj) + (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.) + coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div + coords = coords.reshape(-1, 3) + quads = _fread3_many(fobj, nquad * 4) + quads = quads.reshape(nquad, 4) + # + # Face splitting follows + # + faces = np.zeros((2 * nquad, 3), dtype=np.int64) + nface = 0 + for quad in quads: + if (quad[0] % 2) == 0: + faces[nface] = quad[0], quad[1], quad[3] + nface += 1 + faces[nface] = quad[2], quad[3], quad[1] + nface += 1 + else: + faces[nface] = quad[0], quad[1], quad[2] + nface += 1 + faces[nface] = quad[0], quad[2], quad[3] + nface += 1 + + elif magic == TRIANGLE_MAGIC: # Triangle file + create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8') + fobj.readline() + vnum = np.fromfile(fobj, ">i4", 1)[0] + fnum = np.fromfile(fobj, ">i4", 1)[0] + coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3) + faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3) + + if read_metadata: + volume_info = _read_volume_info(fobj) + else: + raise ValueError("File does not appear to be a Freesurfer surface") + + coords = coords.astype(np.float64) + + ret = (coords, faces) + if read_metadata: + if len(volume_info) == 0: + warnings.warn('No volume information contained in the file') + ret += (volume_info,) + if read_stamp: + ret += (create_stamp,) + + return ret + + +############################################################################### +# NumPy Generator (NumPy 1.17) + + +def rng_uniform(rng): + """Get the unform/randint from the rng.""" + # prefer Generator.integers, fall back to RandomState.randint + return getattr(rng, 'integers', getattr(rng, 'randint', None)) + + +def _validate_sos(sos): + """Helper to validate a SOS input""" + sos = np.atleast_2d(sos) + if sos.ndim != 2: + raise ValueError('sos array must be 2D') + n_sections, m = sos.shape + if m != 6: + raise ValueError('sos array must be shape (n_sections, 6)') + if not (sos[:, 3] == 1).all(): + raise ValueError('sos[:, 3] should be all ones') + return sos, n_sections + + +############################################################################### +# Misc utilities + +# get_fdata() requires knowing the dtype ahead of time, so let's triage on our +# own instead +def 
_get_img_fdata(img): + data = np.asanyarray(img.dataobj) + dtype = np.complex128 if np.iscomplexobj(data) else np.float64 + return data.astype(dtype) + + +def _read_volume_info(fobj): + """An implementation of nibabel.freesurfer.io._read_volume_info, since old + versions of nibabel (<=2.1.0) don't have it. + """ + volume_info = dict() + head = np.fromfile(fobj, '>i4', 1) + if not np.array_equal(head, [20]): # Read two bytes more + head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)]) + if not np.array_equal(head, [2, 0, 20]): + warnings.warn("Unknown extension code.") + return volume_info + + volume_info['head'] = head + for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', + 'zras', 'cras']: + pair = fobj.readline().decode('utf-8').split('=') + if pair[0].strip() != key or len(pair) != 2: + raise IOError('Error parsing volume info.') + if key in ('valid', 'filename'): + volume_info[key] = pair[1].strip() + elif key == 'volume': + volume_info[key] = np.array(pair[1].split()).astype(int) + else: + volume_info[key] = np.array(pair[1].split()).astype(float) + # Ignore the rest + return volume_info + + +def _serialize_volume_info(volume_info): + """An implementation of nibabel.freesurfer.io._serialize_volume_info, since + old versions of nibabel (<=2.1.0) don't have it.""" + keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', + 'zras', 'cras'] + diff = set(volume_info.keys()).difference(keys) + if len(diff) > 0: + raise ValueError('Invalid volume info: %s.' % diff.pop()) + + strings = list() + for key in keys: + if key == 'head': + if not (np.array_equal(volume_info[key], [20]) or np.array_equal( + volume_info[key], [2, 0, 20])): + warnings.warn("Unknown extension code.") + strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) + elif key in ('valid', 'filename'): + val = volume_info[key] + strings.append('{} = {}\n'.format(key, val).encode('utf-8')) + elif key == 'volume': + val = volume_info[key] + strings.append('{} = {} {} {}\n'.format( + key, val[0], val[1], val[2]).encode('utf-8')) + else: + val = volume_info[key] + strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format( + key.ljust(6), val[0], val[1], val[2]).encode('utf-8')) + return b''.join(strings) + + +############################################################################## +# adapted from scikit-learn + + +def is_classifier(estimator): + """Returns True if the given estimator is (probably) a classifier. + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a classifier and False otherwise. + """ + return getattr(estimator, "_estimator_type", None) == "classifier" + + +def is_regressor(estimator): + """Returns True if the given estimator is (probably) a regressor. + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a regressor and False otherwise. 
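+
+    Examples
+    --------
+    A minimal sketch (assuming scikit-learn is installed)::
+
+        >>> from sklearn.svm import SVR  # doctest: +SKIP
+        >>> is_regressor(SVR())  # doctest: +SKIP
+        True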
+ """ + return getattr(estimator, "_estimator_type", None) == "regressor" + + +_DEFAULT_TAGS = { + 'non_deterministic': False, + 'requires_positive_X': False, + 'requires_positive_y': False, + 'X_types': ['2darray'], + 'poor_score': False, + 'no_validation': False, + 'multioutput': False, + "allow_nan": False, + 'stateless': False, + 'multilabel': False, + '_skip_test': False, + '_xfail_checks': False, + 'multioutput_only': False, + 'binary_only': False, + 'requires_fit': True, + 'preserves_dtype': [np.float64], + 'requires_y': False, + 'pairwise': False, +} + + +class BaseEstimator(object): + """Base class for all estimators in scikit-learn. + + Notes + ----- + All estimators should specify all the parameters that can be set + at the class level in their ``__init__`` as explicit keyword + arguments (no ``*args`` or ``**kwargs``). + """ + + @classmethod + def _get_param_names(cls): + """Get parameter names for the estimator""" + # fetch the constructor or the original constructor before + # deprecation wrapping if any + init = getattr(cls.__init__, 'deprecated_original', cls.__init__) + if init is object.__init__: + # No explicit constructor to introspect + return [] + + # introspect the constructor arguments to find the model parameters + # to represent + init_signature = inspect.signature(init) + # Consider the constructor parameters excluding 'self' + parameters = [p for p in init_signature.parameters.values() + if p.name != 'self' and p.kind != p.VAR_KEYWORD] + for p in parameters: + if p.kind == p.VAR_POSITIONAL: + raise RuntimeError("scikit-learn estimators should always " + "specify their parameters in the signature" + " of their __init__ (no varargs)." + " %s with constructor %s doesn't " + " follow this convention." + % (cls, init_signature)) + # Extract and sort argument names excluding 'self' + return sorted([p.name for p in parameters]) + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Parameters + ---------- + deep : bool, optional + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + out = dict() + for key in self._get_param_names(): + # We need deprecation warnings to always be on in order to + # catch deprecated param values. + # This is set in utils/__init__.py but it gets overwritten + # when running under python3 somehow. + warnings.simplefilter("always", DeprecationWarning) + try: + with warnings.catch_warnings(record=True) as w: + value = getattr(self, key, None) + if len(w) and w[0].category == DeprecationWarning: + # if the parameter is deprecated, don't show it + continue + finally: + warnings.filters.pop(0) + + # XXX: should we rather test if instance of estimator? + if deep and hasattr(value, 'get_params'): + deep_items = value.get_params().items() + out.update((key + '__' + k, val) for k, val in deep_items) + out[key] = value + return out + + def set_params(self, **params): + """Set the parameters of this estimator. + + The method works on simple estimators as well as on nested objects + (such as pipelines). The latter have parameters of the form + ``__`` so that it's possible to update each + component of a nested object. + + Parameters + ---------- + **params : dict + Parameters. + + Returns + ------- + inst : instance + The object. 
+ """ + if not params: + # Simple optimisation to gain speed (inspect is slow) + return self + valid_params = self.get_params(deep=True) + for key, value in params.items(): + split = key.split('__', 1) + if len(split) > 1: + # nested objects case + name, sub_name = split + if name not in valid_params: + raise ValueError('Invalid parameter %s for estimator %s. ' + 'Check the list of available parameters ' + 'with `estimator.get_params().keys()`.' % + (name, self)) + sub_object = valid_params[name] + sub_object.set_params(**{sub_name: value}) + else: + # simple objects case + if key not in valid_params: + raise ValueError('Invalid parameter %s for estimator %s. ' + 'Check the list of available parameters ' + 'with `estimator.get_params().keys()`.' % + (key, self.__class__.__name__)) + setattr(self, key, value) + return self + + def __repr__(self): + from sklearn.base import _pprint + class_name = self.__class__.__name__ + return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False), + offset=len(class_name),),) + + # __getstate__ and __setstate__ are omitted because they only contain + # conditionals that are not satisfied by our objects (e.g., + # ``if type(self).__module__.startswith('sklearn.')``. + + def _more_tags(self): + return _DEFAULT_TAGS + + def _get_tags(self): + collected_tags = {} + for base_class in reversed(inspect.getmro(self.__class__)): + if hasattr(base_class, '_more_tags'): + # need the if because mixins might not have _more_tags + # but might do redundant work in estimators + # (i.e. calling more tags on BaseEstimator multiple times) + more_tags = base_class._more_tags(self) + collected_tags.update(more_tags) + return collected_tags + + +# newer sklearn deprecates importing from sklearn.metrics.scoring, +# but older sklearn does not expose check_scoring in sklearn.metrics. +def _get_check_scoring(): + try: + from sklearn.metrics import check_scoring # noqa + except ImportError: + from sklearn.metrics.scorer import check_scoring # noqa + return check_scoring + + +def _check_fit_params(X, fit_params, indices=None): + """Check and validate the parameters passed during `fit`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data array. + + fit_params : dict + Dictionary containing the parameters passed at fit. + + indices : array-like of shape (n_samples,), default=None + Indices to be selected if the parameter has the same size as + `X`. + + Returns + ------- + fit_params_validated : dict + Validated parameters. We ensure that the values support + indexing. + """ + try: + from sklearn.utils.validation import \ + _check_fit_params as _sklearn_check_fit_params + return _sklearn_check_fit_params(X, fit_params, indices) + except ImportError: + from sklearn.model_selection import _validation + + fit_params_validated = \ + {k: _validation._index_param_value(X, v, indices) + for k, v in fit_params.items()} + return fit_params_validated + + +############################################################################### +# Copied from sklearn to simplify code paths + +def empirical_covariance(X, assume_centered=False): + """Computes the Maximum likelihood covariance estimator + + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + Data from which to compute the covariance estimate + + assume_centered : Boolean + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. 
+ + Returns + ------- + covariance : 2D ndarray, shape (n_features, n_features) + Empirical covariance (Maximum Likelihood Estimator). + + """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn("Only one sample available. " + "You may want to reshape your data array") + + if assume_centered: + covariance = np.dot(X.T, X) / X.shape[0] + else: + covariance = np.cov(X.T, bias=1) + + if covariance.ndim == 0: + covariance = np.array([[covariance]]) + return covariance + + +class EmpiricalCovariance(BaseEstimator): + """Maximum likelihood covariance estimator + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool + Specifies if the estimated precision is stored. + + assume_centered : bool + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data are centered before computation. + + Attributes + ---------- + covariance_ : 2D ndarray, shape (n_features, n_features) + Estimated covariance matrix + + precision_ : 2D ndarray, shape (n_features, n_features) + Estimated pseudo-inverse matrix. + (stored only if store_precision is True) + + """ + def __init__(self, store_precision=True, assume_centered=False): + self.store_precision = store_precision + self.assume_centered = assume_centered + + def _set_covariance(self, covariance): + """Saves the covariance and precision estimates + + Storage is done accordingly to `self.store_precision`. + Precision stored only if invertible. + + Parameters + ---------- + covariance : 2D ndarray, shape (n_features, n_features) + Estimated covariance matrix to be stored, and from which precision + is computed. + + """ + from scipy import linalg + # covariance = check_array(covariance) + # set covariance + self.covariance_ = covariance + # set precision + if self.store_precision: + self.precision_ = linalg.pinvh(covariance) + else: + self.precision_ = None + + def get_precision(self): + """Getter for the precision matrix. + + Returns + ------- + precision_ : array-like, + The precision matrix associated to the current covariance object. + + """ + from scipy import linalg + if self.store_precision: + precision = self.precision_ + else: + precision = linalg.pinvh(self.covariance_) + return precision + + def fit(self, X, y=None): + """Fit the Maximum Likelihood Estimator covariance model. + + Parameters + ---------- + X : array-like, shape = [n_samples, n_features] + Training data, where n_samples is the number of samples and + n_features is the number of features. + y : ndarray | None + Not used, present for API consistency. + + Returns + ------- + self : object + Returns self. + """ # noqa: E501 + # X = check_array(X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance( + X, assume_centered=self.assume_centered) + self._set_covariance(covariance) + + return self + + def score(self, X_test, y=None): + """Compute the log-likelihood of a Gaussian dataset. + + Uses ``self.covariance_`` as an estimator of its covariance matrix. + + Parameters + ---------- + X_test : array-like, shape = [n_samples, n_features] + Test data of which we compute the likelihood, where n_samples is + the number of samples and n_features is the number of features. + X_test is assumed to be drawn from the same distribution than + the data used in fit (including centering). 
+ y : ndarray | None + Not used, present for API consistency. + + Returns + ------- + res : float + The likelihood of the data set with `self.covariance_` as an + estimator of its covariance matrix. + """ + # compute empirical covariance of the test set + test_cov = empirical_covariance( + X_test - self.location_, assume_centered=True) + # compute log likelihood + res = log_likelihood(test_cov, self.get_precision()) + + return res + + def error_norm(self, comp_cov, norm='frobenius', scaling=True, + squared=True): + """Computes the Mean Squared Error between two covariance estimators. + + Parameters + ---------- + comp_cov : array-like, shape = [n_features, n_features] + The covariance to compare with. + norm : str + The type of norm used to compute the error. Available error types: + - 'frobenius' (default): sqrt(tr(A^t.A)) + - 'spectral': sqrt(max(eigenvalues(A^t.A)) + where A is the error ``(comp_cov - self.covariance_)``. + scaling : bool + If True (default), the squared error norm is divided by n_features. + If False, the squared error norm is not rescaled. + squared : bool + Whether to compute the squared error norm or the error norm. + If True (default), the squared error norm is returned. + If False, the error norm is returned. + + Returns + ------- + The Mean Squared Error (in the sense of the Frobenius norm) between + `self` and `comp_cov` covariance estimators. + """ + from scipy import linalg + # compute the error + error = comp_cov - self.covariance_ + # compute the error norm + if norm == "frobenius": + squared_norm = np.sum(error ** 2) + elif norm == "spectral": + squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) + else: + raise NotImplementedError( + "Only spectral and frobenius norms are implemented") + # optionally scale the error norm + if scaling: + squared_norm = squared_norm / error.shape[0] + # finally get either the squared norm or the norm + if squared: + result = squared_norm + else: + result = np.sqrt(squared_norm) + + return result + + def mahalanobis(self, observations): + """Computes the squared Mahalanobis distances of given observations. + + Parameters + ---------- + observations : array-like, shape = [n_observations, n_features] + The observations, the Mahalanobis distances of the which we + compute. Observations are assumed to be drawn from the same + distribution than the data used in fit. + + Returns + ------- + mahalanobis_distance : array, shape = [n_observations,] + Squared Mahalanobis distances of the observations. + + """ + precision = self.get_precision() + # compute mahalanobis distances + centered_obs = observations - self.location_ + mahalanobis_dist = np.sum( + np.dot(centered_obs, precision) * centered_obs, 1) + + return mahalanobis_dist + + +def log_likelihood(emp_cov, precision): + """Computes the sample mean of the log_likelihood under a covariance model + + computes the empirical expected log-likelihood (accounting for the + normalization terms and scaling), allowing for universal comparison (beyond + this software package) + + Parameters + ---------- + emp_cov : 2D ndarray (n_features, n_features) + Maximum Likelihood Estimator of covariance + + precision : 2D ndarray (n_features, n_features) + The precision matrix of the covariance model to be tested + + Returns + ------- + sample mean of the log-likelihood + """ + p = precision.shape[0] + log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision) + log_likelihood_ -= p * np.log(2 * np.pi) + log_likelihood_ /= 2. 
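+    # i.e. the per-sample Gaussian log-likelihood,
+    # (logdet(precision) - trace(emp_cov @ precision) - p * log(2 * pi)) / 2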
+ return log_likelihood_ + + +# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues + +def _logdet(A): + """Compute the log det of a positive semidefinite matrix.""" + from scipy import linalg + vals = linalg.eigvalsh(A) + # avoid negative (numerical errors) or zero (semi-definite matrix) values + tol = vals.max() * vals.size * np.finfo(np.float64).eps + vals = np.where(vals > tol, vals, tol) + return np.sum(np.log(vals)) + + +def _infer_dimension_(spectrum, n_samples, n_features): + """Infers the dimension of a dataset of shape (n_samples, n_features) + The dataset is described by its spectrum `spectrum`. + """ + n_spectrum = len(spectrum) + ll = np.empty(n_spectrum) + for rank in range(n_spectrum): + ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features) + return ll.argmax() + + +def _assess_dimension_(spectrum, rank, n_samples, n_features): + from scipy.special import gammaln + if rank > len(spectrum): + raise ValueError("The tested rank cannot exceed the rank of the" + " dataset") + + pu = -rank * log(2.) + for i in range(rank): + pu += (gammaln((n_features - i) / 2.) - + log(np.pi) * (n_features - i) / 2.) + + pl = np.sum(np.log(spectrum[:rank])) + pl = -pl * n_samples / 2. + + if rank == n_features: + pv = 0 + v = 1 + else: + v = np.sum(spectrum[rank:]) / (n_features - rank) + pv = -np.log(v) * n_samples * (n_features - rank) / 2. + + m = n_features * rank - rank * (rank + 1.) / 2. + pp = log(2. * np.pi) * (m + rank + 1.) / 2. + + pa = 0. + spectrum_ = spectrum.copy() + spectrum_[rank:n_features] = v + for i in range(rank): + for j in range(i + 1, len(spectrum)): + pa += log((spectrum[i] - spectrum[j]) * + (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples) + + ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2. + + return ll + + +def svd_flip(u, v, u_based_decision=True): + if u_based_decision: + # columns of u, rows of v + max_abs_cols = np.argmax(np.abs(u), axis=0) + signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])]) + u *= signs + v *= signs[:, np.newaxis] + else: + # rows of v, columns of u + max_abs_rows = np.argmax(np.abs(v), axis=1) + signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows]) + u *= signs + v *= signs[:, np.newaxis] + return u, v + + +def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): + """Use high precision for cumsum and check that final value matches sum + + Parameters + ---------- + arr : array-like + To be cumulatively summed as flat + axis : int, optional + Axis along which the cumulative sum is computed. + The default (None) is to compute the cumsum over the flattened array. 
+ rtol : float + Relative tolerance, see ``np.allclose`` + atol : float + Absolute tolerance, see ``np.allclose`` + """ + out = np.cumsum(arr, axis=axis, dtype=np.float64) + expected = np.sum(arr, axis=axis, dtype=np.float64) + if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol, + atol=atol, equal_nan=True)): + warnings.warn('cumsum was found to be unstable: ' + 'its last element does not correspond to sum', + RuntimeWarning) + return out + + +# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug) +def svd(a, hermitian=False): + if hermitian: # faster + s, u = np.linalg.eigh(a) + sgn = np.sign(s) + s = np.abs(s) + sidx = np.argsort(s)[..., ::-1] + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj() + np.abs(s, out=s) + return u, s, vt + else: + return np.linalg.svd(a) + + +############################################################################### +# From nilearn + + +def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): + """ + crop a colorbar to show from cbar_vmin to cbar_vmax + Used when symmetric_cbar=False is used. + """ + import matplotlib + if (cbar_vmin is None) and (cbar_vmax is None): + return + cbar_tick_locs = cbar.locator.locs + if cbar_vmax is None: + cbar_vmax = cbar_tick_locs.max() + if cbar_vmin is None: + cbar_vmin = cbar_tick_locs.min() + new_tick_locs = np.linspace(cbar_vmin, cbar_vmax, + len(cbar_tick_locs)) + + # matplotlib >= 3.2.0 no longer normalizes axes between 0 and 1 + # See https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html + # _outline was removed in + # https://github.com/matplotlib/matplotlib/commit/03a542e875eba091a027046d5ec652daa8be6863 + # so we use the code from there + if _compare_version(matplotlib.__version__, '>=', '3.2.0'): + cbar.ax.set_ylim(cbar_vmin, cbar_vmax) + X = cbar._mesh()[0] + X = np.array([X[0], X[-1]]) + Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]]) + N = X.shape[0] + ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0] + x = X.T.reshape(-1)[ii] + y = Y.T.reshape(-1)[ii] + xy = (np.column_stack([y, x]) + if cbar.orientation == 'horizontal' else + np.column_stack([x, y])) + cbar.outline.set_xy(xy) + else: + cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax)) + outline = cbar.outline.get_xy() + outline[:2, 1] += cbar.norm(cbar_vmin) + outline[2:6, 1] -= (1. 
- cbar.norm(cbar_vmax)) + outline[6:, 1] += cbar.norm(cbar_vmin) + cbar.outline.set_xy(outline) + + cbar.set_ticks(new_tick_locs) + cbar.update_ticks() + + +############################################################################### +# Numba (optional requirement) + +# Here we choose different defaults to speed things up by default +try: + import numba + if _compare_version(numba.__version__, '<', '0.48'): + raise ImportError + prange = numba.prange + def jit(nopython=True, nogil=True, fastmath=True, cache=True, + **kwargs): # noqa + return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath, + cache=cache, **kwargs) +except ImportError: + has_numba = False +else: + has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true') + + +if not has_numba: + def jit(**kwargs): # noqa + def _jit(func): + return func + return _jit + prange = range + bincount = np.bincount + mean = np.mean + +else: + @jit() + def bincount(x, weights, minlength): # noqa: D103 + out = np.zeros(minlength) + for idx, w in zip(x, weights): + out[idx] += w + return out + + # fix because Numba does not support axis kwarg for mean + @jit() + def _np_apply_along_axis(func1d, axis, arr): + assert arr.ndim == 2 + assert axis in [0, 1] + if axis == 0: + result = np.empty(arr.shape[1]) + for i in range(len(result)): + result[i] = func1d(arr[:, i]) + else: + result = np.empty(arr.shape[0]) + for i in range(len(result)): + result[i] = func1d(arr[i, :]) + return result + + @jit() + def mean(array, axis): + return _np_apply_along_axis(np.mean, axis, array) + + +############################################################################### +# workaround: plt.close() doesn't spawn close_event on Agg backend +# (check MPL github issue #18609; scheduled to be fixed by MPL 3.4) + +def _close_event(fig): + """Force calling of the MPL figure close event.""" + try: + fig.canvas.close_event() + except ValueError: # old mpl with Qt + pass # pragma: no cover + + +def _is_last_row(ax): + try: + return ax.get_subplotspec().is_last_row() # 3.4+ + except AttributeError: + return ax.is_last_row() + return ax.get_subplotspec().is_last_row() + + +############################################################################### +# SciPy deprecation of pinv + pinvh rcond (never worked properly anyway) in 1.7 + +def pinvh(a, rtol=None): + """Compute a pseudo-inverse of a Hermitian matrix.""" + s, u = np.linalg.eigh(a) + del a + if rtol is None: + rtol = s.size * np.finfo(s.dtype).eps + maxS = np.max(np.abs(s)) + above_cutoff = (abs(s) > maxS * rtol) + psigma_diag = 1.0 / s[above_cutoff] + u = u[:, above_cutoff] + return (u * psigma_diag) @ u.conj().T + + +def pinv(a, rtol=None): + """Compute a pseudo-inverse of a matrix.""" + u, s, vh = np.linalg.svd(a, full_matrices=False) + del a + maxS = np.max(s) + if rtol is None: + rtol = max(vh.shape + u.shape) * np.finfo(u.dtype).eps + rank = np.sum(s > maxS * rtol) + u = u[:, :rank] + u /= s[:rank] + return (u @ vh[:rank]).conj().T + + +############################################################################### +# PyVista + +# Deal with pyvista deprecation of point_data and cell_data +# (can be removed once we require 0.31+) + +def _point_data(obj): + try: + return obj.point_data + except AttributeError: + return obj.point_arrays + + +def _cell_data(obj): + try: + return obj.cell_data + except AttributeError: + return obj.cell_arrays diff --git a/python/libs/mne/forward/__init__.py b/python/libs/mne/forward/__init__.py new file mode 100644 index 0000000..a573072 --- /dev/null +++ 
b/python/libs/mne/forward/__init__.py @@ -0,0 +1,22 @@ +"""Forward modeling code.""" + +from .forward import (Forward, read_forward_solution, write_forward_solution, + is_fixed_orient, _read_forward_meas_info, + _select_orient_forward, + compute_orient_prior, compute_depth_prior, + apply_forward, apply_forward_raw, + restrict_forward_to_stc, restrict_forward_to_label, + average_forward_solutions, _stc_src_sel, + _fill_measurement_info, _apply_forward, + _subject_from_forward, convert_forward_solution, + _merge_meg_eeg_fwds, _do_forward_solution) +from ._make_forward import (make_forward_solution, _prepare_for_forward, + _prep_meg_channels, _prep_eeg_channels, + _to_forward_dict, _create_meg_coils, + _read_coil_defs, _transform_orig_meg_coils, + make_forward_dipole, use_coil_def) +from ._compute_forward import (_magnetic_dipole_field_vec, _compute_forwards, + _concatenate_coils) +from ._field_interpolation import (_make_surface_mapping, make_field_map, + _as_meg_type_inst, _map_meg_or_eeg_channels) +from . import _lead_dots # for testing purposes diff --git a/python/libs/mne/forward/_compute_forward.py b/python/libs/mne/forward/_compute_forward.py new file mode 100644 index 0000000..5b68d72 --- /dev/null +++ b/python/libs/mne/forward/_compute_forward.py @@ -0,0 +1,921 @@ +# -*- coding: utf-8 -*- +# Authors: Matti Hämäläinen +# Alexandre Gramfort +# Martin Luessi +# Eric Larson +# Mark Wronkiewicz +# +# License: BSD-3-Clause + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. +# +# Many of the idealized equations behind these calculations can be found in: +# 1) Realistic conductivity geometry model of the human head for interpretation +# of neuromagnetic data. Hämäläinen and Sarvas, 1989. Specific to MNE +# 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and +# Lewis, 1999. Generalized discussion of forward solutions. 
+ +import numpy as np +from copy import deepcopy + +from ..fixes import jit, bincount +from ..io.compensator import get_current_comp, make_compensator +from ..io.constants import FIFF, FWD +from ..io.pick import pick_types +from ..parallel import parallel_func +from ..surface import _project_onto_surface, _jit_cross +from ..transforms import apply_trans +from ..utils import logger, verbose, _pl, warn, fill_doc + + +# ############################################################################# +# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX + +def _dup_coil_set(coils, coord_frame, t): + """Make a duplicate.""" + if t is not None and coord_frame != t['from']: + raise RuntimeError('transformation frame does not match the coil set') + coils = deepcopy(coils) + if t is not None: + coord_frame = t['to'] + for coil in coils: + for key in ('ex', 'ey', 'ez'): + if key in coil: + coil[key] = apply_trans(t['trans'], coil[key], False) + coil['r0'] = apply_trans(t['trans'], coil['r0']) + coil['rmag'] = apply_trans(t['trans'], coil['rmag']) + coil['cosmag'] = apply_trans(t['trans'], coil['cosmag'], False) + coil['coord_frame'] = t['to'] + return coils, coord_frame + + +def _check_coil_frame(coils, coord_frame, bem): + """Check to make sure the coils are in the correct coordinate frame.""" + if coord_frame != FIFF.FIFFV_COORD_MRI: + if coord_frame == FIFF.FIFFV_COORD_HEAD: + # Make a transformed duplicate + coils, coord_frame = _dup_coil_set(coils, coord_frame, + bem['head_mri_t']) + else: + raise RuntimeError('Bad coil coordinate frame %s' % coord_frame) + return coils, coord_frame + + +@fill_doc +def _lin_field_coeff(surf, mult, rmags, cosmags, ws, bins, n_jobs): + """Parallel wrapper for _do_lin_field_coeff to compute linear coefficients. + + Parameters + ---------- + surf : dict + Dict containing information for one surface of the BEM + mult : float + Multiplier for particular BEM surface (Iso Skull Approach discussed in + Mosher et al., 1999 and Hämäläinen and Sarvas, 1989 Section III?) + rmags : ndarray, shape (n_integration_pts, 3) + 3D positions of MEG coil integration points (from coil['rmag']) + cosmags : ndarray, shape (n_integration_pts, 3) + Direction of the MEG coil integration points (from coil['cosmag']) + ws : ndarray, shape (n_integration_pts,) + Weights for MEG coil integration points + bins : ndarray, shape (n_integration_points,) + The sensor assignments for each rmag/cosmag/w. + %(n_jobs)s + + Returns + ------- + coeff : list + Linear coefficients with lead fields for each BEM vertex on each sensor + (?) + """ + parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs) + nas = np.array_split + coeffs = parallel(p_fun(surf['rr'], t, tn, ta, rmags, cosmags, ws, bins) + for t, tn, ta in zip(nas(surf['tris'], n_jobs), + nas(surf['tri_nn'], n_jobs), + nas(surf['tri_area'], n_jobs))) + return mult * np.sum(coeffs, axis=0) + + +@jit() +def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, bins): + """Compute field coefficients (parallel-friendly). + + See section IV of Mosher et al., 1999 (specifically equation 35). + + Parameters + ---------- + bem_rr : ndarray, shape (n_BEM_vertices, 3) + Positions on one BEM surface in 3-space.
2562 BEM vertices for BEM with + 5120 triangles (ico-4) + tris : ndarray, shape (n_triangles, 3) + Vertex indices for each triangle (referring to bem_rr) + tn : ndarray, shape (n_triangles, 3) + Triangle unit normal vectors + ta : ndarray, shape (n_triangles,) + Triangle areas + rmags : ndarray, shape (n_sensor_pts, 3) + 3D positions of MEG coil integration points (from coil['rmag']) + cosmags : ndarray, shape (n_sensor_pts, 3) + Direction of the MEG coil integration points (from coil['cosmag']) + ws : ndarray, shape (n_sensor_pts,) + Weights for MEG coil integration points + bins : ndarray, shape (n_sensor_pts,) + The sensor assignments for each rmag/cosmag/w. + + Returns + ------- + coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices) + Linear coefficients with effect of each BEM vertex on each sensor + (?) + """ + coeff = np.zeros((bins[-1] + 1, len(bem_rr))) + w_cosmags = ws.reshape(-1, 1) * cosmags + diff = rmags.reshape(rmags.shape[0], 1, rmags.shape[1]) - bem_rr + den = np.sum(diff * diff, axis=-1) + den *= np.sqrt(den) + den *= 3 + for ti in range(len(tris)): + tri, tri_nn, tri_area = tris[ti], tn[ti], ta[ti] + # Accumulate the coefficients for each triangle node and add to the + # corresponding coefficient matrix + + # Simple version (bem_lin_field_coeffs_simple) + # The following is equivalent to: + # tri_rr = bem_rr[tri] + # for j, coil in enumerate(coils['coils']): + # x = func(coil['rmag'], coil['cosmag'], + # tri_rr, tri_nn, tri_area) + # res = np.sum(coil['w'][np.newaxis, :] * x, axis=1) + # coeff[j][tri + off] += mult * res + + c = np.empty((diff.shape[0], tri.shape[0], diff.shape[2])) + _jit_cross(c, diff[:, tri], tri_nn) + c *= w_cosmags.reshape(w_cosmags.shape[0], 1, w_cosmags.shape[1]) + for tii in range(3): + x = np.sum(c[:, tii], axis=-1) + x /= den[:, tri[tii]] / tri_area + coeff[:, tri[tii]] += \ + bincount(bins, weights=x, minlength=bins[-1] + 1) + return coeff + + +def _concatenate_coils(coils): + """Concatenate MEG coil parameters.""" + rmags = np.concatenate([coil['rmag'] for coil in coils]) + cosmags = np.concatenate([coil['cosmag'] for coil in coils]) + ws = np.concatenate([coil['w'] for coil in coils]) + n_int = np.array([len(coil['rmag']) for coil in coils]) + if n_int[-1] == 0: + # We assume each sensor has at least one integration point, + # which should be a safe assumption. But let's check it here, since + # our code elsewhere relies on bins[-1] + 1 being the number of sensors + raise RuntimeError('not supported') + bins = np.repeat(np.arange(len(n_int)), n_int) + return rmags, cosmags, ws, bins + + +@fill_doc +def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs): + """Set up for computing the solution at a set of MEG coils.
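+ + The returned matrix maps the precomputed BEM potential solution onto the MEG sensors; it is later combined with the infinite-medium potentials in _bem_pot_or_field.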
+ + Parameters + ---------- + bem : instance of ConductorModel + BEM information + coils : list of dict, len(n_MEG_sensors) + MEG sensor information dicts + coord_frame : int + Class constant identifying coordinate frame + mults : ndarray, shape (1, n_BEM_vertices) + Multiplier for every vertex in BEM + %(n_jobs)s + + Returns + ------- + sol : ndarray, shape (n_MEG_sensors, n_BEM_vertices) + MEG solution + """ + # Make sure MEG coils are in MRI coordinate frame to match BEM coords + coils, coord_frame = _check_coil_frame(coils, coord_frame, bem) + + # leaving this in in case we want to easily add in the future + # if method != 'simple': # in ['ferguson', 'urankar']: + # raise NotImplementedError + + # Compute the weighting factors to obtain the magnetic field in the linear + # potential approximation + + # Process each of the surfaces + rmags, cosmags, ws, bins = _triage_coils(coils) + del coils + lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]]) + sol = np.zeros((bins[-1] + 1, bem['solution'].shape[1])) + + lims = np.concatenate([np.arange(0, sol.shape[0], 100), [sol.shape[0]]]) + # Put through the bem (in channel-based chunks to save memory) + for start, stop in zip(lims[:-1], lims[1:]): + mask = np.logical_and(bins >= start, bins < stop) + r, c, w, b = rmags[mask], cosmags[mask], ws[mask], bins[mask] - start + # Compute coeffs for each surface, one at a time + for o1, o2, surf, mult in zip(lens[:-1], lens[1:], + bem['surfs'], bem['field_mult']): + coeff = _lin_field_coeff(surf, mult, r, c, w, b, n_jobs) + sol[start:stop] += np.dot(coeff, bem['solution'][o1:o2]) + sol *= mults + return sol + + +def _bem_specify_els(bem, els, mults): + """Set up for computing the solution at a set of EEG electrodes. + + Parameters + ---------- + bem : instance of ConductorModel + BEM information + els : list of dict, len(n_EEG_sensors) + List of EEG sensor information dicts + mults : ndarray, shape (1, n_BEM_vertices) + Multiplier for every vertex in BEM + + Returns + ------- + sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices) + EEG solution + """ + sol = np.zeros((len(els), bem['solution'].shape[1])) + scalp = bem['surfs'][0] + + # Operate on all integration points for all electrodes (in MRI coords) + rrs = np.concatenate([apply_trans(bem['head_mri_t']['trans'], el['rmag']) + for el in els], axis=0) + ws = np.concatenate([el['w'] for el in els]) + tri_weights, tri_idx = _project_onto_surface(rrs, scalp) + tri_weights *= ws[:, np.newaxis] + weights = np.matmul(tri_weights[:, np.newaxis], + bem['solution'][scalp['tris'][tri_idx]])[:, 0] + # there are way more vertices than electrodes generally, so let's iterate + # over the electrodes + edges = np.concatenate([[0], np.cumsum([len(el['w']) for el in els])]) + for ii, (start, stop) in enumerate(zip(edges[:-1], edges[1:])): + sol[ii] = weights[start:stop].sum(0) + sol *= mults + return sol + + +# ############################################################################# +# COMPENSATION + +def _make_ctf_comp_coils(info, coils): + """Get the correct compensator for CTF coils.""" + # adapted from mne_make_ctf_comp() from mne_ctf_comp.c + logger.info('Setting up compensation data...') + comp_num = get_current_comp(info) + if comp_num is None or comp_num == 0: + logger.info(' No compensation set. Nothing more to do.') + return None + + # Need to meaningfully populate comp['set'] dict a.k.a. compset + n_comp_ch = sum([c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']]) + logger.info(' %d out of %d channels have the compensation set.'
+ % (n_comp_ch, len(coils))) + + # Find the desired compensation data matrix + compensator = make_compensator(info, 0, comp_num, True) + logger.info(' Desired compensation data (%s) found.' % comp_num) + logger.info(' All compensation channels found.') + logger.info(' Preselector created.') + logger.info(' Compensation data matrix created.') + logger.info(' Postselector created.') + return compensator + + +# ############################################################################# +# BEM COMPUTATION + +_MAG_FACTOR = 1e-7 # μ_0 / (4π) + +# def _bem_inf_pot(rd, Q, rp): +# """The infinite medium potential in one direction. See Eq. (8) in +# Mosher, 1999""" +# NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation +# diff = rp - rd # (Observation point position) - (Source position) +# diff2 = np.sum(diff * diff, axis=1) # Squared magnitude of diff +# # (Dipole moment) dot (diff) / (magnitude ^ 3) +# return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2)) + + +@jit() +def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None): + """Compute the infinite medium potential in all 3 directions. + + Parameters + ---------- + mri_rr : ndarray, shape (n_dipole_vertices, 3) + Chunk of 3D dipole positions in MRI coordinates + bem_rr : ndarray, shape (n_BEM_vertices, 3) + 3D vertex positions for one BEM surface + mri_Q : ndarray, shape (3, 3) + 3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3)) + + Returns + ------- + ndarray, shape (n_dipole_vertices, 3, n_BEM_vertices) + """ + # NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation + # Get position difference vector between BEM vertex and dipole + diff = np.empty((len(mri_rr), 3, len(bem_rr))) + for ri in range(mri_rr.shape[0]): + rr = mri_rr[ri] + this_diff = bem_rr - rr + diff_norm = np.sum(this_diff * this_diff, axis=1) + diff_norm *= np.sqrt(diff_norm) + diff_norm[diff_norm == 0] = 1. + if mri_Q is not None: + this_diff = np.dot(this_diff, mri_Q.T) + this_diff /= diff_norm.reshape(-1, 1) + diff[ri] = this_diff.T + + return diff + +# This function has been refactored to process all points simultaneously +# def _bem_inf_field(rd, Q, rp, d): +# """Infinite-medium magnetic field. See (7) in Mosher, 1999""" +# # Get vector from source to sensor integration point +# diff = rp - rd +# diff2 = np.sum(diff * diff, axis=1) # Get magnitude of diff +# +# # Compute cross product between diff and dipole to get magnetic field at +# # integration point +# x = fast_cross_3d(Q[np.newaxis, :], diff) +# +# # Take magnetic field dotted by integration point normal to get magnetic +# # field threading the current loop. Divide by R^3 (equivalently, R^2 * R) +# return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2)) + + +@jit() +def _bem_inf_fields(rr, rmag, cosmag): + """Compute infinite-medium magnetic field at one MEG sensor. + + This operates on all dipoles in all 3 basis directions.
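+ + In effect this evaluates Eq. (7) of Mosher et al., 1999 with the dipole moment Q replaced by each Cartesian basis vector in turn, i.e. ((e_k × (rp - rd)) · cosmag) / |rp - rd|**3 for k = 0, 1, 2; the μ_0 / (4π) factor is applied downstream.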
+ + Parameters + ---------- + rr : ndarray, shape (n_source_points, 3) + 3D dipole source positions + rmag : ndarray, shape (n_sensor_points, 3) + 3D positions of 1 MEG coil's integration points (from coil['rmag']) + cosmag : ndarray, shape (n_sensor_points, 3) + Direction of 1 MEG coil's integration points (from coil['cosmag']) + + Returns + ------- + ndarray, shape (n_dipoles, 3, n_integration_pts) + Magnetic field from all dipoles at each MEG sensor integration point + """ + # rr, rmag refactored according to Equation (19) in Mosher, 1999 + # Knowing that we're doing all directions, refactor above function: + + # rr, 3, rmag + diff = rmag.T.reshape(1, 3, rmag.shape[0]) - rr.reshape(rr.shape[0], 3, 1) + diff_norm = np.sum(diff * diff, axis=1) # rr, rmag + diff_norm *= np.sqrt(diff_norm) # Get magnitude of distance cubed + diff_norm_ = diff_norm.reshape(-1) + diff_norm_[diff_norm_ == 0] = 1 # avoid nans + + # This is the result of cross-prod calcs with basis vectors, + # as if we had taken (Q=np.eye(3)), then multiplied by cosmags + # factor, and then summed across directions + x = np.empty((rr.shape[0], 3, rmag.shape[0])) + x[:, 0] = diff[:, 1] * cosmag[:, 2] - diff[:, 2] * cosmag[:, 1] + x[:, 1] = diff[:, 2] * cosmag[:, 0] - diff[:, 0] * cosmag[:, 2] + x[:, 2] = diff[:, 0] * cosmag[:, 1] - diff[:, 1] * cosmag[:, 0] + diff_norm = diff_norm_.reshape((rr.shape[0], 1, rmag.shape[0])) + x /= diff_norm + # x.shape == (rr.shape[0], 3, rmag.shape[0]) + return x + + +@fill_doc +def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, + coil_type): + """Calculate the magnetic field or electric potential forward solution. + + The code is very similar between EEG and MEG potentials, so combine them. + This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field") + and "fwd_bem_pot_els" in MNE-C. + + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions + mri_rr : ndarray, shape (n_dipoles, 3) + 3D source positions in MRI coordinates + mri_Q : ndarray, shape (3, 3) + 3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3)) + coils : list of dict, len(sensors) + List of sensors where each element contains sensor specific information + solution : ndarray, shape (n_sensors, n_BEM_rr) + Comes from _bem_specify_coils or _bem_specify_els + bem_rr : ndarray, shape (n_BEM_vertices, 3) + 3D vertex positions for all surfaces in the BEM + %(n_jobs)s + coil_type : str + 'meg' or 'eeg' + + Returns + ------- + B : ndarray, shape (n_dipoles * 3, n_sensors) + Forward solution for a set of sensors + """ + # Both MEG and EEG have the infinite-medium potentials + # This could be just vectorized, but eats too much memory, so instead we + # reduce memory by chunking within _do_inf_pots and parallelize, too: + parallel, p_fun, _ = parallel_func(_do_inf_pots, n_jobs) + nas = np.array_split + B = np.sum(parallel(p_fun(mri_rr, sr.copy(), np.ascontiguousarray(mri_Q), + np.array(sol)) # copy and contig + for sr, sol in zip(nas(bem_rr, n_jobs), + nas(solution.T, n_jobs))), axis=0) + # The copy()s above should make it so the whole objects don't need to be + # pickled... + + # Only MEG coils are sensitive to the primary current distribution. + if coil_type == 'meg': + # Primary current contribution (can be calc. in coil/dipole coords) + parallel, p_fun, _ = parallel_func(_do_prim_curr, n_jobs) + pcc = np.concatenate(parallel(p_fun(r, coils) + for r in nas(rr, n_jobs)), axis=0) + B += pcc + B *= _MAG_FACTOR + return B + + +def _do_prim_curr(rr, coils): + """Calculate primary currents in a set of MEG coils.
+ + See Mosher et al., 1999 Section II for discussion of primary vs. volume + currents. + + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in head coordinates + coils : list of dict + List of MEG coils where each element contains coil specific information + + Returns + ------- + pc : ndarray, shape (n_dipoles * 3, n_MEG_sensors) + Primary current for set of MEG coils due to all sources + """ + rmags, cosmags, ws, bins = _triage_coils(coils) + n_coils = bins[-1] + 1 + del coils + pc = np.empty((len(rr) * 3, n_coils)) + for start, stop in _rr_bounds(rr, chunk=1): + pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) + pp *= ws + pp.shape = (3 * (stop - start), -1) + pc[3 * start:3 * stop] = [bincount(bins, this_pp, bins[-1] + 1) + for this_pp in pp] + return pc + + +def _rr_bounds(rr, chunk=200): + # chunk data nicely + bounds = np.concatenate([np.arange(0, len(rr), chunk), [len(rr)]]) + return zip(bounds[:-1], bounds[1:]) + + +def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol): + """Calculate infinite potentials for MEG or EEG sensors using chunks. + + Parameters + ---------- + mri_rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in MRI coordinates + bem_rr : ndarray, shape (n_BEM_vertices, 3) + 3D vertex positions for all surfaces in the BEM + mri_Q : ndarray, shape (3, 3) + 3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3)) + sol : ndarray, shape (n_sensors_subset, n_BEM_vertices_subset) + Comes from _bem_specify_coils + + Returns + ------- + B : ndarray, shape (n_dipoles * 3, n_sensors) + Forward solution for sensors due to volume currents + """ + # Doing work of 'fwd_bem_pot_calc' in MNE-C + # The following code is equivalent to this, but saves memory + # v0s = _bem_inf_pots(rr, bem_rr, Q) # n_rr x 3 x n_bem_rr + # v0s.shape = (len(rr) * 3, v0s.shape[2]) + # B = np.dot(v0s, sol) + + # We chunk the source mri_rr's in order to save memory + B = np.empty((len(mri_rr) * 3, sol.shape[1])) + for start, stop in _rr_bounds(mri_rr): + # v0 in Hämäläinen et al., 1989 == v_inf in Mosher, et al., 1999 + v0s = _bem_inf_pots(mri_rr[start:stop], bem_rr, mri_Q) + v0s = v0s.reshape(-1, v0s.shape[2]) + B[3 * start:3 * stop] = np.dot(v0s, sol) + return B + + +# ############################################################################# +# SPHERE COMPUTATION + +def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, sphere, bem_rr, + n_jobs, coil_type): + """Do potential or field for spherical model.""" + fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field + parallel, p_fun, _ = parallel_func(fun, n_jobs) + B = np.concatenate(parallel(p_fun(r, coils, sphere) + for r in np.array_split(rr, n_jobs))) + return B + + +def _sphere_field(rrs, coils, sphere): + """Compute field for spherical model using Jukka Sarvas' field computation. + + Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the + biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22.
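+ + The closed-form expression is Sarvas' Eq. (25): + B(r) = μ_0 / (4π F²) (F (Q × r_0) - ((Q × r_0) · r) ∇F), with + a = |r - r_0|, r = |r|, and F = a (r a + r² - r_0 · r).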
+ + The formulas have been manipulated for efficient computation + by Matti Hämäläinen, February 1990 + """ + rmags, cosmags, ws, bins = _triage_coils(coils) + return _do_sphere_field(rrs, rmags, cosmags, ws, bins, sphere['r0']) + + +@jit() +def _do_sphere_field(rrs, rmags, cosmags, ws, bins, r0): + n_coils = bins[-1] + 1 + # Shift to the sphere model coordinates + rrs = rrs - r0 + B = np.zeros((3 * len(rrs), n_coils)) + for ri in range(len(rrs)): + rr = rrs[ri] + # Check for a dipole at the origin + if np.sqrt(np.dot(rr, rr)) <= 1e-10: + continue + this_poss = rmags - r0 + + # Vector from dipole to the field point + a_vec = this_poss - rr + a = np.sqrt(np.sum(a_vec * a_vec, axis=1)) + r = np.sqrt(np.sum(this_poss * this_poss, axis=1)) + rr0 = np.sum(this_poss * rr, axis=1) + ar = (r * r) - rr0 + ar0 = ar / a + F = a * (r * a + ar) + gr = (a * a) / r + ar0 + 2.0 * (a + r) + g0 = a + 2 * r + ar0 + # Compute the dot products needed + re = np.sum(this_poss * cosmags, axis=1) + r0e = np.sum(rr * cosmags, axis=1) + g = (g0 * r0e - gr * re) / (F * F) + good = (a > 0) | (r > 0) | ((a * r) + 1 > 1e-5) + rr_ = rr.reshape(1, 3) + v1 = np.empty((cosmags.shape[0], 3)) + _jit_cross(v1, rr_, cosmags) + v2 = np.empty((cosmags.shape[0], 3)) + _jit_cross(v2, rr_, this_poss) + xx = ((good * ws).reshape(-1, 1) * + (v1 / F.reshape(-1, 1) + v2 * g.reshape(-1, 1))) + for jj in range(3): + zz = bincount(bins, xx[:, jj], n_coils) + B[3 * ri + jj, :] = zz + B *= _MAG_FACTOR + return B + + +def _eeg_spherepot_coil(rrs, coils, sphere): + """Calculate the EEG in the sphere model.""" + rmags, cosmags, ws, bins = _triage_coils(coils) + n_coils = bins[-1] + 1 + del coils + + # Shift to the sphere model coordinates + rrs = rrs - sphere['r0'] + + B = np.zeros((3 * len(rrs), n_coils)) + for ri, rr in enumerate(rrs): + # Only process dipoles inside the innermost sphere + if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']: + continue + # fwd_eeg_spherepot_vec + vval_one = np.zeros((len(rmags), 3)) + + # Make a weighted sum over the equivalence parameters + for eq in range(sphere['nfit']): + # Scale the dipole position + rd = sphere['mu'][eq] * rr + rd2 = np.sum(rd * rd) + rd2_inv = 1.0 / rd2 + # Go over all electrodes + this_pos = rmags - sphere['r0'] + + # Scale location onto the surface of the sphere (not used) + # if sphere['scale_pos']: + # pos_len = (sphere['layers'][-1]['rad'] / + # np.sqrt(np.sum(this_pos * this_pos, axis=1))) + # this_pos *= pos_len + + # Vector from dipole to the field point + a_vec = this_pos - rd + + # Compute the dot products needed + a = np.sqrt(np.sum(a_vec * a_vec, axis=1)) + a3 = 2.0 / (a * a * a) + r2 = np.sum(this_pos * this_pos, axis=1) + r = np.sqrt(r2) + rrd = np.sum(this_pos * rd, axis=1) + ra = r2 - rrd + rda = rrd - rd2 + + # The main ingredients + F = a * (r * a + ra) + c1 = a3 * rda + 1.0 / a - 1.0 / r + c2 = a3 + (a + r) / (r * F) + + # Mix them together and scale by lambda/(rd*rd) + m1 = (c1 - c2 * rrd) + m2 = c2 * rd2 + + vval_one += (sphere['lambda'][eq] * rd2_inv * + (m1[:, np.newaxis] * rd + + m2[:, np.newaxis] * this_pos)) + + # compute total result + xx = vval_one * ws[:, np.newaxis] + zz = np.array([bincount(bins, x, bins[-1] + 1) for x in xx.T]) + B[3 * ri:3 * ri + 3, :] = zz + # finishing by scaling by 1/(4*M_PI) + B *= 0.25 / np.pi + return B + + +def _triage_coils(coils): + return coils if isinstance(coils, tuple) else _concatenate_coils(coils) + + +# ############################################################################# +# MAGNETIC DIPOLE (e.g. 
CHPI) + +_MIN_DIST_LIMIT = 1e-5 + + +def _magnetic_dipole_field_vec(rrs, coils, too_close='raise'): + rmags, cosmags, ws, bins = _triage_coils(coils) + fwd, min_dist = _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close) + if min_dist < _MIN_DIST_LIMIT: + msg = 'Coil too close (dist = %g mm)' % (min_dist * 1000,) + if too_close == 'raise': + raise RuntimeError(msg) + func = warn if too_close == 'warning' else logger.info + func(msg) + return fwd + + +@jit() +def _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close): + """Compute an MEG forward solution for a set of magnetic dipoles.""" + # The code below is a more efficient version (~30x) of this: + # for ri, rr in enumerate(rrs): + # for k in range(len(coils)): + # this_coil = coils[k] + # # Go through all points + # diff = this_coil['rmag'] - rr + # dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis] + # dist = np.sqrt(dist2) + # if (dist < 1e-5).any(): + # raise RuntimeError('Coil too close') + # dist5 = dist2 * dist2 * dist + # sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'], + # axis=1)[:, np.newaxis] - + # dist2 * this_coil['cosmag']) / dist5 + # fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_) + fwd = np.zeros((3 * len(rrs), bins[-1] + 1)) + min_dist = np.inf + ws2 = ws.reshape(-1, 1) + for ri in range(len(rrs)): + rr = rrs[ri] + diff = rmags - rr + dist2_ = np.sum(diff * diff, axis=1) + dist2 = dist2_.reshape(-1, 1) + dist = np.sqrt(dist2) + min_dist = min(dist.min(), min_dist) + if min_dist < _MIN_DIST_LIMIT and too_close == 'raise': + break + t_ = np.sum(diff * cosmags, axis=1) + t = t_.reshape(-1, 1) + sum_ = ws2 * (3 * diff * t - dist2 * cosmags) / (dist2 * dist2 * dist) + for ii in range(3): + fwd[3 * ri + ii] = bincount(bins, sum_[:, ii], bins[-1] + 1) + fwd *= _MAG_FACTOR + return fwd, min_dist + + +# ############################################################################# +# MAIN TRIAGING FUNCTION + +@verbose +def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None): + """Precompute and store some things that are used for both MEG and EEG. + + Calculation includes multiplication factors, coordinate transforms, + compensations, and forward solutions. All are stored in modified fwd_data. + + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in head coordinates + bem : instance of ConductorModel + Boundary Element Model information + fwd_data : dict + Dict containing sensor information. 
Gets updated here with BEM and + sensor information for later forward calculations + %(n_jobs)s + %(verbose)s + """ + bem_rr = mults = mri_Q = head_mri_t = None + if not bem['is_sphere']: + if bem['bem_method'] != FWD.BEM_LINEAR_COLL: + raise RuntimeError('only linear collocation supported') + # Store (and apply soon) μ_0/(4π) factor before source computations + mults = np.repeat(bem['source_mult'] / (4.0 * np.pi), + [len(s['rr']) for s in bem['surfs']])[np.newaxis, :] + # Get positions of BEM points for every surface + bem_rr = np.concatenate([s['rr'] for s in bem['surfs']]) + + # The dipole location and orientation must be transformed + head_mri_t = bem['head_mri_t'] + mri_Q = bem['head_mri_t']['trans'][:3, :3].T + + # Compute solution and compensation for different sensor types + # ('meg', 'eeg') + if len(set(fwd_data['coil_types'])) != len(fwd_data['coil_types']): + raise RuntimeError('Non-unique sensor types found') + compensators, solutions, csolutions = [], [], [] + coils_list, ccoils_list = [], [] + for coil_type, coils, ccoils, info in zip(fwd_data['coil_types'], + fwd_data['coils_list'], + fwd_data['ccoils_list'], + fwd_data['infos']): + compensator = solution = csolution = None + if len(coils) > 0: # Only proceed if sensors exist + if coil_type == 'meg': + # Compose a compensation data set if necessary + compensator = _make_ctf_comp_coils(info, coils) + + if not bem['is_sphere']: + if coil_type == 'meg': + # MEG field computation matrices for BEM + start = 'Composing the field computation matrix' + logger.info('\n' + start + '...') + cf = FIFF.FIFFV_COORD_HEAD + # multiply solution by "mults" here for simplicity + solution = _bem_specify_coils(bem, coils, cf, mults, + n_jobs) + if compensator is not None: + logger.info(start + ' (compensation coils)...') + csolution = _bem_specify_coils(bem, ccoils, cf, + mults, n_jobs) + else: + # Compute solution for EEG sensor + logger.info('Setting up for EEG...') + solution = _bem_specify_els(bem, coils, mults) + else: + solution = csolution = bem + if coil_type == 'eeg': + logger.info('Using the equivalent source approach in the ' + 'homogeneous sphere for EEG') + coils = _triage_coils(coils) + if ccoils is not None and len(ccoils) > 0: + ccoils = _triage_coils(ccoils) + coils_list.append(coils) + ccoils_list.append(ccoils) + compensators.append(compensator) + solutions.append(solution) + csolutions.append(csolution) + + # Get appropriate forward physics function depending on sphere or BEM model + fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field + + # Update fwd_data with + # bem_rr (3D BEM vertex positions) + # mri_Q (3x3 Head->MRI coord transformation applied to identity matrix) + # head_mri_t (head->MRI coord transform dict) + # fun (_bem_pot_or_field if not 'sphere'; otherwise _sphere_pot_or_field) + # solutions (len 2 list; [ndarray, shape (n_MEG_sens, n BEM vertices), + # ndarray, shape (n_EEG_sens, n BEM vertices)]) + # csolutions (compensation for solution) + fwd_data.update(dict(bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t, + compensators=compensators, solutions=solutions, + csolutions=csolutions, fun=fun, + coils_list=coils_list, ccoils_list=ccoils_list)) + + +@fill_doc +def _compute_forwards_meeg(rr, fd, n_jobs, silent=False): + """Compute MEG and EEG forward solutions for all sensor types.
+ + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole positions in head coordinates + fd : dict + Dict containing forward data after update in _prep_field_computation + %(n_jobs)s + silent : bool + If True, don't emit logger.info. + This saves time over ``verbose`` when this function is called a lot. + + Returns + ------- + Bs : list + Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where + n_sensors depends on which channel types are requested (MEG and/or EEG) + """ + n_jobs = max(min(n_jobs, len(rr)), 1) + Bs = list() + # The dipole location and orientation must be transformed to mri coords + mri_rr = None + if fd['head_mri_t'] is not None: + mri_rr = np.ascontiguousarray( + apply_trans(fd['head_mri_t']['trans'], rr)) + mri_Q, bem_rr, fun = fd['mri_Q'], fd['bem_rr'], fd['fun'] + for ci in range(len(fd['coils_list'])): + coils, ccoils = fd['coils_list'][ci], fd['ccoils_list'][ci] + if len(coils) == 0: # nothing to do + Bs.append(np.zeros((3 * len(rr), 0))) + continue + + coil_type, compensator = fd['coil_types'][ci], fd['compensators'][ci] + solution, csolution = fd['solutions'][ci], fd['csolutions'][ci] + info = fd['infos'][ci] + + # Do the actual forward calculation for a list of MEG/EEG sensors + if not silent: + logger.info('Computing %s at %d source location%s ' + '(free orientations)...' + % (coil_type.upper(), len(rr), _pl(rr))) + # Calculate forward solution using spherical or BEM model + B = fun(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, + coil_type) + + # Compensate if needed (only done for MEG systems w/compensation) + if compensator is not None: + # Compute the field in the compensation sensors + work = fun(rr, mri_rr, mri_Q, ccoils, csolution, bem_rr, + n_jobs, coil_type) + # Combine solutions so we can do the compensation + both = np.zeros((work.shape[0], B.shape[1] + work.shape[1])) + picks = pick_types(info, meg=True, ref_meg=False, exclude=[]) + both[:, picks] = B + picks = pick_types(info, meg=False, ref_meg=True, exclude=[]) + both[:, picks] = work + B = np.dot(both, compensator.T) + Bs.append(B) + return Bs + + +@verbose +def _compute_forwards(rr, bem, coils_list, ccoils_list, infos, coil_types, + n_jobs, verbose=None): + """Compute the MEG and EEG forward solutions. + + This effectively combines compute_forward_meg and compute_forward_eeg + from MNE-C. + + Parameters + ---------- + rr : ndarray, shape (n_sources, 3) + 3D dipole positions in head coordinates + bem : instance of ConductorModel + Boundary Element Model information for all surfaces + coils_list : list + List of MEG and/or EEG sensor information dicts + ccoils_list : list + Optional list of MEG compensation information + infos : list, len(2) + infos[0] is MEG info, infos[1] is EEG info + coil_types : list of str + Sensor types. May contain 'meg' and/or 'eeg' + %(n_jobs)s + + Returns + ------- + Bs : list of ndarray + Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where + n_sensors depends on which channel types are requested (MEG and/or EEG) + """ + # Split calculation into two steps to save (potentially) a lot of time + # when e.g.
dipole fitting + fwd_data = dict(coils_list=coils_list, ccoils_list=ccoils_list, + infos=infos, coil_types=coil_types) + _prep_field_computation(rr, bem, fwd_data, n_jobs) + Bs = _compute_forwards_meeg(rr, fwd_data, n_jobs) + return Bs diff --git a/python/libs/mne/forward/_field_interpolation.py b/python/libs/mne/forward/_field_interpolation.py new file mode 100644 index 0000000..5e8c263 --- /dev/null +++ b/python/libs/mne/forward/_field_interpolation.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- +# Authors: Matti Hämäläinen +# Alexandre Gramfort +# Eric Larson + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. + +from copy import deepcopy + +import numpy as np + +from ..bem import _check_origin +from ..cov import make_ad_hoc_cov +from ..io.constants import FIFF +from ..io.pick import pick_types, pick_info +from ..io.meas_info import _simplify_info +from ..io.proj import _has_eeg_average_ref_proj, make_projector +from ..surface import get_head_surf, get_meg_helmet_surf +from ..transforms import (transform_surface_to, read_trans, _find_trans, + _ensure_trans) +from ._make_forward import _create_meg_coils, _create_eeg_els, _read_coil_defs +from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table, + _do_cross_dots) +from ..parallel import check_n_jobs +from ..utils import logger, verbose, _check_option, _reg_pinv, _pl +from ..epochs import EpochsArray, BaseEpochs +from ..evoked import Evoked, EvokedArray + + +def _setup_dots(mode, info, coils, ch_type): + """Set up dot products.""" + from scipy.interpolate import interp1d + int_rad = 0.06 + noise = make_ad_hoc_cov(info, dict(mag=20e-15, grad=5e-13, eeg=1e-6)) + n_coeff, interp = (50, 'nearest') if mode == 'fast' else (100, 'linear') + lut, n_fact = _get_legen_table(ch_type, False, n_coeff, verbose=False) + lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, axis=0) + return int_rad, noise, lut_fun, n_fact + + +def _compute_mapping_matrix(fmd, info): + """Do the hairy computations.""" + logger.info(' Preparing the mapping matrix...') + # assemble a projector and apply it to the data + ch_names = fmd['ch_names'] + projs = info.get('projs', list()) + proj_op = make_projector(projs, ch_names)[0] + proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op)) + + noise_cov = fmd['noise'] + # Whiten + if not noise_cov['diag']: + raise NotImplementedError # this shouldn't happen + whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel())) + whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener)) + + # SVD is numerically better than the eigenvalue decomposition even if + # mat is supposed to be symmetric and positive definite + if fmd.get('pinv_method', 'tsvd') == 'tsvd': + inv, fmd['nest'] = _pinv_trunc(whitened_dots, fmd['miss']) + else: + assert fmd['pinv_method'] == 'tikhonov', fmd['pinv_method'] + inv, fmd['nest'] = _pinv_tikhonov(whitened_dots, fmd['miss']) + + # Sandwich with the whitener + inv_whitened = np.dot(whitener.T, np.dot(inv, whitener)) + + # Take into account that the lead fields used to compute + # d->surface_dots were unprojected + inv_whitened_proj = proj_op.T @ inv_whitened + + # Finally sandwich in the selection matrix + # This one picks up the correct lead field projection + mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj) + + # Optionally apply the average electrode reference to the final field map + if fmd['kind'] == 'eeg' and _has_eeg_average_ref_proj(projs): + logger.info( + ' The map has an average electrode reference ' +
f'({mapping_mat.shape[0]} channels)') + mapping_mat -= np.mean(mapping_mat, axis=0) + return mapping_mat + + +def _pinv_trunc(x, miss): + """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" + from scipy import linalg + u, s, v = linalg.svd(x, full_matrices=False) + + # Eigenvalue truncation + varexp = np.cumsum(s) + varexp /= varexp[-1] + n = np.where(varexp >= (1.0 - miss))[0][0] + 1 + logger.info(' Truncating at %d/%d components to omit less than %g ' + '(%0.2g)' % (n, len(s), miss, 1. - varexp[n - 1])) + s = 1. / s[:n] + inv = ((u[:, :n] * s) @ v[:n]).T + return inv, n + + +def _pinv_tikhonov(x, reg): + # _reg_pinv requires square Hermitian, which we have here + inv, _, n = _reg_pinv(x, reg=reg, rank=None) + logger.info(f' Truncating at {n}/{len(x)} components and regularizing ' + f'with α={reg:0.1e}') + return inv, n + + +def _map_meg_or_eeg_channels(info_from, info_to, mode, origin, miss=None): + """Find mapping from one set of channels to another. + + Parameters + ---------- + info_from : instance of Info + The measurement data to interpolate from. + info_to : instance of Info + The measurement info to interpolate to. + mode : str + Either `'accurate'` or `'fast'`, determines the quality of the + Legendre polynomial expansion used. `'fast'` should be sufficient + for most applications. + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'``, which means a head-digitization-based origin + fit. Default is ``(0., 0., 0.04)``. + + Returns + ------- + mapping : array, shape (n_to, n_from) + A mapping matrix. + """ + # no need to apply trans because both from and to coils are in device + # coordinates + info_kinds = set(ch['kind'] for ch in info_to['chs']) + info_kinds |= set(ch['kind'] for ch in info_from['chs']) + if FIFF.FIFFV_REF_MEG_CH in info_kinds: # refs same as MEG + info_kinds |= set([FIFF.FIFFV_MEG_CH]) + info_kinds -= set([FIFF.FIFFV_REF_MEG_CH]) + info_kinds = sorted(info_kinds) + # This should be guaranteed by the callers + assert (len(info_kinds) == 1 and info_kinds[0] in ( + FIFF.FIFFV_MEG_CH, FIFF.FIFFV_EEG_CH)) + kind = 'eeg' if info_kinds[0] == FIFF.FIFFV_EEG_CH else 'meg' + + # + # Step 1. Prepare the coil definitions + # + if kind == 'meg': + templates = _read_coil_defs(verbose=False) + coils_from = _create_meg_coils(info_from['chs'], 'normal', + info_from['dev_head_t'], templates) + coils_to = _create_meg_coils(info_to['chs'], 'normal', + info_to['dev_head_t'], templates) + pinv_method = 'tsvd' + miss = 1e-4 + else: + coils_from = _create_eeg_els(info_from['chs']) + coils_to = _create_eeg_els(info_to['chs']) + pinv_method = 'tikhonov' + miss = 1e-1 + if _has_eeg_average_ref_proj(info_from['projs']) and \ + not _has_eeg_average_ref_proj(info_to['projs']): + raise RuntimeError( + 'info_to must have an average EEG reference projector if ' + 'info_from has one') + origin = _check_origin(origin, info_from) + # + # Step 2. 
Calculate the dot products + # + int_rad, noise, lut_fun, n_fact = _setup_dots( + mode, info_from, coils_from, kind) + logger.info(f' Computing dot products for {len(coils_from)} ' + f'{kind.upper()} channel{_pl(coils_from)}...') + self_dots = _do_self_dots(int_rad, False, coils_from, origin, kind, + lut_fun, n_fact, n_jobs=1) + logger.info(f' Computing cross products for {len(coils_from)} → ' + f'{len(coils_to)} {kind.upper()} channel{_pl(coils_to)}...') + cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to, + origin, kind, lut_fun, n_fact).T + + ch_names = [c['ch_name'] for c in info_from['chs']] + fmd = dict(kind=kind, ch_names=ch_names, + origin=origin, noise=noise, self_dots=self_dots, + surface_dots=cross_dots, int_rad=int_rad, miss=miss, + pinv_method=pinv_method) + + # + # Step 3. Compute the mapping matrix + # + mapping = _compute_mapping_matrix(fmd, info_from) + return mapping + + +def _as_meg_type_inst(inst, ch_type='grad', mode='fast'): + """Compute virtual evoked using interpolated fields in mag/grad channels. + + Parameters + ---------- + inst : instance of mne.Evoked or mne.Epochs + The evoked or epochs object. + ch_type : str + The destination channel type. It can be 'mag' or 'grad'. + mode : str + Either `'accurate'` or `'fast'`, determines the quality of the + Legendre polynomial expansion used. `'fast'` should be sufficient + for most applications. + + Returns + ------- + inst : instance of mne.EvokedArray or mne.EpochsArray + The transformed evoked object containing only virtual channels. + """ + _check_option('ch_type', ch_type, ['mag', 'grad']) + + # pick the original and destination channels + pick_from = pick_types(inst.info, meg=True, eeg=False, + ref_meg=False) + pick_to = pick_types(inst.info, meg=ch_type, eeg=False, + ref_meg=False) + + if len(pick_to) == 0: + raise ValueError('No channels matching the destination channel type' + ' found in info. Please pass an evoked containing' + ' both the original and destination channels.
Only the' + ' locations of the destination channels will be used' + ' for interpolation.') + + info_from = pick_info(inst.info, pick_from) + info_to = pick_info(inst.info, pick_to) + # XXX someday we should probably expose the origin + mapping = _map_meg_or_eeg_channels( + info_from, info_to, origin=(0., 0., 0.04), mode=mode) + + # compute data by multiplying by the 'gain matrix' from + # original sensors to virtual sensors + if hasattr(inst, 'get_data'): + data = inst.get_data() + else: + data = inst.data + + ndim = data.ndim + if ndim == 2: + data = data[np.newaxis, :, :] + + data_ = np.empty((data.shape[0], len(mapping), data.shape[2]), + dtype=data.dtype) + for d, d_ in zip(data, data_): + d_[:] = np.dot(mapping, d[pick_from]) + + # keep only the destination channel types + info = pick_info(inst.info, sel=pick_to, copy=True) + + # change channel names to emphasize they contain interpolated data + for ch in info['chs']: + ch['ch_name'] += '_v' + info._update_redundant() + info._check_consistency() + if isinstance(inst, Evoked): + assert ndim == 2 + data_ = data_[0] # undo new axis + inst_ = EvokedArray(data_, info, tmin=inst.times[0], + comment=inst.comment, nave=inst.nave) + else: + assert isinstance(inst, BaseEpochs) + inst_ = EpochsArray(data_, info, tmin=inst.tmin, + events=inst.events, + event_id=inst.event_id, + metadata=inst.metadata) + + return inst_ + + +@verbose +def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast', + n_jobs=1, origin=(0., 0., 0.04), verbose=None): + """Re-map M/EEG data to a surface. + + Parameters + ---------- + %(info_not_none)s + surf : dict + The surface to map the data to. The required fields are `'rr'`, + `'nn'`, and `'coord_frame'`. Must be in head coordinates. + ch_type : str + Must be either `'meg'` or `'eeg'`, determines the type of field. + trans : None | dict + If None, no transformation applied. Should be a Head<->MRI + transformation. + mode : str + Either `'accurate'` or `'fast'`, determines the quality of the + Legendre polynomial expansion used. `'fast'` should be sufficient + for most applications. + %(n_jobs)s + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + The default is ``'auto'``, which means a head-digitization-based + origin fit. + %(verbose)s + + Returns + ------- + mapping : array + A n_vertices x n_sensors array that remaps the MEG or EEG data, + as `new_data = np.dot(mapping, data)`. + """ + if not all(key in surf for key in ['rr', 'nn']): + raise KeyError('surf must have both "rr" and "nn"') + if 'coord_frame' not in surf: + raise KeyError('The surface coordinate frame must be specified ' + 'in surf["coord_frame"]') + _check_option('mode', mode, ['accurate', 'fast']) + + # deal with coordinate frames here -- always go to "head" (easiest) + orig_surf = surf + surf = transform_surface_to(deepcopy(surf), 'head', trans) + n_jobs = check_n_jobs(n_jobs) + origin = _check_origin(origin, info) + + # + # Step 1. 
Prepare the coil definitions + # Do the dot products, assume surf in head coords + # + _check_option('ch_type', ch_type, ['meg', 'eeg']) + if ch_type == 'meg': + picks = pick_types(info, meg=True, eeg=False, ref_meg=False) + logger.info('Prepare MEG mapping...') + else: + picks = pick_types(info, meg=False, eeg=True, ref_meg=False) + logger.info('Prepare EEG mapping...') + if len(picks) == 0: + raise RuntimeError('cannot map, no channels found') + # XXX this code does not do any checking for compensation channels, + # but it seems like this must be intentional from the ref_meg=False + # (presumably from the C code) + dev_head_t = info['dev_head_t'] + info = pick_info(_simplify_info(info), picks) + info['dev_head_t'] = dev_head_t + + # create coil defs in head coordinates + if ch_type == 'meg': + # Put them in head coordinates + coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t']) + type_str = 'coils' + miss = 1e-4 # Smoothing criterion for MEG + else: # EEG + coils = _create_eeg_els(info['chs']) + type_str = 'electrodes' + miss = 1e-3 # Smoothing criterion for EEG + + # + # Step 2. Calculate the dot products + # + int_rad, noise, lut_fun, n_fact = _setup_dots(mode, info, coils, ch_type) + logger.info('Computing dot products for %i %s...' % (len(coils), type_str)) + self_dots = _do_self_dots(int_rad, False, coils, origin, ch_type, + lut_fun, n_fact, n_jobs) + sel = np.arange(len(surf['rr'])) # eventually we should do sub-selection + logger.info('Computing dot products for %i surface locations...' + % len(sel)) + surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel, + origin, ch_type, lut_fun, n_fact, + n_jobs) + + # + # Step 3. Compute the mapping matrix and return the result + # + fmd = dict(kind=ch_type, surf=surf, ch_names=info['ch_names'], coils=coils, + origin=origin, noise=noise, self_dots=self_dots, + surface_dots=surface_dots, int_rad=int_rad, miss=miss) + logger.info('Field mapping data ready') + + fmd['data'] = _compute_mapping_matrix(fmd, info) + # bring the original back, whatever coord frame it was in + fmd['surf'] = orig_surf + + # Remove some unnecessary fields + del fmd['self_dots'] + del fmd['surface_dots'] + del fmd['int_rad'] + del fmd['miss'] + return fmd + + +@verbose +def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None, + ch_type=None, mode='fast', meg_surf='helmet', + origin=(0., 0., 0.04), n_jobs=1, verbose=None): + """Compute surface maps used for field display in 3D. + + Parameters + ---------- + evoked : Evoked | Epochs | Raw + The measurement file. Must have an ``info`` attribute. + trans : str | 'auto' | None + The full path to the ``*-trans.fif`` file produced during + coregistration. If present or found using 'auto' + the maps will be in MRI coordinates. + If None, map for EEG data will not be available. + subject : str | None + The subject name corresponding to FreeSurfer environment + variable SUBJECT. If None, map for EEG data will not be available. + subjects_dir : str + The path to the freesurfer subjects reconstructions. + It corresponds to Freesurfer environment variable SUBJECTS_DIR. + ch_type : None | 'eeg' | 'meg' + If None, a map for each available channel type will be returned. + Else only the specified type will be used. + mode : 'accurate' | 'fast' + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications.
+ meg_surf : 'helmet' | 'head' + Should be ``'helmet'`` or ``'head'`` to specify in which surface + to compute the MEG field map. The default value is ``'helmet'``. + origin : array-like, shape (3,) | 'auto' + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'``, which means a head-digitization-based origin + fit. Default is ``(0., 0., 0.04)``. + + .. versionadded:: 0.11 + %(n_jobs)s + %(verbose)s + + Returns + ------- + surf_maps : list + The surface maps to be used for field plots. The list contains + separate ones for MEG and EEG (if both MEG and EEG are present). + """ + info = evoked.info + + if ch_type is None: + types = [t for t in ['eeg', 'meg'] if t in evoked] + else: + _check_option('ch_type', ch_type, ['eeg', 'meg']) + types = [ch_type] + + if trans == 'auto': + # let's try to do this in MRI coordinates so they're easy to plot + trans = _find_trans(subject, subjects_dir) + + if 'eeg' in types and trans is None: + logger.info('No trans file available. EEG data ignored.') + types.remove('eeg') + + if len(types) == 0: + raise RuntimeError('No data available for mapping.') + + if trans is not None: + if isinstance(trans, str): + trans = read_trans(trans) + trans = _ensure_trans(trans, 'head', 'mri') + + _check_option('meg_surf', meg_surf, ['helmet', 'head']) + + surfs = [] + for this_type in types: + if this_type == 'meg' and meg_surf == 'helmet': + surf = get_meg_helmet_surf(info, trans) + else: + surf = get_head_surf(subject, subjects_dir=subjects_dir) + surfs.append(surf) + + surf_maps = list() + + for this_type, this_surf in zip(types, surfs): + this_map = _make_surface_mapping(evoked.info, this_surf, this_type, + trans, n_jobs=n_jobs, origin=origin, + mode=mode) + surf_maps.append(this_map) + + return surf_maps diff --git a/python/libs/mne/forward/_lead_dots.py b/python/libs/mne/forward/_lead_dots.py new file mode 100644 index 0000000..92f7f53 --- /dev/null +++ b/python/libs/mne/forward/_lead_dots.py @@ -0,0 +1,518 @@ +# Authors: Matti Hämäläinen +# Eric Larson +# Mainak Jas +# +# License: BSD-3-Clause + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. + +import os +import os.path as op + +import numpy as np +from numpy.polynomial import legendre + +from ..parallel import parallel_func +from ..utils import logger, verbose, _get_extra_data_path, fill_doc + + +############################################################################## +# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE + +def _next_legen_der(n, x, p0, p01, p0d, p0dd): + """Compute the next Legendre polynomial and its derivatives.""" + # only good for n > 1 ! 
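+ # Bonnet recurrence: n P_n(x) = (2n - 1) x P_{n-1}(x) - (n - 1) P_{n-2}(x), + # with derivatives advanced via P_n' = n P_{n-1} + x P_{n-1}' and + # P_n'' = (n + 1) P_{n-1}' + x P_{n-1}''.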
+ old_p0 = p0 + old_p0d = p0d + p0 = ((2 * n - 1) * x * old_p0 - (n - 1) * p01) / n + p0d = n * old_p0 + x * old_p0d + p0dd = (n + 1) * old_p0d + x * p0dd + return p0, p0d, p0dd + + +def _get_legen(x, n_coeff=100): + """Get Legendre polynomials expanded about x.""" + return legendre.legvander(x, n_coeff - 1) + + +def _get_legen_der(xx, n_coeff=100): + """Get Legendre polynomial derivatives expanded about x.""" + coeffs = np.empty((len(xx), n_coeff, 3)) + for c, x in zip(coeffs, xx): + p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2] + p0s[:2] = [1.0, x] + p0ds[:2] = [0.0, 1.0] + p0dds[:2] = [0.0, 0.0] + for n in range(2, n_coeff): + p0s[n], p0ds[n], p0dds[n] = _next_legen_der( + n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1]) + return coeffs + + +@verbose +def _get_legen_table(ch_type, volume_integral=False, n_coeff=100, + n_interp=20000, force_calc=False, verbose=None): + """Return a (generated) LUT of Legendre (derivative) polynomial coeffs.""" + if n_interp % 2 != 0: + raise RuntimeError('n_interp must be even') + fname = op.join(_get_extra_data_path(), 'tables') + if not op.isdir(fname): + # Updated due to API change (GH 1167) + os.makedirs(fname) + if ch_type == 'meg': + fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp)) + leg_fun = _get_legen_der + extra_str = ' derivative' + lut_shape = (n_interp + 1, n_coeff, 3) + else: # 'eeg' + fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp)) + leg_fun = _get_legen + extra_str = '' + lut_shape = (n_interp + 1, n_coeff) + if not op.isfile(fname) or force_calc: + logger.info('Generating Legendre%s table...' % extra_str) + x_interp = np.linspace(-1, 1, n_interp + 1) + lut = leg_fun(x_interp, n_coeff).astype(np.float32) + if not force_calc: + with open(fname, 'wb') as fid: + fid.write(lut.tobytes()) + else: + logger.info('Reading Legendre%s table...' % extra_str) + with open(fname, 'rb', buffering=0) as fid: + lut = np.fromfile(fid, np.float32) + lut.shape = lut_shape + + # we need this for the integration step + n_fact = np.arange(1, n_coeff, dtype=float) + if ch_type == 'meg': + n_facts = list() # multn, then mult, then multn * (n + 1) + if volume_integral: + n_facts.append(n_fact / ((2.0 * n_fact + 1.0) * + (2.0 * n_fact + 3.0))) + else: + n_facts.append(n_fact / (2.0 * n_fact + 1.0)) + n_facts.append(n_facts[0] / (n_fact + 1.0)) + n_facts.append(n_facts[0] * (n_fact + 1.0)) + # skip the first set of coefficients because they are not used + lut = lut[:, 1:, [0, 1, 1, 2]] # for multiplicative convenience later + # reshape this for convenience, too + n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T + n_facts = np.ascontiguousarray(n_facts) + n_fact = n_facts + else: # 'eeg' + n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact + # skip the first set of coefficients because they are not used + lut = lut[:, 1:].copy() + return lut, n_fact + + +def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact): + """Lead field dot products using Legendre polynomial (P_n) series.""" + # Compute the sum occurring in the evaluation. 
+ # The result is + # sums[:] (2n+1)^2/n beta^n P_n + n_chunk = 50000000 // (8 * max(n_fact.shape) * 2) + lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]]) + s0 = np.empty(beta.shape) + for start, stop in zip(lims[:-1], lims[1:]): + coeffs = lut_fun(ctheta[start:stop]) + betans = np.tile(beta[start:stop][:, np.newaxis], (1, n_fact.shape[0])) + np.cumprod(betans, axis=1, out=betans) # run inplace + coeffs *= betans + s0[start:stop] = np.dot(coeffs, n_fact) # == weighted sum across cols + return s0 + + +def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral): + """Lead field dot products using Legendre polynomial (P_n) series. + + Parameters + ---------- + beta : array, shape (n_points * n_points, 1) + Coefficients of the integration. + ctheta : array, shape (n_points * n_points, 1) + Cosine of the angle between the sensor integration points. + lut_fun : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + volume_integral : bool + If True, compute volume integral. + + Returns + ------- + sums : array, shape (4, n_points * n_points) + The results. + """ + # Compute the sums occurring in the evaluation. + # Two point magnetometers on the xz plane are assumed. + # The four sums are: + # * sums[:, 0] n(n+1)/(2n+1) beta^(n+1) P_n + # * sums[:, 1] n/(2n+1) beta^(n+1) P_n' + # * sums[:, 2] n/((2n+1)(n+1)) beta^(n+1) P_n' + # * sums[:, 3] n/((2n+1)(n+1)) beta^(n+1) P_n'' + + # This is equivalent, but slower: + # sums = np.sum(bbeta[:, :, np.newaxis].T * n_fact * coeffs, axis=1) + # sums = np.rollaxis(sums, 2) + # or + # sums = np.einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta))) + sums = np.empty((n_fact.shape[1], len(beta))) + # beta can be e.g. 3 million elements, which ends up using lots of memory + # so we split up the computations into ~50 MB blocks + n_chunk = 50000000 // (8 * max(n_fact.shape) * 2) + lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]]) + for start, stop in zip(lims[:-1], lims[1:]): + bbeta = np.tile(beta[start:stop][np.newaxis], (n_fact.shape[0], 1)) + bbeta[0] *= beta[start:stop] + np.cumprod(bbeta, axis=0, out=bbeta) # run inplace + np.einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta[start:stop]), + out=sums[:, start:stop]) + return sums + + +############################################################################### +# SPHERE DOTS + +_meg_const = 4e-14 * np.pi # This is \mu_0^2/4\pi +_eeg_const = 1.0 / (4.0 * np.pi) + + +def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, + w1, w2s, volume_integral, lut, n_fact, ch_type): + """Lead field dot product computation for M/EEG in the sphere model. + + Parameters + ---------- + r : float + The integration radius. It is used to calculate beta as: + beta = (r * r) / (lr1 * lr2). + rr1 : array, shape (n_points x 3) + Normalized position vectors of integrations points in first sensor. + rr2s : list + Normalized position vector of integration points in second sensor. + lr1 : array, shape (n_points x 1) + Magnitude of position vector of integration points in first sensor. + lr2s : list + Magnitude of position vector of integration points in second sensor. + cosmags1 : array, shape (n_points x 1) + Direction of integration points in first sensor. + cosmags2s : list + Direction of integration points in second sensor. + w1 : array, shape (n_points x 1) | None + Weights of integration points in the first sensor. + w2s : list + Weights of integration points in the second sensor. 
+ volume_integral : bool + If True, compute volume integral. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + ch_type : str + The channel type. It can be 'meg' or 'eeg'. + + Returns + ------- + result : float + The integration sum. + """ + if w1 is None: # operating on surface, treat independently + out_shape = (len(rr2s), len(rr1_orig)) + sum_axis = 1 # operate along second axis only at the end + else: + out_shape = (len(rr2s),) + sum_axis = None # operate on flattened array at the end + out = np.empty(out_shape) + rr2 = np.concatenate(rr2s) + lr2 = np.concatenate(lr2s) + cosmags2 = np.concatenate(cosmags2s) + + # outer product, sum over coords + ct = np.einsum('ik,jk->ij', rr1_orig, rr2) + np.clip(ct, -1, 1, ct) + + # expand axes + rr1 = rr1_orig[:, np.newaxis, :] # (n_rr1, n_rr2, n_coord) e.g. 4x4x3 + rr2 = rr2[np.newaxis, :, :] + lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :] + + beta = (r * r) / lr1lr2 + if ch_type == 'meg': + sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact, + volume_integral) + sums.shape = (4,) + beta.shape + + # Accumulate the result, a little bit streamlined version + # cosmags1 = cosmags1[:, np.newaxis, :] + # cosmags2 = cosmags2[np.newaxis, :, :] + # n1c1 = np.sum(cosmags1 * rr1, axis=2) + # n1c2 = np.sum(cosmags1 * rr2, axis=2) + # n2c1 = np.sum(cosmags2 * rr1, axis=2) + # n2c2 = np.sum(cosmags2 * rr2, axis=2) + # n1n2 = np.sum(cosmags1 * cosmags2, axis=2) + n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1) + n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2) + n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1) + n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2) + n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2) + part1 = ct * n1c1 * n2c2 + part2 = n1c1 * n2c1 + n1c2 * n2c2 + + result = (n1c1 * n2c2 * sums[0] + + (2.0 * part1 - part2) * sums[1] + + (n1n2 + part1 - part2) * sums[2] + + (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3]) + + # Give it a finishing touch! + result *= (_meg_const / lr1lr2) + if volume_integral: + result *= r + else: # 'eeg' + result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) + result.shape = beta.shape + # Give it a finishing touch! + result *= _eeg_const + result /= lr1lr2 + # now we add them all up with weights + offset = 0 + result *= np.concatenate(w2s) + if w1 is not None: + result *= w1[:, np.newaxis] + for ii, w2 in enumerate(w2s): + out[ii] = np.sum(result[:, offset:offset + len(w2)], axis=sum_axis) + offset += len(w2) + return out + + +@fill_doc +def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs): + """Perform the lead field dot product integrations. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2). + volume : bool + If True, perform volume integral. + coils : list of dict + The coils. + r0 : array, shape (3 x 1) + The origin of the sphere. + ch_type : str + The channel type. It can be 'meg' or 'eeg'. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + %(n_jobs)s + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. 
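+ + Notes + ----- + The result is symmetric, so _do_self_dots_subset fills only the lower + triangle (including the diagonal) directly and mirrors it.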
+ """ + if ch_type == 'eeg': + intrad = intrad * 0.7 + # convert to normalized distances from expansion center + rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils] + rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] + rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] + cosmags = [coil['cosmag'] for coil in coils] + ws = [coil['w'] for coil in coils] + parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs) + prods = parallel(p_fun(intrad, rmags, rlens, cosmags, + ws, volume, lut, n_fact, ch_type, idx) + for idx in np.array_split(np.arange(len(rmags)), n_jobs)) + products = np.sum(prods, axis=0) + return products + + +def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut, + n_fact, ch_type, idx): + """Parallelize.""" + # all possible combinations of two magnetometers + products = np.zeros((len(rmags), len(rmags))) + for ci1 in idx: + ci2 = ci1 + 1 + res = _fast_sphere_dot_r0( + intrad, rmags[ci1], rmags[:ci2], rlens[ci1], rlens[:ci2], + cosmags[ci1], cosmags[:ci2], ws[ci1], ws[:ci2], volume, lut, + n_fact, ch_type) + products[ci1, :ci2] = res + products[:ci2, ci1] = res + return products + + +def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, + lut, n_fact): + """Compute lead field dot product integrations between two coil sets. + + The code is a direct translation of MNE-C code found in + `mne_map_data/lead_dots.c`. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2). + volume : bool + If True, compute volume integral. + coils1 : list of dict + The original coils. + coils2 : list of dict + The coils to which data is being mapped. + r0 : array, shape (3 x 1). + The origin of the sphere. + ch_type : str + The channel type. It can be 'meg' or 'eeg' + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + if ch_type == 'eeg': + intrad = intrad * 0.7 + rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1] + rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2] + + rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1] + rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2] + + rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)] + rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)] + + ws1 = [coil['w'] for coil in coils1] + ws2 = [coil['w'] for coil in coils2] + + cosmags1 = [coil['cosmag'] for coil in coils1] + cosmags2 = [coil['cosmag'] for coil in coils2] + + products = np.zeros((len(rmags1), len(rmags2))) + for ci1 in range(len(coils1)): + res = _fast_sphere_dot_r0( + intrad, rmags1[ci1], rmags2, rlens1[ci1], rlens2, cosmags1[ci1], + cosmags2, ws1[ci1], ws2, volume, lut, n_fact, ch_type) + products[ci1, :] = res + return products + + +@fill_doc +def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type, + lut, n_fact, n_jobs): + """Compute the map construction products. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2) + volume : bool + If True, compute a volume integral. + coils : list of dict + The coils. + surf : dict + The surface on which the field is interpolated. + sel : array + Indices of the surface vertices to select. + r0 : array, shape (3 x 1) + The origin of the sphere. 
+ ch_type : str + The channel type. It can be 'meg' or 'eeg'. + lut : callable + Look-up table for Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + %(n_jobs)s + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + # convert to normalized distances from expansion center + rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils] + rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] + rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] + cosmags = [coil['cosmag'] for coil in coils] + ws = [coil['w'] for coil in coils] + rref = None + refl = None + # virt_ref = False + if ch_type == 'eeg': + intrad = intrad * 0.7 + # The virtual ref code is untested and unused, so it is + # commented out for now + # if virt_ref: + # rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :] + # refl = np.sqrt(np.sum(rref * rref, axis=1)) + # rref /= refl[:, np.newaxis] + + rsurf = surf['rr'][sel] - r0[np.newaxis, :] + lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1)) + rsurf /= lsurf[:, np.newaxis] + this_nn = surf['nn'][sel] + + # loop over the coils + parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs) + prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens, + this_nn, cosmags, ws, volume, lut, n_fact, ch_type, + idx) + for idx in np.array_split(np.arange(len(rmags)), n_jobs)) + products = np.sum(prods, axis=0) + return products + + +def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens, + this_nn, cosmags, ws, volume, lut, n_fact, ch_type, + idx): + """Parallelize. + + Parameters + ---------- + refl : array | None + If ch_type is 'eeg', the magnitude of position vector of the + virtual reference (never used). + lsurf : array + Magnitude of position vector of the surface points. + rlens : list of arrays of length n_coils + Magnitude of position vector. + this_nn : array, shape (n_vertices, 3) + Surface normals. + cosmags : list of array. + Direction of the integration points in the coils. + ws : list of array + Integration weights of the coils. + volume : bool + If True, compute volume integral. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + ch_type : str + 'meg' or 'eeg' + idx : array, shape (n_coils x 1) + Index of coil. + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + products = _fast_sphere_dot_r0( + intrad, rsurf, rmags, lsurf, rlens, this_nn, cosmags, None, ws, + volume, lut, n_fact, ch_type).T + if rref is not None: + raise NotImplementedError # we don't ever use this, isn't tested + # vres = _fast_sphere_dot_r0( + # intrad, rref, rmags, refl, rlens, this_nn, cosmags, None, ws, + # volume, lut, n_fact, ch_type) + # products -= vres + return products diff --git a/python/libs/mne/forward/_make_forward.py b/python/libs/mne/forward/_make_forward.py new file mode 100644 index 0000000..dc3c345 --- /dev/null +++ b/python/libs/mne/forward/_make_forward.py @@ -0,0 +1,813 @@ +# Authors: Matti Hämäläinen +# Alexandre Gramfort +# Martin Luessi +# Eric Larson +# +# License: BSD-3-Clause + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. 
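+
+# A minimal usage sketch (not part of the original sources) for the public
+# entry point defined below, make_forward_solution(). The file names are
+# placeholder assumptions and _example_forward_pipeline() is illustrative,
+# not an MNE API.
+def _example_forward_pipeline():
+    import mne
+    fwd = mne.make_forward_solution(
+        'sample_audvis_raw.fif', trans='sample-trans.fif',
+        src='sample-oct6-src.fif', bem='sample-bem-sol.fif',
+        meg=True, eeg=True, mindist=5.0)
+    # Orientation handling is deferred to convert_forward_solution()
+    return mne.convert_forward_solution(fwd, surf_ori=True)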
+ +from copy import deepcopy +from contextlib import contextmanager +import os +import os.path as op + +import numpy as np + +from ._compute_forward import _compute_forwards +from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc, Info +from ..io.pick import _has_kit_refs, pick_types, pick_info +from ..io.constants import FIFF, FWD +from ..transforms import (_ensure_trans, transform_surface_to, apply_trans, + _get_trans, _print_coord_trans, _coord_frame_name, + Transform, invert_transform) +from ..utils import logger, verbose, warn, _pl, _validate_type +from ..parallel import check_n_jobs +from ..source_space import (_ensure_src, _filter_source_spaces, + _make_discrete_source_space, _complete_vol_src) +from ..source_estimate import VolSourceEstimate +from ..surface import _normalize_vectors, _CheckInside +from ..bem import read_bem_solution, _bem_find_surface, ConductorModel + +from .forward import Forward, _merge_meg_eeg_fwds, convert_forward_solution + + +_accuracy_dict = dict(normal=FWD.COIL_ACCURACY_NORMAL, + accurate=FWD.COIL_ACCURACY_ACCURATE) +_extra_coil_def_fname = None + + +@verbose +def _read_coil_defs(verbose=None): + """Read a coil definition file. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + res : list of dict + The coils. It is a dictionary with valid keys: + 'cosmag' | 'coil_class' | 'coord_frame' | 'rmag' | 'type' | + 'chname' | 'accuracy'. + cosmag contains the direction of the coils and rmag contains the + position vector. + + Notes + ----- + The global variable "_extra_coil_def_fname" can be used to prepend + additional definitions. These are never added to the registry. + """ + coil_dir = op.join(op.split(__file__)[0], '..', 'data') + coils = list() + if _extra_coil_def_fname is not None: + coils += _read_coil_def_file(_extra_coil_def_fname, use_registry=False) + coils += _read_coil_def_file(op.join(coil_dir, 'coil_def.dat')) + return coils + + +# Typically we only have 1 or 2 coil def files, but they can end up being +# read a lot. 
Let's keep a list of them and just reuse them:
+_coil_registry = {}
+
+
+def _read_coil_def_file(fname, use_registry=True):
+    """Read a coil def file."""
+    if not use_registry or fname not in _coil_registry:
+        big_val = 0.5
+        coils = list()
+        with open(fname, 'r') as fid:
+            lines = fid.readlines()
+        lines = lines[::-1]
+        while len(lines) > 0:
+            line = lines.pop().strip()
+            # check the length before indexing, and skip empty lines too
+            if len(line) == 0 or line[0] == '#':
+                continue
+            desc_start = line.find('"')
+            desc_end = len(line) - 1
+            assert line.strip()[desc_end] == '"'
+            desc = line[desc_start:desc_end]
+            vals = np.fromstring(line[:desc_start].strip(),
+                                 dtype=float, sep=' ')
+            assert len(vals) == 6
+            npts = int(vals[3])
+            coil = dict(coil_type=vals[1], coil_class=vals[0], desc=desc,
+                        accuracy=vals[2], size=vals[4], base=vals[5])
+            # get parameters of each component
+            rmag = list()
+            cosmag = list()
+            w = list()
+            for p in range(npts):
+                # get next non-comment line
+                line = lines.pop()
+                while line[0] == '#':
+                    line = lines.pop()
+                vals = np.fromstring(line, sep=' ')
+                if len(vals) != 7:
+                    raise RuntimeError(
+                        f'Could not interpret line {p + 1} as 7 points:\n'
+                        f'{line}')
+                # Read and verify data for each integration point
+                w.append(vals[0])
+                rmag.append(vals[[1, 2, 3]])
+                cosmag.append(vals[[4, 5, 6]])
+            w = np.array(w)
+            rmag = np.array(rmag)
+            cosmag = np.array(cosmag)
+            size = np.sqrt(np.sum(cosmag ** 2, axis=1))
+            if np.any(np.sqrt(np.sum(rmag ** 2, axis=1)) > big_val):
+                raise RuntimeError('Unreasonable integration point')
+            if np.any(size <= 0):
+                raise RuntimeError('Unreasonable normal')
+            cosmag /= size[:, np.newaxis]
+            coil.update(dict(w=w, cosmag=cosmag, rmag=rmag))
+            coils.append(coil)
+        if use_registry:
+            _coil_registry[fname] = coils
+    if use_registry:
+        coils = deepcopy(_coil_registry[fname])
+    logger.info('%d coil definition%s read', len(coils), _pl(coils))
+    return coils
+
+
+def _create_meg_coil(coilset, ch, acc, do_es):
+    """Create a coil definition using templates, transform if necessary."""
+    # Also change the coordinate frame if so desired
+    if ch['kind'] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
+        raise RuntimeError('%s is not a MEG channel' % ch['ch_name'])
+
+    # Simple linear search from the coil definitions
+    for coil in coilset:
+        if coil['coil_type'] == (ch['coil_type'] & 0xFFFF) and \
+                coil['accuracy'] == acc:
+            break
+    else:
+        raise RuntimeError('Desired coil definition not found '
+                           '(type = %d acc = %d)' % (ch['coil_type'], acc))
+
+    # Apply a coordinate transformation if so desired
+    coil_trans = _loc_to_coil_trans(ch['loc'])
+
+    # Create the result
+    res = dict(chname=ch['ch_name'], coil_class=coil['coil_class'],
+               accuracy=coil['accuracy'], base=coil['base'],
+               size=coil['size'], type=ch['coil_type'], w=coil['w'],
+               desc=coil['desc'], coord_frame=FIFF.FIFFV_COORD_DEVICE,
+               rmag_orig=coil['rmag'], cosmag_orig=coil['cosmag'],
+               coil_trans_orig=coil_trans,
+               r0=coil_trans[:3, 3],
+               rmag=apply_trans(coil_trans, coil['rmag']),
+               cosmag=apply_trans(coil_trans, coil['cosmag'], False))
+    if do_es:
+        r0_exey = (np.dot(coil['rmag'][:, :2], coil_trans[:3, :2].T) +
+                   coil_trans[:3, 3])
+        res.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1],
+                   ez=coil_trans[:3, 2], r0_exey=r0_exey)
+    return res
+
+
+def _create_eeg_el(ch, t=None):
+    """Create an electrode definition, transform coords if necessary."""
+    if ch['kind'] != FIFF.FIFFV_EEG_CH:
+        raise RuntimeError('%s is not an EEG channel. Cannot create an '
+                           'electrode definition.'
% ch['ch_name']) + if t is None: + t = Transform('head', 'head') # identity, no change + if t.from_str != 'head': + raise RuntimeError('Inappropriate coordinate transformation') + + r0ex = _loc_to_eeg_loc(ch['loc']) + if r0ex.shape[1] == 1: # no reference + w = np.array([1.]) + else: # has reference + w = np.array([1., -1.]) + + # Optional coordinate transformation + r0ex = apply_trans(t['trans'], r0ex.T) + + # The electrode location + cosmag = r0ex.copy() + _normalize_vectors(cosmag) + res = dict(chname=ch['ch_name'], coil_class=FWD.COILC_EEG, w=w, + accuracy=_accuracy_dict['normal'], type=ch['coil_type'], + coord_frame=t['to'], rmag=r0ex, cosmag=cosmag) + return res + + +def _create_meg_coils(chs, acc, t=None, coilset=None, do_es=False): + """Create a set of MEG coils in the head coordinate frame.""" + acc = _accuracy_dict[acc] if isinstance(acc, str) else acc + coilset = _read_coil_defs(verbose=False) if coilset is None else coilset + coils = [_create_meg_coil(coilset, ch, acc, do_es) for ch in chs] + _transform_orig_meg_coils(coils, t, do_es=do_es) + return coils + + +def _transform_orig_meg_coils(coils, t, do_es=True): + """Transform original (device) MEG coil positions.""" + if t is None: + return + for coil in coils: + coil_trans = np.dot(t['trans'], coil['coil_trans_orig']) + coil.update( + coord_frame=t['to'], r0=coil_trans[:3, 3], + rmag=apply_trans(coil_trans, coil['rmag_orig']), + cosmag=apply_trans(coil_trans, coil['cosmag_orig'], False)) + if do_es: + r0_exey = (np.dot(coil['rmag_orig'][:, :2], + coil_trans[:3, :2].T) + coil_trans[:3, 3]) + coil.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1], + ez=coil_trans[:3, 2], r0_exey=r0_exey) + + +def _create_eeg_els(chs): + """Create a set of EEG electrodes in the head coordinate frame.""" + return [_create_eeg_el(ch) for ch in chs] + + +@verbose +def _setup_bem(bem, bem_extra, neeg, mri_head_t, allow_none=False, + verbose=None): + """Set up a BEM for forward computation, making a copy and modifying.""" + if allow_none and bem is None: + return None + logger.info('') + _validate_type(bem, ('path-like', ConductorModel), bem) + if not isinstance(bem, ConductorModel): + logger.info('Setting up the BEM model using %s...\n' % bem_extra) + bem = read_bem_solution(bem) + else: + bem = bem.copy() + if bem['is_sphere']: + logger.info('Using the sphere model.\n') + if len(bem['layers']) == 0 and neeg > 0: + raise RuntimeError('Spherical model has zero shells, cannot use ' + 'with EEG data') + if bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError('Spherical model is not in head coordinates') + else: + if bem['surfs'][0]['coord_frame'] != FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + 'BEM is in %s coordinates, should be in MRI' + % (_coord_frame_name(bem['surfs'][0]['coord_frame']),)) + if neeg > 0 and len(bem['surfs']) == 1: + raise RuntimeError('Cannot use a homogeneous (1-layer BEM) model ' + 'for EEG forward calculations, consider ' + 'using a 3-layer BEM instead') + logger.info('Employing the head->MRI coordinate transform with the ' + 'BEM model.') + # fwd_bem_set_head_mri_t: Set the coordinate transformation + bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri') + logger.info('BEM model %s is now set up' % op.split(bem_extra)[1]) + logger.info('') + return bem + + +@verbose +def _prep_meg_channels(info, accuracy='accurate', exclude=(), ignore_ref=False, + head_frame=True, do_es=False, do_picking=True, + verbose=None): + """Prepare MEG coil definitions for forward calculation. 
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    accuracy : str
+        Can be "normal" or "accurate" (default).
+    exclude : list of str | str
+        List of channels to exclude. If 'bads', exclude channels in
+        info['bads']
+    ignore_ref : bool
+        If True, ignore compensation coils.
+    head_frame : bool
+        If True (default), use head frame coords. Otherwise, use device frame.
+    do_es : bool
+        If True, compute and store ex, ey, ez, and r0_exey.
+    do_picking : bool
+        If True, pick info and return it.
+    %(verbose)s
+
+    Returns
+    -------
+    megcoils : list of dict
+        Information for each prepped MEG coil.
+    compcoils : list of dict
+        Information for each prepped MEG compensation coil.
+    megnames : list of str
+        Name of each prepped MEG coil.
+    meginfo : instance of Info
+        Information subselected for just the set of MEG coils.
+    """
+    info_extra = 'info'
+    megnames, megcoils, compcoils = [], [], []
+
+    # Find MEG channels
+    picks = pick_types(info, meg=True, eeg=False, ref_meg=False,
+                       exclude=exclude)
+
+    # Make sure MEG coils exist
+    nmeg = len(picks)
+    if nmeg <= 0:
+        raise RuntimeError('Could not find any MEG channels')
+
+    # Get channel info and names for MEG channels
+    megchs = [info['chs'][pick] for pick in picks]
+    megnames = [info['ch_names'][p] for p in picks]
+    logger.info('Read %3d MEG channels from %s'
+                % (len(picks), info_extra))
+
+    # Get MEG compensation channels
+    if not ignore_ref:
+        picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude)
+        ncomp = len(picks)
+        if ncomp > 0:
+            compchs = pick_info(info, picks)['chs']
+            logger.info('Read %3d MEG compensation channels from %s'
+                        % (ncomp, info_extra))
+            # We need to check to make sure these are NOT KIT refs
+            if _has_kit_refs(info, picks):
+                raise NotImplementedError(
+                    'Cannot create forward solution with KIT reference '
+                    'channels. Consider using "ignore_ref=True" in '
+                    'calculation')
+    else:
+        ncomp = 0
+
+    # Make info structure to allow making compensator later
+    ncomp_data = len(info['comps'])
+    ref_meg = not ignore_ref
+    picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=exclude)
+
+    # Create coil descriptions with transformation to head or device frame
+    templates = _read_coil_defs()
+
+    if head_frame:
+        _print_coord_trans(info['dev_head_t'])
+        transform = info['dev_head_t']
+    else:
+        transform = None
+
+    megcoils = _create_meg_coils(megchs, accuracy, transform, templates,
+                                 do_es=do_es)
+
+    if ncomp > 0:
+        logger.info('%d compensation data sets in %s' % (ncomp_data,
+                                                         info_extra))
+        compcoils = _create_meg_coils(compchs, 'normal', transform, templates,
+                                      do_es=do_es)
+
+    # Check that coordinate frame is correct and log it
+    if head_frame:
+        assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
+        logger.info('MEG coil definitions created in head coordinates.')
+    else:
+        assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_DEVICE
+        logger.info('MEG coil definitions created in device coordinates.')
+
+    out = (megcoils, compcoils, megnames)
+    if do_picking:
+        out = out + (pick_info(info, picks),)
+    return out
+
+
+@verbose
+def _prep_eeg_channels(info, exclude=(), verbose=None):
+    """Prepare EEG electrode definitions for forward calculation.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    exclude : list of str | str
+        List of channels to exclude.
If 'bads', exclude channels in + info['bads'] + %(verbose)s + + Returns + ------- + eegels : list of dict + Information for each prepped EEG electrode + eegnames : list of str + Name of each prepped EEG electrode + """ + info_extra = 'info' + + # Find EEG electrodes + picks = pick_types(info, meg=False, eeg=True, ref_meg=False, + exclude=exclude) + + # Make sure EEG electrodes exist + neeg = len(picks) + if neeg <= 0: + raise RuntimeError('Could not find any EEG channels') + + # Get channel info and names for EEG channels + eegchs = pick_info(info, picks)['chs'] + eegnames = [info['ch_names'][p] for p in picks] + logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra)) + + # Create EEG electrode descriptions + eegels = _create_eeg_els(eegchs) + logger.info('Head coordinate coil definitions created.') + + return eegels, eegnames + + +@verbose +def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs, + bem_extra='', trans='', info_extra='', + meg=True, eeg=True, ignore_ref=False, + allow_bem_none=False, verbose=None): + """Prepare for forward computation.""" + # Read the source locations + logger.info('') + # let's make a copy in case we modify something + src = _ensure_src(src).copy() + nsource = sum(s['nuse'] for s in src) + if nsource == 0: + raise RuntimeError('No sources are active in these source spaces. ' + '"do_all" option should be used.') + logger.info('Read %d source spaces a total of %d active source locations' + % (len(src), nsource)) + # Delete some keys to clean up the source space: + for key in ['working_dir', 'command_line']: + if key in src.info: + del src.info[key] + + # Read the MRI -> head coordinate transformation + logger.info('') + _print_coord_trans(mri_head_t) + + # make a new dict with the relevant information + arg_list = [info_extra, trans, src, bem_extra, meg, eeg, mindist, + n_jobs, verbose] + cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list])) + mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0) + + info = Info(chs=info['chs'], comps=info['comps'], + dev_head_t=info['dev_head_t'], mri_file=trans, mri_id=mri_id, + meas_file=info_extra, meas_id=None, working_dir=os.getcwd(), + command_line=cmd, bads=info['bads'], mri_head_t=mri_head_t) + info._update_redundant() + info._check_consistency() + logger.info('') + + megcoils, compcoils, megnames, meg_info = [], [], [], [] + eegels, eegnames = [], [] + + if meg and len(pick_types(info, meg=True, ref_meg=False, exclude=[])) > 0: + megcoils, compcoils, megnames, meg_info = \ + _prep_meg_channels(info, ignore_ref=ignore_ref) + if eeg and len(pick_types(info, meg=False, eeg=True, ref_meg=False, + exclude=[])) > 0: + eegels, eegnames = _prep_eeg_channels(info) + + # Check that some channels were found + if len(megcoils + eegels) == 0: + raise RuntimeError('No MEG or EEG channels found.') + + # pick out final info + info = pick_info(info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False, + exclude=[])) + + # Transform the source spaces into the appropriate coordinates + # (will either be HEAD or MRI) + for s in src: + transform_surface_to(s, 'head', mri_head_t) + logger.info('Source spaces are now in %s coordinates.' 
+ % _coord_frame_name(s['coord_frame'])) + + # Prepare the BEM model + bem = _setup_bem(bem, bem_extra, len(eegnames), mri_head_t, + allow_none=allow_bem_none) + + # Circumvent numerical problems by excluding points too close to the skull, + # and check that sensors are not inside any BEM surface + if bem is not None: + if not bem['is_sphere']: + check_surface = 'inner skull surface' + inner_skull = _bem_find_surface(bem, 'inner_skull') + check_inside = _filter_source_spaces( + inner_skull, mindist, mri_head_t, src, n_jobs) + logger.info('') + if len(bem['surfs']) == 3: + check_surface = 'scalp surface' + check_inside = _CheckInside( + _bem_find_surface(bem, 'head')) + else: + check_surface = 'outermost sphere shell' + if len(bem['layers']) == 0: + def check_inside(x): + return np.zeros(len(x), bool) + else: + def check_inside(x): + return (np.linalg.norm(x - bem['r0'], axis=1) < + bem['layers'][-1]['rad']) + if len(megcoils): + meg_loc = apply_trans( + invert_transform(mri_head_t), + np.array([coil['r0'] for coil in megcoils])) + n_inside = check_inside(meg_loc).sum() + if n_inside: + raise RuntimeError( + f'Found {n_inside} MEG sensor{_pl(n_inside)} inside the ' + f'{check_surface}, perhaps coordinate frames and/or ' + 'coregistration must be incorrect') + + rr = np.concatenate([s['rr'][s['vertno']] for s in src]) + if len(rr) < 1: + raise RuntimeError('No points left in source space after excluding ' + 'points close to inner skull.') + + # deal with free orientations: + source_nn = np.tile(np.eye(3), (len(rr), 1)) + update_kwargs = dict(nchan=len(info['ch_names']), nsource=len(rr), + info=info, src=src, source_nn=source_nn, + source_rr=rr, surf_ori=False, mri_head_t=mri_head_t) + return megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, \ + info, update_kwargs, bem + + +@verbose +def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, + mindist=0.0, ignore_ref=False, n_jobs=1, + verbose=None): + """Calculate a forward solution for a subject. + + Parameters + ---------- + %(info_str)s + %(trans)s + src : str | instance of SourceSpaces + If string, should be a source space filename. Can also be an + instance of loaded or generated SourceSpaces. + bem : dict | str + Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to + use, or a loaded sphere model (dict). + meg : bool + If True (Default), include MEG computations. + eeg : bool + If True (Default), include EEG computations. + mindist : float + Minimum distance of sources from inner skull surface (in mm). + ignore_ref : bool + If True, do not include reference channels in compensation. This + option should be True for KIT files, since forward computation + with reference channels is not currently supported. + %(n_jobs)s + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution. + + See Also + -------- + convert_forward_solution + + Notes + ----- + The ``--grad`` option from MNE-C (to compute gradients) is not implemented + here. + + To create a fixed-orientation forward solution, use this function + followed by :func:`mne.convert_forward_solution`. + """ + # Currently not (sup)ported: + # 1. --grad option (gradients of the field, not used much) + # 2. --fixed option (can be computed post-hoc) + # 3. 
--mricoord option (probably not necessary) + + # read the transformation from MRI to HEAD coordinates + # (could also be HEAD to MRI) + mri_head_t, trans = _get_trans(trans) + if isinstance(bem, ConductorModel): + bem_extra = 'instance of ConductorModel' + else: + bem_extra = bem + if not isinstance(info, (Info, str)): + raise TypeError('info should be an instance of Info or string') + if isinstance(info, str): + info_extra = op.split(info)[1] + info = read_info(info, verbose=False) + else: + info_extra = 'instance of Info' + n_jobs = check_n_jobs(n_jobs) + + # Report the setup + logger.info('Source space : %s' % src) + logger.info('MRI -> head transform : %s' % trans) + logger.info('Measurement data : %s' % info_extra) + if isinstance(bem, ConductorModel) and bem['is_sphere']: + logger.info('Sphere model : origin at %s mm' + % (bem['r0'],)) + logger.info('Standard field computations') + else: + logger.info('Conductor model : %s' % bem_extra) + logger.info('Accurate field computations') + logger.info('Do computations in %s coordinates', + _coord_frame_name(FIFF.FIFFV_COORD_HEAD)) + logger.info('Free source orientations') + + megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \ + update_kwargs, bem = _prepare_for_forward( + src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans, + info_extra, meg, eeg, ignore_ref) + del (src, mri_head_t, trans, info_extra, bem_extra, mindist, + meg, eeg, ignore_ref) + + # Time to do the heavy lifting: MEG first, then EEG + coil_types = ['meg', 'eeg'] + coils = [megcoils, eegels] + ccoils = [compcoils, None] + infos = [meg_info, None] + megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils, + infos, coil_types, n_jobs) + + # merge forwards + fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames), + _to_forward_dict(eegfwd, eegnames), + verbose=False) + logger.info('') + + # Don't transform the source spaces back into MRI coordinates (which is + # done in the C code) because mne-python assumes forward solution source + # spaces are in head coords. + fwd.update(**update_kwargs) + logger.info('Finished.') + return fwd + + +@verbose +def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=1, verbose=None): + """Convert dipole object to source estimate and calculate forward operator. + + The instance of Dipole is converted to a discrete source space, + which is then combined with a BEM or a sphere model and + the sensor information in info to form a forward operator. + + The source estimate object (with the forward operator) can be projected to + sensor-space using :func:`mne.simulation.simulate_evoked`. + + .. note:: If the (unique) time points of the dipole object are unevenly + spaced, the first output will be a list of single-timepoint + source estimates. + + Parameters + ---------- + %(dipole)s + bem : str | dict + The BEM filename (str) or a loaded sphere model (dict). + info : instance of Info + The measurement information dictionary. It is sensor-information etc., + e.g., from a real data file. + trans : str | None + The head<->MRI transform filename. Must be provided unless BEM + is a sphere model. + %(n_jobs)s + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution corresponding to the source estimate(s). + stc : instance of VolSourceEstimate | list of VolSourceEstimate + The dipoles converted to a discrete set of points and associated + time courses. If the time points of the dipole are unevenly spaced, + a list of single-timepoint source estimates are returned. 
+ + See Also + -------- + mne.simulation.simulate_evoked + + Notes + ----- + .. versionadded:: 0.12.0 + """ + # Make copies to avoid mangling original dipole + times = dipole.times.copy() + pos = dipole.pos.copy() + amplitude = dipole.amplitude.copy() + ori = dipole.ori.copy() + + # Convert positions to discrete source space (allows duplicate rr & nn) + # NB information about dipole orientation enters here, then no more + sources = dict(rr=pos, nn=ori) + # Dipole objects must be in the head frame + src = _complete_vol_src( + [_make_discrete_source_space(sources, coord_frame='head')]) + + # Forward operator created for channels in info (use pick_info to restrict) + # Use defaults for most params, including min_dist + fwd = make_forward_solution(info, trans, src, bem, n_jobs=n_jobs, + verbose=verbose) + # Convert from free orientations to fixed (in-place) + convert_forward_solution(fwd, surf_ori=False, force_fixed=True, + copy=False, use_cps=False, verbose=None) + + # Check for omissions due to proximity to inner skull in + # make_forward_solution, which will result in an exception + if fwd['src'][0]['nuse'] != len(pos): + inuse = fwd['src'][0]['inuse'].astype(bool) + head = ('The following dipoles are outside the inner skull boundary') + msg = len(head) * '#' + '\n' + head + '\n' + for (t, pos) in zip(times[np.logical_not(inuse)], + pos[np.logical_not(inuse)]): + msg += ' t={:.0f} ms, pos=({:.0f}, {:.0f}, {:.0f}) mm\n'.\ + format(t * 1000., pos[0] * 1000., + pos[1] * 1000., pos[2] * 1000.) + msg += len(head) * '#' + logger.error(msg) + raise ValueError('One or more dipoles outside the inner skull.') + + # multiple dipoles (rr and nn) per time instant allowed + # uneven sampling in time returns list + timepoints = np.unique(times) + if len(timepoints) > 1: + tdiff = np.diff(timepoints) + if not np.allclose(tdiff, tdiff[0]): + warn('Unique time points of dipoles unevenly spaced: returned ' + 'stc will be a list, one for each time point.') + tstep = -1.0 + else: + tstep = tdiff[0] + elif len(timepoints) == 1: + tstep = 0.001 + + # Build the data matrix, essentially a block-diagonal with + # n_rows: number of dipoles in total (dipole.amplitudes) + # n_cols: number of unique time points in dipole.times + # amplitude with identical value of times go together in one col (others=0) + data = np.zeros((len(amplitude), len(timepoints))) # (n_d, n_t) + row = 0 + for tpind, tp in enumerate(timepoints): + amp = amplitude[np.in1d(times, tp)] + data[row:row + len(amp), tpind] = amp + row += len(amp) + + if tstep > 0: + stc = VolSourceEstimate(data, vertices=[fwd['src'][0]['vertno']], + tmin=timepoints[0], + tstep=tstep, subject=None) + else: # Must return a list of stc, one for each time point + stc = [] + for col, tp in enumerate(timepoints): + stc += [VolSourceEstimate(data[:, col][:, np.newaxis], + vertices=[fwd['src'][0]['vertno']], + tmin=tp, tstep=0.001, subject=None)] + return fwd, stc + + +def _to_forward_dict(fwd, names, fwd_grad=None, + coord_frame=FIFF.FIFFV_COORD_HEAD, + source_ori=FIFF.FIFFV_MNE_FREE_ORI): + """Convert forward solution matrices to dicts.""" + assert names is not None + if len(fwd) == 0: + return None + sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0], + row_names=names, col_names=[]) + fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'], + coord_frame=coord_frame, sol_grad=None, + nchan=sol['nrow'], _orig_source_ori=source_ori, + _orig_sol=sol['data'].copy(), _orig_sol_grad=None) + if fwd_grad is not None: + sol_grad = dict(data=fwd_grad.T, 
nrow=fwd_grad.shape[1],
+                        ncol=fwd_grad.shape[0], row_names=names,
+                        col_names=[])
+        fwd.update(dict(sol_grad=sol_grad),
+                   _orig_sol_grad=sol_grad['data'].copy())
+    return fwd
+
+
+@contextmanager
+def use_coil_def(fname):
+    """Use a custom coil definition file.
+
+    Parameters
+    ----------
+    fname : str
+        The filename of the coil definition file.
+
+    Returns
+    -------
+    context : contextmanager
+        The context for using the coil definition.
+
+    Notes
+    -----
+    This is meant to be used as a context manager, such as:
+
+    >>> with use_coil_def(my_fname):  # doctest:+SKIP
+    ...     make_forward_solution(...)
+
+    This allows using custom coil definitions with functions that require
+    forward modeling.
+    """
+    global _extra_coil_def_fname
+    _extra_coil_def_fname = fname
+    try:
+        yield
+    finally:
+        _extra_coil_def_fname = None
diff --git a/python/libs/mne/forward/forward.py b/python/libs/mne/forward/forward.py
new file mode 100644
index 0000000..1e5c1f9
--- /dev/null
+++ b/python/libs/mne/forward/forward.py
@@ -0,0 +1,1973 @@
+# Authors: Matti Hämäläinen
+#          Alexandre Gramfort
+#          Martin Luessi
+#
+# License: BSD-3-Clause
+
+# The computations in this code were primarily derived from Matti Hämäläinen's
+# C code.
+
+from time import time
+from copy import deepcopy
+import re
+
+import numpy as np
+
+import shutil
+import os
+from os import path as op
+import tempfile
+
+from ..io import RawArray, Info
+from ..io.constants import FIFF
+from ..io.open import fiff_open
+from ..io.tree import dir_tree_find
+from ..io.tag import find_tag, read_tag
+from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
+                         write_named_matrix)
+from ..io.meas_info import (_read_bad_channels, write_info, _write_ch_infos,
+                            _read_extended_ch_info, _make_ch_names_mapping,
+                            _rename_list)
+from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
+                       pick_types)
+from ..io.write import (write_int, start_block, end_block,
+                        write_coord_trans, write_name_list,
+                        write_string, start_and_end_file, write_id)
+from ..io.base import BaseRaw
+from ..evoked import Evoked, EvokedArray
+from ..epochs import BaseEpochs
+from ..source_space import (_read_source_spaces_from_tree,
+                            find_source_space_hemi,
+                            _set_source_space_vertices,
+                            _write_source_spaces_to_fid, _get_src_nn,
+                            _src_kind_dict)
+from ..source_estimate import _BaseVectorSourceEstimate, _BaseSourceEstimate
+from ..surface import _normal_orth
+from ..transforms import (transform_surface_to, invert_transform,
+                          write_trans)
+from ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,
+                     run_subprocess, check_fname, logger, verbose, fill_doc,
+                     _validate_type, _check_compensation_grade, _check_option,
+                     _check_stc_units, _stamp_to_dt, _on_missing)
+from ..label import Label
+
+
+class Forward(dict):
+    """Forward class to represent info from forward solution.
+
+    Attributes
+    ----------
+    ch_names : list of str
+        List of channels' names.
+
+        ..
versionadded:: 0.20.0
+    """
+
+    def copy(self):
+        """Copy the Forward instance."""
+        return Forward(deepcopy(self))
+
+    def _get_src_type_and_ori_for_repr(self):
+        src_types = np.array([src['type'] for src in self['src']])
+
+        if (src_types == 'surf').all():
+            src_type = 'Surface with %d vertices' % self['nsource']
+        elif (src_types == 'vol').all():
+            src_type = 'Volume with %d grid points' % self['nsource']
+        elif (src_types == 'discrete').all():
+            src_type = 'Discrete with %d dipoles' % self['nsource']
+        else:
+            count_string = ''
+            if (src_types == 'surf').any():
+                count_string += '%d surface, ' % (src_types == 'surf').sum()
+            if (src_types == 'vol').any():
+                count_string += '%d volume, ' % (src_types == 'vol').sum()
+            if (src_types == 'discrete').any():
+                count_string += '%d discrete, ' \
+                                % (src_types == 'discrete').sum()
+            count_string = count_string.rstrip(', ')
+            src_type = ('Mixed (%s) with %d vertices'
+                        % (count_string, self['nsource']))
+
+        if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
+            src_ori = 'Unknown'
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
+            src_ori = 'Fixed'
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            src_ori = 'Free'
+
+        return src_type, src_ori
+
+    def __repr__(self):
+        """Summarize forward info instead of printing all."""
+        entr = '<Forward'
+        nchan = len(pick_types(self['info'], meg=True, eeg=False,
+                               exclude=[]))
+        entr += ' | MEG channels: %d' % nchan
+        nchan = len(pick_types(self['info'], meg=False, eeg=True,
+                               exclude=[]))
+        entr += ' | EEG channels: %d' % nchan
+        src_type, src_ori = self._get_src_type_and_ori_for_repr()
+        entr += ' | ' + src_type
+        entr += ' | Source orientation: %s' % src_ori
+        entr += '>'
+        return entr
+
+    @property
+    def ch_names(self):
+        return self['sol']['row_names']
+
+
+def _block_diag(A, n):
+    """Construct a block diagonal from a packed structure.
+
+    If A is not sparse, a sparse block-diagonal matrix "bd" is returned:
+    "A" is ma x na, comprising bdn = (na / n) submatrices of shape ma x n,
+    and the submatrices are placed down the diagonal of the
+    (ma * bdn) x na result.
+
+    Parameters
+    ----------
+    A : array, shape (ma, na)
+        The matrix.
+    n : int
+        The block size.
+
+    Returns
+    -------
+    bd : sparse matrix
+        The block diagonal matrix.
+    """
+    from scipy import sparse
+    if sparse.issparse(A):  # the sparse reversal is not implemented
+        raise NotImplementedError('sparse reversal not implemented yet')
+    ma, na = A.shape
+    bdn = na // int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError('Width of matrix must be a multiple of n')
+
+    tmp = np.arange(ma * bdn, dtype=np.int64).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int64)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int64)[:, None]
+    jj = jj.T.ravel()  # column indices for each sparse bd
+
+    bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
+
+def _get_tag_int(fid, node, name, id_):
+    """Check we have an appropriate tag."""
+    tag = find_tag(fid, node, id_)
+    if tag is None:
+        fid.close()
+        raise ValueError(name + ' tag not found')
+    return int(tag.data)
+
+
+def _read_one(fid, node):
+    """Read all interesting stuff for one forward solution."""
+    # This function assumes the fid is open as a context manager
+    if node is None:
+        return None
+
+    one = Forward()
+    one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
+                                     FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+    one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
+                                      FIFF.FIFF_MNE_COORD_FRAME)
+    one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
+                                  FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
+                                FIFF.FIFF_NCHAN)
+    try:
+        one['sol'] = _read_named_matrix(fid, node,
+                                        FIFF.FIFF_MNE_FORWARD_SOLUTION,
+                                        transpose=True)
+        one['_orig_sol'] = one['sol']['data'].copy()
+    except Exception:
+        logger.error('Forward solution data not found')
+        raise
+
+    try:
+        fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
+        one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,
+                                             transpose=True)
+        one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
+    except Exception:
+        one['sol_grad'] = None
+
+    if one['sol']['data'].shape[0] != one['nchan'] or \
+            (one['sol']['data'].shape[1] != one['nsource'] and
+             one['sol']['data'].shape[1] != 3 * one['nsource']):
+        raise ValueError('Forward solution matrix has wrong dimensions')
+
+    if one['sol_grad'] is not None:
+        if one['sol_grad']['data'].shape[0] != one['nchan'] or \
+                (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
+                 one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
+            raise ValueError('Forward solution gradient matrix has '
+                             'wrong dimensions')
+
+    return one
+
+
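+# A minimal, hedged illustration (not from the original sources) of what the
+# _block_diag() helper above builds: the packed 2 x 3 submatrices of a 2 x 6
+# input land down the diagonal of a sparse 4 x 6 result. This is the layout
+# convert_forward_solution() below multiplies against the gain matrix, e.g.
+# fwd['_orig_sol'] * _block_diag(fwd['source_nn'].T, 1). The name
+# _example_block_diag is illustrative, not part of MNE.
+def _example_block_diag():
+    A = np.arange(12.).reshape(2, 6)  # ma=2, na=6 -> bdn=2 blocks of 2 x 3
+    bd = _block_diag(A, 3)            # sparse, shape (ma * bdn, na) = (4, 6)
+    assert bd.shape == (2 * 2, 6)
+    assert np.allclose(bd.toarray()[:2, :3], A[:, :3])  # first block
+    assert np.allclose(bd.toarray()[2:, 3:], A[:, 3:])  # second block
+    return bd
+
+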
+@fill_doc
+def _read_forward_meas_info(tree, fid):
+    """Read light measurement info from forward operator.
+
+    Parameters
+    ----------
+    tree : tree
+        FIF tree structure.
+    fid : file id
+        The file id.
+
+    Returns
+    -------
+    %(info_not_none)s
+    """
+    # This function assumes fid is being used as a context manager
+    info = Info()
+    info._unlocked = True
+
+    # Information from the MRI file
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        raise ValueError('No parent MRI information found in operator')
+    parent_mri = parent_mri[0]
+
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
+    info['mri_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
+    info['mri_id'] = tag.data if tag is not None else None
+
+    # Information from the MEG file
+    parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    if len(parent_meg) == 0:
+        raise ValueError('No parent MEG information found in operator')
+    parent_meg = parent_meg[0]
+
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
+    info['meas_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
+    info['meas_id'] = tag.data if tag is not None else None
+
+    # Add channel information
+    info['chs'] = chs = list()
+    for k in range(parent_meg['nent']):
+        kind = parent_meg['directory'][k].kind
+        pos = parent_meg['directory'][k].pos
+        if kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+    ch_names_mapping = _read_extended_ch_info(chs, parent_meg, fid)
+    info._update_redundant()
+
+    # Get the MRI <-> head coordinate transformation
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    coord_head = FIFF.FIFFV_COORD_HEAD
+    coord_mri = FIFF.FIFFV_COORD_MRI
+    coord_device = FIFF.FIFFV_COORD_DEVICE
+    coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+    if tag is None:
+        raise ValueError('MRI/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_mri and cand['to'] == coord_head:
+        info['mri_head_t'] = cand
+    else:
+        raise ValueError('MRI/head coordinate transformation not found')
+
+    # Get the MEG device <-> head coordinate transformation
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        raise ValueError('MEG/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_device and cand['to'] == coord_head:
+        info['dev_head_t'] = cand
+    elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
+        info['ctf_head_t'] = cand
+    else:
+        raise ValueError('MEG/head coordinate transformation not found')
+
+    info['bads'] = _read_bad_channels(
+        fid, parent_meg, ch_names_mapping=ch_names_mapping)
+    # clean up our bad list, old versions could have non-existent bads
+    info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]
+
+    # Check if a custom reference has been applied
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF)
+    if tag is None:
+        tag = find_tag(fid, parent_mri, 236)  # Constant 236 used before v0.11
+
+    info['custom_ref_applied'] = int(tag.data) if tag is not None else False
+    info._unlocked = False
+    return info
+
+
+def _subject_from_forward(forward):
+    """Get subject id from forward operator."""
+    return forward['src']._subject
+
+
+@verbose
+def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):
+    """Merge loaded MEG and EEG forward dicts into one dict."""
+    if megfwd is not None and eegfwd is not None:
+        if (megfwd['sol']['data'].shape[1] !=
eegfwd['sol']['data'].shape[1] or + megfwd['source_ori'] != eegfwd['source_ori'] or + megfwd['nsource'] != eegfwd['nsource'] or + megfwd['coord_frame'] != eegfwd['coord_frame']): + raise ValueError('The MEG and EEG forward solutions do not match') + + fwd = megfwd + fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']] + fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']] + fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow'] + + fwd['sol']['row_names'] = (fwd['sol']['row_names'] + + eegfwd['sol']['row_names']) + if fwd['sol_grad'] is not None: + fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'], + eegfwd['sol_grad']['data']] + fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'], + eegfwd['_orig_sol_grad']] + fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] + + eegfwd['sol_grad']['nrow']) + fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] + + eegfwd['sol_grad']['row_names']) + + fwd['nchan'] = fwd['nchan'] + eegfwd['nchan'] + logger.info(' MEG and EEG forward solutions combined') + elif megfwd is not None: + fwd = megfwd + else: + fwd = eegfwd + return fwd + + +@verbose +def read_forward_solution(fname, include=(), exclude=(), verbose=None): + """Read a forward solution a.k.a. lead field. + + Parameters + ---------- + fname : str + The file name, which should end with -fwd.fif or -fwd.fif.gz. + include : list, optional + List of names of channels to include. If empty all channels + are included. + exclude : list, optional + List of names of channels to exclude. If empty include all + channels. + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution. + + See Also + -------- + write_forward_solution, make_forward_solution + + Notes + ----- + Forward solutions, which are derived from an original forward solution with + free orientation, are always stored on disk as forward solution with free + orientation in X/Y/Z RAS coordinates. To apply any transformation to the + forward operator (surface orientation, fixed orientation) please apply + :func:`convert_forward_solution` after reading the forward solution with + :func:`read_forward_solution`. + + Forward solutions, which are derived from an original forward solution with + fixed orientation, are stored on disk as forward solution with fixed + surface-based orientations. Please note that the transformation to + surface-based, fixed orientation cannot be reverted after loading the + forward solution with :func:`read_forward_solution`. + """ + check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz', + '_fwd.fif', '_fwd.fif.gz')) + fname = _check_fname(fname=fname, must_exist=True, overwrite='read') + # Open the file, create directory + logger.info('Reading forward solution from %s...' 
% fname) + f, tree, _ = fiff_open(fname) + with f as fid: + # Find all forward solutions + fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + if len(fwds) == 0: + raise ValueError('No forward solutions in %s' % fname) + + # Parent MRI data + parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + if len(parent_mri) == 0: + raise ValueError('No parent MRI information in %s' % fname) + parent_mri = parent_mri[0] + + src = _read_source_spaces_from_tree(fid, tree, patch_stats=False) + for s in src: + s['id'] = find_source_space_hemi(s) + + fwd = None + + # Locate and read the forward solutions + megnode = None + eegnode = None + for k in range(len(fwds)): + tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS) + if tag is None: + raise ValueError('Methods not listed for one of the forward ' + 'solutions') + + if tag.data == FIFF.FIFFV_MNE_MEG: + megnode = fwds[k] + elif tag.data == FIFF.FIFFV_MNE_EEG: + eegnode = fwds[k] + + megfwd = _read_one(fid, megnode) + if megfwd is not None: + if is_fixed_orient(megfwd): + ori = 'fixed' + else: + ori = 'free' + logger.info(' Read MEG forward solution (%d sources, ' + '%d channels, %s orientations)' + % (megfwd['nsource'], megfwd['nchan'], ori)) + + eegfwd = _read_one(fid, eegnode) + if eegfwd is not None: + if is_fixed_orient(eegfwd): + ori = 'fixed' + else: + ori = 'free' + logger.info(' Read EEG forward solution (%d sources, ' + '%d channels, %s orientations)' + % (eegfwd['nsource'], eegfwd['nchan'], ori)) + + fwd = _merge_meg_eeg_fwds(megfwd, eegfwd) + + # Get the MRI <-> head coordinate transformation + tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS) + if tag is None: + raise ValueError('MRI/head coordinate transformation not found') + mri_head_t = tag.data + if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or + mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD): + mri_head_t = invert_transform(mri_head_t) + if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or + mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD): + fid.close() + raise ValueError('MRI/head coordinate transformation not ' + 'found') + fwd['mri_head_t'] = mri_head_t + + # + # get parent MEG info + # + fwd['info'] = _read_forward_meas_info(tree, fid) + + # MNE environment + parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV) + if len(parent_env) > 0: + parent_env = parent_env[0] + tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR) + if tag is not None: + with fwd['info']._unlock(): + fwd['info']['working_dir'] = tag.data + tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE) + if tag is not None: + with fwd['info']._unlock(): + fwd['info']['command_line'] = tag.data + + # Transform the source spaces to the correct coordinate frame + # if necessary + + # Make sure forward solution is in either the MRI or HEAD coordinate frame + if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD): + raise ValueError('Only forward solutions computed in MRI or head ' + 'coordinates are acceptable') + + # Transform each source space to the HEAD or MRI coordinate frame, + # depending on the coordinate frame of the forward solution + # NOTE: the function transform_surface_to will also work on discrete and + # volume sources + nuse = 0 + for s in src: + try: + s = transform_surface_to(s, fwd['coord_frame'], mri_head_t) + except Exception as inst: + raise ValueError('Could not transform source space (%s)' % inst) + + nuse += s['nuse'] + + # Make sure the number of sources match after transformation + if nuse != fwd['nsource']: + raise 
ValueError('Source spaces do not match the forward solution.') + + logger.info(' Source spaces transformed to the forward solution ' + 'coordinate frame') + fwd['src'] = src + + # Handle the source locations and orientations + fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :] + for ss in src], axis=0) + + # Store original source orientations + fwd['_orig_source_ori'] = fwd['source_ori'] + + # Deal with include and exclude + pick_channels_forward(fwd, include=include, exclude=exclude, copy=False) + + if is_fixed_orient(fwd, orig=True): + fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :] + for _src in fwd['src']], axis=0) + fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI + fwd['surf_ori'] = True + else: + fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3)) + fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI + fwd['surf_ori'] = False + return Forward(fwd) + + +@verbose +def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, + copy=True, use_cps=True, verbose=None): + """Convert forward solution between different source orientations. + + Parameters + ---------- + fwd : Forward + The forward solution to modify. + surf_ori : bool, optional (default False) + Use surface-based source coordinate system? Note that force_fixed=True + implies surf_ori=True. + force_fixed : bool, optional (default False) + If True, force fixed source orientation mode. + copy : bool + Whether to return a new instance or modify in place. + %(use_cps)s + %(verbose)s + + Returns + ------- + fwd : Forward + The modified forward solution. + """ + from scipy import sparse + fwd = fwd.copy() if copy else fwd + + if force_fixed is True: + surf_ori = True + + if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed: + raise ValueError( + 'Forward operator was generated with sources from a ' + 'volume source space. Conversion to fixed orientation is not ' + 'possible. Consider using a discrete source space if you have ' + 'meaningful normal orientations.') + + if surf_ori and use_cps: + if any(s.get('patch_inds') is not None for s in fwd['src']): + logger.info(' Average patch normals will be employed in ' + 'the rotation to the local surface coordinates..' + '..') + else: + use_cps = False + logger.info(' No patch info available. The standard source ' + 'space normals will be employed in the rotation ' + 'to the local surface coordinates....') + + # We need to change these entries (only): + # 1. source_nn + # 2. sol['data'] + # 3. sol['ncol'] + # 4. sol_grad['data'] + # 5. sol_grad['ncol'] + # 6. 
source_ori + + if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_cps): + # Fixed + fwd['source_nn'] = np.concatenate([_get_src_nn(s, use_cps) + for s in fwd['src']], axis=0) + if not is_fixed_orient(fwd, orig=True): + logger.info(' Changing to fixed-orientation forward ' + 'solution with surface-based source orientations...') + fix_rot = _block_diag(fwd['source_nn'].T, 1) + # newer versions of numpy require explicit casting here, so *= no + # longer works + fwd['sol']['data'] = (fwd['_orig_sol'] * + fix_rot).astype('float32') + fwd['sol']['ncol'] = fwd['nsource'] + if fwd['sol_grad'] is not None: + x = sparse.block_diag([fix_rot] * 3) + fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod + fwd['sol_grad']['ncol'] = 3 * fwd['nsource'] + fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI + fwd['surf_ori'] = True + + elif surf_ori: # Free, surf-oriented + # Rotate the local source coordinate systems + fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3)) + logger.info(' Converting to surface-based source orientations...') + # Actually determine the source orientations + pp = 0 + for s in fwd['src']: + if s['type'] in ['surf', 'discrete']: + nn = _get_src_nn(s, use_cps) + stop = pp + 3 * s['nuse'] + fwd['source_nn'][pp:stop] = _normal_orth(nn).reshape(-1, 3) + pp = stop + del nn + else: + pp += 3 * s['nuse'] + + # Rotate the solution components as well + if force_fixed: + fwd['source_nn'] = fwd['source_nn'][2::3, :] + fix_rot = _block_diag(fwd['source_nn'].T, 1) + # newer versions of numpy require explicit casting here, so *= no + # longer works + fwd['sol']['data'] = (fwd['_orig_sol'] * + fix_rot).astype('float32') + fwd['sol']['ncol'] = fwd['nsource'] + if fwd['sol_grad'] is not None: + x = sparse.block_diag([fix_rot] * 3) + fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod + fwd['sol_grad']['ncol'] = 3 * fwd['nsource'] + fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI + fwd['surf_ori'] = True + else: + surf_rot = _block_diag(fwd['source_nn'].T, 3) + fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot + fwd['sol']['ncol'] = 3 * fwd['nsource'] + if fwd['sol_grad'] is not None: + x = sparse.block_diag([surf_rot] * 3) + fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod + fwd['sol_grad']['ncol'] = 9 * fwd['nsource'] + fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI + fwd['surf_ori'] = True + + else: # Free, cartesian + logger.info(' Cartesian source orientations...') + fwd['source_nn'] = np.tile(np.eye(3), (fwd['nsource'], 1)) + fwd['sol']['data'] = fwd['_orig_sol'].copy() + fwd['sol']['ncol'] = 3 * fwd['nsource'] + if fwd['sol_grad'] is not None: + fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy() + fwd['sol_grad']['ncol'] = 9 * fwd['nsource'] + fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI + fwd['surf_ori'] = False + + logger.info(' [done]') + + return fwd + + +@verbose +def write_forward_solution(fname, fwd, overwrite=False, verbose=None): + """Write forward solution to a file. + + Parameters + ---------- + fname : str + File name to save the forward solution to. It should end with + ``-fwd.fif`` or ``-fwd.fif.gz``. + fwd : Forward + Forward solution. + %(overwrite)s + %(verbose)s + + See Also + -------- + read_forward_solution + + Notes + ----- + Forward solutions, which are derived from an original forward solution with + free orientation, are always stored on disk as forward solution with free + orientation in X/Y/Z RAS coordinates. Transformations (surface orientation, + fixed orientation) will be reverted. 
To reapply any transformation to the + forward operator please apply :func:`convert_forward_solution` after + reading the forward solution with :func:`read_forward_solution`. + + Forward solutions, which are derived from an original forward solution with + fixed orientation, are stored on disk as forward solution with fixed + surface-based orientations. Please note that the transformation to + surface-based, fixed orientation cannot be reverted after loading the + forward solution with :func:`read_forward_solution`. + """ + check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz', + '_fwd.fif', '_fwd.fif.gz')) + + # check for file existence and expand `~` if present + fname = _check_fname(fname, overwrite) + with start_and_end_file(fname) as fid: + _write_forward_solution(fid, fwd) + + +def _write_forward_solution(fid, fwd): + start_block(fid, FIFF.FIFFB_MNE) + + # + # MNE env + # + start_block(fid, FIFF.FIFFB_MNE_ENV) + write_id(fid, FIFF.FIFF_BLOCK_ID) + data = fwd['info'].get('working_dir', None) + if data is not None: + write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data) + data = fwd['info'].get('command_line', None) + if data is not None: + write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data) + end_block(fid, FIFF.FIFFB_MNE_ENV) + + # + # Information from the MRI file + # + start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file']) + if fwd['info']['mri_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id']) + # store the MRI to HEAD transform in MRI file + write_coord_trans(fid, fwd['info']['mri_head_t']) + end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + + # write measurement info + write_forward_meas_info(fid, fwd['info']) + + # invert our original source space transform + src = list() + for s in fwd['src']: + s = deepcopy(s) + try: + # returns source space to original coordinate frame + # usually MRI + s = transform_surface_to(s, fwd['mri_head_t']['from'], + fwd['mri_head_t']) + except Exception as inst: + raise ValueError('Could not transform source space (%s)' % inst) + src.append(s) + + # + # Write the source spaces (again) + # + _write_source_spaces_to_fid(fid, src) + n_vert = sum([ss['nuse'] for ss in src]) + if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI: + n_col = n_vert + else: + n_col = 3 * n_vert + + # Undo transformations + sol = fwd['_orig_sol'].copy() + if fwd['sol_grad'] is not None: + sol_grad = fwd['_orig_sol_grad'].copy() + else: + sol_grad = None + + if fwd['surf_ori'] is True: + if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI: + warn('The forward solution, which is stored on disk now, is based ' + 'on a forward solution with fixed orientation. Please note ' + 'that the transformation to surface-based, fixed orientation ' + 'cannot be reverted after loading the forward solution with ' + 'read_forward_solution.', RuntimeWarning) + else: + warn('This forward solution is based on a forward solution with ' + 'free orientation. The original forward solution is stored ' + 'on disk in X/Y/Z RAS coordinates. Any transformation ' + '(surface orientation or fixed orientation) will be ' + 'reverted. 
To reapply any transformation to the forward ' + 'operator please apply convert_forward_solution after ' + 'reading the forward solution with read_forward_solution.', + RuntimeWarning) + + # + # MEG forward solution + # + picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False, + exclude=[]) + picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False, + exclude=[]) + n_meg = len(picks_meg) + n_eeg = len(picks_eeg) + row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg] + row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg] + + if n_meg > 0: + meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col, + row_names=row_names_meg, col_names=[]) + _transpose_named_matrix(meg_solution) + start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame']) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, + fwd['_orig_source_ori']) + write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) + write_int(fid, FIFF.FIFF_NCHAN, n_meg) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution) + if sol_grad is not None: + meg_solution_grad = dict(data=sol_grad[picks_meg], + nrow=n_meg, ncol=n_col * 3, + row_names=row_names_meg, col_names=[]) + _transpose_named_matrix(meg_solution_grad) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, + meg_solution_grad) + end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + + # + # EEG forward solution + # + if n_eeg > 0: + eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col, + row_names=row_names_eeg, col_names=[]) + _transpose_named_matrix(eeg_solution) + start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame']) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, + fwd['_orig_source_ori']) + write_int(fid, FIFF.FIFF_NCHAN, n_eeg) + write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution) + if sol_grad is not None: + eeg_solution_grad = dict(data=sol_grad[picks_eeg], + nrow=n_eeg, ncol=n_col * 3, + row_names=row_names_eeg, col_names=[]) + _transpose_named_matrix(eeg_solution_grad) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, + eeg_solution_grad) + end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + + end_block(fid, FIFF.FIFFB_MNE) + + +def is_fixed_orient(forward, orig=False): + """Check if the forward operator is fixed orientation. + + Parameters + ---------- + forward : instance of Forward + The forward. + orig : bool + If True, consider the original source orientation. + If False (default), consider the current source orientation. + + Returns + ------- + fixed_ori : bool + Whether or not it is fixed orientation. + """ + if orig: # if we want to know about the original version + fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI) + else: # most of the time we want to know about the current version + fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI) + return fixed_ori + + +@fill_doc +def write_forward_meas_info(fid, info): + """Write measurement info stored in forward solution. 
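+
+ This writes the parent measurement file block (channel info, bad
+ channels, and the MEG device-to-head transform) into an open
+ forward-solution file.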
+ + Parameters + ---------- + fid : file id + The file id + %(info_not_none)s + """ + info._check_consistency() + # + # Information from the MEG file + # + start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file']) + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + # get transformation from CTF and DEVICE to HEAD coordinate frame + meg_head_t = info.get('dev_head_t', info.get('ctf_head_t')) + if meg_head_t is None: + fid.close() + raise ValueError('Head<-->sensor transform not found') + write_coord_trans(fid, meg_head_t) + + ch_names_mapping = dict() + if 'chs' in info: + # Channel information + ch_names_mapping = _make_ch_names_mapping(info['chs']) + write_int(fid, FIFF.FIFF_NCHAN, len(info['chs'])) + _write_ch_infos(fid, info['chs'], False, ch_names_mapping) + if 'bads' in info and len(info['bads']) > 0: + # Bad channels + bads = _rename_list(info['bads'], ch_names_mapping) + start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads) + end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + + end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) + + +def _select_orient_forward(forward, info, noise_cov=None, copy=True): + """Prepare forward solution for inverse solvers.""" + # fwd['sol']['row_names'] may be different order from fwd['info']['chs'] + fwd_sol_ch_names = forward['sol']['row_names'] + all_ch_names = set(fwd_sol_ch_names) + all_bads = set(info['bads']) + if noise_cov is not None: + all_ch_names &= set(noise_cov['names']) + all_bads |= set(noise_cov['bads']) + else: + noise_cov = dict(bads=info['bads']) + ch_names = [c['ch_name'] for c in info['chs'] + if c['ch_name'] not in all_bads and + c['ch_name'] in all_ch_names] + + if not len(info['bads']) == len(noise_cov['bads']) or \ + not all(b in noise_cov['bads'] for b in info['bads']): + logger.info('info["bads"] and noise_cov["bads"] do not match, ' + 'excluding bad channels from both') + + # check the compensation grade + _check_compensation_grade(forward['info'], info, 'forward') + + n_chan = len(ch_names) + logger.info("Computing inverse operator with %d channels." % n_chan) + forward = pick_channels_forward(forward, ch_names, ordered=True, + copy=copy) + info_idx = [info['ch_names'].index(name) for name in ch_names] + info_picked = pick_info(info, info_idx) + forward['info']._check_consistency() + info_picked._check_consistency() + return forward, info_picked + + +def _triage_loose(src, loose, fixed='auto'): + _validate_type(loose, (str, dict, 'numeric'), 'loose') + _validate_type(fixed, (str, bool), 'fixed') + orig_loose = loose + if isinstance(loose, str): + _check_option('loose', loose, ('auto',)) + if fixed is True: + loose = 0. + else: # False or auto + loose = 0.2 if src.kind == 'surface' else 1. + src_types = set(_src_kind_dict[s['type']] for s in src) + if not isinstance(loose, dict): + loose = float(loose) + loose = {key: loose for key in src_types} + loose_keys = set(loose.keys()) + if loose_keys != src_types: + raise ValueError( + f'loose, if dict, must have keys {sorted(src_types)} to match the ' + f'source space, got {sorted(loose_keys)}') + # if fixed is auto it can be ignored, if it's False it can be ignored, + # only really need to care about fixed=True + if fixed is True: + if not all(v == 0. for v in loose.values()): + raise ValueError( + 'When using fixed=True, loose must be 0. or "auto", ' + f'got {orig_loose}') + elif fixed is False: + if any(v == 0. 
for v in loose.values()): + raise ValueError( + 'If loose==0., then fixed must be True or "auto", got False') + del fixed + + for key, this_loose in loose.items(): + if key != 'surface' and this_loose != 1: + raise ValueError( + 'loose parameter has to be 1 or "auto" for non-surface ' + f'source spaces, got loose["{key}"] = {this_loose}') + if not 0 <= this_loose <= 1: + raise ValueError( + f'loose ({key}) must be between 0 and 1, got {this_loose}') + return loose + + +@verbose +def compute_orient_prior(forward, loose='auto', verbose=None): + """Compute orientation prior. + + Parameters + ---------- + forward : instance of Forward + Forward operator. + %(loose)s + %(verbose)s + + Returns + ------- + orient_prior : ndarray, shape (n_sources,) + Orientation priors. + + See Also + -------- + compute_depth_prior + """ + _validate_type(forward, Forward, 'forward') + n_sources = forward['sol']['data'].shape[1] + + loose = _triage_loose(forward['src'], loose) + orient_prior = np.ones(n_sources, dtype=np.float64) + if is_fixed_orient(forward): + if any(v > 0. for v in loose.values()): + raise ValueError('loose must be 0. with forward operator ' + 'with fixed orientation, got %s' % (loose,)) + return orient_prior + if all(v == 1. for v in loose.values()): + return orient_prior + # We actually need non-unity prior, compute it for each source space + # separately + if not forward['surf_ori']: + raise ValueError('Forward operator is not oriented in surface ' + 'coordinates. loose parameter should be 1. ' + 'not %s.' % (loose,)) + start = 0 + logged = dict() + for s in forward['src']: + this_type = _src_kind_dict[s['type']] + use_loose = loose[this_type] + if not logged.get(this_type): + if use_loose == 1.: + name = 'free' + else: + name = 'fixed' if use_loose == 0. else 'loose' + logger.info(f'Applying {name.ljust(5)} dipole orientations to ' + f'{this_type.ljust(7)} source spaces: {use_loose}') + logged[this_type] = True + stop = start + 3 * s['nuse'] + orient_prior[start:stop:3] *= use_loose + orient_prior[start + 1:stop:3] *= use_loose + start = stop + return orient_prior + + +def _restrict_gain_matrix(G, info): + """Restrict gain matrix entries for optimal depth weighting.""" + # Figure out which ones have been used + if len(info['chs']) != G.shape[0]: + raise ValueError('G.shape[0] (%d) and length of info["chs"] (%d) ' + 'do not match' % (G.shape[0], len(info['chs']))) + for meg, eeg, kind in ( + ('grad', False, 'planar'), + ('mag', False, 'magnetometer or axial gradiometer'), + (False, True, 'EEG')): + sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[]) + if len(sel) > 0: + logger.info(' %d %s channels' % (len(sel), kind)) + break + else: + warn('Could not find MEG or EEG channels to limit depth channels') + sel = slice(None) + return G[sel] + + +@verbose +def compute_depth_prior(forward, info, exp=0.8, limit=10.0, + limit_depth_chs=False, combine_xyz='spectral', + noise_cov=None, rank=None, verbose=None): + """Compute depth prior for depth weighting. + + Parameters + ---------- + forward : instance of Forward + The forward solution. + %(info_not_none)s + exp : float + Exponent for the depth weighting, must be between 0 and 1. + limit : float | None + The upper bound on depth weighting. + Can be None to be bounded by the largest finite prior. + limit_depth_chs : bool | 'whiten' + How to deal with multiple channel types in depth weighting. + The default is True, which whitens based on the source sensitivity + of the highest-SNR channel type. See Notes for details. + + .. 
versionchanged:: 0.18
+ Added the "whiten" option.
+ combine_xyz : 'spectral' | 'fro'
+ When a loose (or free) orientation is used, how the depth weighting
+ for each triplet should be calculated.
+ If 'spectral', use the squared spectral norm of Gk.
+ If 'fro', use the squared Frobenius norm of Gk.
+
+ .. versionadded:: 0.18
+ noise_cov : instance of Covariance | None
+ The noise covariance to use to whiten the gain matrix when
+ ``limit_depth_chs='whiten'``.
+
+ .. versionadded:: 0.18
+ %(rank_none)s
+
+ .. versionadded:: 0.18
+ %(verbose)s
+
+ Returns
+ -------
+ depth_prior : ndarray, shape (n_vertices,)
+ The depth prior.
+
+ See Also
+ --------
+ compute_orient_prior
+
+ Notes
+ -----
+ The defaults used by the minimum norm code and sparse solvers differ.
+ In particular, the values for MNE are::
+
+ compute_depth_prior(..., limit=10., limit_depth_chs=True,
+ combine_xyz='spectral')
+
+ In sparse solvers and LCMV, the values are::
+
+ compute_depth_prior(..., limit=None, limit_depth_chs='whiten',
+ combine_xyz='fro')
+
+ The ``limit_depth_chs`` argument can take the following values:
+
+ * :data:`python:True` (default)
+ Use only grad channels in depth weighting (equivalent to MNE C
+ minimum-norm code). If grad channels aren't present, only mag
+ channels will be used (if no mag, then eeg). This makes the depth
+ prior dependent only on the sensor geometry (and relationship
+ to the sources).
+ * ``'whiten'``
+ Compute a whitener and apply it to the gain matrix before computing
+ the depth prior. In this case ``noise_cov`` must not be None.
+ Whitening the gain matrix makes the depth prior
+ depend on both sensor geometry and the data of interest captured
+ by the noise covariance (e.g., projections, SNR).
+
+ .. versionadded:: 0.18
+ * :data:`python:False`
+ Use all channels. Not recommended since the depth weighting will be
+ biased toward whichever channel type has the largest values in
+ SI units (such as EEG being orders of magnitude larger than MEG).
+ """
+ from ..cov import Covariance, compute_whitener
+ _validate_type(forward, Forward, 'forward')
+ patch_areas = forward.get('patch_areas', None)
+ is_fixed_ori = is_fixed_orient(forward)
+ G = forward['sol']['data']
+ logger.info('Creating the depth weighting matrix...')
+ _validate_type(noise_cov, (Covariance, None), 'noise_cov',
+ 'Covariance or None')
+ _validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')
+ if isinstance(limit_depth_chs, str):
+ if limit_depth_chs != 'whiten':
+ raise ValueError('limit_depth_chs, if str, must be "whiten", got '
+ '%s' % (limit_depth_chs,))
+ if not isinstance(noise_cov, Covariance):
+ raise ValueError('With limit_depth_chs="whiten", noise_cov must be'
+ ' a Covariance, got %s' % (type(noise_cov),))
+ if combine_xyz is not False: # private / expert option
+ _check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))
+
+ # If possible, pick best depth-weighting channels
+ if limit_depth_chs is True:
+ G = _restrict_gain_matrix(G, info)
+ elif limit_depth_chs == 'whiten':
+ whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,
+ verbose=False)
+ G = np.dot(whitener, G)
+
+ # Compute the gain matrix
+ if is_fixed_ori or combine_xyz in ('fro', False):
+ d = np.sum(G ** 2, axis=0)
+ if not (is_fixed_ori or combine_xyz is False):
+ d = d.reshape(-1, 3).sum(axis=1)
+ # Spherical leadfield can be zero at the center
+ d[d == 0.]
= np.min(d[d != 0.])
+ else: # 'spectral'
+ # n_pos = G.shape[1] // 3
+ # The following is equivalent to this, but 4-10x faster
+ # d = np.zeros(n_pos)
+ # for k in range(n_pos):
+ # Gk = G[:, 3 * k:3 * (k + 1)]
+ # x = np.dot(Gk.T, Gk)
+ # d[k] = linalg.svdvals(x)[0]
+ G.shape = (G.shape[0], -1, 3)
+ d = np.linalg.norm(np.einsum('svj,svk->vjk', G, G), # vector dot prods
+ ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)
+ G.shape = (G.shape[0], -1)
+
+ # XXX Currently the fwd solns never have "patch_areas" defined
+ if patch_areas is not None:
+ if not is_fixed_ori and combine_xyz is False:
+ patch_areas = np.repeat(patch_areas, 3)
+ d /= patch_areas ** 2
+ logger.info(' Patch areas taken into account in the depth '
+ 'weighting')
+
+ w = 1.0 / d
+ if limit is not None:
+ ws = np.sort(w)
+ weight_limit = limit ** 2
+ if limit_depth_chs is False:
+ # match old mne-python behavior
+ # we used to do ind = np.argmin(ws), but this is 0 by sort above
+ n_limit = 0
+ limit = ws[0] * weight_limit
+ else:
+ # match C code behavior
+ limit = ws[-1]
+ n_limit = len(d)
+ if ws[-1] > weight_limit * ws[0]:
+ ind = np.where(ws > weight_limit * ws[0])[0][0]
+ limit = ws[ind]
+ n_limit = ind
+
+ logger.info(' limit = %d/%d = %f'
+ % (n_limit + 1, len(d),
+ np.sqrt(limit / ws[0])))
+ scale = 1.0 / limit
+ logger.info(' scale = %g exp = %g' % (scale, exp))
+ w = np.minimum(w / limit, 1)
+ depth_prior = w ** exp
+
+ if not (is_fixed_ori or combine_xyz is False):
+ depth_prior = np.repeat(depth_prior, 3)
+
+ return depth_prior
+
+
+def _stc_src_sel(src, stc, on_missing='raise',
+ extra=', likely due to forward calculations'):
+ """Select the vertex indices of a source space using a source estimate."""
+ if isinstance(stc, list):
+ vertices = stc
+ else:
+ assert isinstance(stc, _BaseSourceEstimate)
+ vertices = stc.vertices
+ del stc
+ if not len(src) == len(vertices):
+ raise RuntimeError('Mismatch between number of source spaces (%s) and '
+ 'STC vertices (%s)' % (len(src), len(vertices)))
+ src_sels, stc_sels, out_vertices = [], [], []
+ src_offset = stc_offset = 0
+ for s, v in zip(src, vertices):
+ joint_sel = np.intersect1d(s['vertno'], v)
+ src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)
+ src_offset += len(s['vertno'])
+ idx = np.searchsorted(v, joint_sel)
+ stc_sels.append(idx + stc_offset)
+ stc_offset += len(v)
+ out_vertices.append(np.array(v)[idx])
+ src_sel = np.concatenate(src_sels)
+ stc_sel = np.concatenate(stc_sels)
+ assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)
+
+ n_stc = sum(len(v) for v in vertices)
+ n_joint = len(src_sel)
+ if n_joint != n_stc:
+ msg = ('Only %i of %i SourceEstimate %s found in '
+ 'source space%s'
+ % (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',
+ extra))
+ _on_missing(on_missing, msg)
+ return src_sel, stc_sel, out_vertices
+
+
+def _fill_measurement_info(info, fwd, sfreq, data):
+ """Fill the measurement info of a Raw or Evoked object."""
+ sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
+ info = pick_info(info, sel)
+ info['bads'] = []
+
+ now = time()
+ sec = np.floor(now)
+ usec = 1e6 * (now - sec)
+
+ # this is probably correct based on what's done in meas_info.py...
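+ # The fields below mirror a real measurement file: meas_id comes from the
+ # forward's info, meas_date is the current time, and lowpass is set to the
+ # Nyquist frequency (sfreq / 2) of the simulated data.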
+ with info._unlock(check_after=True):
+ info.update(meas_id=fwd['info']['meas_id'], file_id=info['meas_id'],
+ meas_date=_stamp_to_dt((int(sec), int(usec))),
+ highpass=0., lowpass=sfreq / 2., sfreq=sfreq, projs=[])
+
+ # reorder data (which is in fwd order) to match that of info
+ order = [fwd['sol']['row_names'].index(name) for name in info['ch_names']]
+ data = data[order]
+
+ return info, data
+
+
+@verbose
+def _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',
+ use_cps=True, verbose=None):
+ """Apply forward model and return data, times, ch_names."""
+ _validate_type(stc, _BaseSourceEstimate, 'stc', 'SourceEstimate')
+ _validate_type(fwd, Forward, 'fwd')
+ if isinstance(stc, _BaseVectorSourceEstimate):
+ vector = True
+ fwd = convert_forward_solution(fwd, force_fixed=False, surf_ori=False)
+ else:
+ vector = False
+ if not is_fixed_orient(fwd):
+ fwd = convert_forward_solution(fwd, force_fixed=True,
+ use_cps=use_cps)
+
+ if np.all(stc.data > 0):
+ warn('Source estimate only contains currents with positive values. '
+ 'Use pick_ori="normal" when computing the inverse to compute '
+ 'currents, not current magnitudes.')
+
+ _check_stc_units(stc)
+
+ src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
+ gain = fwd['sol']['data']
+ stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel
+ times = stc.times[start:stop].copy()
+ stc_data = stc.data[stc_sel, ..., start:stop].reshape(-1, len(times))
+ del stc
+ if vector:
+ gain = gain.reshape(len(gain), gain.shape[1] // 3, 3)
+ gain = gain[:, src_sel].reshape(len(gain), -1)
+ # save some memory if possible
+
+ logger.info('Projecting source estimate to sensor space...')
+ data = np.dot(gain, stc_data)
+ logger.info('[done]')
+ return data, times
+
+
+@verbose
+def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,
+ on_missing='raise', verbose=None):
+ """Project source space currents to sensor space using a forward operator.
+
+ The sensor space data is computed for all channels present in fwd. Use
+ pick_channels_forward or pick_types_forward to restrict the solution to a
+ subset of channels.
+
+ The function returns an Evoked object, which is constructed using the
+ provided info. The info should be from the same MEG system on
+ which the original data was acquired. An exception will be raised if the
+ forward operator contains channels that are not present in the info.
+
+ Parameters
+ ----------
+ fwd : Forward
+ Forward operator to use.
+ stc : SourceEstimate
+ The source estimate from which the sensor space data is computed.
+ %(info_not_none)s
+ start : int, optional
+ Index of first time sample (index, not time in seconds).
+ stop : int, optional
+ Index of first time sample not to include (index, not time in seconds).
+ %(use_cps)s
+
+ .. versionadded:: 0.15
+ %(on_missing_fwd)s
+ Default is "raise".
+
+ .. versionadded:: 0.18
+ %(verbose)s
+
+ Returns
+ -------
+ evoked : Evoked
+ Evoked object with computed sensor space data.
+
+ See Also
+ --------
+ apply_forward_raw: Compute sensor space data and return a Raw object.
+ """
+ _validate_type(info, Info, 'info')
+ _validate_type(fwd, Forward, 'forward')
+ info._check_consistency()
+
+ # make sure info contains all channels in fwd
+ for ch_name in fwd['sol']['row_names']:
+ if ch_name not in info['ch_names']:
+ raise ValueError('Channel %s of forward operator not present in '
+ 'info.' 
% ch_name)
+
+ # project the source estimate to the sensor space
+ data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing,
+ use_cps=use_cps)
+
+ # fill the measurement info
+ sfreq = float(1.0 / stc.tstep)
+ info, data = _fill_measurement_info(info, fwd, sfreq, data)
+
+ evoked = EvokedArray(data, info, times[0], nave=1)
+
+ evoked.times = times
+ evoked._update_first_last()
+
+ return evoked
+
+
+@verbose
+def apply_forward_raw(fwd, stc, info, start=None, stop=None,
+ on_missing='raise', use_cps=True, verbose=None):
+ """Project source space currents to sensor space using a forward operator.
+
+ The sensor space data is computed for all channels present in fwd. Use
+ pick_channels_forward or pick_types_forward to restrict the solution to a
+ subset of channels.
+
+ The function returns a Raw object, which is constructed using the provided
+ info. The info object should be from the same MEG system on which the
+ original data was acquired. An exception will be raised if the forward
+ operator contains channels that are not present in the info.
+
+ Parameters
+ ----------
+ fwd : Forward
+ Forward operator to use.
+ stc : SourceEstimate
+ The source estimate from which the sensor space data is computed.
+ %(info_not_none)s
+ start : int, optional
+ Index of first time sample (index, not time in seconds).
+ stop : int, optional
+ Index of first time sample not to include (index, not time in seconds).
+ %(on_missing_fwd)s
+ Default is "raise".
+
+ .. versionadded:: 0.18
+ %(use_cps)s
+
+ .. versionadded:: 0.21
+ %(verbose)s
+
+ Returns
+ -------
+ raw : Raw object
+ Raw object with computed sensor space data.
+
+ See Also
+ --------
+ apply_forward: Compute sensor space data and return an Evoked object.
+ """
+ # make sure info contains all channels in fwd
+ for ch_name in fwd['sol']['row_names']:
+ if ch_name not in info['ch_names']:
+ raise ValueError('Channel %s of forward operator not present in '
+ 'info.' % ch_name)
+
+ # project the source estimate to the sensor space
+ data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing,
+ use_cps=use_cps)
+
+ sfreq = 1.0 / stc.tstep
+ info, data = _fill_measurement_info(info, fwd, sfreq, data)
+ with info._unlock():
+ info['projs'] = []
+ # store sensor data in Raw object using the info
+ raw = RawArray(data, info, first_samp=int(np.round(times[0] * sfreq)))
+ raw._projector = None
+ return raw
+
+
+@fill_doc
+def restrict_forward_to_stc(fwd, stc, on_missing='ignore'):
+ """Restrict forward operator to active sources in a source estimate.
+
+ Parameters
+ ----------
+ fwd : instance of Forward
+ Forward operator.
+ stc : instance of SourceEstimate
+ Source estimate.
+ %(on_missing_fwd)s
+ Default is "ignore".
+
+ .. versionadded:: 0.18
+
+ Returns
+ -------
+ fwd_out : instance of Forward
+ Restricted forward operator.
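+ Only the gain-matrix columns for sources that remain in ``stc`` are
+ kept.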
+
+ See Also
+ --------
+ restrict_forward_to_label
+ """
+ _validate_type(on_missing, str, 'on_missing')
+ _check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))
+ src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)
+ del stc
+ return _restrict_forward_to_src_sel(fwd, src_sel)
+
+
+def _restrict_forward_to_src_sel(fwd, src_sel):
+ fwd_out = deepcopy(fwd)
+ # figure out the vertno we are keeping
+ idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]
+ for si, s in enumerate(fwd['src'])], axis=-1)
+ assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2
+ assert idx_sel.shape[1] == fwd['nsource']
+ idx_sel = idx_sel[:, src_sel]
+
+ fwd_out['source_rr'] = fwd['source_rr'][src_sel]
+ fwd_out['nsource'] = len(src_sel)
+
+ if is_fixed_orient(fwd):
+ idx = src_sel
+ if fwd['sol_grad'] is not None:
+ idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ else:
+ idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ if fwd['sol_grad'] is not None:
+ idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
+
+ fwd_out['source_nn'] = fwd['source_nn'][idx]
+ fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
+ if fwd['sol_grad'] is not None:
+ fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]
+ fwd_out['sol']['ncol'] = len(idx)
+
+ if is_fixed_orient(fwd, orig=True):
+ idx = src_sel
+ if fwd['sol_grad'] is not None:
+ idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ else:
+ idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ if fwd['sol_grad'] is not None:
+ idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
+
+ fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]
+ if fwd['sol_grad'] is not None:
+ fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]
+
+ vertices = [idx_sel[1][idx_sel[0] == si]
+ for si in range(len(fwd_out['src']))]
+ _set_source_space_vertices(fwd_out['src'], vertices)
+
+ return fwd_out
+
+
+def restrict_forward_to_label(fwd, labels):
+ """Restrict forward operator to labels.
+
+ Parameters
+ ----------
+ fwd : Forward
+ Forward operator.
+ labels : instance of Label | list
+ Label object or list of label objects.
+
+ Returns
+ -------
+ fwd_out : instance of Forward
+ Restricted forward operator.
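+ Sources are restricted, per hemisphere, to the vertices of the given
+ labels.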
+
+ See Also
+ --------
+ restrict_forward_to_stc
+ """
+ vertices = [np.array([], int), np.array([], int)]
+
+ if not isinstance(labels, list):
+ labels = [labels]
+
+ # Get vertices separately for each hemisphere from all labels
+ for label in labels:
+ _validate_type(label, Label, "label", "Label or list")
+ i = 0 if label.hemi == 'lh' else 1
+ vertices[i] = np.append(vertices[i], label.vertices)
+ # Remove duplicates and sort
+ vertices = [np.unique(vert_hemi) for vert_hemi in vertices]
+
+ fwd_out = deepcopy(fwd)
+ fwd_out['source_rr'] = np.zeros((0, 3))
+ fwd_out['nsource'] = 0
+ fwd_out['source_nn'] = np.zeros((0, 3))
+ fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
+ fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))
+ if fwd['sol_grad'] is not None:
+ fwd_out['sol_grad']['data'] = np.zeros(
+ (fwd['sol_grad']['data'].shape[0], 0))
+ fwd_out['_orig_sol_grad'] = np.zeros(
+ (fwd['_orig_sol_grad'].shape[0], 0))
+ fwd_out['sol']['ncol'] = 0
+ nuse_lh = fwd['src'][0]['nuse']
+
+ for i in range(2):
+ fwd_out['src'][i]['vertno'] = np.array([], int)
+ fwd_out['src'][i]['nuse'] = 0
+ fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
+ fwd_out['src'][i]['inuse'].fill(0)
+ fwd_out['src'][i]['use_tris'] = np.array([[]], int)
+ fwd_out['src'][i]['nuse_tri'] = np.array([0])
+
+ # src_sel is idx to cols in fwd that are in any label per hemi
+ src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])
+ src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)
+
+ # Reconstruct each src
+ vertno = fwd['src'][i]['vertno'][src_sel]
+ fwd_out['src'][i]['inuse'][vertno] = 1
+ fwd_out['src'][i]['nuse'] += len(vertno)
+ fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]
+
+ # Reconstruct part of fwd that is not sol data
+ src_sel += i * nuse_lh # Add column shift to right hemi
+ fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
+ fwd['source_rr'][src_sel]])
+ fwd_out['nsource'] += len(src_sel)
+
+ if is_fixed_orient(fwd):
+ idx = src_sel
+ if fwd['sol_grad'] is not None:
+ idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ else:
+ idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ if fwd['sol_grad'] is not None:
+ idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
+
+ fwd_out['source_nn'] = np.vstack(
+ [fwd_out['source_nn'], fwd['source_nn'][idx]])
+ fwd_out['sol']['data'] = np.hstack(
+ [fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])
+ if fwd['sol_grad'] is not None:
+ fwd_out['sol_grad']['data'] = np.hstack(
+ [fwd_out['sol_grad']['data'],
+ fwd['sol_grad']['data'][:, idx_grad]])
+ fwd_out['sol']['ncol'] += len(idx)
+
+ if is_fixed_orient(fwd, orig=True):
+ idx = src_sel
+ if fwd['sol_grad'] is not None:
+ idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ else:
+ idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+ if fwd['sol_grad'] is not None:
+ idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()
+
+ fwd_out['_orig_sol'] = np.hstack(
+ [fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])
+ if fwd['sol_grad'] is not None:
+ fwd_out['_orig_sol_grad'] = np.hstack(
+ [fwd_out['_orig_sol_grad'],
+ fwd['_orig_sol_grad'][:, idx_grad]])
+
+ return fwd_out
+
+
+def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
+ mindist=None, bem=None, mri=None, trans=None,
+ eeg=True, meg=True, fixed=False, grad=False,
+ mricoord=False, overwrite=False, subjects_dir=None,
+ verbose=None):
+ """Calculate a forward solution for a subject using MNE-C routines.
+
+ This is kept around for testing purposes.
+
+ This function wraps the mne_do_forward_solution binary, so the MNE-C
+ command-line tools must be installed and accessible from Python.
+
+ Parameters
+ ----------
+ subject : str
+ Name of the subject.
+ meas : Raw | Epochs | Evoked | str
+ If Raw or Epochs, a temporary evoked file will be created and
+ saved to a temporary directory. If str, then it should be a
+ filename to a file with measurement information the mne
+ command-line tools can understand (i.e., raw or evoked).
+ fname : str | None
+ Destination forward solution filename. If None, the solution
+ will be created in a temporary directory, loaded, and deleted.
+ src : str | None
+ Source space name. If None, the MNE default is used.
+ spacing : str
+ The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
+ recursively subdivided icosahedron, or ``'oct#'`` for a recursively
+ subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
+ mindist : float | str | None
+ Minimum distance of sources from inner skull surface (in mm).
+ If None, the MNE default value is used. If string, 'all'
+ indicates to include all points.
+ bem : str | None
+ Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
+ (Default), the MNE default will be used.
+ mri : str | None
+ The name of the trans file in FIF format.
+ If None, trans must not be None.
+ trans : dict | str | None
+ File name of the trans file in text format.
+ If None, mri must not be None.
+ eeg : bool
+ If True (Default), include EEG computations.
+ meg : bool
+ If True (Default), include MEG computations.
+ fixed : bool
+ If True, make a fixed-orientation forward solution (Default:
+ False). Note that fixed-orientation inverses can still be
+ created from free-orientation forward solutions.
+ grad : bool
+ If True, compute the gradient of the field with respect to the
+ dipole coordinates as well (Default: False).
+ mricoord : bool
+ If True, calculate in MRI coordinates (Default: False).
+ %(overwrite)s
+ %(subjects_dir)s
+ %(verbose)s
+
+ See Also
+ --------
+ make_forward_solution
+
+ Returns
+ -------
+ fwd : Forward
+ The generated forward solution.
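+
+ Examples
+ --------
+ A sketch of a typical call (the subject, measurement file, and trans
+ file names below are illustrative, and the MNE-C tools plus a FreeSurfer
+ subjects directory are assumed to be available)::
+
+ fwd = _do_forward_solution('sample', 'sample_audvis_raw.fif',
+ mri='sample-trans.fif',
+ bem='sample-5120', mindist=5.)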
+ """ + if not has_mne_c(): + raise RuntimeError('mne command line tools could not be found') + + # check for file existence + temp_dir = tempfile.mkdtemp() + if fname is None: + fname = op.join(temp_dir, 'temp-fwd.fif') + _check_fname(fname, overwrite) + _validate_type(subject, "str", "subject") + + # check for meas to exist as string, or try to make evoked + if isinstance(meas, str): + if not op.isfile(meas): + raise IOError('measurement file "%s" could not be found' % meas) + elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)): + meas_file = op.join(temp_dir, 'info.fif') + write_info(meas_file, meas.info) + meas = meas_file + else: + raise ValueError('meas must be string, Raw, Epochs, or Evoked') + + # deal with trans/mri + if mri is not None and trans is not None: + raise ValueError('trans and mri cannot both be specified') + if mri is None and trans is None: + # MNE allows this to default to a trans/mri in the subject's dir, + # but let's be safe here and force the user to pass us a trans/mri + raise ValueError('Either trans or mri must be specified') + + if trans is not None: + _validate_type(trans, "str", "trans") + if not op.isfile(trans): + raise IOError('trans file "%s" not found' % trans) + if mri is not None: + # deal with trans + if not isinstance(mri, str): + if isinstance(mri, dict): + mri_data = deepcopy(mri) + mri = op.join(temp_dir, 'mri-trans.fif') + try: + write_trans(mri, mri_data) + except Exception: + raise IOError('mri was a dict, but could not be ' + 'written to disk as a transform file') + else: + raise ValueError('trans must be a string or dict (trans)') + if not op.isfile(mri): + raise IOError('trans file "%s" could not be found' % trans) + + # deal with meg/eeg + if not meg and not eeg: + raise ValueError('meg or eeg (or both) must be True') + + path, fname = op.split(fname) + if not op.splitext(fname)[1] == '.fif': + raise ValueError('Forward name does not end with .fif') + path = op.abspath(path) + + # deal with mindist + if mindist is not None: + if isinstance(mindist, str): + if not mindist.lower() == 'all': + raise ValueError('mindist, if string, must be "all"') + mindist = ['--all'] + else: + mindist = ['--mindist', '%g' % mindist] + + # src, spacing, bem + for element, name, kind in zip((src, spacing, bem), + ("src", "spacing", "bem"), + ('path-like', 'str', 'path-like')): + if element is not None: + _validate_type(element, kind, name, "%s or None" % kind) + + # put together the actual call + cmd = ['mne_do_forward_solution', + '--subject', subject, + '--meas', meas, + '--fwd', fname, + '--destdir', path] + if src is not None: + cmd += ['--src', src] + if spacing is not None: + if spacing.isdigit(): + pass # spacing in mm + else: + # allow both "ico4" and "ico-4" style values + match = re.match(r"(oct|ico)-?(\d+)$", spacing) + if match is None: + raise ValueError("Invalid spacing parameter: %r" % spacing) + spacing = '-'.join(match.groups()) + cmd += ['--spacing', spacing] + if mindist is not None: + cmd += mindist + if bem is not None: + cmd += ['--bem', bem] + if mri is not None: + cmd += ['--mri', '%s' % mri] + if trans is not None: + cmd += ['--trans', '%s' % trans] + if not meg: + cmd.append('--eegonly') + if not eeg: + cmd.append('--megonly') + if fixed: + cmd.append('--fixed') + if grad: + cmd.append('--grad') + if mricoord: + cmd.append('--mricoord') + if overwrite: + cmd.append('--overwrite') + + env = os.environ.copy() + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + env['SUBJECTS_DIR'] = subjects_dir + + try: + 
logger.info('Running forward solution generation command with ' + 'subjects_dir %s' % subjects_dir) + run_subprocess(cmd, env=env) + except Exception: + raise + else: + fwd = read_forward_solution(op.join(path, fname), verbose=False) + finally: + shutil.rmtree(temp_dir, ignore_errors=True) + return fwd + + +@verbose +def average_forward_solutions(fwds, weights=None, verbose=None): + """Average forward solutions. + + Parameters + ---------- + fwds : list of Forward + Forward solutions to average. Each entry (dict) should be a + forward solution. + weights : array | None + Weights to apply to each forward solution in averaging. If None, + forward solutions will be equally weighted. Weights must be + non-negative, and will be adjusted to sum to one. + %(verbose)s + + Returns + ------- + fwd : Forward + The averaged forward solution. + """ + # check for fwds being a list + _validate_type(fwds, list, "fwds") + if not len(fwds) > 0: + raise ValueError('fwds must not be empty') + + # check weights + if weights is None: + weights = np.ones(len(fwds)) + weights = np.asanyarray(weights) # in case it's a list, convert it + if not np.all(weights >= 0): + raise ValueError('weights must be non-negative') + if not len(weights) == len(fwds): + raise ValueError('weights must be None or the same length as fwds') + w_sum = np.sum(weights) + if not w_sum > 0: + raise ValueError('weights cannot all be zero') + weights /= w_sum + + # check our forward solutions + for fwd in fwds: + # check to make sure it's a forward solution + _validate_type(fwd, dict, "each entry in fwds", "dict") + # check to make sure the dict is actually a fwd + check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol', + 'source_rr', 'source_ori', 'surf_ori', 'coord_frame', + 'mri_head_t', 'nsource'] + if not all(key in fwd for key in check_keys): + raise KeyError('forward solution dict does not have all standard ' + 'entries, cannot compute average.') + + # check forward solution compatibility + if any(fwd['sol'][k] != fwds[0]['sol'][k] + for fwd in fwds[1:] for k in ['nrow', 'ncol']): + raise ValueError('Forward solutions have incompatible dimensions') + if any(fwd[k] != fwds[0][k] for fwd in fwds[1:] + for k in ['source_ori', 'surf_ori', 'coord_frame']): + raise ValueError('Forward solutions have incompatible orientations') + + # actually average them (solutions and gradients) + fwd_ave = deepcopy(fwds[0]) + fwd_ave['sol']['data'] *= weights[0] + fwd_ave['_orig_sol'] *= weights[0] + for fwd, w in zip(fwds[1:], weights[1:]): + fwd_ave['sol']['data'] += w * fwd['sol']['data'] + fwd_ave['_orig_sol'] += w * fwd['_orig_sol'] + if fwd_ave['sol_grad'] is not None: + fwd_ave['sol_grad']['data'] *= weights[0] + fwd_ave['_orig_sol_grad'] *= weights[0] + for fwd, w in zip(fwds[1:], weights[1:]): + fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data'] + fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad'] + return fwd_ave diff --git a/python/libs/mne/forward/tests/__init__.py b/python/libs/mne/forward/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/forward/tests/test_field_interpolation.py b/python/libs/mne/forward/tests/test_field_interpolation.py new file mode 100644 index 0000000..ed82e61 --- /dev/null +++ b/python/libs/mne/forward/tests/test_field_interpolation.py @@ -0,0 +1,275 @@ +from os import path as op + +import numpy as np +from numpy.polynomial import legendre +from numpy.testing import (assert_allclose, assert_array_equal, assert_equal, + assert_array_almost_equal) +from 
scipy.interpolate import interp1d + +import pytest + +import mne +from mne.forward import _make_surface_mapping, make_field_map +from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg, + _get_legen_table, _do_cross_dots) +from mne.forward._make_forward import _create_meg_coils +from mne.forward._field_interpolation import _setup_dots +from mne.surface import get_meg_helmet_surf, get_head_surf +from mne.datasets import testing +from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs +from mne.io import read_raw_fif + + +base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') +evoked_fname = op.join(base_dir, 'test-ave.fif') +raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif') + +data_path = testing.data_path(download=False) +trans_fname = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-trans.fif') +subjects_dir = op.join(data_path, 'subjects') + + +@testing.requires_testing_data +def test_field_map_ctf(): + """Test that field mapping can be done with CTF data.""" + raw = read_raw_fif(raw_ctf_fname).crop(0, 1) + raw.apply_gradient_compensation(3) + events = make_fixed_length_events(raw, duration=0.5) + evoked = Epochs(raw, events).average() + evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster + # smoke test + make_field_map(evoked, trans=trans_fname, subject='sample', + subjects_dir=subjects_dir) + + +def test_legendre_val(): + """Test Legendre polynomial (derivative) equivalence.""" + rng = np.random.RandomState(0) + # check table equiv + xs = np.linspace(-1., 1., 1000) + n_terms = 100 + + # True, numpy + vals_np = legendre.legvander(xs, n_terms - 1) + + # Table approximation + for nc, interp in zip([100, 50], ['nearest', 'linear']): + lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True) + lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, + axis=0) + vals_i = lut_fun(xs) + # Need a "1:" here because we omit the first coefficient in our table! + assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i, + rtol=1e-2, atol=5e-3) + + # Now let's look at our sums + ctheta = rng.rand(20, 30) * 2.0 - 1.0 + beta = rng.rand(20, 30) * 0.8 + c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact) + c1.shape = beta.shape + + # compare to numpy + n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] + coeffs = np.zeros((n_terms,) + beta.shape) + coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) * + (2.0 * n + 1.0) * (2.0 * n + 1.0) / n) + # can't use tensor=False here b/c it isn't in old numpy + c2 = np.empty((20, 30)) + for ci1 in range(20): + for ci2 in range(30): + c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2], + coeffs[:, ci1, ci2]) + assert_allclose(c1, c2, 1e-2, 1e-3) # close enough... 
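+
+ # NB: the next block only smoke-tests the MEG sums at two table
+ # resolutions; the resulting coefficient sets are not compared
+ # numerically against each other.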
+ + # compare fast and slow for MEG + ctheta = rng.rand(20 * 30) * 2.0 - 1.0 + beta = rng.rand(20 * 30) * 0.8 + lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True) + fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0) + coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False) + lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True) + fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0) + coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False) + + +def test_legendre_table(): + """Test Legendre table calculation.""" + # double-check our table generation + n = 10 + for ch_type in ['eeg', 'meg']: + lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True) + lut1 = lut1[:, :n - 1].copy() + n_fact1 = n_fact1[:n - 1].copy() + lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True) + assert_allclose(lut1, lut2) + assert_allclose(n_fact1, n_fact2) + + +@testing.requires_testing_data +def test_make_field_map_eeg(): + """Test interpolation of EEG field onto head.""" + evoked = read_evokeds(evoked_fname, condition='Left Auditory') + evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads + surf = get_head_surf('sample', subjects_dir=subjects_dir) + # we must have trans if surface is in MRI coords + pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg') + + evoked.pick_types(meg=False, eeg=True) + fmd = make_field_map(evoked, trans_fname, + subject='sample', subjects_dir=subjects_dir) + + # trans is necessary for EEG only + pytest.raises(RuntimeError, make_field_map, evoked, None, + subject='sample', subjects_dir=subjects_dir) + + fmd = make_field_map(evoked, trans_fname, + subject='sample', subjects_dir=subjects_dir) + assert len(fmd) == 1 + assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf + assert len(fmd[0]['ch_names']) == 59 + + +@testing.requires_testing_data +@pytest.mark.slowtest +def test_make_field_map_meg(): + """Test interpolation of MEG field onto helmet | head.""" + evoked = read_evokeds(evoked_fname, condition='Left Auditory') + info = evoked.info + surf = get_meg_helmet_surf(info) + # let's reduce the number of channels by a bunch to speed it up + info['bads'] = info['ch_names'][:200] + # bad ch_type + pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo') + # bad mode + pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg', + mode='foo') + # no picks + evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True) + pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info, + surf, 'meg') + # bad surface def + nn = surf['nn'] + del surf['nn'] + pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg') + surf['nn'] = nn + cf = surf['coord_frame'] + del surf['coord_frame'] + pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg') + surf['coord_frame'] = cf + + # now do it with make_field_map + evoked.pick_types(meg=True, eeg=False) + evoked.info.normalize_proj() # avoid projection warnings + fmd = make_field_map(evoked, None, + subject='sample', subjects_dir=subjects_dir) + assert (len(fmd) == 1) + assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf + assert len(fmd[0]['ch_names']) == 106 + + pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar') + + # now test the make_field_map on head surf for MEG + evoked.pick_types(meg=True, eeg=False) + evoked.info.normalize_proj() + fmd = make_field_map(evoked, trans_fname, meg_surf='head', + subject='sample', 
subjects_dir=subjects_dir) + assert len(fmd) == 1 + assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf + assert len(fmd[0]['ch_names']) == 106 + + pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar', + subjects_dir=subjects_dir, trans=trans_fname) + + +@testing.requires_testing_data +def test_make_field_map_meeg(): + """Test making a M/EEG field map onto helmet & head.""" + evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0] + picks = pick_types(evoked.info, meg=True, eeg=True) + picks = picks[::10] + evoked.pick_channels([evoked.ch_names[p] for p in picks]) + evoked.info.normalize_proj() + maps = make_field_map(evoked, trans_fname, subject='sample', + subjects_dir=subjects_dir, n_jobs=1, verbose='debug') + assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head + assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet + # reasonable ranges + maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0) + mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2) + assert_equal(len(maxs), len(maps)) + for map_, max_, min_ in zip(maps, maxs, mins): + assert_allclose(map_['data'].max(), max_, rtol=5e-2) + assert_allclose(map_['data'].min(), min_, rtol=5e-2) + # calculated from correct looking mapping on 2015/12/26 + assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088, + atol=1e-3, rtol=1e-3) + assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245, + atol=1e-3, rtol=1e-3) + + +def _setup_args(info): + """Configure args for test_as_meg_type_evoked.""" + coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t']) + int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg') + my_origin = np.array([0., 0., 0.04]) + args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin, + ch_type='meg', lut=lut_fun, n_fact=n_fact) + return args_dict + + +@testing.requires_testing_data +def test_as_meg_type_evoked(): + """Test interpolation of data on to virtual channels.""" + # validation tests + raw = read_raw_fif(raw_fname) + events = mne.find_events(raw) + picks = pick_types(raw.info, meg=True, eeg=True, stim=True, + ecg=True, eog=True, include=['STI 014'], + exclude='bads') + epochs = mne.Epochs(raw, events, picks=picks) + evoked = epochs.average() + + with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"): + evoked.as_type('meg') + with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"): + evoked.copy().pick_types(meg='grad').as_type('meg') + + # channel names + ch_names = evoked.info['ch_names'] + virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1]) + virt_evoked.info.normalize_proj() + virt_evoked = virt_evoked.as_type('mag') + assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names'])) + + # pick from and to channels + evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3]) + evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3]) + + info_from, info_to = evoked_from.info, evoked_to.info + + # set up things + args1, args2 = _setup_args(info_from), _setup_args(info_to) + args1.update(coils2=args2['coils1']) + args2.update(coils2=args1['coils1']) + + # test cross dots + cross_dots1 = _do_cross_dots(**args1) + cross_dots2 = _do_cross_dots(**args2) + + assert_array_almost_equal(cross_dots1, cross_dots2.T) + + # correlation test + evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy() + data1 = evoked.pick_types(meg='grad').data.ravel() + data2 = evoked.as_type('grad').data.ravel() + assert 
(np.corrcoef(data1, data2)[0, 1] > 0.95) + + # Do it with epochs + virt_epochs = \ + epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1]) + virt_epochs.info.normalize_proj() + virt_epochs = virt_epochs.as_type('mag') + assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names'])) + assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data) diff --git a/python/libs/mne/forward/tests/test_forward.py b/python/libs/mne/forward/tests/test_forward.py new file mode 100644 index 0000000..cfc9cd8 --- /dev/null +++ b/python/libs/mne/forward/tests/test_forward.py @@ -0,0 +1,447 @@ +import os.path as op +import gc + +import pytest +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_equal, + assert_array_equal, assert_allclose) + +from mne.datasets import testing +from mne import (read_forward_solution, apply_forward, apply_forward_raw, + average_forward_solutions, write_forward_solution, + convert_forward_solution, SourceEstimate, pick_types_forward, + read_evokeds, VectorSourceEstimate) +from mne.io import read_info +from mne.label import read_label +from mne.utils import requires_mne, run_subprocess +from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label, + Forward, is_fixed_orient, compute_orient_prior, + compute_depth_prior) +from mne.channels import equalize_channels + +data_path = testing.data_path(download=False) +fname_meeg = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_meeg_grad = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif') + +fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', + 'data', 'test-ave.fif') + + +def assert_forward_allclose(f1, f2, rtol=1e-7): + """Compare two potentially converted forward solutions.""" + assert_allclose(f1['sol']['data'], f2['sol']['data'], rtol=rtol) + assert f1['sol']['ncol'] == f2['sol']['ncol'] + assert f1['sol']['ncol'] == f1['sol']['data'].shape[1] + assert_allclose(f1['source_nn'], f2['source_nn'], rtol=rtol) + if f1['sol_grad'] is not None: + assert (f2['sol_grad'] is not None) + assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data']) + assert f1['sol_grad']['ncol'] == f2['sol_grad']['ncol'] + assert f1['sol_grad']['ncol'] == f1['sol_grad']['data'].shape[1] + else: + assert (f2['sol_grad'] is None) + assert f1['source_ori'] == f2['source_ori'] + assert f1['surf_ori'] == f2['surf_ori'] + assert f1['src'][0]['coord_frame'] == f1['src'][0]['coord_frame'] + + +@testing.requires_testing_data +def test_convert_forward(): + """Test converting forward solution between different representations.""" + fwd = read_forward_solution(fname_meeg_grad) + fwd_repr = repr(fwd) + assert ('306' in fwd_repr) + assert ('60' in fwd_repr) + assert (fwd_repr) + assert (isinstance(fwd, Forward)) + # look at surface orientation + fwd_surf = convert_forward_solution(fwd, surf_ori=True) + # go back + fwd_new = convert_forward_solution(fwd_surf, surf_ori=False) + assert (repr(fwd_new)) + assert (isinstance(fwd_new, Forward)) + assert_forward_allclose(fwd, fwd_new) + del fwd_new + gc.collect() + + # now go to fixed + fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=True, + force_fixed=True, use_cps=False) + del fwd_surf + gc.collect() + assert (repr(fwd_fixed)) + assert (isinstance(fwd_fixed, Forward)) + assert (is_fixed_orient(fwd_fixed)) + # now go back to cartesian (original condition) + fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False, + force_fixed=False) + assert 
(repr(fwd_new)) + assert (isinstance(fwd_new, Forward)) + assert_forward_allclose(fwd, fwd_new) + del fwd, fwd_new, fwd_fixed + gc.collect() + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_io_forward(tmp_path): + """Test IO for forward solutions.""" + # do extensive tests with MEEG + grad + n_channels, n_src = 366, 108 + fwd = read_forward_solution(fname_meeg_grad) + assert (isinstance(fwd, Forward)) + fwd = read_forward_solution(fname_meeg_grad) + fwd = convert_forward_solution(fwd, surf_ori=True) + leadfield = fwd['sol']['data'] + assert_equal(leadfield.shape, (n_channels, n_src)) + assert_equal(len(fwd['sol']['row_names']), n_channels) + fname_temp = tmp_path / 'test-fwd.fif' + with pytest.warns(RuntimeWarning, match='stored on disk'): + write_forward_solution(fname_temp, fwd, overwrite=True) + + fwd = read_forward_solution(fname_meeg_grad) + fwd = convert_forward_solution(fwd, surf_ori=True) + fwd_read = read_forward_solution(fname_temp) + fwd_read = convert_forward_solution(fwd_read, surf_ori=True) + leadfield = fwd_read['sol']['data'] + assert_equal(leadfield.shape, (n_channels, n_src)) + assert_equal(len(fwd_read['sol']['row_names']), n_channels) + assert_equal(len(fwd_read['info']['chs']), n_channels) + assert ('dev_head_t' in fwd_read['info']) + assert ('mri_head_t' in fwd_read) + assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data']) + + fwd = read_forward_solution(fname_meeg) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=False) + with pytest.warns(RuntimeWarning, match='stored on disk'): + write_forward_solution(fname_temp, fwd, overwrite=True) + fwd_read = read_forward_solution(fname_temp) + fwd_read = convert_forward_solution(fwd_read, surf_ori=True, + force_fixed=True, use_cps=False) + assert (repr(fwd_read)) + assert (isinstance(fwd_read, Forward)) + assert (is_fixed_orient(fwd_read)) + assert_forward_allclose(fwd, fwd_read) + + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=True) + leadfield = fwd['sol']['data'] + assert_equal(leadfield.shape, (n_channels, 1494 / 3)) + assert_equal(len(fwd['sol']['row_names']), n_channels) + assert_equal(len(fwd['info']['chs']), n_channels) + assert ('dev_head_t' in fwd['info']) + assert ('mri_head_t' in fwd) + assert (fwd['surf_ori']) + with pytest.warns(RuntimeWarning, match='stored on disk'): + write_forward_solution(fname_temp, fwd, overwrite=True) + fwd_read = read_forward_solution(fname_temp) + fwd_read = convert_forward_solution(fwd_read, surf_ori=True, + force_fixed=True, use_cps=True) + assert (repr(fwd_read)) + assert (isinstance(fwd_read, Forward)) + assert (is_fixed_orient(fwd_read)) + assert_forward_allclose(fwd, fwd_read) + + fwd = read_forward_solution(fname_meeg_grad) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=True) + leadfield = fwd['sol']['data'] + assert_equal(leadfield.shape, (n_channels, n_src / 3)) + assert_equal(len(fwd['sol']['row_names']), n_channels) + assert_equal(len(fwd['info']['chs']), n_channels) + assert ('dev_head_t' in fwd['info']) + assert ('mri_head_t' in fwd) + assert (fwd['surf_ori']) + with pytest.warns(RuntimeWarning, match='stored on disk'): + write_forward_solution(fname_temp, fwd, overwrite=True) + fwd_read = read_forward_solution(fname_temp) + fwd_read = convert_forward_solution(fwd_read, surf_ori=True, + force_fixed=True, use_cps=True) + assert (repr(fwd_read)) + assert (isinstance(fwd_read, Forward)) + assert (is_fixed_orient(fwd_read)) + 
assert_forward_allclose(fwd, fwd_read) + + # test warnings on bad filenames + fwd = read_forward_solution(fname_meeg_grad) + fwd_badname = tmp_path / 'test-bad-name.fif.gz' + with pytest.warns(RuntimeWarning, match='end with'): + write_forward_solution(fwd_badname, fwd) + with pytest.warns(RuntimeWarning, match='end with'): + read_forward_solution(fwd_badname) + + fwd = read_forward_solution(fname_meeg) + write_forward_solution(fname_temp, fwd, overwrite=True) + fwd_read = read_forward_solution(fname_temp) + assert_forward_allclose(fwd, fwd_read) + + +@testing.requires_testing_data +def test_apply_forward(): + """Test projection of source space data to sensor space.""" + start = 0 + stop = 5 + n_times = stop - start - 1 + sfreq = 10.0 + t_start = 0.123 + + fwd = read_forward_solution(fname_meeg) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=True) + fwd = pick_types_forward(fwd, meg=True) + assert isinstance(fwd, Forward) + + vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']] + stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) + stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) + + gain_sum = np.sum(fwd['sol']['data'], axis=1) + + # Evoked + evoked = read_evokeds(fname_evoked, condition=0) + evoked.pick_types(meg=True) + with pytest.warns(RuntimeWarning, match='only .* positive values'): + evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop) + data = evoked.data + times = evoked.times + + # do some tests + assert_array_almost_equal(evoked.info['sfreq'], sfreq) + assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum) + assert_array_almost_equal(times[0], t_start) + assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq) + + # vector + stc_vec = VectorSourceEstimate( + fwd['source_nn'][:, :, np.newaxis] * stc.data[:, np.newaxis], + stc.vertices, stc.tmin, stc.tstep) + with pytest.warns(RuntimeWarning, match='very large'): + evoked_2 = apply_forward(fwd, stc_vec, evoked.info) + assert np.abs(evoked_2.data).mean() > 1e-5 + assert_allclose(evoked.data, evoked_2.data, atol=1e-10) + + # Raw + with pytest.warns(RuntimeWarning, match='only .* positive values'): + raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start, + stop=stop) + data, times = raw_proj[:, :] + + # do some tests + assert_array_almost_equal(raw_proj.info['sfreq'], sfreq) + assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum) + atol = 1. 
/ sfreq + assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol) + assert_allclose(raw_proj.last_samp / sfreq, + t_start + (n_times - 1) / sfreq, atol=atol) + + +@testing.requires_testing_data +def test_restrict_forward_to_stc(tmp_path): + """Test restriction of source space to source SourceEstimate.""" + start = 0 + stop = 5 + n_times = stop - start - 1 + sfreq = 10.0 + t_start = 0.123 + + fwd = read_forward_solution(fname_meeg) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=True) + fwd = pick_types_forward(fwd, meg=True) + + vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]] + stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) + stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) + + fwd_out = restrict_forward_to_stc(fwd, stc) + assert (isinstance(fwd_out, Forward)) + + assert_equal(fwd_out['sol']['ncol'], 20) + assert_equal(fwd_out['src'][0]['nuse'], 15) + assert_equal(fwd_out['src'][1]['nuse'], 5) + assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15]) + assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5]) + + fwd = read_forward_solution(fname_meeg) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False) + fwd = pick_types_forward(fwd, meg=True) + + vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]] + stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) + stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) + + fwd_out = restrict_forward_to_stc(fwd, stc) + + assert_equal(fwd_out['sol']['ncol'], 60) + assert_equal(fwd_out['src'][0]['nuse'], 15) + assert_equal(fwd_out['src'][1]['nuse'], 5) + assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15]) + assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5]) + + # Test saving the restricted forward object. This only works if all fields + # are properly accounted for. 
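+ # [editor's note -- illustrative, not part of upstream MNE] in user code + # the API under test is simply: + # fwd_small = mne.forward.restrict_forward_to_stc(fwd, stc) + # which keeps only the gain-matrix columns for vertices present in stc.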
+ fname_copy = tmp_path / 'copy-fwd.fif' + with pytest.warns(RuntimeWarning, match='stored on disk'): + write_forward_solution(fname_copy, fwd_out, overwrite=True) + fwd_out_read = read_forward_solution(fname_copy) + fwd_out_read = convert_forward_solution(fwd_out_read, surf_ori=True, + force_fixed=False) + assert_forward_allclose(fwd_out, fwd_out_read) + + +@testing.requires_testing_data +def test_restrict_forward_to_label(tmp_path): + """Test restriction of source space to label.""" + fwd = read_forward_solution(fname_meeg) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, + use_cps=True) + fwd = pick_types_forward(fwd, meg=True) + + label_path = op.join(data_path, 'MEG', 'sample', 'labels') + labels = ['Aud-lh', 'Vis-rh'] + label_lh = read_label(op.join(label_path, labels[0] + '.label')) + label_rh = read_label(op.join(label_path, labels[1] + '.label')) + + fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh]) + + src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices) + src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh) + vertno_lh = fwd['src'][0]['vertno'][src_sel_lh] + + nuse_lh = fwd['src'][0]['nuse'] + src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices) + src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) + vertno_rh = fwd['src'][1]['vertno'][src_sel_rh] + src_sel_rh += nuse_lh + + assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh)) + assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh)) + assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh)) + assert_equal(fwd_out['src'][0]['vertno'], vertno_lh) + assert_equal(fwd_out['src'][1]['vertno'], vertno_rh) + + fwd = read_forward_solution(fname_meeg) + fwd = pick_types_forward(fwd, meg=True) + + label_path = op.join(data_path, 'MEG', 'sample', 'labels') + labels = ['Aud-lh', 'Vis-rh'] + label_lh = read_label(op.join(label_path, labels[0] + '.label')) + label_rh = read_label(op.join(label_path, labels[1] + '.label')) + + fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh]) + + src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices) + src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh) + vertno_lh = fwd['src'][0]['vertno'][src_sel_lh] + + nuse_lh = fwd['src'][0]['nuse'] + src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices) + src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) + vertno_rh = fwd['src'][1]['vertno'][src_sel_rh] + src_sel_rh += nuse_lh + + assert_equal(fwd_out['sol']['ncol'], + 3 * (len(src_sel_lh) + len(src_sel_rh))) + assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh)) + assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh)) + assert_equal(fwd_out['src'][0]['vertno'], vertno_lh) + assert_equal(fwd_out['src'][1]['vertno'], vertno_rh) + + # Test saving the restricted forward object. This only works if all fields + # are properly accounted for. 
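+ # [editor's note] the 'src_sel_rh += nuse_lh' bookkeeping above reflects + # how forward solutions stack hemispheres: right-hemisphere gain-matrix + # columns start after the nuse_lh left-hemisphere columns, so selections + # in the right hemisphere must be offset by that count.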
+ fname_copy = tmp_path / 'copy-fwd.fif' + write_forward_solution(fname_copy, fwd_out, overwrite=True) + fwd_out_read = read_forward_solution(fname_copy) + assert_forward_allclose(fwd_out, fwd_out_read) + + +@testing.requires_testing_data +@requires_mne +def test_average_forward_solution(tmp_path): + """Test averaging forward solutions.""" + fwd = read_forward_solution(fname_meeg) + # input not a list + pytest.raises(TypeError, average_forward_solutions, 1) + # list is too short + pytest.raises(ValueError, average_forward_solutions, []) + # negative weights + pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0]) + # all zero weights + pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0]) + # weights not same length + pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0]) + # list does not only have all dict() + pytest.raises(TypeError, average_forward_solutions, [1, fwd]) + + # try an easy case + fwd_copy = average_forward_solutions([fwd]) + assert (isinstance(fwd_copy, Forward)) + assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data']) + + # modify a fwd solution, save it, use MNE to average with old one + fwd_copy['sol']['data'] *= 0.5 + fname_copy = str(tmp_path / 'copy-fwd.fif') + write_forward_solution(fname_copy, fwd_copy, overwrite=True) + cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd', + fname_copy, '--out', fname_copy) + run_subprocess(cmd) + + # now let's actually do it, with one filename and one fwd + fwd_ave = average_forward_solutions([fwd, fwd_copy]) + assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data']) + # fwd_ave_mne = read_forward_solution(fname_copy) + # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data']) + + # with gradient + fwd = read_forward_solution(fname_meeg_grad) + fwd_ave = average_forward_solutions([fwd, fwd]) + assert_forward_allclose(fwd, fwd_ave) + + +@testing.requires_testing_data +def test_priors(): + """Test prior computations.""" + # Depth prior + fwd = read_forward_solution(fname_meeg) + assert not is_fixed_orient(fwd) + n_sources = fwd['nsource'] + info = read_info(fname_evoked) + depth_prior = compute_depth_prior(fwd, info, exp=0.8) + assert depth_prior.shape == (3 * n_sources,) + depth_prior = compute_depth_prior(fwd, info, exp=0.) + assert_array_equal(depth_prior, 1.) + with pytest.raises(ValueError, match='must be "whiten"'): + compute_depth_prior(fwd, info, limit_depth_chs='foo') + with pytest.raises(ValueError, match='noise_cov must be a Covariance'): + compute_depth_prior(fwd, info, limit_depth_chs='whiten') + fwd_fixed = convert_forward_solution(fwd, force_fixed=True) + depth_prior = compute_depth_prior(fwd_fixed, info=info) + assert depth_prior.shape == (n_sources,) + # Orientation prior + orient_prior = compute_orient_prior(fwd, 1.) + assert_array_equal(orient_prior, 1.) + orient_prior = compute_orient_prior(fwd_fixed, 0.) + assert_array_equal(orient_prior, 1.) 
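+ # [editor's note -- illustrative, not part of upstream MNE] loose=0.5 + # weights the two tangential components of each free-orientation triplet + # by 0.5 and the surface-normal component by 1.0, hence the (0.5, 1.) + # values checked below and the surf_ori requirement: + # fwd_surf = mne.convert_forward_solution(fwd, surf_ori=True) + # prior = mne.forward.compute_orient_prior(fwd_surf, loose=0.5)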
+ with pytest.raises(ValueError, match='oriented in surface coordinates'): + compute_orient_prior(fwd, 0.5) + fwd_surf_ori = convert_forward_solution(fwd, surf_ori=True) + orient_prior = compute_orient_prior(fwd_surf_ori, 0.5) + assert all(np.in1d(orient_prior, (0.5, 1.))) + with pytest.raises(ValueError, match='between 0 and 1'): + compute_orient_prior(fwd_surf_ori, -0.5) + with pytest.raises(ValueError, match='with fixed orientation'): + compute_orient_prior(fwd_fixed, 0.5) + + +@testing.requires_testing_data +def test_equalize_channels(): + """Test equalization of channels for instances of Forward.""" + fwd1 = read_forward_solution(fname_meeg) + fwd1.pick_channels(['EEG 001', 'EEG 002', 'EEG 003']) + fwd2 = fwd1.copy().pick_channels(['EEG 002', 'EEG 001'], ordered=True) + fwd1, fwd2 = equalize_channels([fwd1, fwd2]) + assert fwd1.ch_names == ['EEG 001', 'EEG 002'] + assert fwd2.ch_names == ['EEG 001', 'EEG 002'] diff --git a/python/libs/mne/forward/tests/test_make_forward.py b/python/libs/mne/forward/tests/test_make_forward.py new file mode 100644 index 0000000..1b79c32 --- /dev/null +++ b/python/libs/mne/forward/tests/test_make_forward.py @@ -0,0 +1,539 @@ +from itertools import product +import os +import os.path as op + +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_array_equal + +from mne.channels import make_standard_montage +from mne.datasets import testing +from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info +from mne.io.constants import FIFF +from mne import (read_forward_solution, write_forward_solution, + make_forward_solution, convert_forward_solution, + setup_volume_source_space, read_source_spaces, create_info, + make_sphere_model, pick_types_forward, pick_info, pick_types, + read_evokeds, read_cov, read_dipole, + get_volume_labels_from_aseg) +from mne.surface import _get_ico_surface +from mne.transforms import Transform +from mne.utils import (requires_mne, requires_nibabel, run_subprocess, + catch_logging) +from mne.forward._make_forward import _create_meg_coils, make_forward_dipole +from mne.forward._compute_forward import _magnetic_dipole_field_vec +from mne.forward import Forward, _do_forward_solution, use_coil_def +from mne.dipole import Dipole, fit_dipole +from mne.simulation import simulate_evoked +from mne.source_estimate import VolSourceEstimate +from mne.source_space import (write_source_spaces, _compare_source_spaces, + setup_source_space) + +from mne.forward.tests.test_forward import assert_forward_allclose + +data_path = testing.data_path(download=False) +fname_meeg = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data', + 'test_raw.fif') +fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') +fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip') +fname_trans = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-trans.fif') +subjects_dir = os.path.join(data_path, 'subjects') +fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif') +fname_bem = op.join(subjects_dir, 'sample', 'bem', + 'sample-1280-1280-1280-bem-sol.fif') +fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') +fname_bem_meg = op.join(subjects_dir, 'sample', 'bem', + 'sample-1280-bem-sol.fif') + + +def _compare_forwards(fwd, fwd_py, n_sensors, n_src, + 
meg_rtol=1e-4, meg_atol=1e-9, + eeg_rtol=1e-3, eeg_atol=1e-3): + """Test forwards.""" + # check source spaces + assert_equal(len(fwd['src']), len(fwd_py['src'])) + _compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx') + for surf_ori, force_fixed in product([False, True], [False, True]): + # use copy here to leave our originals unmodified + fwd = convert_forward_solution(fwd, surf_ori, force_fixed, copy=True, + use_cps=True) + fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed, + copy=True, use_cps=True) + check_src = n_src // 3 if force_fixed else n_src + + for key in ('nchan', 'source_rr', 'source_ori', + 'surf_ori', 'coord_frame', 'nsource'): + assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7, + err_msg=key) + # In surf_ori=True only Z matters for source_nn + if surf_ori and not force_fixed: + ori_sl = slice(2, None, 3) + else: + ori_sl = slice(None) + assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl], + rtol=1e-4, atol=1e-6) + assert_allclose(fwd_py['mri_head_t']['trans'], + fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8) + + assert_equal(fwd_py['sol']['data'].shape, (n_sensors, check_src)) + assert_equal(len(fwd['sol']['row_names']), n_sensors) + assert_equal(len(fwd_py['sol']['row_names']), n_sensors) + + # check MEG + assert_allclose(fwd['sol']['data'][:306, ori_sl], + fwd_py['sol']['data'][:306, ori_sl], + rtol=meg_rtol, atol=meg_atol, + err_msg='MEG mismatch') + # check EEG + if fwd['sol']['data'].shape[0] > 306: + assert_allclose(fwd['sol']['data'][306:, ori_sl], + fwd_py['sol']['data'][306:, ori_sl], + rtol=eeg_rtol, atol=eeg_atol, + err_msg='EEG mismatch') + + +def test_magnetic_dipole(): + """Test basic magnetic dipole forward calculation.""" + info = read_info(fname_raw) + picks = pick_types(info, meg=True, eeg=False, exclude=[]) + info = pick_info(info, picks[:12]) + coils = _create_meg_coils(info['chs'], 'normal', None) + # magnetic dipole far (meters!) from device origin + r0 = np.array([0., 13., -6.]) + for ch, coil in zip(info['chs'], coils): + rr = (ch['loc'][:3] + r0) / 2. # get halfway closer + far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil]) + near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil]) + ratio = 8. if ch['ch_name'][-1] == '1' else 16. 
# grad vs mag + assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1) + # degenerate case + r0 = coils[0]['rmag'][[0]] + with pytest.raises(RuntimeError, match='Coil too close'): + _magnetic_dipole_field_vec(r0, coils[:1]) + with pytest.warns(RuntimeWarning, match='Coil too close'): + fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning') + assert not np.isfinite(fwd).any() + with np.errstate(invalid='ignore'): + fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info') + assert not np.isfinite(fwd).any() + + +@pytest.mark.slowtest # slow-ish on Travis OSX +@pytest.mark.timeout(60) # can take longer than 30 sec on Travis +@testing.requires_testing_data +@requires_mne +def test_make_forward_solution_kit(tmp_path): + """Test making fwd using KIT, BTI, and CTF (compensated) files.""" + kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', + 'tests', 'data') + sqd_path = op.join(kit_dir, 'test.sqd') + mrk_path = op.join(kit_dir, 'test_mrk.sqd') + elp_path = op.join(kit_dir, 'test_elp.txt') + hsp_path = op.join(kit_dir, 'test_hsp.txt') + trans_path = op.join(kit_dir, 'trans-sample.fif') + fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif') + + bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti', + 'tests', 'data') + bti_pdf = op.join(bti_dir, 'test_pdf_linux') + bti_config = op.join(bti_dir, 'test_config_linux') + bti_hs = op.join(bti_dir, 'test_hs_linux') + fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif') + + fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', + 'data', 'test_ctf_comp_raw.fif') + + # first set up a small testing source space + fname_src_small = tmp_path / 'sample-oct-2-src.fif' + src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, + add_dist=False) + write_source_spaces(fname_src_small, src) # to enable working with MNE-C + n_src = 108 # this is the resulting # of verts in fwd + + # first use mne-C: convert file, make forward solution + fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small, + bem=fname_bem_meg, mri=trans_path, + eeg=False, meg=True, subjects_dir=subjects_dir) + assert (isinstance(fwd, Forward)) + + # now let's use python with the same raw file + fwd_py = make_forward_solution(fname_kit_raw, trans_path, src, + fname_bem_meg, eeg=False, meg=True) + _compare_forwards(fwd, fwd_py, 157, n_src) + assert (isinstance(fwd_py, Forward)) + + # now let's use mne-python all the way + raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path) + # without ignore_ref=True, this should throw an error: + with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'): + make_forward_solution(raw_py.info, src=src, eeg=False, meg=True, + bem=fname_bem_meg, trans=trans_path) + + # check that asking for eeg channels (even if they don't exist) is handled + meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True, + eeg=False)) + fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True, + bem=fname_bem_meg, trans=trans_path, + ignore_ref=True) + _compare_forwards(fwd, fwd_py, 157, n_src, + meg_rtol=1e-3, meg_atol=1e-7) + + # BTI python end-to-end versus C + fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small, + bem=fname_bem_meg, mri=trans_path, + eeg=False, meg=True, subjects_dir=subjects_dir) + raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False) + fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True, + bem=fname_bem_meg, trans=trans_path) + _compare_forwards(fwd, 
fwd_py, 248, n_src) + + # now let's test CTF w/compensation + fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src, + fname_bem_meg, eeg=False, meg=True) + + fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans, + src=fname_src_small, bem=fname_bem_meg, + eeg=False, meg=True, subjects_dir=subjects_dir) + _compare_forwards(fwd, fwd_py, 274, n_src) + + # CTF with compensation changed in python + ctf_raw = read_raw_fif(fname_ctf_raw) + ctf_raw.info['bads'] = ['MRO24-2908'] # test that it works with some bads + ctf_raw.apply_gradient_compensation(2) + + fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src, + fname_bem_meg, eeg=False, meg=True) + fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans, + src=fname_src_small, bem=fname_bem_meg, + eeg=False, meg=True, + subjects_dir=subjects_dir) + _compare_forwards(fwd, fwd_py, 274, n_src) + + fname_temp = tmp_path / 'test-ctf-fwd.fif' + write_forward_solution(fname_temp, fwd_py) + fwd_py2 = read_forward_solution(fname_temp) + _compare_forwards(fwd_py, fwd_py2, 274, n_src) + repr(fwd_py) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_make_forward_solution(): + """Test making M-EEG forward solution from python.""" + with catch_logging() as log: + fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src, + fname_bem, mindist=5., verbose=True) + log = log.getvalue() + assert 'Total 258/258 points inside the surface' in log + assert (isinstance(fwd_py, Forward)) + fwd = read_forward_solution(fname_meeg) + assert (isinstance(fwd, Forward)) + _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3) + # Homogeneous model + with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'): + make_forward_solution(fname_raw, fname_trans, fname_src, + fname_bem_meg) + + +@testing.requires_testing_data +def test_make_forward_solution_discrete(tmp_path): + """Test making and converting a forward solution with discrete src.""" + # smoke test for depth weighting and discrete source spaces + src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, + add_dist=False) + src = src + setup_volume_source_space( + pos=dict(rr=src[0]['rr'][src[0]['vertno'][:3]].copy(), + nn=src[0]['nn'][src[0]['vertno'][:3]].copy())) + sphere = make_sphere_model() + fwd = make_forward_solution(fname_raw, fname_trans, src, sphere, + meg=True, eeg=False) + convert_forward_solution(fwd, surf_ori=True) + + +@testing.requires_testing_data +@requires_mne +@pytest.mark.timeout(90) # can take longer than 60 sec on Travis +def test_make_forward_solution_sphere(tmp_path): + """Test making a forward solution with a sphere model.""" + fname_src_small = tmp_path / 'sample-oct-2-src.fif' + src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, + add_dist=False) + write_source_spaces(fname_src_small, src) # to enable working with MNE-C + out_name = tmp_path / 'tmp-fwd.fif' + run_subprocess(['mne_forward_solution', '--meg', '--eeg', + '--meas', fname_raw, '--src', fname_src_small, + '--mri', fname_trans, '--fwd', out_name]) + fwd = read_forward_solution(out_name) + sphere = make_sphere_model(verbose=True) + fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere, + meg=True, eeg=True, verbose=True) + _compare_forwards(fwd, fwd_py, 366, 108, + meg_rtol=5e-1, meg_atol=1e-6, + eeg_rtol=5e-1, eeg_atol=5e-1) + # Since the above is pretty lax, let's check a different way + for meg, eeg in zip([True, False], [False, True]): + fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg) + fwd_py_ = 
pick_types_forward(fwd_py, meg=meg, eeg=eeg) + assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(), + fwd_py_['sol']['data'].ravel())[0, 1], + 1.0, rtol=1e-3) + # Number of layers in the sphere model doesn't matter for MEG + # (as long as no sources are omitted due to distance) + assert len(sphere['layers']) == 4 + fwd = make_forward_solution(fname_raw, fname_trans, src, sphere, + meg=True, eeg=False) + sphere_1 = make_sphere_model(head_radius=None) + assert len(sphere_1['layers']) == 0 + assert_array_equal(sphere['r0'], sphere_1['r0']) + fwd_1 = make_forward_solution(fname_raw, fname_trans, src, sphere_1, + meg=True, eeg=False) + _compare_forwards(fwd, fwd_1, 306, 108, meg_rtol=1e-12, meg_atol=1e-12) + # Homogeneous model + sphere = make_sphere_model(head_radius=None) + with pytest.raises(RuntimeError, match='zero shells.*EEG'): + make_forward_solution(fname_raw, fname_trans, src, sphere) + + +@pytest.mark.slowtest +@testing.requires_testing_data +@requires_nibabel() +def test_forward_mixed_source_space(tmp_path): + """Test making the forward solution for a mixed source space.""" + # get the surface source space + rng = np.random.RandomState(0) + surf = read_source_spaces(fname_src) + + # setup two volume source spaces + label_names = get_volume_labels_from_aseg(fname_aseg) + vol_labels = rng.choice(label_names, 2) + with pytest.warns(RuntimeWarning, match='Found no usable.*CC_Mid_Ant.*'): + vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg, + volume_label=vol_labels[0], + add_interpolator=False) + vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg, + volume_label=vol_labels[1], + add_interpolator=False) + + # merge surfaces and volume + src = surf + vol1 + vol2 + + # calculate forward solution + fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem) + assert (repr(fwd)) + + # extract source spaces + src_from_fwd = fwd['src'] + + # get the coordinate frame of each source space + coord_frames = np.array([s['coord_frame'] for s in src_from_fwd]) + + # assert that all source spaces are in head coordinates + assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all()) + + # run tests for SourceSpaces.export_volume + fname_img = tmp_path / 'temp-image.mgz' + + # head coordinates and mri_resolution, but trans file + with pytest.raises(ValueError, match='trans containing mri to head'): + src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None) + + # head coordinates and mri_resolution, but wrong trans file + vox_mri_t = vol1[0]['vox_mri_t'] + with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'): + src_from_fwd.export_volume(fname_img, mri_resolution=True, + trans=vox_mri_t) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_make_forward_dipole(tmp_path): + """Test forward-projecting dipoles.""" + rng = np.random.RandomState(0) + + evoked = read_evokeds(fname_evo)[0] + cov = read_cov(fname_cov) + cov['projs'] = [] # avoid proj warning + dip_c = read_dipole(fname_dip) + + # Only use magnetometers for speed! + picks = pick_types(evoked.info, meg='mag', eeg=False)[::8] + evoked.pick_channels([evoked.ch_names[p] for p in picks]) + evoked.info.normalize_proj() + info = evoked.info + + # Make new Dipole object with n_test_dipoles picked from the dipoles + # in the test dataset. 
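+ # [editor's note] mne.Dipole stores times in seconds, positions in meters + # (head coordinates), amplitudes in Am, orientations as unit vectors, and + # goodness-of-fit in percent, so the arrays can be sliced with dipsel + # below and recombined into a new Dipole without unit conversions.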
+ n_test_dipoles = 3 # minimum 3 needed to get uneven sampling in time + dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles]) + dip_test = Dipole(times=dip_c.times[dipsel], + pos=dip_c.pos[dipsel], + amplitude=dip_c.amplitude[dipsel], + ori=dip_c.ori[dipsel], + gof=dip_c.gof[dipsel]) + + sphere = make_sphere_model(head_radius=0.1) + + # Warning emitted due to uneven sampling in time + with pytest.warns(RuntimeWarning, match='unevenly spaced'): + fwd, stc = make_forward_dipole(dip_test, sphere, info, + trans=fname_trans) + + # stc is list of VolSourceEstimate's + assert isinstance(stc, list) + for n_dip in range(n_test_dipoles): + assert isinstance(stc[n_dip], VolSourceEstimate) + + # Now simulate evoked responses for each of the test dipoles, + # and fit dipoles to them (sphere model, MEG and EEG) + times, pos, amplitude, ori, gof = [], [], [], [], [] + nave = 200 # add a tiny amount of noise to the simulated evokeds + for s in stc: + evo_test = simulate_evoked(fwd, s, info, cov, + nave=nave, random_state=rng) + # evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info)) + dfit, resid = fit_dipole(evo_test, cov, sphere, None) + times += dfit.times.tolist() + pos += dfit.pos.tolist() + amplitude += dfit.amplitude.tolist() + ori += dfit.ori.tolist() + gof += dfit.gof.tolist() + + # Create a new Dipole object with the dipole fits + dip_fit = Dipole(times, pos, amplitude, ori, gof) + + # check that true (test) dipoles and fits are "close" + # cf. mne/tests/test_dipole.py + diff = dip_test.pos - dip_fit.pos + corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1] + dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1))) + gc_dist = 180 / np.pi * \ + np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1))) + amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2)) + + # Make sure each coordinate is close to reference + # NB tolerance should be set relative to snr of simulated evoked! + assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2, + err_msg='position mismatch') + assert dist < 1e-2 # within 1 cm + assert corr > 0.985 + assert gc_dist < 20 # less than 20 degrees + assert amp_err < 10e-9 # within 10 nAm + + # Make sure rejection works with BEM: one dipole at z=1m + # NB _make_forward.py:_prepare_for_forward will raise a RuntimeError + # if no points are left after min_dist exclusions, hence 2 dips here! 
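+ # [editor's note] only the z=1.0 m dipole lies outside the inner skull; + # the dipole at z=0.040 m is valid and keeps the source space non-empty, + # so the ValueError below (rather than the RuntimeError mentioned above) + # is what gets raised.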
+ dip_outside = Dipole(times=[0., 0.001], + pos=[[0., 0., 1.0], [0., 0., 0.040]], + amplitude=[100e-9, 100e-9], + ori=[[1., 0., 0.], [1., 0., 0.]], gof=1) + with pytest.raises(ValueError, match='outside the inner skull'): + make_forward_dipole(dip_outside, fname_bem, info, fname_trans) + # if we get this far, can safely assume the code works with BEMs too + # -> use sphere again below for speed + + # Now make an evenly sampled set of dipoles, some simultaneous, + # should return a VolSourceEstimate regardless + times = [0., 0., 0., 0.001, 0.001, 0.002] + pos = np.random.rand(6, 3) * 0.020 + \ + np.array([0., 0., 0.040])[np.newaxis, :] + amplitude = np.random.rand(6) * 100e-9 + ori = np.eye(6, 3) + np.eye(6, 3, -3) + gof = np.arange(len(times)) / len(times) # arbitrary + + dip_even_samp = Dipole(times, pos, amplitude, ori, gof) + + # I/O round-trip + fname = str(tmp_path / 'test-fwd.fif') + with pytest.warns(RuntimeWarning, match='free orientation'): + write_forward_solution(fname, fwd) + fwd_read = convert_forward_solution( + read_forward_solution(fname), force_fixed=True) + assert_forward_allclose(fwd, fwd_read, rtol=1e-6) + + fwd, stc = make_forward_dipole(dip_even_samp, sphere, info, + trans=fname_trans) + assert isinstance(stc, VolSourceEstimate) + assert_allclose(stc.times, np.arange(0., 0.003, 0.001)) + + +@testing.requires_testing_data +def test_make_forward_no_meg(tmp_path): + """Test that we can make and I/O forward solution with no MEG channels.""" + pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.]]) + src = setup_volume_source_space(pos=pos) + bem = make_sphere_model() + trans = None + montage = make_standard_montage('standard_1020') + info = create_info(['Cz'], 1000., 'eeg').set_montage(montage) + fwd = make_forward_solution(info, trans, src, bem) + fname = tmp_path / 'test-fwd.fif' + write_forward_solution(fname, fwd) + fwd_read = read_forward_solution(fname) + assert_allclose(fwd['sol']['data'], fwd_read['sol']['data']) + + +def test_use_coil_def(tmp_path): + """Test use_coil_def.""" + info = create_info(1, 1000., 'mag') + info['chs'][0]['coil_type'] = 9999 + info['chs'][0]['loc'][:] = [0, 0, 0.02, 1, 0, 0, 0, 1, 0, 0, 0, 1] + sphere = make_sphere_model((0., 0., 0.), 0.01) + src = setup_volume_source_space(pos=5, sphere=sphere) + trans = Transform('head', 'mri', None) + with pytest.raises(RuntimeError, match='coil definition not found'): + make_forward_solution(info, trans, src, sphere) + coil_fname = tmp_path / 'coil_def.dat' + with open(coil_fname, 'w') as fid: + fid.write("""# custom cube coil def +1 9999 2 8 3e-03 0.000e+00 "Test" + 0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000""") + with pytest.raises(RuntimeError, match='Could not interpret'): + with use_coil_def(coil_fname): + make_forward_solution(info, trans, src, sphere) + with open(coil_fname, 'w') as fid: + fid.write("""# custom cube coil def +1 9999 2 8 3e-03 0.000e+00 "Test" + 0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000 + 0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000 + 0.1250 0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000 + 0.1250 0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000 + 0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000 + 0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000 + 0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000 + 0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000""") + with use_coil_def(coil_fname): + make_forward_solution(info, trans, src, sphere) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_sensors_inside_bem(): + 
"""Test that sensors inside the BEM are problematic.""" + rr = _get_ico_surface(1)['rr'] + rr /= np.linalg.norm(rr, axis=1, keepdims=True) + rr *= 0.1 + assert len(rr) == 42 + info = create_info(len(rr), 1000., 'mag') + info['dev_head_t'] = Transform('meg', 'head', np.eye(4)) + for ii, ch in enumerate(info['chs']): + ch['loc'][:] = np.concatenate((rr[ii], np.eye(3).ravel())) + trans = Transform('head', 'mri', np.eye(4)) + trans['trans'][2, 3] = 0.03 + sphere_noshell = make_sphere_model((0., 0., 0.), None) + sphere = make_sphere_model((0., 0., 0.), 1.01) + with pytest.raises(RuntimeError, match='.* 15 MEG.*inside the scalp.*'): + make_forward_solution(info, trans, fname_src, fname_bem) + make_forward_solution(info, trans, fname_src, fname_bem_meg) # okay + make_forward_solution(info, trans, fname_src, sphere_noshell) # okay + with pytest.raises(RuntimeError, match='.* 42 MEG.*outermost sphere sh.*'): + make_forward_solution(info, trans, fname_src, sphere) + sphere = make_sphere_model((0., 0., 2.0), 1.01) # weird, but okay + make_forward_solution(info, trans, fname_src, sphere) + for ch in info['chs']: + ch['loc'][:3] *= 0.1 + with pytest.raises(RuntimeError, match='.* 42 MEG.*the inner skull.*'): + make_forward_solution(info, trans, fname_src, fname_bem_meg) diff --git a/python/libs/mne/gui/__init__.py b/python/libs/mne/gui/__init__.py new file mode 100644 index 0000000..d6377f4 --- /dev/null +++ b/python/libs/mne/gui/__init__.py @@ -0,0 +1,280 @@ +"""Convenience functions for opening GUIs.""" + +# Authors: Christian Brodbeck +# +# License: BSD-3-Clause + +import os + +from ..utils import verbose, get_config, warn + + +@verbose +def coregistration(tabbed=False, split=True, width=None, inst=None, + subject=None, subjects_dir=None, guess_mri_subject=None, + height=None, head_opacity=None, head_high_res=None, + trans=None, scrollable=True, project_eeg=None, + orient_to_surface=True, scale_by_distance=True, + mark_inside=True, interaction=None, scale=None, + advanced_rendering=None, head_inside=True, verbose=None): + """Coregister an MRI with a subject's head shape. + + The recommended way to use the GUI is through bash with: + + .. code-block:: bash + + $ mne coreg + + Parameters + ---------- + tabbed : bool + Combine the data source panel and the coregistration panel into a + single panel with tabs. + split : bool + Split the main panels with a movable splitter (good for QT4 but + unnecessary for wx backend). + width : int | None + Specify the width for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to 800). + inst : None | str + Path to an instance file containing the digitizer data. Compatible for + Raw, Epochs, and Evoked files. + subject : None | str + Name of the mri subject. + %(subjects_dir)s + guess_mri_subject : bool + When selecting a new head shape file, guess the subject's name based + on the filename and change the MRI subject accordingly (default True). + height : int | None + Specify a height for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to 400). + head_opacity : float | None + The opacity of the head surface in the range [0., 1.]. + Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value + (which defaults to 1.). + head_high_res : bool | None + Use a high resolution head surface. + Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value + (which defaults to True). 
+ trans : str | None + The transform file to use. + scrollable : bool + Make the coregistration panel vertically scrollable (default True). + project_eeg : bool | None + Deprecated. Use :func:`mne.viz.plot_alignment` to see projected EEG electrodes. + + .. versionadded:: 0.16 + orient_to_surface : bool | None + If True (default), orient EEG electrode and head shape points + to the head surface. + + .. versionadded:: 0.16 + scale_by_distance : bool | None + If True (default), scale the digitization points by their + distance from the scalp surface. + + .. versionadded:: 0.16 + mark_inside : bool | None + If True (default), mark points inside the head surface in a + different color. + + .. versionadded:: 0.16 + %(interaction_scene_none)s + Defaults to ``'terrain'``. + + .. versionadded:: 0.16 + .. versionchanged:: 1.0 + Default interaction mode if ``None`` and no config setting found + changed from ``'trackball'`` to ``'terrain'``. + scale : float | None + The scaling for the scene. + + .. versionadded:: 0.16 + advanced_rendering : bool + Use advanced OpenGL rendering techniques (default True). + For some renderers (such as MESA software) this can cause rendering + bugs. + + .. versionadded:: 0.18 + head_inside : bool + If True (default), add opaque inner scalp head surface to help occlude + points behind the head. + + .. versionadded:: 0.23 + %(verbose)s + + Returns + ------- + frame : instance of CoregistrationUI + The coregistration frame. + + Notes + ----- + Many parameters (e.g., ``head_opacity``) take None as a parameter, + which means that the default will be read from the MNE-Python + configuration file (which gets saved when exiting). + + Step by step instructions for the coregistrations can be accessed as + slides, `for subjects with structural MRI + `_ and `for + subjects for which no MRI is available + `_. + """ + unsupported_params = { + 'tabbed': (tabbed, False), + 'split': (split, True), + 'scrollable': (scrollable, True), + 'head_inside': (head_inside, True), + 'guess_mri_subject': guess_mri_subject, + 'scale': scale, + 'advanced_rendering': advanced_rendering, + } + for key, val in unsupported_params.items(): + if isinstance(val, tuple): + to_raise = val[0] != val[1] + else: + to_raise = val is not None + if to_raise: + warn(f"The parameter {key} is not supported with" + " the pyvistaqt 3d backend. 
It will be ignored.") + deprecated_params = { + 'project_eeg': project_eeg, + } + for key, val in deprecated_params.items(): + if val is not None: + warn(f'{key} is deprecated and will be removed in 1.1.', + DeprecationWarning) + config = get_config() + if guess_mri_subject is None: + guess_mri_subject = config.get( + 'MNE_COREG_GUESS_MRI_SUBJECT', 'true') == 'true' + if head_high_res is None: + head_high_res = config.get('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true' + if advanced_rendering is None: + advanced_rendering = \ + config.get('MNE_COREG_ADVANCED_RENDERING', 'true') == 'true' + if head_opacity is None: + head_opacity = config.get('MNE_COREG_HEAD_OPACITY', 0.8) + if head_inside is None: + head_inside = \ + config.get('MNE_COREG_HEAD_INSIDE', 'true').lower() == 'true' + if width is None: + width = config.get('MNE_COREG_WINDOW_WIDTH', 800) + if height is None: + height = config.get('MNE_COREG_WINDOW_HEIGHT', 600) + if subjects_dir is None: + if 'SUBJECTS_DIR' in config: + subjects_dir = config['SUBJECTS_DIR'] + elif 'MNE_COREG_SUBJECTS_DIR' in config: + subjects_dir = config['MNE_COREG_SUBJECTS_DIR'] + if orient_to_surface is None: + orient_to_surface = (config.get('MNE_COREG_ORIENT_TO_SURFACE', '') == + 'true') + if scale_by_distance is None: + scale_by_distance = (config.get('MNE_COREG_SCALE_BY_DISTANCE', '') == + 'true') + if interaction is None: + interaction = config.get('MNE_COREG_INTERACTION', 'terrain') + if mark_inside is None: + mark_inside = config.get('MNE_COREG_MARK_INSIDE', '') == 'true' + if scale is None: + scale = config.get('MNE_COREG_SCENE_SCALE', 0.16) + head_opacity = float(head_opacity) + head_inside = bool(head_inside) + width = int(width) + height = int(height) + scale = float(scale) + + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + from ._coreg import CoregistrationUI + show = block = not MNE_3D_BACKEND_TESTING + return CoregistrationUI( + info_file=inst, subject=subject, subjects_dir=subjects_dir, + head_resolution=head_high_res, head_opacity=head_opacity, + orient_glyphs=orient_to_surface, scale_by_distance=scale_by_distance, + mark_inside=mark_inside, trans=trans, size=(width, height), show=show, + block=block, interaction=interaction, verbose=verbose + ) + + +@verbose +def locate_ieeg(info, trans, aligned_ct, subject=None, subjects_dir=None, + groups=None, verbose=None): + """Locate intracranial electrode contacts. + + Parameters + ---------- + %(info_not_none)s + %(trans_not_none)s + aligned_ct : path-like | nibabel.spatialimages.SpatialImage + The CT image that has been aligned to the Freesurfer T1. Path-like + inputs and nibabel image objects are supported. + %(subject)s + %(subjects_dir)s + groups : dict | None + A dictionary with channels as keys and their group index as values. + If None, the groups will be inferred by the channel names. Channel + names must have a format like ``LAMY 7`` where a string prefix + like ``LAMY`` precedes a numeric index like ``7``. If the channels + are formatted improperly, group plotting will work incorrectly. + Group assignments can be adjusted in the GUI. + %(verbose)s + + Returns + ------- + gui : instance of IntracranialElectrodeLocator + The graphical user interface (GUI) window. 
+ """ + from ._ieeg_locate_gui import IntracranialElectrodeLocator + from PyQt5.QtWidgets import QApplication + # get application + app = QApplication.instance() + if app is None: + app = QApplication(["Intracranial Electrode Locator"]) + gui = IntracranialElectrodeLocator( + info, trans, aligned_ct, subject=subject, + subjects_dir=subjects_dir, groups=groups, verbose=verbose) + gui.show() + return gui + + +class _LocateScraper(object): + """Scrape locate_ieeg outputs.""" + + def __repr__(self): + return '' + + def __call__(self, block, block_vars, gallery_conf): + from ._ieeg_locate_gui import IntracranialElectrodeLocator + from sphinx_gallery.scrapers import figure_rst + from PyQt5 import QtGui + for gui in block_vars['example_globals'].values(): + if (isinstance(gui, IntracranialElectrodeLocator) and + not getattr(gui, '_scraped', False) and + gallery_conf['builder_name'] == 'html'): + gui._scraped = True # monkey-patch but it's easy enough + img_fname = next(block_vars['image_path_iterator']) + # gui is QWindow + # https://doc.qt.io/qt-5/qwidget.html#grab + pixmap = gui.grab() + # Now the tricky part: we need to get the 3D renderer, extract + # the image from it, and put it in the correct place in the + # pixmap. The easiest way to do this is actually to save the + # 3D image first, then load it using QPixmap and Qt geometry. + plotter = gui._renderer.plotter + plotter.screenshot(img_fname) + sub_pixmap = QtGui.QPixmap(img_fname) + # https://doc.qt.io/qt-5/qwidget.html#mapTo + # https://doc.qt.io/qt-5/qpainter.html#drawPixmap-1 + QtGui.QPainter(pixmap).drawPixmap( + plotter.mapTo(gui, plotter.rect().topLeft()), + sub_pixmap) + # https://doc.qt.io/qt-5/qpixmap.html#save + pixmap.save(img_fname) + gui._renderer.close() # TODO should be triggered by close... 
+ gui.close() + return figure_rst( + [img_fname], gallery_conf['src_dir'], 'iEEG GUI') + return '' diff --git a/python/libs/mne/gui/_coreg.py b/python/libs/mne/gui/_coreg.py new file mode 100644 index 0000000..f100488 --- /dev/null +++ b/python/libs/mne/gui/_coreg.py @@ -0,0 +1,1848 @@ +from contextlib import contextmanager +from functools import partial +import inspect +import os +import os.path as op +import platform +from pathlib import Path +import time +import queue +import threading +import re + +import numpy as np +from traitlets import observe, HasTraits, Unicode, Bool, Float + +from ..io.constants import FIFF +from ..defaults import DEFAULTS +from ..io import read_info, read_fiducials, write_fiducials, read_raw +from ..io.pick import pick_types +from ..io.open import fiff_open, dir_tree_find +from ..io.meas_info import _empty_info +from ..io._read_raw import supported as raw_supported_types +from ..bem import make_bem_solution, write_bem_solution +from ..coreg import (Coregistration, _is_mri_subject, scale_mri, bem_fname, + _mri_subject_has_bem, fid_fname, _map_fid_name_to_idx) +from ..viz._3d import (_plot_head_surface, _plot_head_fiducials, + _plot_head_shape_points, _plot_mri_fiducials, + _plot_hpi_coils, _plot_sensors, _plot_helmet) +from ..transforms import (read_trans, write_trans, _ensure_trans, _get_trans, + rotation_angles, _get_transforms_to_coord_frame) +from ..utils import (get_subjects_dir, check_fname, _check_fname, fill_doc, + warn, verbose, logger, _validate_type) +from ..surface import _DistanceQuery, _CheckInside +from ..channels import read_dig_fif + + +class _WorkerData(): + def __init__(self, name, params=None): + self._name = name + self._params = params + + +@fill_doc +class CoregistrationUI(HasTraits): + """Class for coregistration assisted by graphical interface. + + Parameters + ---------- + info_file : None | str + The FIFF file with digitizer data for coregistration. + %(subject)s + %(subjects_dir)s + %(fiducials)s + head_resolution : bool + If True, use a high-resolution head surface. Defaults to True. + head_opacity : float + The opacity of the head surface. Defaults to 0.8. + hpi_coils : bool + If True, display the HPI coils. Defaults to True. + head_shape_points : bool + If True, display the head shape points. Defaults to True. + eeg_channels : bool + If True, display the EEG channels. Defaults to True. + orient_glyphs : bool + If True, orient the sensors towards the head surface. Defaults to True. + scale_by_distance : bool + If True, scale the sensors based on their distance to the head surface. + Defaults to True. + mark_inside : bool + If True, mark the head shape points that are inside the head surface + with a different color. Defaults to True. + sensor_opacity : float + The opacity of the sensors between 0 and 1. Defaults to 1.0. + trans : str + The path to the Head<->MRI transform FIF file ("-trans.fif"). + size : tuple + The dimensions (width, height) of the rendering view. The default is + (800, 600). + bgcolor : tuple | str + The background color as a tuple (red, green, blue) of float + values between 0 and 1 or a valid color name (e.g. 'white' + or 'w'). Defaults to 'grey'. + show : bool + Display the window as soon as it is ready. Defaults to True. + block : bool + If True, start the Qt application event loop. Defaults to False. + %(interaction_scene)s + Defaults to ``'terrain'``. + + .. 
versionadded:: 1.0 + %(verbose)s + + Attributes + ---------- + coreg : mne.coreg.Coregistration + The coregistration instance used by the graphical interface. + """ + + _subject = Unicode() + _subjects_dir = Unicode() + _lock_fids = Bool() + _current_fiducial = Unicode() + _info_file = Unicode() + _orient_glyphs = Bool() + _scale_by_distance = Bool() + _mark_inside = Bool() + _hpi_coils = Bool() + _head_shape_points = Bool() + _eeg_channels = Bool() + _head_resolution = Bool() + _head_opacity = Float() + _helmet = Bool() + _grow_hair = Float() + _subject_to = Unicode() + _scale_mode = Unicode() + _icp_fid_match = Unicode() + + @verbose + def __init__(self, info_file, *, subject=None, subjects_dir=None, + fiducials='auto', head_resolution=None, + head_opacity=None, hpi_coils=None, + head_shape_points=None, eeg_channels=None, orient_glyphs=None, + scale_by_distance=None, mark_inside=None, + sensor_opacity=None, trans=None, size=None, bgcolor=None, + show=True, block=False, interaction='terrain', + project_eeg=None, head_transparency=None, standalone=None, + verbose=None): + if standalone is not None: + depr_message = ('standalone is deprecated and will be replaced by ' + 'block in 1.1.') + if block is None: + block = standalone + warn(depr_message, DeprecationWarning) + else: + warn(depr_message + ' Since you passed values for both ' + 'standalone and block, standalone will be ignored.', + DeprecationWarning) + if head_transparency is not None: + depr_message = ('head_transparency is deprecated and will be' + ' replaced by head_opacity in 1.1.') + if head_opacity is None: + head_opacity = 0.8 if head_transparency else 1.0 + warn(depr_message, DeprecationWarning) + else: + warn(depr_message + ' Since you passed values for both ' + 'head_transparency and head_opacity, ' + 'head_transparency will be ignored.', + DeprecationWarning) + if project_eeg is not None: + warn('project_eeg is deprecated and will be removed in 1.1.', + DeprecationWarning) + from ..viz.backends.renderer import _get_renderer + from ..viz.backends._utils import _qt_app_exec + + def _get_default(var, val): + return var if var is not None else val + self._actors = dict() + self._surfaces = dict() + self._widgets = dict() + self._verbose = verbose + self._plot_locked = False + self._params_locked = False + self._refresh_rate_ms = max(int(round(1000. 
/ 60.)), 1) + self._redraws_pending = set() + self._parameter_mutex = threading.Lock() + self._redraw_mutex = threading.Lock() + self._job_queue = queue.Queue() + self._parameter_queue = queue.Queue() + self._head_geo = None + self._check_inside = None + self._nearest = None + self._coord_frame = "mri" + self._mouse_no_mvt = -1 + self._to_cf_t = None + self._omit_hsp_distance = 0.0 + self._fiducials_file = None + self._trans_modified = False + self._mri_fids_modified = False + self._mri_scale_modified = False + self._accept_close_event = True + self._fid_colors = tuple( + DEFAULTS['coreg'][f'{key}_color'] for key in + ('lpa', 'nasion', 'rpa')) + self._defaults = dict( + size=_get_default(size, (800, 600)), + bgcolor=_get_default(bgcolor, "grey"), + orient_glyphs=_get_default(orient_glyphs, True), + scale_by_distance=_get_default(scale_by_distance, True), + mark_inside=_get_default(mark_inside, True), + hpi_coils=_get_default(hpi_coils, True), + head_shape_points=_get_default(head_shape_points, True), + eeg_channels=_get_default(eeg_channels, True), + head_resolution=_get_default(head_resolution, True), + head_opacity=_get_default(head_opacity, 0.8), + helmet=False, + sensor_opacity=_get_default(sensor_opacity, 1.0), + fiducials=("LPA", "Nasion", "RPA"), + fiducial="LPA", + lock_fids=True, + grow_hair=0.0, + subject_to="", + scale_modes=["None", "uniform", "3-axis"], + scale_mode="None", + icp_fid_matches=('nearest', 'matched'), + icp_fid_match='matched', + icp_n_iterations=20, + omit_hsp_distance=10.0, + lock_head_opacity=self._head_opacity < 1.0, + weights=dict( + lpa=1.0, + nasion=10.0, + rpa=1.0, + hsp=1.0, + eeg=1.0, + hpi=1.0, + ), + ) + + # process requirements + info = None + subjects_dir = get_subjects_dir( + subjects_dir=subjects_dir, raise_error=True) + subject = _get_default(subject, self._get_subjects(subjects_dir)[0]) + + # setup the window + splash = 'Initializing coregistration GUI...' 
if show else False + self._renderer = _get_renderer( + size=self._defaults["size"], bgcolor=self._defaults["bgcolor"], + splash=splash) + self._renderer._window_close_connect(self._clean) + self._renderer._window_close_connect(self._close_callback, after=False) + self._renderer.set_interaction(interaction) + + # coregistration model setup + self._immediate_redraw = (self._renderer._kind != 'qt') + self._info = info + self._fiducials = fiducials + self.coreg = Coregistration( + info=self._info, subject=subject, subjects_dir=subjects_dir, + fiducials=fiducials, + on_defects='ignore' # safe due to interactive visual inspection + ) + fid_accurate = self.coreg._fid_accurate + for fid in self._defaults["weights"].keys(): + setattr(self, f"_{fid}_weight", self._defaults["weights"][fid]) + + # set main traits + self._set_head_opacity(self._defaults["head_opacity"]) + self._old_head_opacity = self._head_opacity + self._set_subjects_dir(subjects_dir) + self._set_subject(subject) + self._set_info_file(info_file) + self._set_orient_glyphs(self._defaults["orient_glyphs"]) + self._set_scale_by_distance(self._defaults["scale_by_distance"]) + self._set_mark_inside(self._defaults["mark_inside"]) + self._set_hpi_coils(self._defaults["hpi_coils"]) + self._set_head_shape_points(self._defaults["head_shape_points"]) + self._set_eeg_channels(self._defaults["eeg_channels"]) + self._set_head_resolution(self._defaults["head_resolution"]) + self._set_helmet(self._defaults["helmet"]) + self._set_grow_hair(self._defaults["grow_hair"]) + self._set_omit_hsp_distance(self._defaults["omit_hsp_distance"]) + self._set_icp_n_iterations(self._defaults["icp_n_iterations"]) + self._set_icp_fid_match(self._defaults["icp_fid_match"]) + + # configure UI + self._reset_fitting_parameters() + self._configure_dialogs() + self._configure_status_bar() + self._configure_dock() + self._configure_picking() + + # once the docks are initialized + self._set_current_fiducial(self._defaults["fiducial"]) + self._set_scale_mode(self._defaults["scale_mode"]) + self._set_subject_to(self._defaults["subject_to"]) + if trans is not None: + self._load_trans(trans) + self._redraw() # we need the elements to be present now + + if fid_accurate: + assert self.coreg._fid_filename is not None + # _set_fiducials_file() calls _update_fiducials_label() + # internally + self._set_fiducials_file(self.coreg._fid_filename) + else: + self._set_head_resolution('high') + self._forward_widget_command('high_res_head', "set_value", True) + self._set_lock_fids(True) # hack to make the dig disappear + self._update_fiducials_label() + self._update_fiducials() + + self._set_lock_fids(fid_accurate) + + # configure worker + self._configure_worker() + + # must be done last + if show: + self._renderer.show() + # update the view once shown + views = {True: dict(azimuth=90, elevation=90), # front + False: dict(azimuth=180, elevation=90)} # left + self._renderer.set_camera(distance=None, **views[self._lock_fids]) + self._redraw() + # XXX: internal plotter/renderer should not be exposed + if not self._immediate_redraw: + self._renderer.plotter.add_callback( + self._redraw, self._refresh_rate_ms) + self._renderer.plotter.show_axes() + # initialization does not count as modification by the user + self._trans_modified = False + self._mri_fids_modified = False + self._mri_scale_modified = False + if block and self._renderer._kind != 'notebook': + _qt_app_exec(self._renderer.figure.store["app"]) + + def _set_subjects_dir(self, subjects_dir): + self._subjects_dir = _check_fname( + 
subjects_dir, overwrite='read', must_exist=True, need_dir=True) + + def _set_subject(self, subject): + self._subject = subject + + def _set_lock_fids(self, state): + self._lock_fids = bool(state) + + def _set_fiducials_file(self, fname): + if fname is None: + fids = 'auto' + else: + fname = _check_fname( + fname, overwrite='read', must_exist=True, need_dir=False + ) + fids, _ = read_fiducials(fname) + + self._fiducials_file = fname + self.coreg._setup_fiducials(fids) + self._update_distance_estimation() + self._update_fiducials_label() + self._update_fiducials() + self._reset(keep_trans=True) + + if fname is None: + self._set_lock_fids(False) + self._forward_widget_command( + 'reload_mri_fids', 'set_enabled', False + ) + else: + self._set_lock_fids(True) + self._forward_widget_command( + 'reload_mri_fids', 'set_enabled', True + ) + self._display_message( + f"Loading MRI fiducials from {fname}... Done!" + ) + + def _set_current_fiducial(self, fid): + self._current_fiducial = fid.lower() + + def _set_info_file(self, fname): + if fname is None: + return + + # info file can be anything supported by read_raw + try: + check_fname(fname, 'info', tuple(raw_supported_types.keys()), + endings_err=tuple(raw_supported_types.keys())) + except IOError as e: + warn(e) + self._widgets["info_file"].set_value(0, '') + return + + fname = _check_fname(fname, overwrite='read') # convert to str + + # ctf ds `files` are actually directories + if fname.endswith(('.ds',)): + self._info_file = _check_fname( + fname, overwrite='read', must_exist=True, need_dir=True) + else: + self._info_file = _check_fname( + fname, overwrite='read', must_exist=True, need_dir=False) + + def _set_omit_hsp_distance(self, distance): + self._omit_hsp_distance = distance + + def _set_orient_glyphs(self, state): + self._orient_glyphs = bool(state) + + def _set_scale_by_distance(self, state): + self._scale_by_distance = bool(state) + + def _set_mark_inside(self, state): + self._mark_inside = bool(state) + + def _set_hpi_coils(self, state): + self._hpi_coils = bool(state) + + def _set_head_shape_points(self, state): + self._head_shape_points = bool(state) + + def _set_eeg_channels(self, state): + self._eeg_channels = bool(state) + + def _set_head_resolution(self, state): + self._head_resolution = bool(state) + + def _set_head_opacity(self, value): + self._head_opacity = value + + def _set_helmet(self, state): + self._helmet = bool(state) + + def _set_grow_hair(self, value): + self._grow_hair = value + + def _set_subject_to(self, value): + style = dict() + self._subject_to = value + self._forward_widget_command( + "save_subject", "set_enabled", len(value) > 0) + if self._check_subject_exists(): + style["border"] = "2px solid #ff0000" + else: + style["border"] = "initial" + self._forward_widget_command( + "subject_to", "set_style", style) + + def _set_scale_mode(self, mode): + self._scale_mode = mode + + def _set_fiducial(self, value, coord): + self._mri_fids_modified = True + fid = self._current_fiducial + fid_idx = _map_fid_name_to_idx(name=fid) + + coords = ["X", "Y", "Z"] + coord_idx = coords.index(coord) + + self.coreg.fiducials.dig[fid_idx]['r'][coord_idx] = value / 1e3 + self._update_plot("mri_fids") + + def _set_parameter(self, value, mode_name, coord): + if mode_name == "scale": + self._mri_scale_modified = True + else: + self._trans_modified = True + if self._params_locked: + return + if mode_name == "scale" and self._scale_mode == "uniform": + with self._lock(params=True): + self._forward_widget_command( + ["sY", "sZ"], 
"set_value", value) + with self._parameter_mutex: + self. _set_parameter_safe(value, mode_name, coord) + self._update_plot("sensors") + + def _set_parameter_safe(self, value, mode_name, coord): + params = dict( + rotation=self.coreg._rotation, + translation=self.coreg._translation, + scale=self.coreg._scale, + ) + idx = ["X", "Y", "Z"].index(coord) + if mode_name == "rotation": + params[mode_name][idx] = np.deg2rad(value) + elif mode_name == "translation": + params[mode_name][idx] = value / 1e3 + else: + assert mode_name == "scale" + if self._scale_mode == "uniform": + params[mode_name][:] = value / 1e2 + else: + params[mode_name][idx] = value / 1e2 + self._update_plot("head") + self.coreg._update_params( + rot=params["rotation"], + tra=params["translation"], + sca=params["scale"], + ) + + def _set_icp_n_iterations(self, n_iterations): + self._icp_n_iterations = n_iterations + + def _set_icp_fid_match(self, method): + self._icp_fid_match = method + + def _set_point_weight(self, weight, point): + funcs = { + 'hpi': '_set_hpi_coils', + 'hsp': '_set_head_shape_points', + 'eeg': '_set_eeg_channels', + } + if point in funcs.keys(): + getattr(self, funcs[point])(weight > 0) + setattr(self, f"_{point}_weight", weight) + setattr(self.coreg, f"_{point}_weight", weight) + self._update_distance_estimation() + + @observe("_subjects_dir") + def _subjects_dir_changed(self, change=None): + # XXX: add coreg.set_subjects_dir + self.coreg._subjects_dir = self._subjects_dir + subjects = self._get_subjects() + + if self._subject not in subjects: # Just pick the first available one + self._subject = subjects[0] + + self._reset() + + @observe("_subject") + def _subject_changed(self, change=None): + # XXX: add coreg.set_subject() + self.coreg._subject = self._subject + self.coreg._setup_bem() + self.coreg._setup_fiducials(self._fiducials) + self._reset() + + default_fid_fname = fid_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + if Path(default_fid_fname).exists(): + fname = default_fid_fname + else: + fname = None + + self._set_fiducials_file(fname) + self._reset_fiducials() + + @observe("_lock_fids") + def _lock_fids_changed(self, change=None): + locked_widgets = [ + # MRI fiducials + "save_mri_fids", + # View options + "helmet", "head_opacity", "high_res_head", + # Digitization source + "info_file", "grow_hair", "omit_distance", "omit", "reset_omit", + # Scaling + "scaling_mode", "sX", "sY", "sZ", + # Transformation + "tX", "tY", "tZ", + "rX", "rY", "rZ", + # Fitting buttons + "fit_fiducials", "fit_icp", + # Transformation I/O + "save_trans", "load_trans", + "reset_trans", + # ICP + "icp_n_iterations", "icp_fid_match", "reset_fitting_options", + # Weights + "hsp_weight", "eeg_weight", "hpi_weight", + "lpa_weight", "nasion_weight", "rpa_weight", + ] + fits_widgets = ["fits_fiducials", "fits_icp"] + fid_widgets = ["fid_X", "fid_Y", "fid_Z", "fids_file", "fids"] + if self._lock_fids: + self._forward_widget_command(locked_widgets, "set_enabled", True) + self._forward_widget_command( + 'head_opacity', 'set_value', self._old_head_opacity + ) + self._scale_mode_changed() + self._display_message() + self._update_distance_estimation() + else: + self._old_head_opacity = self._head_opacity + self._forward_widget_command( + 'head_opacity', 'set_value', 1.0 + ) + self._forward_widget_command(locked_widgets, "set_enabled", False) + self._forward_widget_command(fits_widgets, "set_enabled", False) + self._display_message("Placing MRI fiducials - " + f"{self._current_fiducial.upper()}") + + 
self._set_sensors_visibility(self._lock_fids) + self._forward_widget_command("lock_fids", "set_value", self._lock_fids) + self._forward_widget_command(fid_widgets, "set_enabled", + not self._lock_fids) + + @observe("_current_fiducial") + def _current_fiducial_changed(self, change=None): + self._update_fiducials() + self._follow_fiducial_view() + if not self._lock_fids: + self._display_message("Placing MRI fiducials - " + f"{self._current_fiducial.upper()}") + + @observe("_info_file") + def _info_file_changed(self, change=None): + if not self._info_file: + return + elif self._info_file.endswith(('.fif', '.fif.gz')): + fid, tree, _ = fiff_open(self._info_file) + fid.close() + if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0: + self._info = read_info(self._info_file, verbose=False) + elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0: + self._info = _empty_info(1) + self._info['dig'] = read_dig_fif(fname=self._info_file).dig + self._info._unlocked = False + else: + self._info = read_raw(self._info_file).info + # XXX: add coreg.set_info() + self.coreg._info = self._info + self.coreg._setup_digs() + self._reset() + + @observe("_orient_glyphs") + def _orient_glyphs_changed(self, change=None): + self._update_plot(["hpi", "hsp", "eeg"]) + + @observe("_scale_by_distance") + def _scale_by_distance_changed(self, change=None): + self._update_plot(["hpi", "hsp", "eeg"]) + + @observe("_mark_inside") + def _mark_inside_changed(self, change=None): + self._update_plot("hsp") + + @observe("_hpi_coils") + def _hpi_coils_changed(self, change=None): + self._update_plot("hpi") + + @observe("_head_shape_points") + def _head_shape_point_changed(self, change=None): + self._update_plot("hsp") + + @observe("_eeg_channels") + def _eeg_channels_changed(self, change=None): + self._update_plot("eeg") + + @observe("_head_resolution") + def _head_resolution_changed(self, change=None): + self._update_plot(["head", "hsp"]) + + @observe("_head_opacity") + def _head_opacity_changed(self, change=None): + if "head" in self._actors: + self._actors["head"].GetProperty().SetOpacity(self._head_opacity) + self._renderer._update() + + @observe("_helmet") + def _helmet_changed(self, change=None): + self._update_plot("helmet") + + @observe("_grow_hair") + def _grow_hair_changed(self, change=None): + self.coreg.set_grow_hair(self._grow_hair) + self._update_plot("head") + self._update_plot("hsp") # inside/outside could change + + @observe("_scale_mode") + def _scale_mode_changed(self, change=None): + locked_widgets = ["sX", "sY", "sZ", "fits_icp", "subject_to"] + mode = None if self._scale_mode == "None" else self._scale_mode + self.coreg.set_scale_mode(mode) + if self._lock_fids: + self._forward_widget_command(locked_widgets, "set_enabled", + mode is not None) + self._forward_widget_command("fits_fiducials", "set_enabled", + mode not in (None, "3-axis")) + if self._scale_mode == "uniform": + self._forward_widget_command(["sY", "sZ"], "set_enabled", False) + + @observe("_icp_fid_match") + def _icp_fid_match_changed(self, change=None): + self.coreg.set_fid_match(self._icp_fid_match) + + def _run_worker(self, queue, jobs): + while True: + data = queue.get() + func = jobs[data._name] + if data._params is not None: + func(**data._params) + else: + func() + queue.task_done() + + def _configure_dialogs(self): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + for name, buttons in zip( + ["overwrite_subject", "overwrite_subject_exit"], + [["Yes", "No"], ["Yes", "Discard", "Cancel"]]): + self._widgets[name] = 
self._renderer._dialog_warning( + title="CoregistrationUI", + text="The name of the output subject used to " + "save the scaled anatomy already exists.", + info_text="Do you want to overwrite?", + callback=self._overwrite_subject_callback, + buttons=buttons, + modal=not MNE_3D_BACKEND_TESTING, + ) + + def _configure_worker(self): + work_plan = { + "_job_queue": dict(save_subject=self._save_subject), + "_parameter_queue": dict(set_parameter=self._set_parameter), + } + for queue_name, jobs in work_plan.items(): + t = threading.Thread(target=partial( + self._run_worker, + queue=getattr(self, queue_name), + jobs=jobs, + )) + t.daemon = True + t.start() + + def _configure_picking(self): + self._renderer._update_picking_callback( + self._on_mouse_move, + self._on_button_press, + self._on_button_release, + self._on_pick + ) + + @verbose + def _redraw(self, verbose=None): + if not self._redraws_pending: + return + draw_map = dict( + head=self._add_head_surface, + mri_fids=self._add_mri_fiducials, + hsp=self._add_head_shape_points, + hpi=self._add_hpi_coils, + eeg=self._add_eeg_channels, + head_fids=self._add_head_fiducials, + helmet=self._add_helmet, + ) + with self._redraw_mutex: + # We need at least "head" before "hsp", because the grow_hair param + # for head sets the rr that are used for inside/outside hsp + redraws_ordered = sorted( + self._redraws_pending, + key=lambda key: list(draw_map).index(key)) + logger.debug(f'Redrawing {redraws_ordered}') + for ki, key in enumerate(redraws_ordered): + logger.debug(f'{ki}. Drawing {repr(key)}') + draw_map[key]() + self._redraws_pending.clear() + self._renderer._update() + # necessary for MacOS + if platform.system() == 'Darwin': + self._renderer._process_events() + + def _on_mouse_move(self, vtk_picker, event): + if self._mouse_no_mvt: + self._mouse_no_mvt -= 1 + + def _on_button_press(self, vtk_picker, event): + self._mouse_no_mvt = 2 + + def _on_button_release(self, vtk_picker, event): + if self._mouse_no_mvt > 0: + x, y = vtk_picker.GetEventPosition() + # XXX: internal plotter/renderer should not be exposed + plotter = self._renderer.figure.plotter + picked_renderer = self._renderer.figure.plotter.renderer + # trigger the pick + plotter.picker.Pick(x, y, 0, picked_renderer) + self._mouse_no_mvt = 0 + + def _on_pick(self, vtk_picker, event): + if self._lock_fids: + return + # XXX: taken from Brain, can be refactored + cell_id = vtk_picker.GetCellId() + mesh = vtk_picker.GetDataSet() + if mesh is None or cell_id == -1 or not self._mouse_no_mvt: + return + if not getattr(mesh, "_picking_target", False): + return + pos = np.array(vtk_picker.GetPickPosition()) + vtk_cell = mesh.GetCell(cell_id) + cell = [vtk_cell.GetPointId(point_id) for point_id + in range(vtk_cell.GetNumberOfPoints())] + vertices = mesh.points[cell] + idx = np.argmin(abs(vertices - pos), axis=0) + vertex_id = cell[idx[0]] + + fiducials = [s.lower() for s in self._defaults["fiducials"]] + idx = fiducials.index(self._current_fiducial.lower()) + # XXX: add coreg.set_fids + self.coreg._fid_points[idx] = self._surfaces["head"].points[vertex_id] + self.coreg._reset_fiducials() + self._update_fiducials() + self._update_plot("mri_fids") + + def _reset_fitting_parameters(self): + self._forward_widget_command("icp_n_iterations", "set_value", + self._defaults["icp_n_iterations"]) + self._forward_widget_command("icp_fid_match", "set_value", + self._defaults["icp_fid_match"]) + weights_widgets = [f"{w}_weight" + for w in self._defaults["weights"].keys()] + 
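# _redraw above replays pending draw requests in a fixed canonical order so
# that the head surface is always rebuilt before the head-shape points that
# depend on its grow_hair-adjusted vertices. A standalone sketch of that
# ordering idea (the names here are illustrative, not part of this diff):
draw_order = ("head", "mri_fids", "hsp", "hpi", "eeg", "head_fids", "helmet")
pending = {"hsp", "head"}  # as accumulated by _update_plot
for key in sorted(pending, key=draw_order.index):
    print("drawing", key)  # "head" comes out first, then "hsp"
pending.clear()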
self._forward_widget_command(weights_widgets, "set_value", + list(self._defaults["weights"].values())) + + def _reset_fiducials(self): + self._set_current_fiducial(self._defaults["fiducial"]) + + def _omit_hsp(self): + self.coreg.omit_head_shape_points(self._omit_hsp_distance / 1e3) + n_omitted = np.sum(~self.coreg._extra_points_filter) + n_remaining = len(self.coreg._dig_dict['hsp']) - n_omitted + self._update_plot("hsp") + self._update_distance_estimation() + self._display_message( + f"{n_omitted} head shape points omitted, " + f"{n_remaining} remaining.") + + def _reset_omit_hsp_filter(self): + self.coreg._extra_points_filter = None + self.coreg._update_params(force_update=True) + self._update_plot("hsp") + self._update_distance_estimation() + n_total = len(self.coreg._dig_dict['hsp']) + self._display_message( + f"No head shape point is omitted, the total is {n_total}.") + + @verbose + def _update_plot(self, changes="all", verbose=None): + # Update list of things that need to be updated/plotted (and maybe + # draw them immediately) + try: + fun_name = inspect.currentframe().f_back.f_back.f_code.co_name + except Exception: # just in case one of these attrs is missing + fun_name = 'unknown' + logger.debug( + f'Updating plots based on {fun_name}: {repr(changes)}') + if self._plot_locked: + return + if self._info is None: + changes = ["head", "mri_fids"] + self._to_cf_t = dict(mri=dict(trans=np.eye(4)), head=None) + else: + self._to_cf_t = _get_transforms_to_coord_frame( + self._info, self.coreg.trans, coord_frame=self._coord_frame) + all_keys = ( + 'head', 'mri_fids', # MRI first + 'hsp', 'hpi', 'eeg', 'head_fids', # then dig + 'helmet', + ) + if changes == 'all': + changes = list(all_keys) + elif changes == 'sensors': + changes = all_keys[2:] # omit MRI ones + elif isinstance(changes, str): + changes = [changes] + changes = set(changes) + # ideally we would maybe have this in: + # with self._redraw_mutex: + # it would reduce "jerkiness" of the updates, but this should at least + # work okay + bad = changes.difference(set(all_keys)) + assert len(bad) == 0, f'Unknown changes: {bad}' + self._redraws_pending.update(changes) + if self._immediate_redraw: + self._redraw() + + @contextmanager + def _lock(self, plot=False, params=False, scale_mode=False, fitting=False): + """Which part of the UI to temporarily disable.""" + if plot: + old_plot_locked = self._plot_locked + self._plot_locked = True + if params: + old_params_locked = self._params_locked + self._params_locked = True + if scale_mode: + old_scale_mode = self.coreg._scale_mode + self.coreg._scale_mode = None + if fitting: + widgets = [ + "sX", "sY", "sZ", + "tX", "tY", "tZ", + "rX", "rY", "rZ", + "fit_icp", "fit_fiducials", "fits_icp", "fits_fiducials" + ] + states = [ + self._forward_widget_command( + w, "is_enabled", None, + input_value=False, output_value=True) + for w in widgets + ] + self._forward_widget_command(widgets, "set_enabled", False) + try: + yield + finally: + if plot: + self._plot_locked = old_plot_locked + if params: + self._params_locked = old_params_locked + if scale_mode: + self.coreg._scale_mode = old_scale_mode + if fitting: + for idx, w in enumerate(widgets): + self._forward_widget_command(w, "set_enabled", states[idx]) + + def _display_message(self, msg=""): + self._forward_widget_command('status_message', 'set_value', msg) + self._forward_widget_command( + 'status_message', 'show', None, input_value=False + ) + self._forward_widget_command( + 'status_message', 'update', None, input_value=False + ) + if msg: + 
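# mirror the message to the logger so it is also visible outside the GUI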
logger.info(msg) + + def _follow_fiducial_view(self): + fid = self._current_fiducial.lower() + view = dict(lpa='left', rpa='right', nasion='front') + kwargs = dict(front=(90., 90.), left=(180, 90), right=(0., 90)) + kwargs = dict(zip(('azimuth', 'elevation'), kwargs[view[fid]])) + if not self._lock_fids: + self._renderer.set_camera(distance=None, **kwargs) + + def _update_fiducials(self): + fid = self._current_fiducial + if not fid: + return + + idx = _map_fid_name_to_idx(name=fid) + val = self.coreg.fiducials.dig[idx]['r'] * 1e3 + + with self._lock(plot=True): + self._forward_widget_command( + ["fid_X", "fid_Y", "fid_Z"], "set_value", val) + + def _update_distance_estimation(self): + value = self.coreg._get_fiducials_distance_str() + '\n' + \ + self.coreg._get_point_distance_str() + dists = self.coreg.compute_dig_mri_distances() * 1e3 + if self._hsp_weight > 0: + value += "\nHSP <-> MRI (mean/min/max): "\ + f"{np.mean(dists):.2f} "\ + f"/ {np.min(dists):.2f} / {np.max(dists):.2f} mm" + self._forward_widget_command("fit_label", "set_value", value) + + def _update_parameters(self): + with self._lock(plot=True, params=True): + # rotation + deg = np.rad2deg(self.coreg._rotation) + logger.debug(f' Rotation: {deg}') + self._forward_widget_command(["rX", "rY", "rZ"], "set_value", deg) + # translation + mm = self.coreg._translation * 1e3 + logger.debug(f' Translation: {mm}') + self._forward_widget_command(["tX", "tY", "tZ"], "set_value", mm) + # scale + sc = self.coreg._scale * 1e2 + logger.debug(f' Scale: {sc}') + self._forward_widget_command(["sX", "sY", "sZ"], "set_value", sc) + + def _reset(self, keep_trans=False): + """Refresh the scene, and optionally reset transformation & scaling. + + Parameters + ---------- + keep_trans : bool + Whether to retain translation, rotation, and scaling; or reset them + to their default values (no translation, no rotation, no scaling). + """ + if not keep_trans: + self.coreg.set_scale(self.coreg._default_parameters[6:9]) + self.coreg.set_rotation(self.coreg._default_parameters[:3]) + self.coreg.set_translation(self.coreg._default_parameters[3:6]) + self._update_plot() + self._update_parameters() + self._update_distance_estimation() + + def _forward_widget_command(self, names, command, value, + input_value=True, output_value=False): + """Invoke a method of one or more widgets if the widgets exist. + + Parameters + ---------- + names : str | array-like of str + The widget names to operate on. + command : str + The method to invoke. + value : object | array-like + The value(s) to pass to the method. + input_value : bool + Whether the ``command`` accepts a ``value``. If ``False``, no + ``value`` will be passed to ``command``. + output_value : bool + Whether to return the return value of ``command``. + + Returns + ------- + ret : object | None + ``None`` if ``output_value`` is ``False``, and the return value of + ``command`` otherwise. 
+ """ + _validate_type( + item=names, + types=(str, list), + item_name='names' + ) + if isinstance(names, str): + names = [names] + + if not isinstance(value, (str, float, int, dict, type(None))): + value = list(value) + assert len(names) == len(value) + + for idx, name in enumerate(names): + val = value[idx] if isinstance(value, list) else value + if name in self._widgets: + if input_value: + ret = getattr(self._widgets[name], command)(val) + else: + ret = getattr(self._widgets[name], command)() + if output_value: + return ret + + def _set_sensors_visibility(self, state): + sensors = ["head_fiducials", "hpi_coils", "head_shape_points", + "eeg_channels"] + for sensor in sensors: + if sensor in self._actors and self._actors[sensor] is not None: + actors = self._actors[sensor] + actors = actors if isinstance(actors, list) else [actors] + for actor in actors: + actor.SetVisibility(state) + self._renderer._update() + + def _update_actor(self, actor_name, actor): + # XXX: internal plotter/renderer should not be exposed + self._renderer.plotter.remove_actor(self._actors.get(actor_name), + render=False) + self._actors[actor_name] = actor + + def _add_mri_fiducials(self): + mri_fids_actors = _plot_mri_fiducials( + self._renderer, self.coreg._fid_points, self._subjects_dir, + self._subject, self._to_cf_t, self._fid_colors) + # disable picking on the markers + for actor in mri_fids_actors: + actor.SetPickable(False) + self._update_actor("mri_fiducials", mri_fids_actors) + + def _add_head_fiducials(self): + head_fids_actors = _plot_head_fiducials( + self._renderer, self._info, self._to_cf_t, self._fid_colors) + self._update_actor("head_fiducials", head_fids_actors) + + def _add_hpi_coils(self): + if self._hpi_coils: + hpi_actors = _plot_hpi_coils( + self._renderer, self._info, self._to_cf_t, + opacity=self._defaults["sensor_opacity"], + scale=DEFAULTS["coreg"]["extra_scale"], + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + surf=self._head_geo, check_inside=self._check_inside, + nearest=self._nearest) + else: + hpi_actors = None + self._update_actor("hpi_coils", hpi_actors) + + def _add_head_shape_points(self): + if self._head_shape_points: + hsp_actors = _plot_head_shape_points( + self._renderer, self._info, self._to_cf_t, + opacity=self._defaults["sensor_opacity"], + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + mark_inside=self._mark_inside, surf=self._head_geo, + mask=self.coreg._extra_points_filter, + check_inside=self._check_inside, nearest=self._nearest) + else: + hsp_actors = None + self._update_actor("head_shape_points", hsp_actors) + + def _add_eeg_channels(self): + if self._eeg_channels: + eeg = ["original"] + picks = pick_types(self._info, eeg=(len(eeg) > 0), fnirs=True) + if len(picks) > 0: + actors = _plot_sensors( + self._renderer, self._info, self._to_cf_t, picks, + meg=False, eeg=eeg, fnirs=["sources", "detectors"], + warn_meg=False, head_surf=self._head_geo, units='m', + sensor_opacity=self._defaults["sensor_opacity"], + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + surf=self._head_geo, check_inside=self._check_inside, + nearest=self._nearest) + sens_actors = actors["eeg"] + sens_actors.extend(actors["fnirs"]) + else: + sens_actors = None + else: + sens_actors = None + self._update_actor("eeg_channels", sens_actors) + + def _add_head_surface(self): + bem = None + if self._head_resolution: + surface = 'head-dense' + key = 'high' + else: + surface = 'head' + key = 'low' + try: + 
head_actor, head_surf, _ = _plot_head_surface( + self._renderer, surface, self._subject, + self._subjects_dir, bem, self._coord_frame, self._to_cf_t, + alpha=self._head_opacity) + except IOError: + head_actor, head_surf, _ = _plot_head_surface( + self._renderer, "head", self._subject, self._subjects_dir, + bem, self._coord_frame, self._to_cf_t, + alpha=self._head_opacity) + key = 'low' + self._update_actor("head", head_actor) + # mark head surface mesh to restrict picking + head_surf._picking_target = True + # We need to use _get_processed_mri_points to incorporate grow_hair + rr = self.coreg._get_processed_mri_points(key) * self.coreg._scale.T + head_surf.points = rr + head_surf.compute_normals() + self._surfaces["head"] = head_surf + tris = self._surfaces["head"].faces.reshape(-1, 4)[:, 1:] + assert tris.ndim == 2 and tris.shape[1] == 3, tris.shape + nn = self._surfaces["head"].point_normals + assert nn.shape == (len(rr), 3), nn.shape + self._head_geo = dict(rr=rr, tris=tris, nn=nn) + self._check_inside = _CheckInside(head_surf, mode='pyvista') + self._nearest = _DistanceQuery(rr) + + def _add_helmet(self): + if self._helmet: + head_mri_t = _get_trans(self.coreg.trans, 'head', 'mri')[0] + helmet_actor, _, _ = _plot_helmet( + self._renderer, self._info, self._to_cf_t, head_mri_t, + self._coord_frame) + else: + helmet_actor = None + self._update_actor("helmet", helmet_actor) + + def _fit_fiducials(self): + with self._lock(scale_mode=True): + self._fits_fiducials() + + def _fits_fiducials(self): + with self._lock(params=True, fitting=True): + start = time.time() + self.coreg.fit_fiducials( + lpa_weight=self._lpa_weight, + nasion_weight=self._nasion_weight, + rpa_weight=self._rpa_weight, + verbose=self._verbose, + ) + end = time.time() + self._display_message( + f"Fitting fiducials finished in {end - start:.2f} seconds.") + self._update_plot("sensors") + self._update_parameters() + self._update_distance_estimation() + + def _fit_icp(self): + with self._lock(scale_mode=True): + self._fit_icp_real(update_head=False) + + def _fits_icp(self): + self._fit_icp_real(update_head=True) + + def _fit_icp_real(self, *, update_head): + with self._lock(params=True, fitting=True): + self._current_icp_iterations = 0 + updates = ['hsp', 'hpi', 'eeg', 'head_fids'] + if update_head: + updates.insert(0, 'head') + + def callback(iteration, n_iterations): + self._display_message( + f"Fitting ICP - iteration {iteration + 1}") + self._update_plot(updates) + self._current_icp_iterations += 1 + self._update_distance_estimation() + self._update_parameters() + self._renderer._process_events() # allow a draw or cancel + + start = time.time() + self.coreg.fit_icp( + n_iterations=self._icp_n_iterations, + lpa_weight=self._lpa_weight, + nasion_weight=self._nasion_weight, + rpa_weight=self._rpa_weight, + callback=callback, + verbose=self._verbose, + ) + end = time.time() + self._display_message() + self._display_message( + f"Fitting ICP finished in {end - start:.2f} seconds and " + f"{self._current_icp_iterations} iterations.") + del self._current_icp_iterations + + def _task_save_subject(self): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + if MNE_3D_BACKEND_TESTING: + self._save_subject() + else: + self._job_queue.put(_WorkerData("save_subject", None)) + + def _task_set_parameter(self, value, mode_name, coord): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + if MNE_3D_BACKEND_TESTING: + self._set_parameter(value, mode_name, coord) + else: + 
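# outside of testing, hand the update to the parameter queue so the GUI
# thread stays responsive; the daemon thread started in _configure_worker
# consumes these _WorkerData jobs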
self._parameter_queue.put(_WorkerData("set_parameter", dict( + value=value, mode_name=mode_name, coord=coord))) + + def _overwrite_subject_callback(self, button_name): + if button_name == "Yes": + self._save_subject_callback(overwrite=True) + elif button_name == "Cancel": + self._accept_close_event = False + else: + assert button_name == "No" or button_name == "Discard" + + def _check_subject_exists(self): + if not self._subject_to: + return False + subject_dirname = os.path.join('{subjects_dir}', '{subject}') + dest = subject_dirname.format(subject=self._subject_to, + subjects_dir=self._subjects_dir) + return os.path.exists(dest) + + def _save_subject(self, exit_mode=False): + dialog = "overwrite_subject_exit" if exit_mode else "overwrite_subject" + if self._check_subject_exists(): + self._forward_widget_command(dialog, "show", True) + else: + self._save_subject_callback() + + def _save_subject_callback(self, overwrite=False): + self._display_message(f"Saving {self._subject_to}...") + default_cursor = self._renderer._window_get_cursor() + self._renderer._window_set_cursor( + self._renderer._window_new_cursor("WaitCursor")) + + # prepare bem + bem_names = [] + if self._scale_mode != "None": + can_prepare_bem = _mri_subject_has_bem( + self._subject, self._subjects_dir) + else: + can_prepare_bem = False + if can_prepare_bem: + pattern = bem_fname.format(subjects_dir=self._subjects_dir, + subject=self._subject, + name='(.+-bem)') + bem_dir, pattern = os.path.split(pattern) + for filename in os.listdir(bem_dir): + match = re.match(pattern, filename) + if match: + bem_names.append(match.group(1)) + + # save the scaled MRI + try: + self._display_message(f"Scaling {self._subject_to}...") + scale_mri( + subject_from=self._subject, subject_to=self._subject_to, + scale=self.coreg._scale, overwrite=overwrite, + subjects_dir=self._subjects_dir, skip_fiducials=True, + labels=True, annot=True, on_defects='ignore' + ) + except Exception: + logger.error(f"Error scaling {self._subject_to}") + bem_names = [] + else: + self._display_message(f"Scaling {self._subject_to}... Done!") + + # Precompute BEM solutions + for bem_name in bem_names: + try: + self._display_message(f"Computing {bem_name} solution...") + bem_file = bem_fname.format(subjects_dir=self._subjects_dir, + subject=self._subject_to, + name=bem_name) + bemsol = make_bem_solution(bem_file) + write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol) + except Exception: + logger.error(f"Error computing {bem_name} solution") + else: + self._display_message(f"Computing {bem_name} solution..." + " Done!") + self._display_message(f"Saving {self._subject_to}... Done!") + self._renderer._window_set_cursor(default_cursor) + self._mri_scale_modified = False + + def _save_mri_fiducials(self, fname): + self._display_message(f"Saving {fname}...") + dig_montage = self.coreg.fiducials + write_fiducials( + fname=fname, pts=dig_montage.dig, coord_frame='mri', overwrite=True + ) + self._set_fiducials_file(fname) + self._display_message(f"Saving {fname}... 
Done!") + self._mri_fids_modified = False + + def _save_trans(self, fname): + write_trans(fname, self.coreg.trans, overwrite=True) + self._display_message( + f"{fname} transform file is saved.") + self._trans_modified = False + + def _load_trans(self, fname): + mri_head_t = _ensure_trans(read_trans(fname, return_all=True), + 'mri', 'head')['trans'] + rot_x, rot_y, rot_z = rotation_angles(mri_head_t) + x, y, z = mri_head_t[:3, 3] + self.coreg._update_params( + rot=np.array([rot_x, rot_y, rot_z]), + tra=np.array([x, y, z]), + ) + self._update_parameters() + self._update_distance_estimation() + self._display_message( + f"{fname} transform file is loaded.") + + def _get_subjects(self, sdir=None): + # XXX: would be nice to move this function to util + sdir = sdir if sdir is not None else self._subjects_dir + is_dir = sdir and op.isdir(sdir) + if is_dir: + dir_content = os.listdir(sdir) + subjects = [s for s in dir_content if _is_mri_subject(s, sdir)] + if len(subjects) == 0: + subjects.append('') + else: + subjects = [''] + return sorted(subjects) + + def _update_fiducials_label(self): + if self._fiducials_file is None: + text = ( + '
<p><strong>No custom MRI fiducials loaded!</strong></p>' + '<p>MRI fiducials could not be found in the standard ' + 'location. The displayed initial MRI fiducial locations ' + '(diamonds) were derived from fsaverage. Place, lock, and ' + 'save fiducials to discard this message.</p>' + ) + else: + assert self._fiducials_file == fid_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + assert self.coreg._fid_accurate is True + text = ( + f'<p><strong>MRI fiducials (diamonds) loaded from ' + f'standard location:</strong></p>' + f'<p>{self._fiducials_file}</p>
' + ) + + self._forward_widget_command( + 'mri_fiducials_label', 'set_value', text + ) + + def _configure_dock(self): + if self._renderer._kind == 'notebook': + collapse = True # collapsible and collapsed + else: + collapse = None # not collapsible + self._renderer._dock_initialize( + name="Input", area="left", max_width="350px" + ) + mri_subject_layout = self._renderer._dock_add_group_box( + name="MRI Subject", + collapse=collapse, + ) + self._widgets["subjects_dir"] = self._renderer._dock_add_file_button( + name="subjects_dir", + desc="Load", + func=self._set_subjects_dir, + value=self._subjects_dir, + placeholder="Subjects Directory", + is_directory=True, + tooltip="Load the path to the directory containing the " + "FreeSurfer subjects", + layout=mri_subject_layout, + ) + self._widgets["subject"] = self._renderer._dock_add_combo_box( + name="Subject", + value=self._subject, + rng=self._get_subjects(), + callback=self._set_subject, + compact=True, + tooltip="Select the FreeSurfer subject name", + layout=mri_subject_layout, + ) + + mri_fiducials_layout = self._renderer._dock_add_group_box( + name="MRI Fiducials", + collapse=collapse, + ) + # Add MRI fiducials I/O widgets + self._widgets['mri_fiducials_label'] = self._renderer._dock_add_label( + value='', # Will be filled via _update_fiducials_label() + layout=mri_fiducials_layout, + selectable=True + ) + # Reload & Save buttons go into their own layout widget + mri_fiducials_button_layout = self._renderer._dock_add_layout( + vertical=False + ) + self._renderer._layout_add_widget( + layout=mri_fiducials_layout, + widget=mri_fiducials_button_layout + ) + self._widgets["reload_mri_fids"] = self._renderer._dock_add_button( + name='Reload MRI Fid.', + callback=lambda: self._set_fiducials_file(self._fiducials_file), + tooltip="Reload MRI fiducials from the standard location", + layout=mri_fiducials_button_layout, + ) + # Disable reload button until we've actually loaded a fiducial file + # (happens in _set_fiducials_file method) + self._forward_widget_command('reload_mri_fids', 'set_enabled', False) + + self._widgets["save_mri_fids"] = self._renderer._dock_add_button( + name="Save MRI Fid.", + callback=lambda: self._save_mri_fiducials( + fid_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + ), + tooltip="Save MRI fiducials to the standard location. 
Fiducials " + "must be locked first!", + layout=mri_fiducials_button_layout, + ) + self._widgets["lock_fids"] = self._renderer._dock_add_check_box( + name="Lock fiducials", + value=self._lock_fids, + callback=self._set_lock_fids, + tooltip="Lock/Unlock interactive fiducial editing", + layout=mri_fiducials_layout, + ) + self._widgets["fids"] = self._renderer._dock_add_radio_buttons( + value=self._defaults["fiducial"], + rng=self._defaults["fiducials"], + callback=self._set_current_fiducial, + vertical=False, + layout=mri_fiducials_layout, + ) + fiducial_coords_layout = self._renderer._dock_add_layout() + for coord in ("X", "Y", "Z"): + name = f"fid_{coord}" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=coord, + value=0., + rng=[-1e3, 1e3], + callback=partial( + self._set_fiducial, + coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} fiducial coordinate", + layout=fiducial_coords_layout, + ) + self._renderer._layout_add_widget( + mri_fiducials_layout, fiducial_coords_layout) + + dig_source_layout = self._renderer._dock_add_group_box( + name="Info source with digitization", + collapse=collapse, + ) + self._widgets["info_file"] = self._renderer._dock_add_file_button( + name="info_file", + desc="Load", + func=self._set_info_file, + value=self._info_file, + placeholder="Path to info", + tooltip="Load the FIFF file with digitization data for " + "coregistration", + layout=dig_source_layout, + ) + self._widgets["grow_hair"] = self._renderer._dock_add_spin_box( + name="Grow Hair (mm)", + value=self._grow_hair, + rng=[0.0, 10.0], + callback=self._set_grow_hair, + tooltip="Compensate for hair on the digitizer head shape", + layout=dig_source_layout, + ) + omit_hsp_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["omit_distance"] = self._renderer._dock_add_spin_box( + name="Omit Distance (mm)", + value=self._omit_hsp_distance, + rng=[0.0, 100.0], + callback=self._set_omit_hsp_distance, + tooltip="Set the head shape points exclusion distance", + layout=omit_hsp_layout, + ) + self._widgets["omit"] = self._renderer._dock_add_button( + name="Omit", + callback=self._omit_hsp, + tooltip="Exclude the head shape points that are far away from " + "the MRI head", + layout=omit_hsp_layout, + ) + self._widgets["reset_omit"] = self._renderer._dock_add_button( + name="Reset", + callback=self._reset_omit_hsp_filter, + tooltip="Reset all excluded head shape points", + layout=omit_hsp_layout, + ) + self._renderer._layout_add_widget(dig_source_layout, omit_hsp_layout) + + view_options_layout = self._renderer._dock_add_group_box( + name="View Options", + collapse=collapse, + ) + self._widgets["helmet"] = self._renderer._dock_add_check_box( + name="Show MEG helmet", + value=self._helmet, + callback=self._set_helmet, + tooltip="Enable/Disable MEG helmet", + layout=view_options_layout, + ) + self._widgets["high_res_head"] = self._renderer._dock_add_check_box( + name="Show high-resolution head", + value=self._head_resolution, + callback=self._set_head_resolution, + tooltip="Enable/Disable high resolution head surface", + layout=view_options_layout, + ) + self._widgets["head_opacity"] = self._renderer._dock_add_slider( + name="Head opacity", + value=self._head_opacity, + rng=[0.25, 1.0], + callback=self._set_head_opacity, + compact=True, + double=True, + layout=view_options_layout, + ) + self._renderer._dock_add_stretch() + + self._renderer._dock_initialize( + name="Parameters", area="right", max_width="350px" + ) + mri_scaling_layout = 
self._renderer._dock_add_group_box( + name="MRI Scaling", + collapse=collapse, + ) + self._widgets["scaling_mode"] = self._renderer._dock_add_combo_box( + name="Scaling Mode", + value=self._defaults["scale_mode"], + rng=self._defaults["scale_modes"], + callback=self._set_scale_mode, + tooltip="Select the scaling mode", + compact=True, + layout=mri_scaling_layout, + ) + scale_params_layout = self._renderer._dock_add_group_box( + name="Scaling Parameters", + layout=mri_scaling_layout, + ) + coords = ["X", "Y", "Z"] + for coord in coords: + name = f"s{coord}" + attr = getattr(self.coreg, "_scale") + self._widgets[name] = self._renderer._dock_add_spin_box( + name=name, + value=attr[coords.index(coord)] * 1e2, + rng=[1., 10000.], # percent + callback=partial( + self._set_parameter, + mode_name="scale", + coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} scaling parameter (in %)", + layout=scale_params_layout, + ) + + fit_scale_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["fits_fiducials"] = self._renderer._dock_add_button( + name="Fit fiducials with scaling", + callback=self._fits_fiducials, + tooltip="Find MRI scaling, rotation, and translation to fit all " + "3 fiducials", + layout=fit_scale_layout, + ) + self._widgets["fits_icp"] = self._renderer._dock_add_button( + name="Fit ICP with scaling", + callback=self._fits_icp, + tooltip="Find MRI scaling, rotation, and translation to match the " + "head shape points", + layout=fit_scale_layout, + ) + self._renderer._layout_add_widget( + scale_params_layout, fit_scale_layout) + subject_to_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["subject_to"] = self._renderer._dock_add_text( + name="subject-to", + value=self._subject_to, + placeholder="subject name", + callback=self._set_subject_to, + layout=subject_to_layout, + ) + self._widgets["save_subject"] = self._renderer._dock_add_button( + name="Save scaled anatomy", + callback=self._task_save_subject, + tooltip="Save scaled anatomy", + layout=subject_to_layout, + ) + self._renderer._layout_add_widget( + mri_scaling_layout, subject_to_layout) + param_layout = self._renderer._dock_add_group_box( + name="Translation (t) and Rotation (r)", + collapse=collapse, + ) + for coord in coords: + coord_layout = self._renderer._dock_add_layout(vertical=False) + for mode, mode_name in (("t", "Translation"), ("r", "Rotation")): + name = f"{mode}{coord}" + attr = getattr(self.coreg, f"_{mode_name.lower()}") + rng = [-360, 360] if mode_name == "Rotation" else [-100, 100] + unit = "°" if mode_name == "Rotation" else "mm" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=name, + value=attr[coords.index(coord)] * 1e3, + rng=np.array(rng), + callback=partial( + self._task_set_parameter, + mode_name=mode_name.lower(), + coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} {mode_name.lower()}" + f" parameter (in {unit})", + layout=coord_layout + ) + self._renderer._layout_add_widget(param_layout, coord_layout) + + fit_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["fit_fiducials"] = self._renderer._dock_add_button( + name="Fit fiducials", + callback=self._fit_fiducials, + tooltip="Find rotation and translation to fit all 3 fiducials", + layout=fit_layout, + ) + self._widgets["fit_icp"] = self._renderer._dock_add_button( + name="Fit ICP", + callback=self._fit_icp, + tooltip="Find rotation and translation to match the " + "head shape points", + 
layout=fit_layout, + ) + self._renderer._layout_add_widget(param_layout, fit_layout) + trans_layout = self._renderer._dock_add_group_box( + name="HEAD <> MRI Transform", + collapse=collapse, + ) + save_trans_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["save_trans"] = self._renderer._dock_add_file_button( + name="save_trans", + desc="Save...", + save=True, + func=self._save_trans, + input_text_widget=False, + tooltip="Save the transform file to disk", + layout=save_trans_layout, + filter='Head->MRI transformation (*-trans.fif *_trans.fif)', + initial_directory=str(Path(self._info_file).parent), + ) + self._widgets["load_trans"] = self._renderer._dock_add_file_button( + name="load_trans", + desc="Load...", + func=self._load_trans, + input_text_widget=False, + tooltip="Load the transform file from disk", + layout=save_trans_layout, + filter='Head->MRI transformation (*-trans.fif *_trans.fif)', + initial_directory=str(Path(self._info_file).parent), + ) + self._widgets["reset_trans"] = self._renderer._dock_add_button( + name="Reset", + callback=self._reset, + tooltip="Reset all the parameters affecting the coregistration", + layout=save_trans_layout, + ) + self._renderer._layout_add_widget(trans_layout, save_trans_layout) + + fitting_options_layout = self._renderer._dock_add_group_box( + name="Fitting Options", + collapse=collapse, + ) + self._widgets["fit_label"] = self._renderer._dock_add_label( + value="", + layout=fitting_options_layout, + ) + self._widgets["icp_n_iterations"] = self._renderer._dock_add_spin_box( + name="Number Of ICP Iterations", + value=self._defaults["icp_n_iterations"], + rng=[1, 100], + callback=self._set_icp_n_iterations, + compact=True, + double=False, + tooltip="Set the number of ICP iterations", + layout=fitting_options_layout, + ) + self._widgets["icp_fid_match"] = self._renderer._dock_add_combo_box( + name="Fiducial point matching", + value=self._defaults["icp_fid_match"], + rng=self._defaults["icp_fid_matches"], + callback=self._set_icp_fid_match, + compact=True, + tooltip="Select the fiducial point matching method", + layout=fitting_options_layout, + ) + weights_layout = self._renderer._dock_add_group_box( + name="Weights", + layout=fitting_options_layout, + ) + for point, fid in zip(("HSP", "EEG", "HPI"), + self._defaults["fiducials"]): + weight_layout = self._renderer._dock_add_layout(vertical=False) + point_lower = point.lower() + name = f"{point_lower}_weight" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=point, + value=getattr(self, f"_{point_lower}_weight"), + rng=[0., 100.], + callback=partial(self._set_point_weight, point=point_lower), + compact=True, + double=True, + tooltip=f"Set the {point} weight", + layout=weight_layout, + ) + + fid_lower = fid.lower() + name = f"{fid_lower}_weight" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=fid, + value=getattr(self, f"_{fid_lower}_weight"), + rng=[0., 100.], + callback=partial(self._set_point_weight, point=fid_lower), + compact=True, + double=True, + tooltip=f"Set the {fid} weight", + layout=weight_layout, + ) + self._renderer._layout_add_widget(weights_layout, weight_layout) + self._widgets['reset_fitting_options'] = ( + self._renderer._dock_add_button( + name="Reset Fitting Options", + callback=self._reset_fitting_parameters, + tooltip="Reset all the fitting parameters to default value", + layout=fitting_options_layout, + ) + ) + self._renderer._dock_add_stretch() + + def _configure_status_bar(self): + 
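# a single stretchable status label is created and hidden at startup; it
# only becomes visible when _display_message() gives it text and shows it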
self._renderer._status_bar_initialize() + self._widgets['status_message'] = self._renderer._status_bar_add_label( + "", stretch=1 + ) + self._forward_widget_command( + 'status_message', 'hide', value=None, input_value=False + ) + + def _clean(self): + if not self._accept_close_event: + return + self._renderer = None + self._widgets.clear() + self._actors.clear() + self._surfaces.clear() + self._defaults.clear() + self._head_geo = None + self._redraw = None + + def close(self): + """Close interface and cleanup data structure.""" + self._renderer.close() + + def _close_dialog_callback(self, button_name): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + self._accept_close_event = True + if button_name == "Save": + if self._trans_modified: + self._forward_widget_command( + "save_trans", "set_value", None) + # cancel means _save_trans is not called + if self._trans_modified: + self._accept_close_event = False + if self._mri_fids_modified: + self._forward_widget_command( + "save_mri_fids", "set_value", None) + if self._mri_scale_modified: + if self._subject_to: + self._save_subject(exit_mode=True) + else: + dialog = self._renderer._dialog_warning( + title="CoregistrationUI", + text="The name of the output subject used to " + "save the scaled anatomy is not set.", + info_text="Please set a subject name", + callback=lambda x: None, + buttons=["Ok"], + modal=not MNE_3D_BACKEND_TESTING, + ) + dialog.show() + self._accept_close_event = False + elif button_name == "Cancel": + self._accept_close_event = False + else: + assert button_name == "Discard" + + def _close_callback(self): + if self._trans_modified or self._mri_fids_modified or \ + self._mri_scale_modified: + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + # prepare the dialog's text + text = "The following is/are not saved:" + text += "
    " + if self._trans_modified: + text += "
  • Head<>MRI transform
  • " + if self._mri_fids_modified: + text += "
  • MRI fiducials
  • " + if self._mri_scale_modified: + text += "
  • scaled subject MRI
  • " + text += "
" + self._widgets["close_dialog"] = self._renderer._dialog_warning( + title="CoregistrationUI", + text=text, + info_text="Do you want to save?", + callback=self._close_dialog_callback, + buttons=["Save", "Discard", "Cancel"], + # modal=True means that the dialog blocks the application + # when show() is called, until one of the buttons is clicked + modal=not MNE_3D_BACKEND_TESTING, + ) + self._widgets["close_dialog"].show() + return self._accept_close_event diff --git a/python/libs/mne/gui/_ieeg_locate_gui.py b/python/libs/mne/gui/_ieeg_locate_gui.py new file mode 100644 index 0000000..7fe77ac --- /dev/null +++ b/python/libs/mne/gui/_ieeg_locate_gui.py @@ -0,0 +1,1111 @@ +# -*- coding: utf-8 -*- +"""Intracranial elecrode localization GUI for finding contact locations.""" + +# Authors: Alex Rockhill +# +# License: BSD (3-clause) + +import os.path as op +import numpy as np +from functools import partial +import platform + +from scipy.ndimage import maximum_filter + +from PyQt5 import QtCore, QtGui, Qt +from PyQt5.QtCore import pyqtSlot +from PyQt5.QtWidgets import (QMainWindow, QGridLayout, + QVBoxLayout, QHBoxLayout, QLabel, + QMessageBox, QWidget, + QListView, QSlider, QPushButton, + QComboBox, QPlainTextEdit) + +from matplotlib import patheffects +from matplotlib.backends.backend_qt5agg import FigureCanvas +from matplotlib.colors import LinearSegmentedColormap +from matplotlib.figure import Figure +from matplotlib.patches import Rectangle + +from .._freesurfer import _import_nibabel +from ..viz.backends.renderer import _get_renderer +from ..viz.utils import safe_event +from ..surface import _read_mri_surface, _voxel_neighbors, _marching_cubes +from ..transforms import (apply_trans, _frame_to_str, _get_trans, + invert_transform) +from ..utils import logger, _check_fname, _validate_type, verbose, warn +from .. import pick_types + +_IMG_LABELS = [['I', 'P'], ['I', 'L'], ['P', 'L']] +_CH_PLOT_SIZE = 1024 +_ZOOM_STEP_SIZE = 5 +_RADIUS_SCALAR = 0.4 +_TUBE_SCALAR = 0.1 +_BOLT_SCALAR = 30 # mm +_CH_MENU_WIDTH = 30 if platform.system() == 'Windows' else 10 + +# 20 colors generated to be evenly spaced in a cube, worked better than +# matplotlib color cycle +_UNIQUE_COLORS = [(0.1, 0.42, 0.43), (0.9, 0.34, 0.62), (0.47, 0.51, 0.3), + (0.47, 0.55, 0.99), (0.79, 0.68, 0.06), (0.34, 0.74, 0.05), + (0.58, 0.87, 0.13), (0.86, 0.98, 0.4), (0.92, 0.91, 0.66), + (0.77, 0.38, 0.34), (0.9, 0.37, 0.1), (0.2, 0.62, 0.9), + (0.22, 0.65, 0.64), (0.14, 0.94, 0.8), (0.34, 0.31, 0.68), + (0.59, 0.28, 0.74), (0.46, 0.19, 0.94), (0.37, 0.93, 0.7), + (0.56, 0.86, 0.55), (0.67, 0.69, 0.44)] +_N_COLORS = len(_UNIQUE_COLORS) +_CMAP = LinearSegmentedColormap.from_list( + 'ch_colors', _UNIQUE_COLORS, N=_N_COLORS) + + +def _load_image(img, name, verbose=True): + """Load data from a 3D image file (e.g. 
CT, MR).""" + nib = _import_nibabel('use iEEG GUI') + if not isinstance(img, nib.spatialimages.SpatialImage): + if verbose: + logger.info(f'Loading {img}') + _check_fname(img, overwrite='read', must_exist=True, name=name) + img = nib.load(img) + # get data + orig_data = np.array(img.dataobj).astype(np.float32) + # reorient data to RAS + ornt = nib.orientations.axcodes2ornt( + nib.orientations.aff2axcodes(img.affine)).astype(int) + ras_ornt = nib.orientations.axcodes2ornt('RAS') + ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt) + img_data = nib.orientations.apply_orientation(orig_data, ornt_trans) + orig_mgh = nib.MGHImage(orig_data, img.affine) + aff_trans = nib.orientations.inv_ornt_aff(ornt_trans, img.shape) + vox_ras_t = np.dot(orig_mgh.header.get_vox2ras_tkr(), aff_trans) + return img_data, vox_ras_t + + +class ComboBox(QComboBox): + """Dropdown menu that emits a click when popped up.""" + + clicked = QtCore.pyqtSignal() + + def showPopup(self): + """Override show popup method to emit click.""" + self.clicked.emit() + super(ComboBox, self).showPopup() + + +def _make_slice_plot(width=4, height=4, dpi=300): + fig = Figure(figsize=(width, height), dpi=dpi) + canvas = FigureCanvas(fig) + ax = fig.subplots() + fig.subplots_adjust(bottom=0, left=0, right=1, top=1, wspace=0, hspace=0) + ax.set_facecolor('k') + # clean up excess plot text, invert + ax.invert_yaxis() + ax.set_xticks([]) + ax.set_yticks([]) + return canvas, fig + + +class IntracranialElectrodeLocator(QMainWindow): + """Locate electrode contacts using a coregistered MRI and CT.""" + + _xy_idx = ( + (1, 2), + (0, 2), + (0, 1), + ) + + def __init__(self, info, trans, aligned_ct, subject=None, + subjects_dir=None, groups=None, verbose=None): + """GUI for locating intracranial electrodes. + + .. note:: Images will be displayed using orientation information + obtained from the image header. Images will be resampled to + dimensions [256, 256, 256] for display. + """ + # initialize QMainWindow class + super(IntracranialElectrodeLocator, self).__init__() + + if not info.ch_names: + raise ValueError('No channels found in `info` to locate') + + # store info for modification + self._info = info + self._seeg_idx = pick_types(self._info, meg=False, seeg=True) + self._verbose = verbose + + # channel plotting default parameters + self._ch_alpha = 0.5 + self._radius = int(_CH_PLOT_SIZE // 100) # starting 1/100 of image + + # load imaging data + self._subject_dir = op.join(subjects_dir, subject) + self._load_image_data(aligned_ct) + + # initialize channel data + self._ch_index = 0 + # load data, apply trans + self._head_mri_t = _get_trans(trans, 'head', 'mri')[0] + self._mri_head_t = invert_transform(self._head_mri_t) + # load channels, convert from m to mm + self._chs = {name: apply_trans(self._head_mri_t, ch['loc'][:3]) * 1000 + for name, ch in zip(info.ch_names, info['chs'])} + self._ch_names = list(self._chs.keys()) + # set current position + if np.isnan(self._chs[self._ch_names[self._ch_index]]).any(): + ras = [0., 0., 0.] 
+ else: + ras = self._chs[self._ch_names[self._ch_index]] + self._set_ras(ras, update_plots=False) + self._group_channels(groups) + + # GUI design + + # Main plots: make one plot for each view; sagittal, coronal, axial + plt_grid = QGridLayout() + plts = [_make_slice_plot(), _make_slice_plot(), _make_slice_plot()] + self._figs = [plts[0][1], plts[1][1], plts[2][1]] + plt_grid.addWidget(plts[0][0], 0, 0) + plt_grid.addWidget(plts[1][0], 0, 1) + plt_grid.addWidget(plts[2][0], 1, 0) + self._renderer = _get_renderer( + name='IEEG Locator', size=(400, 400), bgcolor='w') + plt_grid.addWidget(self._renderer.plotter) + + # Channel selector + self._ch_list = QListView() + self._ch_list.setSelectionMode(Qt.QAbstractItemView.SingleSelection) + max_ch_name_len = max([len(name) for name in self._chs]) + self._ch_list.setMinimumWidth(max_ch_name_len * _CH_MENU_WIDTH) + self._ch_list.setMaximumWidth(max_ch_name_len * _CH_MENU_WIDTH) + self._set_ch_names() + + # Plots + self._plot_images() + + # Menus + button_hbox = self._get_button_bar() + slider_hbox = self._get_slider_bar() + bottom_hbox = self._get_bottom_bar() + + # Add lines + self._lines = dict() + self._lines_2D = dict() + for group in set(self._groups.values()): + self._update_lines(group) + + # Put everything together + plot_ch_hbox = QHBoxLayout() + plot_ch_hbox.addLayout(plt_grid) + plot_ch_hbox.addWidget(self._ch_list) + + main_vbox = QVBoxLayout() + main_vbox.addLayout(button_hbox) + main_vbox.addLayout(slider_hbox) + main_vbox.addLayout(plot_ch_hbox) + main_vbox.addLayout(bottom_hbox) + + central_widget = QWidget() + central_widget.setLayout(main_vbox) + self.setCentralWidget(central_widget) + + # ready for user + self._move_cursors_to_pos() + self._ch_list.setFocus() # always focus on list + + def _load_image_data(self, ct): + """Get MRI and CT data to display and transforms to/from vox/RAS.""" + # allows recon-all not to be finished (T1 made in a few minutes) + mri_img = 'brain' if op.isfile(op.join( + self._subject_dir, 'mri', 'brain.mgz')) else 'T1' + self._mri_data, self._vox_ras_t = _load_image( + op.join(self._subject_dir, 'mri', f'{mri_img}.mgz'), + 'MRI Image', verbose=self._verbose) + self._ras_vox_t = np.linalg.inv(self._vox_ras_t) + + self._voxel_sizes = np.array(self._mri_data.shape) + # We need our extents to land the centers of each pixel on the voxel + # number. This code assumes 1mm isotropic... 
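# i.e. pad each extent by half a voxel so that integer voxel indices land
# exactly on pixel centers when the slices are drawn with imshow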
+ img_delta = 0.5 + self._img_extents = list( + [-img_delta, self._voxel_sizes[idx[0]] - img_delta, + -img_delta, self._voxel_sizes[idx[1]] - img_delta] + for idx in self._xy_idx) + ch_deltas = list(img_delta * (self._voxel_sizes[ii] / _CH_PLOT_SIZE) + for ii in range(3)) + self._ch_extents = list( + [-ch_delta, self._voxel_sizes[idx[0]] - ch_delta, + -ch_delta, self._voxel_sizes[idx[1]] - ch_delta] + for idx, ch_delta in zip(self._xy_idx, ch_deltas)) + + # ready ct + self._ct_data, vox_ras_t = _load_image(ct, 'CT', verbose=self._verbose) + if self._mri_data.shape != self._ct_data.shape or \ + not np.allclose(self._vox_ras_t, vox_ras_t, rtol=1e-6): + raise ValueError('CT is not aligned to MRI, got ' + f'CT shape={self._ct_data.shape}, ' + f'MRI shape={self._mri_data.shape}, ' + f'CT affine={vox_ras_t} and ' + f'MRI affine={self._vox_ras_t}') + self._ct_maxima = None # don't compute until turned on + + if op.exists(op.join(self._subject_dir, 'surf', 'lh.seghead')): + self._head = _read_mri_surface( + op.join(self._subject_dir, 'surf', 'lh.seghead')) + assert _frame_to_str[self._head['coord_frame']] == 'mri' + else: + warn('`seghead` not found, using marching cubes on CT for ' + 'head plot, use :ref:`mne.bem.make_scalp_surfaces` ' + 'to add the scalp surface instead of skull from the CT') + self._head = None + if op.exists(op.join(self._subject_dir, 'surf', 'lh.pial')): + self._lh = _read_mri_surface( + op.join(self._subject_dir, 'surf', 'lh.pial')) + assert _frame_to_str[self._lh['coord_frame']] == 'mri' + self._rh = _read_mri_surface( + op.join(self._subject_dir, 'surf', 'rh.pial')) + assert _frame_to_str[self._rh['coord_frame']] == 'mri' + else: + warn('`pial` surface not found, skipping adding to 3D ' + 'plot. This indicates the Freesurfer recon-all ' + 'has not finished or has been modified and ' + 'these files have been deleted.') + self._lh = self._rh = None + + def _make_ch_image(self, axis, proj=False): + """Make a plot to display the channel locations.""" + # Make channel data higher resolution so it looks better. + ch_image = np.zeros((_CH_PLOT_SIZE, _CH_PLOT_SIZE)) * np.nan + vxyz = self._voxel_sizes + + def color_ch_radius(ch_image, xf, yf, group, radius): + # Take the fraction across each dimension of the RAS + # coordinates converted to xyz and put a circle in that + # position in this larger resolution image + ex, ey = np.round(np.array([xf, yf]) * _CH_PLOT_SIZE).astype(int) + ii = np.arange(-radius, radius + 1) + ii_sq = ii * ii + idx = np.where(ii_sq + ii_sq[:, np.newaxis] < radius * radius) + # negative y because y axis is inverted + ch_image[-(ey + ii[idx[1]]), ex + ii[idx[0]]] = group + return ch_image + + for name, ras in self._chs.items(): + # move from middle-centered (half coords positive, half negative) + # to bottom-left corner centered (all coords positive). 
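# (each RAS position is converted to voxel coordinates, then to a 0-1
# fraction of the volume, which color_ch_radius scales up to the
# _CH_PLOT_SIZE canvas)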
+ if np.isnan(ras).any(): + continue + xyz = apply_trans(self._ras_vox_t, ras) + # check if closest to that voxel + dist = np.linalg.norm(xyz - self._current_slice) + if proj or dist < self._radius: + group = self._groups[name] + r = self._radius if proj else \ + self._radius - np.round(abs(dist)).astype(int) + xf, yf = (xyz / vxyz)[list(self._xy_idx[axis])] + ch_image = color_ch_radius(ch_image, xf, yf, group, r) + return ch_image + + @verbose + def _save_ch_coords(self, info=None, verbose=None): + """Save the location of the electrode contacts.""" + logger.info('Saving channel positions to `info`') + if info is None: + info = self._info + with info._unlock(): + for name, ch in zip(info.ch_names, info['chs']): + ch['loc'][:3] = apply_trans( + self._mri_head_t, self._chs[name] / 1000) # mm->m + + def _plot_images(self): + """Use the MRI and CT to make plots.""" + # Plot sagittal (0), coronal (1) or axial (2) view + self._images = dict(ct=list(), chs=list(), ct_bounds=list(), + cursor_v=list(), cursor_h=list()) + ct_min, ct_max = np.nanmin(self._ct_data), np.nanmax(self._ct_data) + text_kwargs = dict(fontsize='medium', weight='bold', color='#66CCEE', + family='monospace', ha='center', va='center', + path_effects=[patheffects.withStroke( + linewidth=4, foreground="k", alpha=0.75)]) + xyz = apply_trans(self._ras_vox_t, self._ras) + for axis in range(3): + plot_x_idx, plot_y_idx = self._xy_idx[axis] + fig = self._figs[axis] + ax = fig.axes[0] + ct_data = np.take(self._ct_data, self._current_slice[axis], + axis=axis).T + self._images['ct'].append(ax.imshow( + ct_data, cmap='gray', aspect='auto', zorder=1, + vmin=ct_min, vmax=ct_max)) + img_extent = self._img_extents[axis] # x0, x1, y0, y1 + w, h = np.diff(np.array(img_extent).reshape(2, 2), axis=1)[:, 0] + self._images['ct_bounds'].append(Rectangle( + img_extent[::2], w, h, edgecolor='w', facecolor='none', + alpha=0.25, lw=0.5, zorder=1.5)) + ax.add_patch(self._images['ct_bounds'][-1]) + self._images['chs'].append(ax.imshow( + self._make_ch_image(axis), aspect='auto', + extent=self._ch_extents[axis], zorder=3, + cmap=_CMAP, alpha=self._ch_alpha, vmin=0, vmax=_N_COLORS)) + v_x = (xyz[plot_x_idx],) * 2 + v_y = img_extent[2:4] + self._images['cursor_v'].append(ax.plot( + v_x, v_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0]) + h_y = (xyz[plot_y_idx],) * 2 + h_x = img_extent[0:2] + self._images['cursor_h'].append(ax.plot( + h_x, h_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0]) + # label axes + self._figs[axis].text(0.5, 0.05, _IMG_LABELS[axis][0], + **text_kwargs) + self._figs[axis].text(0.05, 0.5, _IMG_LABELS[axis][1], + **text_kwargs) + self._figs[axis].axes[0].axis(img_extent) + self._figs[axis].canvas.mpl_connect( + 'scroll_event', self._on_scroll) + self._figs[axis].canvas.mpl_connect( + 'button_release_event', partial(self._on_click, axis=axis)) + # add head and brain in mm (convert from m) + if self._head is None: + logger.info('Using marching cubes on CT for the ' + '3D visualization panel') + rr, tris = _marching_cubes(np.where( + self._ct_data < np.quantile(self._ct_data, 0.95), 0, 1), + [1])[0] + rr = apply_trans(self._vox_ras_t, rr) + self._renderer.mesh( + *rr.T, triangles=tris, color='gray', opacity=0.2, + reset_camera=False, render=False) + else: + self._renderer.mesh( + *self._head['rr'].T * 1000, triangles=self._head['tris'], + color='gray', opacity=0.2, reset_camera=False, render=False) + if self._lh is not None and self._rh is not None: + self._renderer.mesh( + *self._lh['rr'].T * 1000, 
triangles=self._lh['tris'], + color='white', opacity=0.2, reset_camera=False, render=False) + self._renderer.mesh( + *self._rh['rr'].T * 1000, triangles=self._rh['tris'], + color='white', opacity=0.2, reset_camera=False, render=False) + self._3d_chs = dict() + for name in self._chs: + self._plot_3d_ch(name) + self._renderer.set_camera(azimuth=90, elevation=90, distance=300, + focalpoint=tuple(self._ras)) + # update plots + self._draw() + self._renderer._update() + + def _update_camera(self, render=False): + """Update the camera position.""" + self._renderer.set_camera( + # needs fix, distance moves when focal point updates + distance=self._renderer.plotter.camera.distance * 0.9, + focalpoint=tuple(self._ras), + reset_camera=False) + + def _plot_3d_ch(self, name, render=False): + """Plot a single 3D channel.""" + if name in self._3d_chs: + self._renderer.plotter.remove_actor( + self._3d_chs.pop(name), render=False) + if not any(np.isnan(self._chs[name])): + self._3d_chs[name] = self._renderer.sphere( + tuple(self._chs[name]), scale=1, + color=_CMAP(self._groups[name])[:3], opacity=self._ch_alpha)[0] + # The actor scale is managed differently than the glyph scale + # in order not to recreate objects, we use the actor scale + self._3d_chs[name].SetOrigin(self._chs[name]) + self._3d_chs[name].SetScale(self._radius * _RADIUS_SCALAR) + if render: + self._renderer._update() + + def _get_button_bar(self): + """Make a bar with buttons for user interactions.""" + hbox = QHBoxLayout() + + help_button = QPushButton('Help') + help_button.released.connect(self._show_help) + hbox.addWidget(help_button) + + hbox.addStretch(8) + + hbox.addWidget(QLabel('Snap to Center')) + self._snap_button = QPushButton('Off') + self._snap_button.setMaximumWidth(25) # not too big + hbox.addWidget(self._snap_button) + self._snap_button.released.connect(self._toggle_snap) + self._toggle_snap() # turn on to start + + hbox.addStretch(1) + + self._toggle_brain_button = QPushButton('Show Brain') + self._toggle_brain_button.released.connect(self._toggle_show_brain) + hbox.addWidget(self._toggle_brain_button) + + hbox.addStretch(1) + + mark_button = QPushButton('Mark') + hbox.addWidget(mark_button) + mark_button.released.connect(self._mark_ch) + + remove_button = QPushButton('Remove') + hbox.addWidget(remove_button) + remove_button.released.connect(self._remove_ch) + + self._group_selector = ComboBox() + group_model = self._group_selector.model() + + for i in range(_N_COLORS): + self._group_selector.addItem(' ') + color = QtGui.QColor() + color.setRgb(*(255 * np.array(_CMAP(i))).round().astype(int)) + brush = QtGui.QBrush(color) + brush.setStyle(QtCore.Qt.SolidPattern) + group_model.setData(group_model.index(i, 0), + brush, QtCore.Qt.BackgroundRole) + self._group_selector.clicked.connect(self._select_group) + self._group_selector.currentIndexChanged.connect( + self._select_group) + hbox.addWidget(self._group_selector) + + # update background color for current selection + self._update_group() + + return hbox + + def _get_slider_bar(self): + """Make a bar with sliders on it.""" + + def make_label(name): + label = QLabel(name) + label.setAlignment(QtCore.Qt.AlignCenter) + return label + + def make_slider(smin, smax, sval, sfun=None): + slider = QSlider(QtCore.Qt.Horizontal) + slider.setMinimum(int(round(smin))) + slider.setMaximum(int(round(smax))) + slider.setValue(int(round(sval))) + slider.setTracking(False) # only update on release + if sfun is not None: + slider.valueChanged.connect(sfun) + slider.keyPressEvent = 
self._key_press_event + return slider + + slider_hbox = QHBoxLayout() + + ch_vbox = QVBoxLayout() + ch_vbox.addWidget(make_label('ch alpha')) + ch_vbox.addWidget(make_label('ch radius')) + slider_hbox.addLayout(ch_vbox) + + ch_slider_vbox = QVBoxLayout() + self._alpha_slider = make_slider(0, 100, self._ch_alpha * 100, + self._update_ch_alpha) + ch_plot_max = _CH_PLOT_SIZE // 50 # max 1 / 50 of plot size + ch_slider_vbox.addWidget(self._alpha_slider) + self._radius_slider = make_slider(0, ch_plot_max, self._radius, + self._update_radius) + ch_slider_vbox.addWidget(self._radius_slider) + slider_hbox.addLayout(ch_slider_vbox) + + ct_vbox = QVBoxLayout() + ct_vbox.addWidget(make_label('CT min')) + ct_vbox.addWidget(make_label('CT max')) + slider_hbox.addLayout(ct_vbox) + + ct_slider_vbox = QVBoxLayout() + ct_min = int(round(np.nanmin(self._ct_data))) + ct_max = int(round(np.nanmax(self._ct_data))) + self._ct_min_slider = make_slider( + ct_min, ct_max, ct_min, self._update_ct_scale) + ct_slider_vbox.addWidget(self._ct_min_slider) + self._ct_max_slider = make_slider( + ct_min, ct_max, ct_max, self._update_ct_scale) + ct_slider_vbox.addWidget(self._ct_max_slider) + slider_hbox.addLayout(ct_slider_vbox) + return slider_hbox + + def _get_bottom_bar(self): + """Make a bar at the bottom with information in it.""" + hbox = QHBoxLayout() + + hbox.addStretch(3) + + self._toggle_show_mip_button = QPushButton('Show Max Intensity Proj') + self._toggle_show_mip_button.released.connect( + self._toggle_show_mip) + hbox.addWidget(self._toggle_show_mip_button) + + self._toggle_show_max_button = QPushButton('Show Maxima') + self._toggle_show_max_button.released.connect( + self._toggle_show_max) + hbox.addWidget(self._toggle_show_max_button) + + self._intensity_label = QLabel('') # update later + hbox.addWidget(self._intensity_label) + + VOX_label = QLabel('VOX =') + self._VOX_textbox = QPlainTextEdit('') # update later + self._VOX_textbox.setMaximumHeight(25) + self._VOX_textbox.setMaximumWidth(125) + self._VOX_textbox.focusOutEvent = self._update_VOX + self._VOX_textbox.textChanged.connect(self._check_update_VOX) + hbox.addWidget(VOX_label) + hbox.addWidget(self._VOX_textbox) + + RAS_label = QLabel('RAS =') + self._RAS_textbox = QPlainTextEdit('') # update later + self._RAS_textbox.setMaximumHeight(25) + self._RAS_textbox.setMaximumWidth(200) + self._RAS_textbox.focusOutEvent = self._update_RAS + self._RAS_textbox.textChanged.connect(self._check_update_RAS) + hbox.addWidget(RAS_label) + hbox.addWidget(self._RAS_textbox) + self._update_moved() # update text now + return hbox + + def _group_channels(self, groups): + """Automatically find a group based on the name of the channel.""" + if groups is not None: + for name in self._ch_names: + if name not in groups: + raise ValueError(f'{name} not found in ``groups``') + _validate_type(groups[name], (float, int), f'groups[{name}]') + self.groups = groups + else: + i = 0 + self._groups = dict() + base_names = dict() + for name in self._ch_names: + # strip all numbers from the name + base_name = ''.join([letter for letter in name if + not letter.isdigit() and letter != ' ']) + if base_name in base_names: + # look up group number by base name + self._groups[name] = base_names[base_name] + else: + self._groups[name] = i + base_names[base_name] = i + i += 1 + + def _update_lines(self, group, only_2D=False): + """Draw lines that connect the points in a group.""" + if group in self._lines_2D: # remove existing 2D lines first + for line in self._lines_2D[group]: + 
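
Note: `_group_channels` above auto-groups contacts by stripping digits and spaces from each channel name, so every name sharing an alphabetic stem lands in the same numeric group. A stand-alone version of that rule, runnable as-is:

    def group_channels(ch_names):
        groups, base_names, i = {}, {}, 0
        for name in ch_names:
            # strip all digits and spaces to get the electrode stem
            base = ''.join(c for c in name if not c.isdigit() and c != ' ')
            if base not in base_names:
                base_names[base] = i
                i += 1
            groups[name] = base_names[base]
        return groups

    print(group_channels(['LAMY 1', 'LAMY 2', 'LSTN 1', 'LSTN 2']))
    # {'LAMY 1': 0, 'LAMY 2': 0, 'LSTN 1': 1, 'LSTN 2': 1}
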
line.remove() + self._lines_2D.pop(group) + if only_2D: # if not in projection, don't add 2D lines + if self._toggle_show_mip_button.text() == \ + 'Show Max Intensity Proj': + return + elif group in self._lines: # if updating 3D, remove first + self._renderer.plotter.remove_actor( + self._lines[group], render=False) + pos = np.array([ + self._chs[ch] for i, ch in enumerate(self._ch_names) + if self._groups[ch] == group and i in self._seeg_idx and + not np.isnan(self._chs[ch]).any()]) + if len(pos) < 2: # not enough points for line + return + # first, the insertion will be the point farthest from the origin + # brains are a longer posterior-anterior, scale for this (80%) + insert_idx = np.argmax(np.linalg.norm(pos * np.array([1, 0.8, 1]), + axis=1)) + # second, find the farthest point from the insertion + target_idx = np.argmax(np.linalg.norm(pos[insert_idx] - pos, axis=1)) + # third, make a unit vector and to add to the insertion for the bolt + elec_v = pos[insert_idx] - pos[target_idx] + elec_v /= np.linalg.norm(elec_v) + if not only_2D: + self._lines[group] = self._renderer.tube( + [pos[target_idx]], [pos[insert_idx] + elec_v * _BOLT_SCALAR], + radius=self._radius * _TUBE_SCALAR, color=_CMAP(group)[:3])[0] + if self._toggle_show_mip_button.text() == 'Hide Max Intensity Proj': + # add 2D lines on each slice plot if in max intensity projection + target_vox = apply_trans(self._ras_vox_t, pos[target_idx]) + insert_vox = apply_trans(self._ras_vox_t, + pos[insert_idx] + elec_v * _BOLT_SCALAR) + lines_2D = list() + for axis in range(3): + x, y = self._xy_idx[axis] + lines_2D.append(self._figs[axis].axes[0].plot( + [target_vox[x], insert_vox[x]], + [target_vox[y], insert_vox[y]], + color=_CMAP(group), linewidth=0.25, zorder=7)[0]) + self._lines_2D[group] = lines_2D + + def _set_ch_names(self): + """Add the channel names to the selector.""" + self._ch_list_model = QtGui.QStandardItemModel(self._ch_list) + for name in self._ch_names: + self._ch_list_model.appendRow(QtGui.QStandardItem(name)) + self._color_list_item(name=name) + self._ch_list.setModel(self._ch_list_model) + self._ch_list.clicked.connect(self._go_to_ch) + self._ch_list.setCurrentIndex( + self._ch_list_model.index(self._ch_index, 0)) + self._ch_list.keyPressEvent = self._key_press_event + + def _select_group(self): + """Change the group label to the selection.""" + group = self._group_selector.currentIndex() + self._groups[self._ch_names[self._ch_index]] = group + # color differently if found already + self._color_list_item(self._ch_names[self._ch_index]) + self._update_group() + + def _update_group(self): + """Set background for closed group menu.""" + group = self._group_selector.currentIndex() + rgb = (255 * np.array(_CMAP(group))).round().astype(int) + self._group_selector.setStyleSheet( + 'background-color: rgb({:d},{:d},{:d})'.format(*rgb)) + self._group_selector.update() + + def _on_scroll(self, event): + """Process mouse scroll wheel event to zoom.""" + self._zoom(event.step, draw=True) + + def _zoom(self, sign=1, draw=False): + """Zoom in on the image.""" + delta = _ZOOM_STEP_SIZE * sign + for axis, fig in enumerate(self._figs): + xmid = self._images['cursor_v'][axis].get_xdata()[0] + ymid = self._images['cursor_h'][axis].get_ydata()[0] + xmin, xmax = fig.axes[0].get_xlim() + ymin, ymax = fig.axes[0].get_ylim() + xwidth = (xmax - xmin) / 2 - delta + ywidth = (ymax - ymin) / 2 - delta + if xwidth <= 0 or ywidth <= 0: + return + fig.axes[0].set_xlim(xmid - xwidth, xmid + xwidth) + fig.axes[0].set_ylim(ymid - ywidth, ymid + 
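
Note: the endpoint heuristic in `_update_lines` is worth isolating: the insertion point is the contact farthest from the origin after shrinking the posterior-anterior (y) axis by 20% (brains are longer front-to-back), and the target is the contact farthest from the insertion. The same arithmetic on illustrative RAS-mm positions:

    import numpy as np

    pos = np.array([[30., -20., 40.],
                    [33., -18., 44.],
                    [36., -16., 48.],
                    [39., -14., 52.]])

    insert_idx = np.argmax(np.linalg.norm(pos * np.array([1, 0.8, 1]), axis=1))
    target_idx = np.argmax(np.linalg.norm(pos[insert_idx] - pos, axis=1))
    elec_v = pos[insert_idx] - pos[target_idx]
    elec_v /= np.linalg.norm(elec_v)  # unit vector pointing out along the bolt
    print(insert_idx, target_idx, elec_v)
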
ywidth) + if draw: + self._figs[axis].canvas.draw() + + def _update_ch_selection(self): + """Update which channel is selected.""" + name = self._ch_names[self._ch_index] + self._ch_list.setCurrentIndex( + self._ch_list_model.index(self._ch_index, 0)) + self._group_selector.setCurrentIndex(self._groups[name]) + self._update_group() + if not np.isnan(self._chs[name]).any(): + self._set_ras(self._chs[name]) + self._update_camera(render=True) + self._draw() + + def _go_to_ch(self, index): + """Change current channel to the item selected.""" + self._ch_index = index.row() + self._update_ch_selection() + + @pyqtSlot() + def _next_ch(self): + """Increment the current channel selection index.""" + self._ch_index = (self._ch_index + 1) % len(self._ch_names) + self._update_ch_selection() + + @pyqtSlot() + def _update_RAS(self, event): + """Interpret user input to the RAS textbox.""" + text = self._RAS_textbox.toPlainText() + ras = self._convert_text(text, 'ras') + if ras is not None: + self._set_ras(ras) + + @pyqtSlot() + def _update_VOX(self, event): + """Interpret user input to the RAS textbox.""" + text = self._VOX_textbox.toPlainText() + ras = self._convert_text(text, 'vox') + if ras is not None: + self._set_ras(ras) + + def _convert_text(self, text, text_kind): + text = text.replace('\n', '') + vals = text.split(',') + if len(vals) != 3: + vals = text.split(' ') # spaces also okay as in freesurfer + vals = [var.lstrip().rstrip() for var in vals] + try: + vals = np.array([float(var) for var in vals]).reshape(3) + except Exception: + self._update_moved() # resets RAS label + return + if text_kind == 'vox': + vox = vals + ras = apply_trans(self._vox_ras_t, vox) + else: + assert text_kind == 'ras' + ras = vals + vox = apply_trans(self._ras_vox_t, ras) + wrong_size = any(var < 0 or var > n - 1 for var, n in + zip(vox, self._voxel_sizes)) + if wrong_size: + self._update_moved() # resets RAS label + return + return ras + + @property + def _ras(self): + return self._ras_safe + + def _set_ras(self, ras, update_plots=True): + ras = np.asarray(ras, dtype=float) + assert ras.shape == (3,) + msg = ', '.join(f'{x:0.2f}' for x in ras) + logger.debug(f'Trying RAS: ({msg}) mm') + # clip to valid + vox = apply_trans(self._ras_vox_t, ras) + vox = np.array([ + np.clip(d, 0, self._voxel_sizes[ii] - 1) + for ii, d in enumerate(vox)]) + # transform back, make write-only + self._ras_safe = apply_trans(self._vox_ras_t, vox) + self._ras_safe.flags['WRITEABLE'] = False + msg = ', '.join(f'{x:0.2f}' for x in self._ras_safe) + logger.debug(f'Setting RAS: ({msg}) mm') + if update_plots: + self._move_cursors_to_pos() + + @property + def _vox(self): + return apply_trans(self._ras_vox_t, self._ras) + + @property + def _current_slice(self): + return self._vox.round().astype(int) + + @pyqtSlot() + def _check_update_RAS(self): + """Check whether the RAS textbox is done being edited.""" + if '\n' in self._RAS_textbox.toPlainText(): + self._update_RAS(event=None) + self._ch_list.setFocus() # remove focus from text edit + + @pyqtSlot() + def _check_update_VOX(self): + """Check whether the VOX textbox is done being edited.""" + if '\n' in self._VOX_textbox.toPlainText(): + self._update_VOX(event=None) + self._ch_list.setFocus() # remove focus from text edit + + def _color_list_item(self, name=None): + """Color the item in the view list for easy id of marked channels.""" + name = self._ch_names[self._ch_index] if name is None else name + color = QtGui.QColor('white') + if not np.isnan(self._chs[name]).any(): + group = 
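
Note: `_set_ras` above clamps the requested position to the volume and then freezes the stored array, which is why callers must go through `_set_ras` rather than mutate `_ras` in place (the tests later assert exactly this read-only error). The guard reduced to its essentials, with identity RAS/voxel transforms so the snippet stays self-contained:

    import numpy as np

    voxel_sizes = (256, 256, 256)
    ras = np.array([300.0, -5.0, 10.0])         # deliberately out of bounds
    vox = ras.copy()                            # pretend ras->vox is identity
    vox = np.clip(vox, 0, np.array(voxel_sizes) - 1)
    ras_safe = vox.copy()                       # pretend vox->ras is identity
    ras_safe.flags['WRITEABLE'] = False         # freeze, as _set_ras does

    try:
        ras_safe[0] = 0.0
    except ValueError as err:
        print('blocked in-place edit:', err)
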
self._groups[name] + color.setRgb(*[int(c * 255) for c in _CMAP(group)]) + brush = QtGui.QBrush(color) + brush.setStyle(QtCore.Qt.SolidPattern) + self._ch_list_model.setData( + self._ch_list_model.index(self._ch_names.index(name), 0), + brush, QtCore.Qt.BackgroundRole) + # color text black + color = QtGui.QColor('black') + brush = QtGui.QBrush(color) + brush.setStyle(QtCore.Qt.SolidPattern) + self._ch_list_model.setData( + self._ch_list_model.index(self._ch_names.index(name), 0), + brush, QtCore.Qt.ForegroundRole) + + @pyqtSlot() + def _toggle_snap(self): + """Toggle snapping the contact location to the center of mass.""" + if self._snap_button.text() == 'Off': + self._snap_button.setText('On') + self._snap_button.setStyleSheet("background-color: green") + else: # text == 'On', turn off + self._snap_button.setText('Off') + self._snap_button.setStyleSheet("background-color: red") + + @pyqtSlot() + def _mark_ch(self): + """Mark the current channel as being located at the crosshair.""" + name = self._ch_names[self._ch_index] + if self._snap_button.text() == 'Off': + self._chs[name][:] = self._ras + else: + shape = np.mean(self._mri_data.shape) # Freesurfer shape (256) + voxels_max = int( + 4 / 3 * np.pi * (shape * self._radius / _CH_PLOT_SIZE)**3) + neighbors = _voxel_neighbors( + self._vox, self._ct_data, thresh=0.5, + voxels_max=voxels_max, use_relative=True) + self._chs[name][:] = apply_trans( # to surface RAS + self._vox_ras_t, np.array(list(neighbors)).mean(axis=0)) + self._color_list_item() + self._update_lines(self._groups[name]) + self._update_ch_images(draw=True) + self._plot_3d_ch(name, render=True) + self._save_ch_coords() + self._next_ch() + self._ch_list.setFocus() + + @pyqtSlot() + def _remove_ch(self): + """Remove the location data for the current channel.""" + name = self._ch_names[self._ch_index] + self._chs[name] *= np.nan + self._color_list_item() + self._save_ch_coords() + self._update_lines(self._groups[name]) + self._update_ch_images(draw=True) + self._plot_3d_ch(name, render=True) + self._next_ch() + self._ch_list.setFocus() + + def _draw(self, axis=None): + """Update the figures with a draw call.""" + for axis in (range(3) if axis is None else [axis]): + self._figs[axis].canvas.draw() + + def _update_ch_images(self, axis=None, draw=False): + """Update the channel image(s).""" + for axis in range(3) if axis is None else [axis]: + self._images['chs'][axis].set_data( + self._make_ch_image(axis)) + if self._toggle_show_mip_button.text() == \ + 'Hide Max Intensity Proj': + self._images['mip_chs'][axis].set_data( + self._make_ch_image(axis, proj=True)) + if draw: + self._draw(axis) + + def _update_ct_images(self, axis=None, draw=False): + """Update the CT image(s).""" + for axis in range(3) if axis is None else [axis]: + ct_data = np.take(self._ct_data, self._current_slice[axis], + axis=axis).T + # Threshold the CT so only bright objects (electrodes) are visible + ct_data[ct_data < self._ct_min_slider.value()] = np.nan + ct_data[ct_data > self._ct_max_slider.value()] = np.nan + self._images['ct'][axis].set_data(ct_data) + if 'local_max' in self._images: + ct_max_data = np.take( + self._ct_maxima, self._current_slice[axis], axis=axis).T + self._images['local_max'][axis].set_data(ct_max_data) + if draw: + self._draw(axis) + + def _update_mri_images(self, axis=None, draw=False): + """Update the CT image(s).""" + if 'mri' in self._images: + for axis in range(3) if axis is None else [axis]: + self._images['mri'][axis].set_data( + np.take(self._mri_data, self._current_slice[axis], 
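
Note: the snap-to-center branch of `_mark_ch` grows a relatively thresholded neighborhood around the current voxel and moves the contact to its center of mass. A simplified stand-in using `scipy.ndimage.label` in place of MNE's private `_voxel_neighbors` helper (so the growth rule differs in detail, but the idea is the same):

    import numpy as np
    from scipy import ndimage

    ct = np.zeros((9, 9, 9))
    ct[3:6, 3:6, 3:6] = 1000.0          # a bright synthetic contact
    seed = (4, 5, 4)                    # a click slightly off-center

    mask = ct >= 0.5 * ct[seed]         # relative threshold, as in the GUI
    labels, _ = ndimage.label(mask)
    blob = labels == labels[seed]       # voxels connected to the seed
    center = np.array(np.nonzero(blob)).mean(axis=1)
    print(center)                       # -> [4. 4. 4.], the true center
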
+ axis=axis).T) + if draw: + self._draw(axis) + + def _update_images(self, axis=None, draw=True): + """Update CT and channel images when general changes happen.""" + self._update_ct_images(axis=axis) + self._update_ch_images(axis=axis) + self._update_mri_images(axis=axis) + if draw: + self._draw(axis) + + def _update_ct_scale(self): + """Update CT min slider value.""" + new_min = self._ct_min_slider.value() + new_max = self._ct_max_slider.value() + # handle inversions + self._ct_min_slider.setValue(min([new_min, new_max])) + self._ct_max_slider.setValue(max([new_min, new_max])) + self._update_ct_images(draw=True) + + def _update_radius(self): + """Update channel plot radius.""" + self._radius = np.round(self._radius_slider.value()).astype(int) + if self._toggle_show_max_button.text() == 'Hide Maxima': + self._update_ct_maxima() + self._update_ct_images() + else: + self._ct_maxima = None # signals ct max is out-of-date + self._update_ch_images(draw=True) + for name, actor in self._3d_chs.items(): + if not np.isnan(self._chs[name]).any(): + actor.SetOrigin(self._chs[name]) + actor.SetScale(self._radius * _RADIUS_SCALAR) + self._renderer._update() + self._ch_list.setFocus() # remove focus from 3d plotter + + def _update_ch_alpha(self): + """Update channel plot alpha.""" + self._ch_alpha = self._alpha_slider.value() / 100 + for axis in range(3): + self._images['chs'][axis].set_alpha(self._ch_alpha) + self._draw() + for actor in self._3d_chs.values(): + actor.GetProperty().SetOpacity(self._ch_alpha) + self._renderer._update() + self._ch_list.setFocus() # remove focus from 3d plotter + + def _move_cursors_to_pos(self): + """Move the cursors to a position.""" + for axis in range(3): + x, y = self._vox[list(self._xy_idx[axis])] + self._images['cursor_v'][axis].set_xdata([x, x]) + self._images['cursor_h'][axis].set_ydata([y, y]) + self._zoom(0) # doesn't actually zoom just resets view to center + self._update_images(draw=True) + self._update_moved() + + def _show_help(self): + """Show the help menu.""" + QMessageBox.information( + self, 'Help', + "Help:\n'm': mark channel location\n" + "'r': remove channel location\n" + "'b': toggle viewing of brain in T1\n" + "'+'/'-': zoom\nleft/right arrow: left/right\n" + "up/down arrow: superior/inferior\n" + "left angle bracket/right angle bracket: anterior/posterior") + + def _update_ct_maxima(self): + """Compute the maximum voxels based on the current radius.""" + self._ct_maxima = maximum_filter( + self._ct_data, (self._radius,) * 3) == self._ct_data + self._ct_maxima[self._ct_data <= np.median(self._ct_data)] = \ + False + self._ct_maxima = np.where(self._ct_maxima, 1, np.nan) # transparent + + def _toggle_show_mip(self): + """Toggle whether the maximum-intensity projection is shown.""" + if self._toggle_show_mip_button.text() == 'Show Max Intensity Proj': + self._toggle_show_mip_button.setText('Hide Max Intensity Proj') + self._images['mip'] = list() + self._images['mip_chs'] = list() + ct_min, ct_max = np.nanmin(self._ct_data), np.nanmax(self._ct_data) + for axis in range(3): + ct_mip_data = np.max(self._ct_data, axis=axis).T + self._images['mip'].append( + self._figs[axis].axes[0].imshow( + ct_mip_data, cmap='gray', aspect='auto', + vmin=ct_min, vmax=ct_max, zorder=5)) + # add circles for each channel + xs, ys, colors = list(), list(), list() + for name, ras in self._chs.items(): + xyz = self._vox + xs.append(xyz[self._xy_idx[axis][0]]) + ys.append(xyz[self._xy_idx[axis][1]]) + colors.append(_CMAP(self._groups[name])) + self._images['mip_chs'].append( 
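
Note: `_update_ct_maxima` below implements the 'Show Maxima' overlay with a classic local-maxima test: a voxel survives when it equals the maximum over a radius-sized box around it and is brighter than the median (suppressing background). The same computation on toy data:

    import numpy as np
    from scipy.ndimage import maximum_filter

    rng = np.random.default_rng(0)
    ct = rng.random((20, 20, 20))
    ct[10, 10, 10] = 5.0                       # one clear peak

    radius = 3
    maxima = maximum_filter(ct, (radius,) * 3) == ct
    maxima[ct <= np.median(ct)] = False        # drop dim local maxima
    print(np.argwhere(maxima))                 # includes [10 10 10]
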
+ self._figs[axis].axes[0].imshow( + self._make_ch_image(axis, proj=True), aspect='auto', + extent=self._ch_extents[axis], zorder=6, + cmap=_CMAP, alpha=1, vmin=0, vmax=_N_COLORS)) + for group in set(self._groups.values()): + self._update_lines(group, only_2D=True) + else: + for img in self._images['mip'] + self._images['mip_chs']: + img.remove() + self._images.pop('mip') + self._images.pop('mip_chs') + self._toggle_show_mip_button.setText('Show Max Intensity Proj') + for group in set(self._groups.values()): # remove lines + self._update_lines(group, only_2D=True) + self._draw() + + def _toggle_show_max(self): + """Toggle whether to color local maxima differently.""" + if self._toggle_show_max_button.text() == 'Show Maxima': + self._toggle_show_max_button.setText('Hide Maxima') + # happens on initiation or if the radius is changed with it off + if self._ct_maxima is None: # otherwise don't recompute + self._update_ct_maxima() + self._images['local_max'] = list() + for axis in range(3): + ct_max_data = np.take(self._ct_maxima, + self._current_slice[axis], axis=axis).T + self._images['local_max'].append( + self._figs[axis].axes[0].imshow( + ct_max_data, cmap='autumn', aspect='auto', + vmin=0, vmax=1, zorder=4)) + else: + for img in self._images['local_max']: + img.remove() + self._images.pop('local_max') + self._toggle_show_max_button.setText('Show Maxima') + self._draw() + + def _toggle_show_brain(self): + """Toggle whether the brain/MRI is being shown.""" + if 'mri' in self._images: + for img in self._images['mri']: + img.remove() + self._images.pop('mri') + self._toggle_brain_button.setText('Show Brain') + else: + self._images['mri'] = list() + for axis in range(3): + mri_data = np.take(self._mri_data, + self._current_slice[axis], axis=axis).T + self._images['mri'].append(self._figs[axis].axes[0].imshow( + mri_data, cmap='hot', aspect='auto', alpha=0.25, zorder=2)) + self._toggle_brain_button.setText('Hide Brain') + self._draw() + + def _key_press_event(self, event): + """Execute functions when the user presses a key.""" + if event.key() == 'escape': + self.close() + + if event.text() == 'h': + self._show_help() + + if event.text() == 'm': + self._mark_ch() + + if event.text() == 'r': + self._remove_ch() + + if event.text() == 'b': + self._toggle_show_brain() + + if event.text() in ('=', '+', '-'): + self._zoom(sign=-2 * (event.text() == '-') + 1, draw=True) + + # Changing slices + if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, + QtCore.Qt.Key_Left, QtCore.Qt.Key_Right, + QtCore.Qt.Key_Comma, QtCore.Qt.Key_Period, + QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown): + ras = np.array(self._ras) + if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down): + ras[2] += 2 * (event.key() == QtCore.Qt.Key_Up) - 1 + elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Right): + ras[0] += 2 * (event.key() == QtCore.Qt.Key_Right) - 1 + else: + ras[1] += 2 * (event.key() == QtCore.Qt.Key_PageUp or + event.key() == QtCore.Qt.Key_Period) - 1 + self._set_ras(ras) + + def _on_click(self, event, axis): + """Move to view on MRI and CT on click.""" + if event.inaxes is self._figs[axis].axes[0]: + # Data coordinates are voxel coordinates + pos = (event.xdata, event.ydata) + logger.info(f'Clicked {"XYZ"[axis]} ({axis}) axis at pos {pos}') + xyz = self._vox + xyz[list(self._xy_idx[axis])] = pos + logger.debug(f'Using voxel {list(xyz)}') + ras = apply_trans(self._vox_ras_t, xyz) + self._set_ras(ras) + + def _update_moved(self): + """Update when cursor position changes.""" + 
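
Note: the 'Show Max Intensity Proj' toggle above collapses the CT with `np.max` along each axis (then transposes to match imshow's row/column convention), so every bright contact shows up on a single slice-like image regardless of depth. The projection itself is one line per axis:

    import numpy as np

    ct = np.zeros((8, 8, 8))
    ct[2, 3, 4] = 700.0
    ct[6, 1, 5] = 900.0

    for axis in range(3):
        mip = np.max(ct, axis=axis).T  # maximum intensity projection
        where = np.unravel_index(mip.argmax(), mip.shape)
        print(f'axis {axis}: max {mip.max():.0f} at {where}')

The arrow-key handler above uses the related `2 * condition - 1` idiom to step the RAS coordinate by +1 or -1 mm without branching.
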
self._RAS_textbox.setPlainText('{:.2f}, {:.2f}, {:.2f}'.format( + *self._ras)) + self._VOX_textbox.setPlainText('{:3d}, {:3d}, {:3d}'.format( + *self._current_slice)) + self._intensity_label.setText('intensity = {:.2f}'.format( + self._ct_data[tuple(self._current_slice)])) + + @safe_event + def closeEvent(self, event): + """Clean up upon closing the window.""" + self._renderer.plotter.close() + self.close() diff --git a/python/libs/mne/gui/tests/__init__.py b/python/libs/mne/gui/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/gui/tests/test_coreg_gui.py b/python/libs/mne/gui/tests/test_coreg_gui.py new file mode 100644 index 0000000..0d17ead --- /dev/null +++ b/python/libs/mne/gui/tests/test_coreg_gui.py @@ -0,0 +1,288 @@ +# Author: Christian Brodbeck +# +# License: BSD-3-Clause + +import os.path as op + +import pytest +import warnings +from numpy.testing import assert_allclose +import numpy as np + +from mne.datasets import testing +from mne.io import read_info +from mne.io.kit.tests import data_dir as kit_data_dir +from mne.io.constants import FIFF +from mne.utils import get_config, catch_logging +from mne.channels import DigMontage +from mne.coreg import Coregistration +from mne.viz import _3d + + +data_path = testing.data_path(download=False) +raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_trans = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-trans.fif') +kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif') +subjects_dir = op.join(data_path, 'subjects') +fid_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-fiducials.fif') +ctf_raw_path = op.join(data_path, 'CTF', 'catch-alp-good-f.ds') +nirx_15_0_raw_path = op.join(data_path, 'NIRx', 'nirscout', + 'nirx_15_0_recording', 'NIRS-2019-10-27_003.hdr') +nirsport2_raw_path = op.join(data_path, 'NIRx', 'nirsport_v2', 'aurora_2021_9', + '2021-10-01_002_config.hdr') +snirf_nirsport2_raw_path = op.join(data_path, 'SNIRF', 'NIRx', 'NIRSport2', + '1.0.3', '2021-05-05_001.snirf') + + +class TstVTKPicker(object): + """Class to test cell picking.""" + + def __init__(self, mesh, cell_id, event_pos): + self.mesh = mesh + self.cell_id = cell_id + self.point_id = None + self.event_pos = event_pos + + def GetCellId(self): + """Return the picked cell.""" + return self.cell_id + + def GetDataSet(self): + """Return the picked mesh.""" + return self.mesh + + def GetPickPosition(self): + """Return the picked position.""" + vtk_cell = self.mesh.GetCell(self.cell_id) + cell = [vtk_cell.GetPointId(point_id) for point_id + in range(vtk_cell.GetNumberOfPoints())] + self.point_id = cell[0] + return self.mesh.points[self.point_id] + + def GetEventPosition(self): + """Return event position.""" + return self.event_pos + + +@pytest.mark.slowtest +@testing.requires_testing_data +@pytest.mark.parametrize( + 'inst_path', (raw_path, 'gen_montage', ctf_raw_path, nirx_15_0_raw_path, + nirsport2_raw_path, snirf_nirsport2_raw_path)) +def test_coreg_gui_pyvista_file_support(inst_path, tmp_path, + renderer_interactive_pyvistaqt): + """Test reading supported files.""" + from mne.gui import coregistration + + if inst_path == 'gen_montage': + # generate a montage fig to use as inst. 
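
Note: `TstVTKPicker` above is a hand-rolled test double: any object exposing the methods the code under test calls can stand in for a real VTK picker, so GUI callbacks can be exercised headless. The same duck-typing idea in miniature, with a hypothetical `on_pick` callback standing in for the real handlers:

    class FakePicker:
        """Duck-typed stand-in for a VTK cell picker."""
        def __init__(self, cell_id, event_pos):
            self.cell_id = cell_id
            self.event_pos = event_pos

        def GetCellId(self):
            return self.cell_id

        def GetEventPosition(self):
            return self.event_pos

    def on_pick(picker):
        # toy callback; the real handlers live on CoregistrationUI
        return picker.GetCellId(), picker.GetEventPosition()

    assert on_pick(FakePicker(7, (0, 0))) == (7, (0, 0))
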
+ tmp_info = read_info(raw_path) + eeg_chans = [] + for pt in tmp_info['dig']: + if pt['kind'] == FIFF.FIFFV_POINT_EEG: + eeg_chans.append(f"EEG {pt['ident']:03d}") + + dig = DigMontage(dig=tmp_info['dig'], + ch_names=eeg_chans) + inst_path = tmp_path / 'tmp-dig.fif' + dig.save(inst_path) + + # Suppressing warnings here is not ideal. + # However ctf_raw_path (catch-alp-good-f.ds) is poorly formed and causes + # mne.io.read_raw to issue warning. + # XXX consider replacing ctf_raw_path and removing warning ignore filter. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + coregistration(inst=inst_path, subject='sample', + subjects_dir=subjects_dir) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_coreg_gui_pyvista_basic(tmp_path, renderer_interactive_pyvistaqt, + monkeypatch): + """Test that using CoregistrationUI matches mne coreg.""" + from mne.gui import coregistration + from mne.gui._coreg import CoregistrationUI + config = get_config() + # the sample subject in testing has MRI fids + assert op.isfile(op.join( + subjects_dir, 'sample', 'bem', 'sample-fiducials.fif')) + + deprecated_params = [ + 'standalone', 'head_transparency', 'project_eeg' + ] + for param in deprecated_params: + kwargs = {p: None for p in deprecated_params} + kwargs[param] = True + with pytest.warns(DeprecationWarning, match=f'{param} is deprecated'): + coreg = CoregistrationUI( + info_file=None, subject='sample', subjects_dir=subjects_dir, + **kwargs) + coreg.close() + del kwargs + + deprecated_params = [ + 'project_eeg' + ] + for param in deprecated_params: + kwargs = {p: None for p in deprecated_params} + kwargs[param] = True + with pytest.warns(DeprecationWarning, match=f'{param} is deprecated'): + coreg = coregistration( + subject='sample', subjects_dir=subjects_dir, **kwargs) + coreg.close() + del kwargs + + coreg = coregistration(subject='sample', subjects_dir=subjects_dir, + trans=fname_trans) + assert coreg._lock_fids + coreg._reset_fiducials() + coreg.close() + + # make it always log the distances + monkeypatch.setattr(_3d.logger, 'info', _3d.logger.warning) + with catch_logging() as log: + coreg = coregistration(inst=raw_path, subject='sample', + head_high_res=False, # for speed + subjects_dir=subjects_dir, verbose='debug') + log = log.getvalue() + assert 'Total 16/78 points inside the surface' in log + coreg._set_fiducials_file(fid_fname) + assert coreg._fiducials_file == fid_fname + + # fitting (with scaling) + assert not coreg._mri_scale_modified + coreg._reset() + coreg._reset_fitting_parameters() + coreg._set_scale_mode("uniform") + coreg._fits_fiducials() + assert_allclose(coreg.coreg._scale, + np.array([97.46, 97.46, 97.46]) * 1e-2, + atol=1e-3) + shown_scale = [coreg._widgets[f's{x}'].get_value() for x in 'XYZ'] + assert_allclose(shown_scale, coreg.coreg._scale * 100, atol=1e-2) + coreg._set_icp_fid_match("nearest") + coreg._set_scale_mode("3-axis") + coreg._fits_icp() + assert_allclose(coreg.coreg._scale, + np.array([104.43, 101.47, 125.78]) * 1e-2, + atol=1e-3) + shown_scale = [coreg._widgets[f's{x}'].get_value() for x in 'XYZ'] + assert_allclose(shown_scale, coreg.coreg._scale * 100, atol=1e-2) + coreg._set_scale_mode("None") + coreg._set_icp_fid_match("matched") + assert coreg._mri_scale_modified + + # unlock fiducials + assert coreg._lock_fids + coreg._set_lock_fids(False) + assert not coreg._lock_fids + + # picking + assert not coreg._mri_fids_modified + vtk_picker = TstVTKPicker(coreg._surfaces['head'], 0, (0, 0)) + coreg._on_mouse_move(vtk_picker, None) 
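
Note: the deprecated-parameter loop above checks each keyword in isolation with `pytest.warns(DeprecationWarning, match=...)`. The pattern reduced to a self-contained snippet runnable under pytest, with `make_widget` as a hypothetical stand-in for the constructors under test:

    import warnings
    import pytest

    def make_widget(standalone=None):
        if standalone is not None:
            warnings.warn('standalone is deprecated', DeprecationWarning)

    def test_deprecated_params():
        for param in ['standalone']:
            with pytest.warns(DeprecationWarning,
                              match=f'{param} is deprecated'):
                make_widget(**{param: True})
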
+ coreg._on_button_press(vtk_picker, None) + coreg._on_pick(vtk_picker, None) + coreg._on_button_release(vtk_picker, None) + coreg._on_pick(vtk_picker, None) # also pick when locked + assert coreg._mri_fids_modified + + # lock fiducials + coreg._set_lock_fids(True) + assert coreg._lock_fids + + # fitting (no scaling) + assert coreg._nasion_weight == 10. + coreg._set_point_weight(11., 'nasion') + assert coreg._nasion_weight == 11. + coreg._fit_fiducials() + with catch_logging() as log: + coreg._redraw() # actually emit the log + log = log.getvalue() + assert 'Total 6/78 points inside the surface' in log + with catch_logging() as log: + coreg._fit_icp() + coreg._redraw() + log = log.getvalue() + assert 'Total 38/78 points inside the surface' in log + assert coreg.coreg._extra_points_filter is None + coreg._omit_hsp() + with catch_logging() as log: + coreg._redraw() + log = log.getvalue() + assert 'Total 29/53 points inside the surface' in log + assert coreg.coreg._extra_points_filter is not None + coreg._reset_omit_hsp_filter() + with catch_logging() as log: + coreg._redraw() + log = log.getvalue() + assert 'Total 38/78 points inside the surface' in log + assert coreg.coreg._extra_points_filter is None + + assert coreg._grow_hair == 0 + coreg._fit_fiducials() # go back to few inside to start + with catch_logging() as log: + coreg._redraw() + log = log.getvalue() + assert 'Total 6/78 points inside the surface' in log + norm = np.linalg.norm(coreg._head_geo['rr']) # what's used for inside + assert_allclose(norm, 5.949288, atol=1e-3) + coreg._set_grow_hair(20.0) + with catch_logging() as log: + coreg._redraw() + assert coreg._grow_hair == 20.0 + norm = np.linalg.norm(coreg._head_geo['rr']) + assert_allclose(norm, 6.555220, atol=1e-3) # outward + log = log.getvalue() + assert 'Total 8/78 points inside the surface' in log # more outside now + + # visualization + assert not coreg._helmet + coreg._set_helmet(True) + assert coreg._helmet + assert coreg._orient_glyphs + assert coreg._scale_by_distance + assert coreg._mark_inside + assert_allclose( + coreg._head_opacity, + float(config.get('MNE_COREG_HEAD_OPACITY', '0.8'))) + assert coreg._hpi_coils + assert coreg._eeg_channels + assert coreg._head_shape_points + assert coreg._scale_mode == 'None' + assert coreg._icp_fid_match == 'matched' + assert coreg._head_resolution is False + + assert coreg._trans_modified + tmp_trans = tmp_path / 'tmp-trans.fif' + coreg._save_trans(tmp_trans) + assert not coreg._trans_modified + assert op.isfile(tmp_trans) + + # first, disable auto cleanup + coreg._renderer._window_close_disconnect(after=True) + # test _close_callback() + coreg.close() + coreg._widgets['close_dialog'].trigger('Discard') # do not save + coreg._clean() # finally, cleanup internal structures + + # Coregistration instance should survive + assert isinstance(coreg.coreg, Coregistration) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_coreg_gui_notebook(renderer_notebook, nbexec): + """Test the coregistration UI in a notebook.""" + import os + import mne + from mne.datasets import testing + from mne.gui import coregistration + mne.viz.set_3d_backend('notebook') # set the 3d backend + with mne.utils.modified_env(_MNE_FAKE_HOME_DIR=None): + data_path = testing.data_path(download=False) + subjects_dir = os.path.join(data_path, 'subjects') + coregistration(subject='sample', subjects_dir=subjects_dir) diff --git a/python/libs/mne/gui/tests/test_gui_api.py b/python/libs/mne/gui/tests/test_gui_api.py new file mode 100644 index 
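
Note: the `catch_logging` assertions above ('Total N/78 points inside the surface') work by capturing log records and searching the text. What that utility does, approximated with only the standard library so the pattern is clear:

    import io
    import logging

    logger = logging.getLogger('demo')
    logger.setLevel(logging.INFO)

    buf = io.StringIO()
    handler = logging.StreamHandler(buf)
    logger.addHandler(handler)
    try:
        logger.info('Total 16/78 points inside the surface')
    finally:
        logger.removeHandler(handler)

    assert 'points inside the surface' in buf.getvalue()
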
0000000..064d233 --- /dev/null +++ b/python/libs/mne/gui/tests/test_gui_api.py @@ -0,0 +1,365 @@ +# Authors: Guillaume Favelier +# +# License: Simplified BSD + +import sys +import pytest + +# This will skip all tests in this scope +pytestmark = pytest.mark.skipif( + sys.platform.startswith('win'), reason='nbexec does not work on Windows') + + +def test_gui_api(renderer_notebook, nbexec): + """Test GUI API.""" + import contextlib + import mne + import warnings + # nbexec does not expose renderer_notebook so I use a + # temporary variable to synchronize the tests + try: + assert mne.MNE_PYVISTAQT_BACKEND_TEST + except AttributeError: + mne.viz.set_3d_backend('notebook') + backend = 'notebook' + else: + backend = 'qt' + renderer = mne.viz.backends.renderer._get_renderer(size=(300, 300)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + renderer._window_set_theme('/does/not/exist') + if backend == 'qt': + assert len(w) == 1 + assert 'not found' in str(w[0].message), str(w[0].message) + else: + assert len(w) == 0 + renderer._window_set_theme('dark') + + from unittest.mock import Mock + mock = Mock() + + @contextlib.contextmanager + def _check_widget_trigger(widget, mock, before, after, call_count=True, + get_value=True): + if get_value: + assert widget.get_value() == before + old_call_count = mock.call_count + try: + yield + finally: + if get_value: + assert widget.get_value() == after + if call_count: + assert mock.call_count == old_call_count + 1 + + # --- BEGIN: dock --- + renderer._dock_initialize(name='', area='left') + + # label (not interactive) + widget = renderer._dock_add_label( + value='', + align=False, + selectable=True, + ) + widget = renderer._dock_add_label( + value='', + align=True, + ) + widget.update() + # labels are disabled by default with the notebook backend + widget.set_enabled(False) + assert not widget.is_enabled() + widget.set_enabled(True) + assert widget.is_enabled() + + # ToolButton + widget = renderer._dock_add_button( + name='', + callback=mock, + style='toolbutton', + tooltip='button', + ) + with _check_widget_trigger(widget, mock, None, None, get_value=False): + widget.set_value(True) + + # PushButton + widget = renderer._dock_add_button( + name='', + callback=mock, + ) + with _check_widget_trigger(widget, mock, None, None, get_value=False): + widget.set_value(True) + + # slider + widget = renderer._dock_add_slider( + name='', + value=0, + rng=[0, 10], + callback=mock, + tooltip='slider', + ) + with _check_widget_trigger(widget, mock, 0, 5): + widget.set_value(5) + + # check box + widget = renderer._dock_add_check_box( + name='', + value=False, + callback=mock, + tooltip='check box', + ) + with _check_widget_trigger(widget, mock, False, True): + widget.set_value(True) + + # spin box + renderer._dock_add_spin_box( + name='', + value=0, + rng=[0, 1], + callback=mock, + step=0.1, + tooltip='spin box', + ) + widget = renderer._dock_add_spin_box( + name='', + value=0, + rng=[0, 1], + callback=mock, + step=None, + ) + with _check_widget_trigger(widget, mock, 0, 0.5): + widget.set_value(0.5) + + # combo box + widget = renderer._dock_add_combo_box( + name='', + value='foo', + rng=['foo', 'bar'], + callback=mock, + tooltip='combo box', + ) + with _check_widget_trigger(widget, mock, 'foo', 'bar'): + widget.set_value('bar') + + # radio buttons + widget = renderer._dock_add_radio_buttons( + value='foo', + rng=['foo', 'bar'], + callback=mock, + ) + with _check_widget_trigger(widget, mock, 'foo', 'bar', get_value=False): + 
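
Note: `_check_widget_trigger` defined above brackets one widget interaction, asserting the value transition and that the mocked callback fired exactly once. A runnable version of the same helper against a toy slider:

    import contextlib
    from unittest.mock import Mock

    class Slider:
        def __init__(self, value, callback):
            self._value, self._callback = value, callback
        def get_value(self):
            return self._value
        def set_value(self, value):
            self._value = value
            self._callback(value)

    @contextlib.contextmanager
    def check_widget_trigger(widget, mock, before, after):
        assert widget.get_value() == before
        old = mock.call_count
        try:
            yield
        finally:
            assert widget.get_value() == after
            assert mock.call_count == old + 1  # fired exactly once

    mock = Mock()
    slider = Slider(0, mock)
    with check_widget_trigger(slider, mock, 0, 5):
        slider.set_value(5)
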
widget.set_value(1, 'bar') + assert widget.get_value(0) == 'foo' + assert widget.get_value(1) == 'bar' + widget.set_enabled(False) + + # text field + widget = renderer._dock_add_text( + name='', + value='foo', + placeholder='', + callback=mock, + ) + with _check_widget_trigger(widget, mock, 'foo', 'bar'): + widget.set_value('bar') + widget.set_style(dict(border="2px solid #ff0000")) + + # file button + renderer._dock_add_file_button( + name='', + desc='', + func=mock, + is_directory=True, + tooltip='file button', + ) + renderer._dock_add_file_button( + name='', + desc='', + func=mock, + initial_directory='', + ) + renderer._dock_add_file_button( + name='', + desc='', + func=mock, + input_text_widget=False, + ) + widget = renderer._dock_add_file_button( + name='', + desc='', + func=mock, + save=True + ) + widget.set_value(0, 'foo') # modify the text field (not interactive) + assert widget.get_value(0) == 'foo' + # XXX: the internal file dialogs may hang without signals + # widget.set_value(1, 'bar') + widget.set_enabled(False) + + renderer._dock_initialize(name='', area='right') + renderer._dock_named_layout(name='') + for collapse in (None, True, False): + renderer._dock_add_group_box(name='', collapse=collapse) + renderer._dock_add_stretch() + renderer._dock_add_layout() + renderer._dock_finalize() + renderer._dock_hide() + renderer._dock_show() + # --- END: dock --- + + # --- BEGIN: tool bar --- + renderer._tool_bar_initialize( + name="default", + window=None, + ) + renderer._tool_bar_load_icons() + + # button + assert 'reset' not in renderer.actions + renderer._tool_bar_add_button( + name='reset', + desc='', + func=mock, + icon_name='help', + ) + assert 'reset' in renderer.actions + + # icon + renderer._tool_bar_update_button_icon( + name='reset', + icon_name='reset', + ) + + # text + renderer._tool_bar_add_text( + name='', + value='', + placeholder='', + ) + + # spacer + renderer._tool_bar_add_spacer() + + # file button + assert 'help' not in renderer.actions + renderer._tool_bar_add_file_button( + name='help', + desc='', + func=mock, + shortcut=None, + ) + assert 'help' in renderer.actions + + # play button + assert 'play' not in renderer.actions + renderer._tool_bar_add_play_button( + name='play', + desc='', + func=mock, + shortcut=None, + ) + assert 'play' in renderer.actions + + # theme + renderer._tool_bar_set_theme() + # --- END: tool bar --- + + # --- BEGIN: menu bar --- + renderer._menu_initialize() + + # submenu + renderer._menu_add_submenu(name='foo', desc='foo') + assert 'foo' in renderer._menus + assert 'foo' in renderer._menu_actions + + # button + renderer._menu_add_button( + menu_name='foo', + name='bar', + desc='bar', + func=mock, + ) + assert 'bar' in renderer._menu_actions['foo'] + with _check_widget_trigger(None, mock, '', '', get_value=False): + renderer._menu_actions['foo']['bar'].trigger() + + # --- END: menu bar --- + + # --- BEGIN: status bar --- + renderer._status_bar_initialize() + renderer._status_bar_update() + + # label + widget = renderer._status_bar_add_label(value='foo', stretch=0) + assert widget.get_value() == 'foo' + + # progress bar + widget = renderer._status_bar_add_progress_bar(stretch=0) + # by default, get_value() is -1 for Qt and 0 for Ipywidgets + widget.set_value(0) + assert widget.get_value() == 0 + # --- END: status bar --- + + # --- BEGIN: tooltips --- + widget = renderer._dock_add_button( + name='', + callback=mock, + tooltip='foo' + ) + assert widget.get_tooltip() == 'foo' + # Change it … + widget.set_tooltip('bar') + assert 
widget.get_tooltip() == 'bar' + # --- END: tooltips --- + + # --- BEGIN: dialog --- + # dialogs are not supported yet on notebook + if renderer._kind == 'qt': + # warning + buttons = ["Save", "Cancel"] + widget = renderer._dialog_warning( + title='', + text='', + info_text='', + callback=mock, + buttons=buttons, + modal=False, + ) + widget.show() + for button in buttons: + with _check_widget_trigger(None, mock, '', '', get_value=False): + widget.trigger(button=button) + assert mock.call_args.args == (button,) + + # buttons list empty means OK button (default) + button = 'Ok' + widget = renderer._dialog_warning( + title='', + text='', + info_text='', + callback=mock, + modal=False, + ) + widget.show() + with _check_widget_trigger(None, mock, '', '', get_value=False): + widget.trigger(button=button) + assert mock.call_args.args == (button,) + # --- END: dialog --- + + renderer.show() + + renderer._window_close_connect(lambda: mock('first'), after=False) + renderer._window_close_connect(lambda: mock('last')) + old_call_count = mock.call_count + renderer.close() + if renderer._kind == 'qt': + assert mock.call_count == old_call_count + 2 + assert mock.call_args_list[-1].args == ('last',) + assert mock.call_args_list[-2].args == ('first',) + + +def test_gui_api_qt(renderer_interactive_pyvistaqt): + """Test GUI API with the Qt backend.""" + import mne + mne.MNE_PYVISTAQT_BACKEND_TEST = True + test_gui_api(None, None) diff --git a/python/libs/mne/gui/tests/test_ieeg_locate_gui.py b/python/libs/mne/gui/tests/test_ieeg_locate_gui.py new file mode 100644 index 0000000..1776d45 --- /dev/null +++ b/python/libs/mne/gui/tests/test_ieeg_locate_gui.py @@ -0,0 +1,221 @@ +# Authors: Alex Rockhill +# +# License: BSD-3-clause + +import os.path as op +import numpy as np +from numpy.testing import assert_allclose + +import pytest + +import mne +from mne.datasets import testing +from mne.transforms import apply_trans +from mne.utils import (requires_nibabel, requires_version, catch_logging, + use_log_level) +from mne.viz.utils import _fake_click + +data_path = testing.data_path(download=False) +subject = 'sample' +subjects_dir = op.join(data_path, 'subjects') +sample_dir = op.join(data_path, 'MEG', subject) +raw_path = op.join(sample_dir, 'sample_audvis_trunc_raw.fif') +fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif') + + +@requires_nibabel() +@pytest.fixture +def _fake_CT_coords(skull_size=5, contact_size=2): + """Make somewhat realistic CT data with contacts.""" + import nibabel as nib + brain = nib.load( + op.join(subjects_dir, subject, 'mri', 'brain.mgz')) + verts = mne.read_surface( + op.join(subjects_dir, subject, 'bem', 'outer_skull.surf'))[0] + verts = apply_trans(np.linalg.inv(brain.header.get_vox2ras_tkr()), verts) + x, y, z = np.array(brain.shape).astype(int) // 2 + coords = [(x, y - 14, z), (x - 10, y - 15, z), + (x - 20, y - 16, z + 1), (x - 30, y - 16, z + 1)] + center = np.array(brain.shape) / 2 + # make image + np.random.seed(99) + ct_data = np.random.random(brain.shape).astype(np.float32) * 100 + # make skull + for vert in verts: + x, y, z = np.round(vert).astype(int) + ct_data[slice(x - skull_size, x + skull_size + 1), + slice(y - skull_size, y + skull_size + 1), + slice(z - skull_size, z + skull_size + 1)] = 1000 + # add electrode with contacts + for (x, y, z) in coords: + # make sure not in skull + assert np.linalg.norm(center - np.array((x, y, z))) < 50 + ct_data[slice(x - contact_size, x + contact_size + 1), + slice(y - contact_size, y + contact_size + 1), + slice(z - 
contact_size, z + contact_size + 1)] = \ + 1000 - np.linalg.norm(np.array(np.meshgrid( + *[range(-contact_size, contact_size + 1)] * 3)), axis=0) + ct = nib.MGHImage(ct_data, brain.affine) + coords = apply_trans(ct.header.get_vox2ras_tkr(), np.array(coords)) + return ct, coords + + +@requires_nibabel() +@pytest.fixture +def _locate_ieeg(renderer_interactive_pyvistaqt): + # Use a fixture to create these classes so we can ensure that they + # are closed at the end of the test + guis = list() + + def fun(*args, **kwargs): + guis.append(mne.gui.locate_ieeg(*args, **kwargs)) + return guis[-1] + + yield fun + + for gui in guis: + try: + gui.close() + except Exception: + pass + + +def test_ieeg_elec_locate_gui_io(_locate_ieeg): + """Test the input/output of the intracranial location GUI.""" + import nibabel as nib + info = mne.create_info([], 1000) + aligned_ct = nib.MGHImage(np.zeros((256, 256, 256), dtype=np.float32), + np.eye(4)) + trans = mne.transforms.Transform('head', 'mri') + with pytest.raises(ValueError, + match='No channels found in `info` to locate'): + _locate_ieeg(info, aligned_ct, subject, subjects_dir) + info = mne.create_info(['test'], 1000, ['seeg']) + with pytest.raises(ValueError, match='CT is not aligned to MRI'): + _locate_ieeg(info, trans, aligned_ct, subject=subject, + subjects_dir=subjects_dir) + + +@requires_version('sphinx_gallery') +@testing.requires_testing_data +def test_locate_scraper(_locate_ieeg, _fake_CT_coords, tmp_path): + """Test sphinx-gallery scraping of the GUI.""" + raw = mne.io.read_raw_fif(raw_path) + raw.pick_types(eeg=True) + ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2', + 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'} + raw.pick_channels(list(ch_dict.keys())) + raw.rename_channels(ch_dict) + raw.set_montage(None) + aligned_ct, _ = _fake_CT_coords + trans = mne.read_trans(fname_trans) + with pytest.warns(RuntimeWarning, match='`pial` surface not found'): + gui = _locate_ieeg(raw.info, trans, aligned_ct, + subject=subject, subjects_dir=subjects_dir) + (tmp_path / '_images').mkdir() + image_path = str(tmp_path / '_images' / 'temp.png') + gallery_conf = dict(builder_name='html', src_dir=str(tmp_path)) + block_vars = dict( + example_globals=dict(gui=gui), + image_path_iterator=iter([image_path])) + assert not op.isfile(image_path) + assert not getattr(gui, '_scraped', False) + mne.gui._LocateScraper()(None, block_vars, gallery_conf) + assert op.isfile(image_path) + assert gui._scraped + + +@testing.requires_testing_data +def test_ieeg_elec_locate_gui_display(_locate_ieeg, _fake_CT_coords): + """Test that the intracranial location GUI displays properly.""" + raw = mne.io.read_raw_fif(raw_path, preload=True) + raw.pick_types(eeg=True) + ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2', + 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'} + raw.pick_channels(list(ch_dict.keys())) + raw.rename_channels(ch_dict) + raw.set_eeg_reference('average') + raw.set_channel_types({name: 'seeg' for name in raw.ch_names}) + raw.set_montage(None) + aligned_ct, coords = _fake_CT_coords + trans = mne.read_trans(fname_trans) + + # test no seghead, fsaverage doesn't have seghead + with pytest.warns(RuntimeWarning, match='`seghead` not found'): + with catch_logging() as log: + _locate_ieeg(raw.info, trans, aligned_ct, subject='fsaverage', + subjects_dir=subjects_dir, verbose=True) + log = log.getvalue() + assert 'Using marching cubes' in log + + # test functions + with pytest.warns(RuntimeWarning, match='`pial` surface not found'): + gui = _locate_ieeg(raw.info, trans, aligned_ct, + 
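
Note: `_fake_CT_coords` above paints each contact into the synthetic CT as a cube of near-1000 intensity whose brightness falls off with distance from the contact center, computed from a meshgrid of offsets. That one step isolated:

    import numpy as np

    contact_size = 2
    ct = np.zeros((64, 64, 64), dtype=np.float32)
    x, y, z = 32, 30, 32

    offsets = np.meshgrid(*[range(-contact_size, contact_size + 1)] * 3)
    falloff = np.linalg.norm(np.array(offsets), axis=0)  # distance from center
    ct[x - contact_size:x + contact_size + 1,
       y - contact_size:y + contact_size + 1,
       z - contact_size:z + contact_size + 1] = 1000 - falloff

    print(ct[x, y, z], ct[x + contact_size, y, z])  # 1000.0 center, dimmer edge
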
subject=subject, subjects_dir=subjects_dir, + verbose=True) + + with pytest.raises(ValueError, match='read-only'): + gui._ras[:] = coords[0] # start in the right position + gui._set_ras(coords[0]) + gui._mark_ch() + assert not gui._lines and not gui._lines_2D # no lines for one contact + for ci, coord in enumerate(coords[1:], 1): + coord_vox = apply_trans(gui._ras_vox_t, coord) + with use_log_level('debug'): + _fake_click(gui._figs[2], gui._figs[2].axes[0], + coord_vox[:-1], xform='data', kind='release') + assert_allclose(coord[:2], gui._ras[:2], atol=0.1, + err_msg=f'coords[{ci}][:2]') + assert_allclose(coord[2], gui._ras[2], atol=2, + err_msg=f'coords[{ci}][2]') + gui._mark_ch() + + # ensure a 3D line was made for each group + assert len(gui._lines) == 2 + + # test snap to center + gui._ch_index = 0 + gui._set_ras(coords[0]) # move to first position + gui._mark_ch() + assert_allclose(coords[0], gui._chs['LAMY 1'], atol=0.2) + gui._snap_button.click() + assert gui._snap_button.text() == 'Off' + # now make sure no snap happens + gui._ch_index = 0 + gui._set_ras(coords[1] + 1) + gui._mark_ch() + assert_allclose(coords[1] + 1, gui._chs['LAMY 1'], atol=0.01) + # check that it turns back on + gui._snap_button.click() + assert gui._snap_button.text() == 'On' + + # test remove + gui._ch_index = 1 + gui._update_ch_selection() + gui._remove_ch() + assert np.isnan(gui._chs['LAMY 2']).all() + + # check that raw object saved + assert not np.isnan(raw.info['chs'][0]['loc'][:3]).any() # LAMY 1 + assert np.isnan(raw.info['chs'][1]['loc'][:3]).all() # LAMY 2 (removed) + + # move sliders + gui._alpha_slider.setValue(75) + assert gui._ch_alpha == 0.75 + gui._radius_slider.setValue(5) + assert gui._radius == 5 + ct_sum_before = np.nansum(gui._images['ct'][0].get_array().data) + gui._ct_min_slider.setValue(500) + assert np.nansum(gui._images['ct'][0].get_array().data) < ct_sum_before + + # test buttons + gui._toggle_show_brain() + assert 'mri' in gui._images + assert 'local_max' not in gui._images + gui._toggle_show_max() + assert 'local_max' in gui._images + assert 'mip' not in gui._images + gui._toggle_show_mip() + assert 'mip' in gui._images + assert 'mip_chs' in gui._images + assert len(gui._lines_2D) == 1 # LAMY only has one contact diff --git a/python/libs/mne/html/d3.v3.min.js b/python/libs/mne/html/d3.v3.min.js new file mode 100644 index 0000000..eed58e6 --- /dev/null +++ b/python/libs/mne/html/d3.v3.min.js @@ -0,0 +1,5 @@ +!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++ue;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var 
o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o=0,a=0,c=0;if(r=/([a-z]+)\((.*)\)/i.exec(n))switch(u=r[2].split(","),r[1]){case"hsl":return e(parseFloat(u[0]),parseFloat(u[1])/100,parseFloat(u[2])/100);case"rgb":return 
[… vendored, minified D3.js v3.4.2 bundle (color, number/time formatting, xhr/dsv, geo, scales, layouts, zoom/drag behaviors, etc.) added verbatim by this diff; the minified contents are corrupted in extraction and are not reproduced here …]
i,o=e[t],a=-1,s=o.length;++at;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++fg;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i0)for(i=-1;++i=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var 
i,a,s,l=r[0],f=l,h=-1;++h0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++ut?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++oe&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++ie.dx)&&(l=e.dx);++ie&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return 
function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[]) +},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete 
n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(zo(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=To,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++oi;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var 
o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=zs,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":zs,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var zs="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return Ts[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var 
n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),z.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),z=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?z.on("touchmove.brush",v).on("touchend.brush",y):z.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),T=+/^n/.test(k);M=[l[1-q]-L[0],f[1-T]-L[1]],L[0]=l[q],L[1]=f[T]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var Ts={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new 
ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(+n,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}(); \ No newline at end of file diff --git a/python/libs/mne/html/mpld3.v0.2.min.js b/python/libs/mne/html/mpld3.v0.2.min.js new file mode 100644 index 0000000..adefb15 --- /dev/null +++ b/python/libs/mne/html/mpld3.v0.2.min.js @@ -0,0 +1,2 @@ +!function(t){function s(t){var s={};for(var o in t)s[o]=t[o];return s}function o(t,s){t="undefined"!=typeof t?t:10,s="undefined"!=typeof s?s:"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";for(var o=s.charAt(Math.round(Math.random()*(s.length-11))),e=1;t>e;e++)o+=s.charAt(Math.round(Math.random()*(s.length-1)));return o}function e(s,o){var e=t.interpolate([s[0].valueOf(),s[1].valueOf()],[o[0].valueOf(),o[1].valueOf()]);return function(t){var s=e(t);return[new Date(s[0]),new Date(s[1])]}}function i(t){return"undefined"==typeof t}function r(t){return null==t||i(t)}function n(t,s){return t.length>0?t[s%t.length]:null}function a(){function s(s,n){var a=t.functor(o),p=t.functor(e),h=[],l=[],c=0,d=-1,u=0,f=!1;if(!n){n=["M"];for(var y=1;yc;)i.call(this,s[c],c)?(h.push(a.call(this,s[c],c),p.call(this,s[c],c)),c++):(h=null,c=u);h?f&&h.length>0?(l.push("M",h[0],h[1]),f=!1):(l.push(n[d]),l=l.concat(h)):f=!0}return c!=s.length&&console.warn("Warning: not all vertices used in Path"),l.join(" ")}var 
o=function(t){return t[0]},e=function(t){return t[1]},i=function(){return!0},r={M:1,m:1,L:1,l:1,Q:2,q:2,T:1,t:1,S:2,s:2,C:3,c:3,Z:0,z:0};return s.x=function(t){return arguments.length?(o=t,s):o},s.y=function(t){return arguments.length?(e=t,s):e},s.defined=function(t){return arguments.length?(i=t,s):i},s.call=s,s}function p(){function t(t){return s.forEach(function(s){t=s(t)}),t}var s=Array.prototype.slice.call(arguments,0),o=s.length;return t.domain=function(o){return arguments.length?(s[0].domain(o),t):s[0].domain()},t.range=function(e){return arguments.length?(s[o-1].range(e),t):s[o-1].range()},t.step=function(t){return s[t]},t}function h(t,s){if(O.call(this,t,s),this.cssclass="mpld3-"+this.props.xy+"grid","x"==this.props.xy)this.transform="translate(0,"+this.ax.height+")",this.position="bottom",this.scale=this.ax.xdom,this.tickSize=-this.ax.height;else{if("y"!=this.props.xy)throw"unrecognized grid xy specifier: should be 'x' or 'y'";this.transform="translate(0,0)",this.position="left",this.scale=this.ax.ydom,this.tickSize=-this.ax.width}}function l(t,s){O.call(this,t,s);var o={bottom:[0,this.ax.height],top:[0,0],left:[0,0],right:[this.ax.width,0]},e={bottom:"x",top:"x",left:"y",right:"y"};this.transform="translate("+o[this.props.position]+")",this.props.xy=e[this.props.position],this.cssclass="mpld3-"+this.props.xy+"axis",this.scale=this.ax[this.props.xy+"dom"]}function c(t,s){if("undefined"==typeof s){if(this.ax=null,this.fig=null,"display"!==this.trans)throw"ax must be defined if transform != 'display'"}else this.ax=s,this.fig=s.fig;if(this.zoomable="data"===t,this.x=this["x_"+t],this.y=this["y_"+t],"undefined"==typeof this.x||"undefined"==typeof this.y)throw"unrecognized coordinate code: "+t}function d(t,s){O.call(this,t,s),this.data=t.fig.get_data(this.props.data),this.pathcodes=this.props.pathcodes,this.pathcoords=new c(this.props.coordinates,this.ax),this.offsetcoords=new c(this.props.offsetcoordinates,this.ax),this.datafunc=a()}function u(t,s){O.call(this,t,s),(null==this.props.facecolors||0==this.props.facecolors.length)&&(this.props.facecolors=["none"]),(null==this.props.edgecolors||0==this.props.edgecolors.length)&&(this.props.edgecolors=["none"]);var o=this.ax.fig.get_data(this.props.offsets);(null===o||0===o.length)&&(o=[null]);var e=Math.max(this.props.paths.length,o.length);if(o.length===e)this.offsets=o;else{this.offsets=[];for(var i=0;e>i;i++)this.offsets.push(n(o,i))}this.pathcoords=new c(this.props.pathcoordinates,this.ax),this.offsetcoords=new c(this.props.offsetcoordinates,this.ax)}function f(s,o){O.call(this,s,o);var e=this.props;e.facecolor="none",e.edgecolor=e.color,delete e.color,e.edgewidth=e.linewidth,delete e.linewidth,this.defaultProps=d.prototype.defaultProps,d.call(this,s,e),this.datafunc=t.svg.line().interpolate("linear")}function y(s,o){O.call(this,s,o),this.marker=null!==this.props.markerpath?0==this.props.markerpath[0].length?null:F.path().call(this.props.markerpath[0],this.props.markerpath[1]):null===this.props.markername?null:t.svg.symbol(this.props.markername).size(Math.pow(this.props.markersize,2))();var e={paths:[this.props.markerpath],offsets:s.fig.get_data(this.props.data),xindex:this.props.xindex,yindex:this.props.yindex,offsetcoordinates:this.props.coordinates,edgecolors:[this.props.edgecolor],edgewidths:[this.props.edgewidth],facecolors:[this.props.facecolor],alphas:[this.props.alpha],zorder:this.props.zorder,id:this.props.id};this.requiredProps=u.prototype.requiredProps,this.defaultProps=u.prototype.defaultProps,u.call(this,s,e)}function 
g(t,s){O.call(this,t,s),this.coords=new c(this.props.coordinates,this.ax)}function m(t,s){O.call(this,t,s),this.text=this.props.text,this.position=this.props.position,this.coords=new c(this.props.coordinates,this.ax)}function x(s,o){function e(t){return new Date(t[0],t[1],t[2],t[3],t[4],t[5])}function i(t,s){return"date"!==t?s:[e(s[0]),e(s[1])]}function r(s,o,e){var i="date"===s?t.time.scale():"log"===s?t.scale.log():t.scale.linear();return i.domain(o).range(e)}O.call(this,s,o),this.axnum=this.fig.axes.length,this.axid=this.fig.figid+"_ax"+(this.axnum+1),this.clipid=this.axid+"_clip",this.props.xdomain=this.props.xdomain||this.props.xlim,this.props.ydomain=this.props.ydomain||this.props.ylim,this.sharex=[],this.sharey=[],this.elements=[];var n=this.props.bbox;this.position=[n[0]*this.fig.width,(1-n[1]-n[3])*this.fig.height],this.width=n[2]*this.fig.width,this.height=n[3]*this.fig.height,this.props.xdomain=i(this.props.xscale,this.props.xdomain),this.props.ydomain=i(this.props.yscale,this.props.ydomain),this.x=this.xdom=r(this.props.xscale,this.props.xdomain,[0,this.width]),this.y=this.ydom=r(this.props.yscale,this.props.ydomain,[this.height,0]),"date"===this.props.xscale&&(this.x=F.multiscale(t.scale.linear().domain(this.props.xlim).range(this.props.xdomain.map(Number)),this.xdom)),"date"===this.props.yscale&&(this.x=F.multiscale(t.scale.linear().domain(this.props.ylim).range(this.props.ydomain.map(Number)),this.ydom));for(var a=this.props.axes,p=0;p0&&this.buttons.forEach(function(t){t.actions.filter(s).length>0&&t.deactivate()})},F.Button=v,v.prototype=Object.create(O.prototype),v.prototype.constructor=v,v.prototype.setState=function(t){t?this.activate():this.deactivate()},v.prototype.click=function(){this.active?this.deactivate():this.activate()},v.prototype.activate=function(){this.toolbar.deactivate_by_action(this.actions),this.onActivate(),this.active=!0,this.toolbar.toolbar.select("."+this.cssclass).classed({pressed:!0}),this.sticky||this.deactivate()},v.prototype.deactivate=function(){this.onDeactivate(),this.active=!1,this.toolbar.toolbar.select("."+this.cssclass).classed({pressed:!1})},v.prototype.sticky=!1,v.prototype.actions=[],v.prototype.icon=function(){return""},v.prototype.onActivate=function(){},v.prototype.onDeactivate=function(){},v.prototype.onDraw=function(){},F.ButtonFactory=function(t){function s(t){v.call(this,t,this.buttonID)}if("string"!=typeof t.buttonID)throw"ButtonFactory: buttonID must be present and be a string";s.prototype=Object.create(v.prototype),s.prototype.constructor=s;for(var o in t)s.prototype[o]=t[o];return 
s},F.Plugin=A,A.prototype=Object.create(O.prototype),A.prototype.constructor=A,A.prototype.requiredProps=[],A.prototype.defaultProps={},A.prototype.draw=function(){},F.ResetPlugin=z,F.register_plugin("reset",z),z.prototype=Object.create(A.prototype),z.prototype.constructor=z,z.prototype.requiredProps=[],z.prototype.defaultProps={},F.ZoomPlugin=w,F.register_plugin("zoom",w),w.prototype=Object.create(A.prototype),w.prototype.constructor=w,w.prototype.requiredProps=[],w.prototype.defaultProps={button:!0,enabled:null},w.prototype.activate=function(){this.fig.enable_zoom()},w.prototype.deactivate=function(){this.fig.disable_zoom()},w.prototype.draw=function(){this.props.enabled?this.fig.enable_zoom():this.fig.disable_zoom()},F.BoxZoomPlugin=_,F.register_plugin("boxzoom",_),_.prototype=Object.create(A.prototype),_.prototype.constructor=_,_.prototype.requiredProps=[],_.prototype.defaultProps={button:!0,enabled:null},_.prototype.activate=function(){this.enable&&this.enable()},_.prototype.deactivate=function(){this.disable&&this.disable()},_.prototype.draw=function(){function t(t){if(this.enabled){var o=s.extent();s.empty()||t.set_axlim([o[0][0],o[1][0]],[o[0][1],o[1][1]])}t.axes.call(s.clear())}F.insert_css("#"+this.fig.figid+" rect.extent."+this.extentClass,{fill:"#fff","fill-opacity":0,stroke:"#999"});var s=this.fig.getBrush();this.enable=function(){this.fig.showBrush(this.extentClass),s.on("brushend",t.bind(this)),this.enabled=!0},this.disable=function(){this.fig.hideBrush(this.extentClass),this.enabled=!1},this.toggle=function(){this.enabled?this.disable():this.enable()},this.disable()},F.TooltipPlugin=k,F.register_plugin("tooltip",k),k.prototype=Object.create(A.prototype),k.prototype.constructor=k,k.prototype.requiredProps=["id"],k.prototype.defaultProps={labels:null,hoffset:0,voffset:10,location:"mouse"},k.prototype.draw=function(){function s(t,s){this.tooltip.style("visibility","visible").text(null===r?"("+t+")":n(r,s))}function o(){if("mouse"===a){var s=t.mouse(this.fig.canvas.node());this.x=s[0]+this.props.hoffset,this.y=s[1]-this.props.voffset}this.tooltip.attr("x",this.x).attr("y",this.y)}function e(){this.tooltip.style("visibility","hidden")}var i=F.get_element(this.props.id,this.fig),r=this.props.labels,a=this.props.location;this.tooltip=this.fig.canvas.append("text").attr("class","mpld3-tooltip-text").attr("x",0).attr("y",0).text("").style("visibility","hidden"),"bottom left"==a||"top left"==a?(this.x=i.ax.position[0]+5+this.props.hoffset,this.tooltip.style("text-anchor","beginning")):"bottom right"==a||"top right"==a?(this.x=i.ax.position[0]+i.ax.width-5+this.props.hoffset,this.tooltip.style("text-anchor","end")):this.tooltip.style("text-anchor","middle"),"bottom left"==a||"bottom right"==a?this.y=i.ax.position[1]+i.ax.height-5+this.props.voffset:("top left"==a||"top right"==a)&&(this.y=i.ax.position[1]+5+this.props.voffset),i.elements().on("mouseover",s.bind(this)).on("mousemove",o.bind(this)).on("mouseout",e.bind(this))},F.LinkedBrushPlugin=P,F.register_plugin("linkedbrush",P),P.prototype=Object.create(F.Plugin.prototype),P.prototype.constructor=P,P.prototype.requiredProps=["id"],P.prototype.defaultProps={button:!0,enabled:null},P.prototype.activate=function(){this.enable&&this.enable()},P.prototype.deactivate=function(){this.disable&&this.disable()},P.prototype.draw=function(){function s(s){l!=this&&(t.select(l).call(p.clear()),l=this,p.x(s.xdom).y(s.ydom))}function o(t){var s=h[t.axnum];if(s.length>0){var 
o=s[0].props.xindex,e=s[0].props.yindex,i=p.extent();p.empty()?c.selectAll("path").classed("mpld3-hidden",!1):c.selectAll("path").classed("mpld3-hidden",function(t){return i[0][0]>t[o]||i[1][0]t[e]||i[1][1]1?s[1]:""},"object"==typeof module&&module.exports?module.exports=F:this.mpld3=F,console.log("Loaded mpld3 version "+F.version)}(d3); \ No newline at end of file diff --git a/python/libs/mne/html_templates/__init__.py b/python/libs/mne/html_templates/__init__.py new file mode 100644 index 0000000..24cbd45 --- /dev/null +++ b/python/libs/mne/html_templates/__init__.py @@ -0,0 +1,3 @@ +"""Jinja2 HTML templates.""" + +from ._templates import repr_templates_env, report_templates_env diff --git a/python/libs/mne/html_templates/_templates.py b/python/libs/mne/html_templates/_templates.py new file mode 100644 index 0000000..28fd936 --- /dev/null +++ b/python/libs/mne/html_templates/_templates.py @@ -0,0 +1,25 @@ +import jinja2 + +autoescape = jinja2.select_autoescape( + default=True, + default_for_string=True +) + +# For _html_repr_() +repr_templates_env = jinja2.Environment( + loader=jinja2.PackageLoader( + package_name='mne.html_templates', + package_path='repr' + ), + autoescape=autoescape +) + +# For mne.Report +report_templates_env = jinja2.Environment( + loader=jinja2.PackageLoader( + package_name='mne.html_templates', + package_path='report' + ), + autoescape=autoescape +) +report_templates_env.filters['zip'] = zip diff --git a/python/libs/mne/html_templates/report/code.html.jinja b/python/libs/mne/html_templates/report/code.html.jinja new file mode 100644 index 0000000..4550d85 --- /dev/null +++ b/python/libs/mne/html_templates/report/code.html.jinja @@ -0,0 +1,24 @@ +
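The two Jinja2 environments above are consumed like any other: look up a template by file name, then render it with keyword arguments. A minimal sketch (not part of the diff; the template name comes from the repr templates added later in this diff, and the values are illustrative):

    from mne.html_templates import repr_templates_env

    template = repr_templates_env.get_template('inverse_operator.html.jinja')
    html = template.render(
        channels='306 MEG, 60 EEG',  # illustrative values
        source_space_descr='surface space with 8196 vertices',
        source_orientation='loose (0.2)',
    )
    print(html)  # an HTML <table> fragment; autoescaping is on by default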
diff --git a/python/libs/mne/html_templates/report/code.html.jinja b/python/libs/mne/html_templates/report/code.html.jinja
new file mode 100644
index 0000000..4550d85
--- /dev/null
+++ b/python/libs/mne/html_templates/report/code.html.jinja
@@ -0,0 +1,24 @@
[section markup lost in extraction; surviving variable: {{ code }}]
diff --git a/python/libs/mne/html_templates/report/cov.html.jinja b/python/libs/mne/html_templates/report/cov.html.jinja
new file mode 100644
index 0000000..02d3661
--- /dev/null
+++ b/python/libs/mne/html_templates/report/cov.html.jinja
@@ -0,0 +1,23 @@
[section markup lost in extraction; surviving variables: {{ matrix | safe }}, {{ svd | safe }}]
diff --git a/python/libs/mne/html_templates/report/epochs.html.jinja b/python/libs/mne/html_templates/report/epochs.html.jinja
new file mode 100644
index 0000000..19c640c
--- /dev/null
+++ b/python/libs/mne/html_templates/report/epochs.html.jinja
@@ -0,0 +1,27 @@
[section markup lost in extraction; surviving variables: {{ repr | safe }}, {{ drop_log | safe }}, {{ metadata | safe }}, {{ erp_imgs | safe }}, {{ psd | safe }}, {{ ssp_projs | safe }}]
diff --git a/python/libs/mne/html_templates/report/evoked.html.jinja b/python/libs/mne/html_templates/report/evoked.html.jinja
new file mode 100644
index 0000000..bd5da6c
--- /dev/null
+++ b/python/libs/mne/html_templates/report/evoked.html.jinja
@@ -0,0 +1,26 @@
[section markup lost in extraction; surviving variables: {{ joint | safe }}, {{ slider | safe }}, {{ gfp | safe }}, {{ whitened | safe }}, {{ ssp_projs | safe }}]
diff --git a/python/libs/mne/html_templates/report/footer.html.jinja b/python/libs/mne/html_templates/report/footer.html.jinja
new file mode 100644
index 0000000..9733155
--- /dev/null
+++ b/python/libs/mne/html_templates/report/footer.html.jinja
@@ -0,0 +1,10 @@
[footer markup lost in extraction; no template variables survive]
diff --git a/python/libs/mne/html_templates/report/forward.html.jinja b/python/libs/mne/html_templates/report/forward.html.jinja
new file mode 100644
index 0000000..687c4eb
--- /dev/null
+++ b/python/libs/mne/html_templates/report/forward.html.jinja
@@ -0,0 +1,23 @@
[section markup lost in extraction; surviving variables: {{ repr | safe }}, {{ sensitivity_maps | safe }}]
diff --git a/python/libs/mne/html_templates/report/header.html.jinja b/python/libs/mne/html_templates/report/header.html.jinja
new file mode 100644
index 0000000..692dbab
--- /dev/null
+++ b/python/libs/mne/html_templates/report/header.html.jinja
@@ -0,0 +1,56 @@
[page header markup lost in extraction; surviving variables: {{ include | safe }}, {{ title }}]
diff --git a/python/libs/mne/html_templates/report/html.html.jinja b/python/libs/mne/html_templates/report/html.html.jinja
new file mode 100644
index 0000000..9592944
--- /dev/null
+++ b/python/libs/mne/html_templates/report/html.html.jinja
@@ -0,0 +1,23 @@
[section markup lost in extraction; surviving variable: {{ html | safe }}]
diff --git a/python/libs/mne/html_templates/report/ica.html.jinja b/python/libs/mne/html_templates/report/ica.html.jinja
new file mode 100644
index 0000000..988552f
--- /dev/null
+++ b/python/libs/mne/html_templates/report/ica.html.jinja
@@ -0,0 +1,29 @@
[section markup lost in extraction; surviving variables: {{ repr | safe }}, {{ overlay | safe }}, {{ ecg_scores | safe }}, {{ ecg | safe }}, {{ eog_scores | safe }}, {{ eog | safe }}, {{ topographies | safe }}, {{ properties | safe }}]
diff --git a/python/libs/mne/html_templates/report/image.html.jinja b/python/libs/mne/html_templates/report/image.html.jinja
new file mode 100644
index 0000000..be8fe0b
--- /dev/null
+++ b/python/libs/mne/html_templates/report/image.html.jinja
@@ -0,0 +1,36 @@
[figure markup lost in extraction; surviving logic: {{ img }} is inlined when image_format == 'svg', {{ title }} is used otherwise, and {{ caption }} is rendered when caption is not none]
diff --git a/python/libs/mne/html_templates/report/inverse.html.jinja b/python/libs/mne/html_templates/report/inverse.html.jinja
new file mode 100644
index 0000000..12f6ba5
--- /dev/null
+++ b/python/libs/mne/html_templates/report/inverse.html.jinja
@@ -0,0 +1,23 @@
[section markup lost in extraction; surviving variables: {{ repr | safe }}, {{ source_space | safe }}]
diff --git a/python/libs/mne/html_templates/report/raw.html.jinja b/python/libs/mne/html_templates/report/raw.html.jinja
new file mode 100644
index 0000000..719069b
--- /dev/null
+++ b/python/libs/mne/html_templates/report/raw.html.jinja
@@ -0,0 +1,25 @@
[section markup lost in extraction; surviving variables: {{ repr | safe }}, {{ psd | safe }}, {{ butterfly | safe }}, {{ ssp_projs | safe }}]
diff --git a/python/libs/mne/html_templates/report/slider.html.jinja b/python/libs/mne/html_templates/report/slider.html.jinja
new file mode 100644
index 0000000..24227bf
--- /dev/null
+++ b/python/libs/mne/html_templates/report/slider.html.jinja
@@ -0,0 +1,60 @@
[slider markup lost in extraction; no template variables survive]
diff --git a/python/libs/mne/html_templates/report/toc.html.jinja b/python/libs/mne/html_templates/report/toc.html.jinja
new file mode 100644
index 0000000..1396a97
--- /dev/null
+++ b/python/libs/mne/html_templates/report/toc.html.jinja
@@ -0,0 +1,12 @@
[markup lost in extraction; surviving text: the "Table of contents" heading]
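The zip filter registered on report_templates_env in _templates.py above is not a Jinja2 built-in; it exposes Python's zip so report templates can iterate two sequences in lockstep. A minimal sketch of how such a filter behaves (standalone, not part of the diff; the sequence names are made up):

    from mne.html_templates import report_templates_env

    t = report_templates_env.from_string(
        '{% for name, val in names | zip(values) %}{{ name }}={{ val }} {% endfor %}'
    )
    print(t.render(names=['a', 'b'], values=[1, 2]))  # a=1 b=2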
diff --git a/python/libs/mne/html_templates/repr/epochs.html.jinja b/python/libs/mne/html_templates/repr/epochs.html.jinja
new file mode 100644
index 0000000..eed6d67
--- /dev/null
+++ b/python/libs/mne/html_templates/repr/epochs.html.jinja
@@ -0,0 +1,22 @@
+<table>
+    <tr>
+        <th>Number of events</th>
+        <td>{{ epochs.events|length }}</td>
+    </tr>
+    <tr>
+        <th>Events</th>
+        {% if events is not none %}
+        <td>{{ events|join('<br/>') | safe }}</td>
+        {% else %}
+        <td>Not available</td>
+        {% endif %}
+    </tr>
+    <tr>
+        <th>Time range</th>
+        <td>{{ '%.3f'|format(epochs.tmin) }} – {{ '%.3f'|format(epochs.tmax) }} sec</td>
+    </tr>
+    <tr>
+        <th>Baseline</th>
+        <td>{{ baseline }}</td>
+    </tr>
+</table>
diff --git a/python/libs/mne/html_templates/repr/forward.html.jinja b/python/libs/mne/html_templates/repr/forward.html.jinja
new file mode 100644
index 0000000..f7294cf
--- /dev/null
+++ b/python/libs/mne/html_templates/repr/forward.html.jinja
@@ -0,0 +1,18 @@
+<table>
+    <tr>
+        <th>Good channels</th>
+        <td>{{ good_channels }}</td>
+    </tr>
+    <tr>
+        <th>Bad channels</th>
+        <td>{{ bad_channels }}</td>
+    </tr>
+    <tr>
+        <th>Source space</th>
+        <td>{{ source_space_descr }}</td>
+    </tr>
+    <tr>
+        <th>Source orientation</th>
+        <td>{{ source_orientation }}</td>
+    </tr>
+</table>
diff --git a/python/libs/mne/html_templates/repr/ica.html.jinja b/python/libs/mne/html_templates/repr/ica.html.jinja
new file mode 100644
index 0000000..0fb0e05
--- /dev/null
+++ b/python/libs/mne/html_templates/repr/ica.html.jinja
@@ -0,0 +1,32 @@
+<table>
+    <tr>
+        <th>Method</th>
+        <td>{{ method }}</td>
+    </tr>
+    <tr>
+        <th>Fit</th>
+        <td>{% if fit_on %}{{ n_iter }} iterations on {{ fit_on }} ({{ n_samples }} samples){% else %}no{% endif %}</td>
+    </tr>
+    {% if fit_on %}
+    <tr>
+        <th>ICA components</th>
+        <td>{{ n_components }}</td>
+    </tr>
+    <tr>
+        <th>Explained variance</th>
+        <td>{{ (explained_variance * 100) | round(1) }} %</td>
+    </tr>
+    <tr>
+        <th>Available PCA components</th>
+        <td>{{ n_pca_components }}</td>
+    </tr>
+    <tr>
+        <th>Channel types</th>
+        <td>{{ ch_types|join(', ') }}</td>
+    </tr>
+    <tr>
+        <th>ICA components marked for exclusion</th>
+        <td>{% if excludes %}{{ excludes|join('<br/>' | safe) }}{% else %}—{% endif %}</td>
+    </tr>
+    {% endif %}
+</table>
diff --git a/python/libs/mne/html_templates/repr/info.html.jinja b/python/libs/mne/html_templates/repr/info.html.jinja
new file mode 100644
index 0000000..ec01af7
--- /dev/null
+++ b/python/libs/mne/html_templates/repr/info.html.jinja
@@ -0,0 +1,74 @@
+<table>
+    <tr>
+        <th>Measurement date</th>
+        {% if meas_date is not none %}
+        <td>{{ meas_date }}</td>
+        {% else %}
+        <td>Unknown</td>
+        {% endif %}
+    </tr>
+    <tr>
+        <th>Experimenter</th>
+        {% if experimenter is not none %}
+        <td>{{ experimenter }}</td>
+        {% else %}
+        <td>Unknown</td>
+        {% endif %}
+    </tr>
+    <tr>
+        <th>Participant</th>
+        {% if subject_info is not none %}
+        {% if 'his_id' in subject_info.keys() %}
+        <td>{{ subject_info['his_id'] }}</td>
+        {% endif %}
+        {% else %}
+        <td>Unknown</td>
+        {% endif %}
+    </tr>
+    <tr>
+        <th>Digitized points</th>
+        {% if dig is not none %}
+        <td>{{ dig|length }} points</td>
+        {% else %}
+        <td>Not available</td>
+        {% endif %}
+    </tr>
+    <tr>
+        <th>Good channels</th>
+        <td>{{ good_channels }}</td>
+    </tr>
+    <tr>
+        <th>Bad channels</th>
+        <td>{{ bad_channels }}</td>
+    </tr>
+    <tr>
+        <th>EOG channels</th>
+        <td>{{ eog }}</td>
+    </tr>
+    <tr>
+        <th>ECG channels</th>
+        <td>{{ ecg }}</td>
+    </tr>
+    {% if sfreq is not none %}
+    <tr>
+        <th>Sampling frequency</th>
+        <td>{{ '%0.2f'|format(sfreq) }} Hz</td>
+    </tr>
+    {% endif %}
+    {% if highpass is not none %}
+    <tr>
+        <th>Highpass</th>
+        <td>{{ '%0.2f'|format(highpass) }} Hz</td>
+    </tr>
+    {% endif %}
+    {% if lowpass is not none %}
+    <tr>
+        <th>Lowpass</th>
+        <td>{{ '%0.2f'|format(lowpass) }} Hz</td>
+    </tr>
+    {% endif %}
+    {% if projs is not none %}
+    <tr>
+        <th>Projections</th>
+        <td>{{ projs|join('<br/>') | safe }}</td>
+    </tr>
+    {% endif %}
+</table>
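Throughout these repr templates, values that embed <br/> separators are passed through the safe filter; without it, the autoescaping enabled in _templates.py would render the tags as literal text. A standalone demonstration (not part of the diff; the channel names are illustrative):

    import jinja2

    env = jinja2.Environment(autoescape=True)
    t = env.from_string("{{ items|join('<br/>') }} | {{ items|join('<br/>') | safe }}")
    print(t.render(items=['EEG 001', 'EEG 002']))
    # EEG 001&lt;br/&gt;EEG 002 | EEG 001<br/>EEG 002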
diff --git a/python/libs/mne/html_templates/repr/inverse_operator.html.jinja b/python/libs/mne/html_templates/repr/inverse_operator.html.jinja
new file mode 100644
index 0000000..31376a6
--- /dev/null
+++ b/python/libs/mne/html_templates/repr/inverse_operator.html.jinja
@@ -0,0 +1,14 @@
+<table>
+    <tr>
+        <th>Channels</th>
+        <td>{{ channels }}</td>
+    </tr>
+    <tr>
+        <th>Source space</th>
+        <td>{{ source_space_descr }}</td>
+    </tr>
+    <tr>
+        <th>Source orientation</th>
+        <td>{{ source_orientation }}</td>
+    </tr>
+</table>
diff --git a/python/libs/mne/html_templates/repr/raw.html.jinja b/python/libs/mne/html_templates/repr/raw.html.jinja new file mode 100644 index 0000000..7bc8542 --- /dev/null +++ b/python/libs/mne/html_templates/repr/raw.html.jinja @@ -0,0 +1,12 @@ +{{ info_repr[:-9] | safe }} + {% if filenames %} + + Filenames + {{ filenames|join('
') }} + + {% endif %} + + Duration + {{ duration }} (HH:MM:SS) + + diff --git a/python/libs/mne/icons/README.rst b/python/libs/mne/icons/README.rst new file mode 100644 index 0000000..452b642 --- /dev/null +++ b/python/libs/mne/icons/README.rst @@ -0,0 +1,23 @@ +.. -*- mode: rst -*- + + +Documentation +============= + +The icons are used in ``mne/viz/_brain/_brain.py`` for the toolbar. +It is necessary to compile those icons into a resource file for proper use by +the application. + +The resource configuration file ``mne/icons/mne.qrc`` describes the location of +the resources in the filesystem and also defines aliases for their use in the code. + +To automatically generate the resource file in ``mne/icons``: + +.. code-block:: bash + + pyrcc5 -o mne/icons/resources.py mne/icons/mne.qrc + +These Material design icons are provided by Google under the `Apache 2.0`_ license. + + +.. _Apache 2.0: https://github.com/google/material-design-icons/blob/master/LICENSE diff --git a/python/libs/mne/icons/clear-black-18dp.svg b/python/libs/mne/icons/clear-black-18dp.svg new file mode 100644 index 0000000..ce2b30f --- /dev/null +++ b/python/libs/mne/icons/clear-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/help-black-18dp.svg b/python/libs/mne/icons/help-black-18dp.svg new file mode 100644 index 0000000..d040ebf --- /dev/null +++ b/python/libs/mne/icons/help-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/mne-bigsur-white.png b/python/libs/mne/icons/mne-bigsur-white.png new file mode 100644 index 0000000..cb395d8 Binary files /dev/null and b/python/libs/mne/icons/mne-bigsur-white.png differ diff --git a/python/libs/mne/icons/mne-circle-black.png b/python/libs/mne/icons/mne-circle-black.png new file mode 100644 index 0000000..4fe25a0 Binary files /dev/null and b/python/libs/mne/icons/mne-circle-black.png differ diff --git a/python/libs/mne/icons/mne-splash.png b/python/libs/mne/icons/mne-splash.png new file mode 100644 index 0000000..a40632a Binary files /dev/null and b/python/libs/mne/icons/mne-splash.png differ diff --git a/python/libs/mne/icons/mne.qrc b/python/libs/mne/icons/mne.qrc new file mode 100644 index 0000000..01eda75 --- /dev/null +++ b/python/libs/mne/icons/mne.qrc @@ -0,0 +1,18 @@ + + + visibility_on-black-18dp.svg + visibility_off-black-18dp.svg + help-black-18dp.svg + play-black-18dp.svg + reset-black-18dp.svg + pause-black-18dp.svg + scale-black-18dp.svg + restore-black-18dp.svg + clear-black-18dp.svg + screenshot-black-18dp.svg + movie-black-18dp.svg + mne-circle-black.png + mne-bigsur-white.png + mne-splash.png + + diff --git a/python/libs/mne/icons/mne_icon-cropped.png b/python/libs/mne/icons/mne_icon-cropped.png new file mode 100644 index 0000000..9cc2c33 Binary files /dev/null and b/python/libs/mne/icons/mne_icon-cropped.png differ diff --git a/python/libs/mne/icons/mne_icon.png b/python/libs/mne/icons/mne_icon.png new file mode 100644 index 0000000..66320ca Binary files /dev/null and b/python/libs/mne/icons/mne_icon.png differ diff --git a/python/libs/mne/icons/movie-black-18dp.svg b/python/libs/mne/icons/movie-black-18dp.svg new file mode 100644 index 0000000..eff987a --- /dev/null +++ b/python/libs/mne/icons/movie-black-18dp.svg @@ -0,0 +1,72 @@ + + + + + + image/svg+xml + + + + + + + + + + + + diff --git a/python/libs/mne/icons/pause-black-18dp.svg b/python/libs/mne/icons/pause-black-18dp.svg new file mode 100644 index 0000000..9915148 --- /dev/null +++ 
diff --git a/python/libs/mne/icons/pause-black-18dp.svg b/python/libs/mne/icons/pause-black-18dp.svg
new file mode 100644
index 0000000..9915148
--- /dev/null
+++ b/python/libs/mne/icons/pause-black-18dp.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/libs/mne/icons/play-black-18dp.svg b/python/libs/mne/icons/play-black-18dp.svg
new file mode 100644
index 0000000..fd272de
--- /dev/null
+++ b/python/libs/mne/icons/play-black-18dp.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/libs/mne/icons/reset-black-18dp.svg b/python/libs/mne/icons/reset-black-18dp.svg
new file mode 100644
index 0000000..92136ba
--- /dev/null
+++ b/python/libs/mne/icons/reset-black-18dp.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/libs/mne/icons/resources.py b/python/libs/mne/icons/resources.py
new file mode 100644
index 0000000..5703cf7
--- /dev/null
+++ b/python/libs/mne/icons/resources.py
@@ -0,0 +1,6075 @@
+# -*- coding: utf-8 -*-
+
+# Resource object code
+#
+# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
+#
+# WARNING! All changes made in this file will be lost!
+
+from PyQt5 import QtCore
+
+qt_resource_data = b"\
+\x00\x00\x00\xfa\
+\x3c\
+\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
+\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
+\x30\x2f\x73\x76\x67\x22\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
+\x30\x20\x30\x20\x32\x34\x20\x32\x34\x22\x20\x66\x69\x6c\x6c\x3d\
+\x22\x62\x6c\x61\x63\x6b\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x31\
+\x38\x70\x78\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x38\x70\
+\x78\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x30\x20\x30\
+\x68\x32\x34\x76\x32\x34\x48\x30\x56\x30\x7a\x22\x20\x66\x69\x6c\
+\x6c\x3d\x22\x6e\x6f\x6e\x65\x22\x2f\x3e\x3c\x70\x61\x74\x68\x20\
+\x64\x3d\x22\x4d\x31\x36\x20\x39\x76\x31\x30\x48\x38\x56\x39\x68\
+\x38\x6d\x2d\x31\x2e\x35\x2d\x36\x68\x2d\x35\x6c\x2d\x31\x20\x31\
+\x48\x35\x76\x32\x68\x31\x34\x56\x34\x68\x2d\x33\x2e\x35\x6c\x2d\
+\x31\x2d\x31\x7a\x4d\x31\x38\x20\x37\x48\x36\x76\x31\x32\x63\x30\
+\x20\x31\x2e\x31\x2e\x39\x20\x32\x20\x32\x20\x32\x68\x38\x63\x31\
+\x2e\x31\x20\x30\x20\x32\x2d\x2e\x39\x20\x32\x2d\x32\x56\x37\x7a\
+\x22\x2f\x3e\x3c\x2f\x73\x76\x67\x3e\
+\x00\x00\x01\x35\
+\x3c\
+\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
+\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
+\x30\x2f\x73\x76\x67\x22\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\
+\x30\x20\x30\x20\x32\x34\x20\x32\x34\x22\x20\x66\x69\x6c\x6c\x3d\
+\x22\x62\x6c\x61\x63\x6b\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x31\
+\x38\x70\x78\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x38\x70\
+\x78\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x30\x20\x30\
+\x68\x32\x34\x76\x32\x34\x48\x30\x56\x30\x7a\x22\x20\x66\x69\x6c\
+\x6c\x3d\x22\x6e\x6f\x6e\x65\x22\x2f\x3e\x3c\x70\x61\x74\x68\x20\
+\x64\x3d\x22\x4d\x39\x20\x31\x36\x68\x32\x56\x38\x48\x39\x76\x38\
+\x7a\x6d\x33\x2d\x31\x34\x43\x36\x2e\x34\x38\x20\x32\x20\x32\x20\
+\x36\x2e\x34\x38\x20\x32\x20\x31\x32\x73\x34\x2e\x34\x38\x20\x31\
+\x30\x20\x31\x30\x20\x31\x30\x20\x31\x30\x2d\x34\x2e\x34\x38\x20\
+\x31\x30\x2d\x31\x30\x53\x31\x37\x2e\x35\x32\x20\x32\x20\x31\x32\
+\x20\x32\x7a\x6d\x30\x20\x31\x38\x63\x2d\x34\x2e\x34\x31\x20\x30\
+\x2d\x38\x2d\x33\x2e\x35\x39\x2d\x38\x2d\x38\x73\x33\x2e\x35\x39\
+\x2d\x38\x20\x38\x2d\x38\x20\x38\x20\x33\x2e\x35\x39\x20\x38\x20\
+\x38\x2d\x33\x2e\x35\x39\x20\x38\x2d\x38\x20\x38\x7a\x6d\x31\x2d\
+\x34\x68\x32\x56\x38\x68\x2d\x32\x76\x38\x7a\x22\x2f\x3e\x3c\x2f\
+\x73\x76\x67\x3e\
+\x00\x00\x01\x77\
+\x3c\
+\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
+\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\ +\x30\x2f\x73\x76\x67\x22\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\ +\x30\x20\x30\x20\x32\x34\x20\x32\x34\x22\x20\x66\x69\x6c\x6c\x3d\ +\x22\x62\x6c\x61\x63\x6b\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x31\ +\x38\x70\x78\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x31\x38\x70\ +\x78\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x30\x20\x30\ +\x68\x32\x34\x76\x32\x34\x48\x30\x56\x30\x7a\x22\x20\x66\x69\x6c\ +\x6c\x3d\x22\x6e\x6f\x6e\x65\x22\x2f\x3e\x3c\x70\x61\x74\x68\x20\ +\x64\x3d\x22\x4d\x31\x34\x20\x31\x32\x63\x30\x2d\x31\x2e\x31\x2d\ +\x2e\x39\x2d\x32\x2d\x32\x2d\x32\x73\x2d\x32\x20\x2e\x39\x2d\x32\ +\x20\x32\x20\x2e\x39\x20\x32\x20\x32\x20\x32\x20\x32\x2d\x2e\x39\ +\x20\x32\x2d\x32\x7a\x6d\x2d\x32\x2d\x39\x63\x2d\x34\x2e\x39\x37\ +\x20\x30\x2d\x39\x20\x34\x2e\x30\x33\x2d\x39\x20\x39\x48\x30\x6c\ +\x34\x20\x34\x20\x34\x2d\x34\x48\x35\x63\x30\x2d\x33\x2e\x38\x37\ +\x20\x33\x2e\x31\x33\x2d\x37\x20\x37\x2d\x37\x73\x37\x20\x33\x2e\ +\x31\x33\x20\x37\x20\x37\x2d\x33\x2e\x31\x33\x20\x37\x2d\x37\x20\ +\x37\x63\x2d\x31\x2e\x35\x31\x20\x30\x2d\x32\x2e\x39\x31\x2d\x2e\ +\x34\x39\x2d\x34\x2e\x30\x36\x2d\x31\x2e\x33\x6c\x2d\x31\x2e\x34\ +\x32\x20\x31\x2e\x34\x34\x43\x38\x2e\x30\x34\x20\x32\x30\x2e\x33\ +\x20\x39\x2e\x39\x34\x20\x32\x31\x20\x31\x32\x20\x32\x31\x63\x34\ +\x2e\x39\x37\x20\x30\x20\x39\x2d\x34\x2e\x30\x33\x20\x39\x2d\x39\ +\x73\x2d\x34\x2e\x30\x33\x2d\x39\x2d\x39\x2d\x39\x7a\x22\x2f\x3e\ +\x3c\x2f\x73\x76\x67\x3e\ +\x00\x00\xb9\xd1\ +\x89\ +\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ +\x00\x02\x7c\x00\x00\x02\x7c\x08\x06\x00\x00\x00\x64\xed\x7c\x56\ +\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xc4\x00\x00\x0d\xc4\ +\x01\xa6\x85\xaa\xdf\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\ +\x74\x77\x61\x72\x65\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\ +\x65\x52\x65\x61\x64\x79\x71\xc9\x65\x3c\x00\x00\xb9\x5e\x49\x44\ +\x41\x54\x78\xda\xec\xbd\x79\xb0\x25\xd7\x5d\xe7\xf9\x3b\x79\xef\ +\x7d\x5b\x2d\x2a\x49\xa5\x5d\x96\x4a\xc8\x6a\xcb\x08\x8c\xbc\x3b\ +\x0c\x34\x32\x1e\x96\x86\x06\xec\xa1\x61\xe8\x9e\xa1\xb1\x63\x22\ +\xba\x23\x86\x98\x06\xe6\x8f\xe9\x8e\xe0\x0f\x60\xa2\x23\x66\x7a\ +\x66\x02\xe8\x06\x06\x7a\x82\xc1\x36\xcd\x74\x37\x60\xbc\x81\x81\ +\xc6\x1e\x2c\x63\x90\xf1\x82\xb5\xd8\xb2\x64\xcb\x96\x4a\x9b\xb5\ +\x55\xa9\xf6\xb7\xdc\x25\xcf\x9c\xdf\x59\x32\x7f\x79\xf2\x9c\xbc\ +\xf7\x55\xdd\xfb\xaa\x5e\xbd\xef\x27\x22\xeb\xbe\xca\x9b\x37\x6f\ +\xde\x73\xf3\xe6\xf9\xe6\x6f\x25\x02\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ 
+\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x00\x5c\x9a\x28\x0c\x01\xb8\x14\xd0\x5a\xdf\ +\xe3\xff\xbc\xdb\x2c\x87\xfc\xdf\xdf\x15\x6d\x26\x9f\x03\x00\x80\ +\x8b\xcd\x51\xbf\x04\x4e\x9a\xe5\xc1\xe8\xb9\x93\x4a\xa9\x07\x30\ +\x54\x00\x82\x0f\xec\x25\x51\x77\xc4\x8b\x36\x5e\x6e\x35\x0b\xff\ +\xff\x1e\x8c\x0c\x00\x60\x0f\x89\x43\x16\x7f\x4f\xfa\xc7\x07\x8c\ +\x18\x3c\x89\xa1\x01\x10\x7c\xe0\x72\x10\x77\xdf\xe5\x1f\x21\xec\ +\x00\x00\x20\x2d\x04\x59\xfc\xb1\x65\xf0\x5e\x88\x40\x00\xc1\x07\ +\x76\x83\xc0\xbb\xc7\x0b\x3c\x7e\x3c\x82\x51\x01\x00\x80\xf3\xe2\ +\x01\x2f\xfe\x3e\xc9\x8f\x10\x80\x00\x82\x0f\x5c\x6c\x91\xf7\x0e\ +\x2f\xf0\xde\x01\x81\x07\x00\x00\x0b\x15\x80\x1f\x36\xcb\x87\x10\ +\x0f\x08\x20\xf8\xc0\x4e\x08\xbc\x43\x5e\xdc\xfd\x08\x39\x2b\x1e\ +\x12\x28\x00\x00\x60\x67\x39\xca\xc2\xcf\x2c\x9f\x34\xe2\xef\x43\ +\x18\x0e\x00\xc1\x07\xe6\x29\xf4\x58\xe4\xfd\x94\x17\x7b\x00\x00\ +\x00\x2e\x0d\xd8\xd5\xfb\x5e\xb3\xbc\x0f\x96\x3f\x00\xc1\x07\xce\ +\x57\xe4\x71\xa2\xc5\xcf\x78\x91\x07\x4b\x1e\x00\x00\x5c\xda\x1c\ +\x35\xcb\xbf\x25\xe7\xf6\x3d\x8a\xe1\x00\x10\x7c\xa0\x4b\xe4\x05\ +\x97\x2d\x0b\xbd\xbb\x31\x22\x00\x00\xb0\x2b\x61\x57\xef\xfb\xe0\ +\xf2\x05\x10\x7c\x20\x16\x7a\x47\xbc\xc8\x7b\x17\xc1\x9a\x07\x00\ +\x00\x97\x0b\x47\xc9\x59\xfd\xde\x8b\x4c\x5f\x00\xc1\xb7\xb7\x85\ +\xde\xdd\x42\xe8\x01\x00\x00\xb8\x3c\x39\x29\x84\xdf\x51\x0c\x07\ +\x04\x1f\xd8\x3b\x42\xef\x1e\xf3\xf0\x0b\x84\x62\xc8\x00\x00\xb0\ +\xd7\x78\xaf\x59\x7e\x09\xc2\x0f\x82\x0f\x40\xe8\x01\x00\x00\x80\ +\xf0\x03\x10\x7c\x60\x17\x0a\xbd\x23\xe6\xe1\x3d\x10\x7a\x00\x00\ +\x00\x22\x7e\xc9\x2c\xbf\x8a\x18\x3f\x08\x3e\xb0\xbb\x85\x1e\x27\ +\x60\xfc\x0a\x21\x46\x0f\x00\x00\x40\x1e\x1b\xe3\x67\x44\xdf\x2f\ +\x62\x28\x20\xf8\xc0\xee\x13\x7b\xfc\xc3\xe5\x84\x0c\x64\xdd\x02\ +\x00\x00\x98\x85\xa3\x66\xf9\x39\x94\x73\x81\xe0\x03\xbb\x43\xe8\ +\xdd\x43\xce\x7d\x7b\x04\xa3\x01\x00\x00\xe0\x3c\xb8\xd7\x2c\xef\ +\x46\x7c\x1f\x04\x1f\xb8\x34\x85\xde\x21\x2f\xf4\xd0\xfe\x0c\x00\ +\x00\xc0\x3c\xf8\x25\xb8\x79\x21\xf8\xc0\xa5\x25\xf6\xde\x45\x2e\ +\x56\x0f\xee\x5b\x00\x00\x00\xf3\x84\x7b\xf4\xbe\x1b\xbd\x7a\x21\ +\xf8\xc0\xc5\x15\x7a\x47\x08\xd9\xb7\x00\x00\x00\x16\x0f\xac\x7d\ +\x10\x7c\xe0\x22\x89\xbd\x77\x78\xb1\x07\xab\x1e\x00\x00\x80\x9d\ +\x00\xd6\x3e\x08\x3e\xb0\x83\x42\x0f\xa5\x56\x00\x00\x00\x5c\x4c\ +\x38\x93\xf7\x57\x31\x0c\x10\x7c\x60\x71\x62\x8f\x7b\xdf\x7e\x90\ +\x90\x81\x0b\x00\x00\xe0\xe2\xc2\xa5\x5b\xde\x8d\x82\xcd\x10\x7c\ +\x60\xfe\x62\xef\x67\xc9\x59\xf6\x00\x00\x00\x80\x4b\x81\xa3\x66\ +\x79\x27\x5c\xbc\x10\x7c\x60\x3e\x42\x0f\x2e\x5c\x00\x00\x00\x97\ +\x32\x70\xf1\x42\xf0\x81\x0b\x14\x7b\x47\xc8\xb9\x70\xef\xc6\x68\ +\x00\x00\x00\xb8\x84\x79\xaf\x11\x7d\xef\xc6\x30\x40\xf0\x81\xed\ +\x8b\x3d\x16\x79\x9f\x20\x64\xe1\x02\x00\x00\xd8\x1d\xb0\x6b\xf7\ +\x6d\x88\xeb\x83\xe0\x03\xb3\x8b\xbd\x77\x91\x2b\xb9\x02\x00\x00\ +\x00\xec\x26\x8e\x12\xe2\xfa\x20\xf8\xc0\x4c\x62\x0f\xc9\x19\x00\ +\x00\x00\x76\x33\x27\xbd\xe8\xbb\x17\x43\x01\xc1\x07\xd2\x62\x8f\ +\xad\x7a\xef\xc2\x48\x00\x00\x00\xb8\x0c\xe0\xb2\x2d\xef\xc5\x30\ 
+\x40\xf0\x01\x88\x3d\x00\x00\x00\x10\x7d\x00\x82\x6f\x4f\x08\x3d\ +\x4e\xca\xe0\x4c\xdc\x7b\x30\x1a\x00\x00\x00\x2e\x43\x50\xb6\x05\ +\x82\x0f\x62\x8f\x5c\x26\x2e\xca\xae\x00\x00\x00\xb8\x9c\x41\xd9\ +\x16\x08\x3e\x88\x3d\x8c\x06\x00\x00\x00\x88\x3e\x00\xc1\x07\xb1\ +\x07\x00\x00\x00\x40\xf4\x01\x08\xbe\x5d\x26\xf8\xee\x87\xd8\x03\ +\x00\x00\xb0\x47\x41\x4c\x1f\x04\xdf\x9e\x10\x7b\xc8\xc6\x05\x00\ +\x00\xb0\xd7\x41\xf6\x2e\x04\x1f\xc4\x1e\x00\x00\x00\x00\xd1\x07\ +\x20\xf8\x76\xa7\xd8\xe3\xee\x19\x3f\x8b\x91\x00\x00\x00\x00\x2a\ +\xde\x86\x8e\x1c\x10\x7c\x97\x93\xd8\x7b\x17\xa1\x37\x2e\x00\x00\ +\x00\x10\x73\xd2\x8b\x3e\xf4\xde\x85\xe0\xdb\xf5\x62\xef\x1e\x72\ +\x19\xb9\x00\x00\x00\x00\x48\x8b\xbe\xdb\x8c\xe8\x3b\x89\xa1\x80\ +\xe0\xdb\xad\x62\xef\x6e\x2f\xf6\x0e\x61\x34\x00\x00\x00\x80\x2c\ +\x6c\xe1\x7b\x1b\x44\x1f\x04\xdf\x6e\x14\x7b\xa8\xb5\x07\x00\x00\ +\x00\xcc\x0e\x6a\xf4\x41\xf0\xed\x4a\xc1\xc7\xfd\x71\xdf\x81\x91\ +\x00\x00\x00\x00\x66\x06\x35\xfa\x20\xf8\x76\x95\xd8\xfb\x45\xf3\ +\xf0\x0b\x18\x09\x00\x00\x00\x60\xdb\x20\x73\x17\x82\x6f\x57\x88\ +\xbd\x7b\x08\x49\x1a\x00\x00\x00\xc0\xf9\x82\x24\x0e\x08\xbe\x4b\ +\x5e\xec\x71\xdc\xde\x13\x84\x24\x0d\x00\x00\x00\xe0\x42\xb8\xd7\ +\x08\xbe\xb7\x61\x18\xe6\x47\x81\x21\x98\x2b\x1f\x84\xd8\x03\x00\ +\x00\x00\x2e\x98\x7b\x7c\x78\x14\x98\x13\xb0\xf0\xcd\x09\x73\x62\ +\x72\x17\x8d\x5f\xc1\x48\x00\x00\x00\x00\x73\xe3\xb5\x28\xca\x0c\ +\xc1\x77\x29\x89\x3d\x2e\xbd\x72\x3f\x46\x02\x00\x00\x00\x98\x2b\ +\xa8\xcf\x07\xc1\x77\x49\x09\x3e\x16\x7b\xa8\xb7\x07\x00\x00\x00\ +\xcc\x9f\x5f\x35\x82\xef\xe7\x30\x0c\x10\x7c\x17\x5b\xec\xfd\x22\ +\xa1\x04\x0b\x00\x00\x00\xb0\x48\x50\xaa\x05\x82\xef\xa2\x8a\x3d\ +\xb8\x72\x01\x00\x00\x80\xc5\x73\xd4\x08\xbe\xdb\x30\x0c\xe7\x0f\ +\xb2\x74\x2f\x0c\x24\x69\x00\x00\x00\x00\x8b\xe7\x08\xb2\x76\x2f\ +\x0c\x58\xf8\xce\x13\x64\xe5\x02\x00\x00\x00\x3b\x0e\x17\x64\x3e\ +\x8a\x61\x80\xe0\xdb\x29\xb1\x87\x02\xcb\x00\x00\x00\xc0\xce\x83\ +\x82\xcc\xe7\x09\x5c\xba\xe7\xc7\xaf\x40\xec\x01\x00\x00\x00\x3b\ +\x0e\x17\x64\x7e\x07\x86\x61\xfb\xc0\xc2\xb7\x4d\xd0\x2b\x17\x00\ +\x00\x00\xb8\xa8\x1c\x25\x57\x90\x19\xb5\xf9\xb6\x01\x2c\x7c\xdb\ +\x07\x25\x58\x00\x00\x00\x80\x8b\xc7\x11\xb3\xfc\x2c\x86\x61\x7b\ +\xc0\xc2\xb7\x0d\xbc\x19\xf9\x83\x18\x09\x00\x00\x00\xe0\xa2\xc2\ +\xd6\xbd\xdb\x60\xe5\x83\xe0\x5b\x94\xe0\x7b\xc2\xdf\x59\x00\x00\ +\x00\x00\xe0\xe2\xf2\x5e\x23\xf8\xde\x8d\x61\x80\xe0\x9b\xb7\xd8\ +\x7b\x97\x79\x78\x0f\x46\x02\x00\x00\x00\xb8\x64\x40\x99\x16\x08\ +\xbe\xb9\x0b\x3e\x58\xf7\x00\x00\x00\x80\x4b\x0b\x58\xf9\x20\xf8\ +\xe6\x2a\xf6\xde\x45\xb0\xee\x01\x00\x00\x00\x97\x22\xb0\xf2\x41\ +\xf0\xcd\x4d\xf0\xc1\xba\x07\x00\x00\x00\x5c\x9a\xc0\xca\x07\xc1\ +\x37\x17\xb1\xf7\x2e\x82\x75\x0f\x00\x00\x00\xb8\x94\x81\x95\x0f\ +\x82\xef\x82\x05\x1f\xac\x7b\x00\x00\x00\xc0\xa5\x0d\xac\x7c\x10\ +\x7c\x17\x24\xf6\x50\x77\x0f\x00\x00\x00\xb8\xf4\x41\x5d\xbe\x29\ +\xa0\xd3\x46\x37\x3f\x83\x21\x00\x00\x00\x00\x2e\x79\xb8\xbf\x3d\ +\xba\x6f\x74\x00\x0b\x5f\x06\xad\xf5\xdd\xe6\xe1\x7e\x8c\x04\x00\ +\x00\x00\xb0\x2b\x38\xaa\x94\xba\x0d\xc3\x90\x06\x16\xbe\x3c\xb0\ +\xee\x01\x00\x00\x00\xbb\x87\x23\x3e\xd1\x12\x24\x80\x85\x2f\x81\ +\x39\x61\xd8\x34\xcc\xc9\x1a\x87\x30\x1a\x00\x00\x00\xc0\xae\xe1\ +\x5e\xa5\xd4\xdb\x30\x0c\x10\x7c\xb3\x0a\x3e\xbe\x43\x40\x29\x16\ +\x00\x00\x00\x60\xf7\x81\x12\x2d\x09\xe0\xd2\x4d\x03\x77\x2e\x00\ +\x00\x00\x80\x39\xfc\xb2\x01\x16\xbe\x08\x24\x6b\x00\x00\x00\x00\ +\xbb\x1a\x24\x6f\x24\x80\x85\xaf\xcd\x4f\x61\x08\x00\x00\x00\x80\ +\x5d\xcb\x11\x5f\x47\x17\x40\xf0\x75\x82\x93\x04\x00\x00\x00\xd8\ 
+\xdd\xfc\x08\x86\xa0\x09\x5c\xba\x02\x74\xd6\x00\x00\x00\x00\x2e\ +\x0b\x4e\x2a\xa5\xae\xc4\x30\xd4\xc0\xc2\x87\x3b\x02\x00\x00\x00\ +\xe0\x72\xe3\x10\xdc\xba\x10\x7c\x5d\xdc\x83\x21\x00\x00\x00\x00\ +\x2e\x0b\x60\xc4\x11\xc0\xa5\xeb\x41\x76\x2e\x00\x00\x00\x70\x59\ +\x01\xb7\xae\x00\x16\xbe\x1a\x64\xe7\x02\x00\x00\x00\x97\x0f\xec\ +\xd6\xbd\x07\xc3\x00\xc1\x17\x83\x93\x02\x00\x00\x00\xb8\xbc\x80\ +\x5b\xd7\x03\x97\x2e\x59\x77\xee\x11\x72\xbd\x73\x01\x00\x00\x00\ +\x70\xf9\xf0\x80\x52\xea\xb5\x18\x06\x58\xf8\x02\xf7\x60\x08\x00\ +\x00\x00\x80\xcb\x8e\xbb\xbd\x51\x07\x82\x0f\x43\x60\xf9\x2e\x0c\ +\x01\x00\x00\x00\x70\x59\x72\x0f\x86\x00\x82\x2f\x80\x5a\x3d\x00\ +\x00\x00\xc0\xe5\x09\x8c\x3a\x84\x18\x3e\x94\x63\x01\x00\x00\x00\ +\x2e\x6f\x8e\x2a\xa5\x6e\xdb\xeb\x83\x00\x0b\x1f\xd1\xdd\x18\x02\ +\x00\x00\x00\xe0\xb2\xe5\x08\xe2\xf8\x20\xf8\x18\x98\x7a\x01\x00\ +\x00\x80\xcb\x9b\x3d\x6f\xdc\x81\xe0\xc3\x49\x00\x00\x00\x00\x5c\ +\xee\xec\x79\xe3\x0e\x62\xf8\x0c\xf8\x1d\x00\x00\x00\x00\x97\x35\ +\xf7\x2a\xa5\xde\x06\xc1\xb7\x77\xc5\xde\x3d\xe6\xe1\x13\xf8\x1d\ +\x00\x00\x00\x00\x97\xb9\xe0\x31\xec\xe5\xcf\xbf\xd7\x5d\xba\x70\ +\xe7\x02\x00\x00\x00\x7b\x00\x5f\x95\x03\x82\x6f\x8f\xf2\x6d\xf8\ +\x09\x00\x00\x00\x00\x7b\x82\x23\x10\x7c\xf8\xf2\x01\x00\x00\x00\ +\x70\x79\xb3\xa7\x2d\x7c\x7b\x3d\x86\x0f\x09\x1b\x00\x00\x00\xc0\ +\xde\xe0\x43\x4a\xa9\x77\xee\xd5\x0f\xbf\x67\x2d\x7c\x46\xeb\x1d\ +\xc2\xb9\x0f\x00\x00\x00\xec\x19\xf6\xf4\xbc\xbf\x67\x2d\x7c\xc8\ +\xd0\x05\x00\x00\x00\xf6\x98\xe8\xd9\xc3\x99\xba\x7b\x39\x86\xef\ +\x08\x4e\x7d\x00\x00\x00\x60\xef\xb0\x97\xbd\x7b\x10\x7c\x00\x00\ +\x00\x00\xd8\x2b\xec\xd9\xc4\x8d\xbd\x2c\xf8\x6e\xc5\x79\x0f\x00\ +\x00\x00\xec\x29\x60\xe1\xdb\x83\x1c\xc1\x79\x0f\x00\x00\x00\xec\ +\x29\x60\xe1\x03\x00\x00\x00\x00\x00\x10\x7c\x97\x1b\xf7\xe0\xeb\ +\x07\x00\x00\x00\xf6\x14\x7b\x36\x9c\x0b\x16\x3e\x00\x00\x00\x00\ +\xec\x15\x8e\x40\xf0\x01\x00\x00\x00\x00\x00\x08\xbe\xcb\x05\xad\ +\xf5\x11\x7c\xf5\x00\x00\x00\x00\xd8\x2b\xec\xc9\x8a\xd3\xe8\xb2\ +\x01\x00\x00\x00\xec\x51\xe1\xb3\x47\xbb\x6d\xc0\xa5\x0b\x00\x00\ +\x00\x00\x00\xc1\x07\x00\x00\x00\x00\x00\x20\xf8\x00\x00\x00\x00\ +\x00\x00\x04\x1f\x00\x00\x00\x00\x00\x80\xe0\x03\x00\x00\x00\x00\ +\x00\x10\x7c\x00\x00\x00\x00\x00\x00\x82\x0f\x00\x00\x00\x00\x00\ +\x40\xf0\x01\x00\x00\x00\x00\x40\xf0\x01\x00\x00\x00\x00\x00\x08\ +\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\x00\x00\x00\x00\x00\x20\xf8\ +\x00\x00\x00\x00\x00\x00\x04\x1f\x00\x00\x00\x00\x00\x80\xe0\x03\ +\x00\x00\x00\x00\x00\x10\x7c\x00\x00\x00\x00\x00\x10\x7c\x00\x00\ +\x00\x00\x00\x00\x82\x0f\x00\x00\x00\x00\x00\x40\xf0\x01\x00\x00\ +\x00\x00\x00\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\x00\x00\x00\ +\x00\x00\x20\xf8\x00\x00\x00\x00\x00\x00\x04\x1f\x00\x00\x00\x00\ +\x00\x80\xe0\x03\x00\x00\x00\x00\x80\xe0\x03\x00\x00\x00\x00\x00\ +\x10\x7c\x00\x00\x00\x00\x00\x00\x82\x0f\x00\x00\x00\x00\x00\x40\ +\xf0\x01\x00\x00\x00\x00\x00\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\ +\x07\x00\x00\x00\x00\x00\x20\xf8\x00\x00\x00\x00\x00\x20\xf8\x00\ +\x00\x00\x00\x00\x00\x04\x1f\x00\x00\x00\x00\x00\x80\xe0\x03\x00\ +\x00\x00\x00\x00\x10\x7c\x00\x00\x00\x00\x00\x00\x82\x0f\x00\x00\ +\x00\x00\x00\x40\xf0\x01\x00\x00\x00\x00\x00\x08\x3e\x00\x00\x00\ +\x00\x00\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\x00\x00\x00\x00\ +\x00\x20\xf8\x00\x00\x00\x00\x00\x00\x04\x1f\x00\x00\x00\x00\x00\ +\x80\xe0\x03\x00\x00\x00\x00\x00\x10\x7c\x00\x00\x00\x00\x00\x00\ +\x82\x0f\x00\x00\x00\x00\x00\x40\xf0\x01\x00\x00\x00\x00\x40\xf0\ +\x01\x00\x00\x00\x00\x00\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\ 
+\x00\x00\x00\x00\x00\x20\xf8\x00\x00\x00\x00\x00\x00\x04\x1f\x00\ +\x00\x00\x00\x00\x80\xe0\x03\x00\x00\x00\x00\x00\x10\x7c\x00\x00\ +\x00\x00\x00\x10\x7c\x00\x00\x00\x00\x00\x00\x82\x0f\x00\x00\x00\ +\x00\x00\x40\xf0\x01\x00\x00\x00\x00\x00\x08\x3e\x00\x00\x00\x00\ +\x00\x00\xc1\x07\x00\x00\x00\x00\x00\x20\xf8\x00\x00\x00\x00\x00\ +\x00\x04\x1f\x00\x00\x00\x00\x00\x04\x1f\x86\x00\x00\x00\x00\x00\ +\x00\x82\x0f\x00\x00\x00\x00\x00\x40\xf0\x01\x00\x00\x00\x00\x00\ +\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\x00\x00\x00\x00\x00\x20\ +\xf8\x00\x00\x00\x00\x00\x00\x04\x1f\x00\x00\x00\x00\x00\x80\xe0\ +\x03\x00\x00\x00\x00\x80\xe0\x03\x00\x00\x00\x00\x00\x10\x7c\x00\ +\x00\x00\x00\x00\x00\x82\x0f\x00\x00\x00\x00\x00\x40\xf0\x01\x00\ +\x00\x00\x00\x00\x08\x3e\x00\x00\x00\x00\x00\x00\xc1\x07\x00\x00\ +\x00\x00\x00\x20\xf8\x00\x00\x00\x00\x00\x20\xf8\x00\x00\x00\x00\ +\x00\x00\x04\x1f\x00\x00\x00\x00\x00\xd8\x7d\xf4\x31\x04\xbb\x87\ +\x63\xa7\x89\x9e\x3f\x61\xfe\x50\x89\x27\x75\xe6\x45\x7e\xdb\xc9\ +\x98\xa8\x1c\x99\x65\xd2\x7e\xce\xfe\x69\xa4\x7f\x7f\xc9\x3c\xf6\ +\x9a\xbb\xd7\xd4\x7e\x3f\xfe\xef\xc0\x6c\x77\x68\x1f\xd1\x35\x07\ +\xcc\x5d\x03\x6e\x1b\x1a\x6c\x99\xb1\x7e\xd6\x7c\x57\xe7\xcc\x78\ +\x6b\x9d\xf8\x9a\xd4\x36\x76\x76\x1e\xdb\x4e\xb6\xcc\xb2\xd9\x5c\ +\xff\xba\xdb\x88\x7a\x0b\xfa\x9e\x1e\x36\xe7\xe4\x48\x77\x6f\xb3\ +\xed\xcf\x3d\xcb\xe7\xef\x78\x6e\xc5\x9c\x9f\xaf\xde\xbf\xbb\xcf\ +\xa3\x93\xe6\xb7\xfa\x8c\x39\x87\xb6\xca\x19\xcf\x85\x79\x9f\x57\ +\x99\x6d\xae\x34\xe7\xd1\x2d\xfd\xf9\x4d\x1e\x1b\xe6\xe4\xf8\xba\ +\xf9\xac\x9b\x33\x6e\x7f\xde\xe7\xd2\x3c\xc6\x6d\x11\xaf\x5f\xc0\ +\x3e\xf5\x0e\x9f\xab\x57\x9b\xe5\x66\xb3\x2c\xe3\xf2\x0f\xc1\x07\ +\xe6\xc3\x5f\x3e\x48\xf4\x1b\x1f\x8d\x2e\x06\xaa\xfd\x0b\x4f\x5d\ +\x10\x47\x43\xa2\xe1\x39\x33\x79\x98\xc9\x79\xcc\x57\xd6\xd2\x6d\ +\x13\x5e\x56\x0c\xcc\x8f\xd5\x08\xb8\xc1\x9a\x59\xcc\x44\x59\x2c\ +\x3b\x11\x18\xde\x43\x5e\x40\x94\xf9\xff\x9a\x79\xfe\xb6\x6b\x89\ +\x7e\xf8\xf5\x44\xdf\xfe\x2a\xa2\x2b\x56\xf1\xfd\x30\x47\xcd\xf8\ +\x7e\xf8\x11\xa2\x8f\x7f\x9d\xe8\x45\x33\xde\x13\x3f\xce\xf1\x77\ +\xa6\xe3\x8b\xbb\x78\x5e\xab\xf4\x73\xc9\x89\x2e\x7e\x9d\x59\x46\ +\x67\x88\xd6\x1f\x33\x5f\xf1\x46\xbd\xcd\x43\xbf\xb2\xb8\xef\xe8\ +\x5f\xfc\xad\xf9\xac\x9b\x89\x63\xb7\x27\x56\xe6\xd8\xa3\xcf\xa5\ +\x73\x13\x5c\x11\xbd\xae\x98\x3e\x4e\x56\x94\x98\x9b\x97\xdf\xb9\ +\x7b\x77\x8b\xbe\xcf\xae\x13\xfd\xaf\x2f\x98\x73\x6a\x98\xf8\xec\ +\x1d\xe3\xd9\xf5\x3d\x64\xff\xaf\xd2\xe7\x52\xea\x5c\xfb\x41\x73\ +\x1e\xfd\xeb\x43\x66\x92\x9f\xd3\x0d\xc4\x53\x46\xec\xfd\xf4\x59\ +\xa2\xc7\xca\x19\xc5\x8f\xca\x9c\x2f\x2a\x23\x94\xd4\x14\x11\x55\ +\x74\x88\xa6\x69\x9f\x51\xcd\x20\xbc\xb6\x33\x4e\xe1\x33\x9c\x87\ +\x62\x93\xe7\x84\x56\x3b\x7b\xae\xfe\x23\xb3\xfc\x2b\xb3\xdc\x8a\ +\x29\x00\x82\x0f\xcc\x87\x4d\x73\xb7\x7f\xe2\xec\x8c\x17\x9c\x94\ +\xa8\x30\xa2\x6e\x6c\x04\xdd\x88\x9c\x28\x28\xcd\x44\xa2\xa5\xf0\ +\x3b\x69\xae\x4d\x7c\xe7\x6e\x84\x5f\x6f\xc5\x6c\x7e\xd0\x3c\x2e\ +\x3b\xab\x5f\xbc\xcf\x97\xcd\x71\x3c\x73\x9c\xe8\x79\xf3\x9a\xbe\ +\x79\xfe\x7b\xbf\x15\xdf\x0f\xf3\x9f\x1e\x22\xfa\x7d\xb3\x9c\xd9\ +\x4a\x08\x12\xca\x4c\xca\xd3\x04\x5f\xf4\xa8\x73\xdf\x7f\x98\x08\ +\xcd\x77\x38\xba\xc2\x08\xfc\xd3\xe6\xff\x23\xf7\xd4\x64\xb2\xb8\ +\xcf\x7c\xc2\x7c\xd6\xe3\x9b\x99\xc9\xb8\xc8\x9c\x93\xd3\x04\x8a\ +\x7c\xae\x88\xc6\x61\x06\x71\x7c\xdc\x9c\xdb\xbf\xfe\x04\xd1\x2f\ +\xdf\x65\x6e\x64\x76\xa9\x05\x7a\x68\x7e\x9b\x27\xcc\xf7\x76\x6c\ +\x9c\x16\x26\xba\xe8\xb8\x16\xc4\xdf\x43\x19\x3d\x5f\x76\x0b\xe6\ +\xa4\xa8\xf2\xdb\x9c\xd1\xf3\xb5\x20\xf1\xa9\x69\x3f\x67\xd9\x7d\ 
+\x43\x73\x21\x16\x2c\x9d\x13\x51\x05\x75\x7a\x47\xb4\x3e\x7f\xb1\ +\x97\xfc\xbd\xce\x22\xf8\xce\x07\xb5\x00\xcb\xe7\x36\x38\xeb\x4f\ +\x31\x70\x69\x03\x67\xdc\x2e\x42\x69\xb7\x34\xae\x20\x5a\x2c\xd1\ +\x15\xb1\xda\x5e\x5c\x21\xd9\x7a\xb7\x7a\x0d\xd1\xfe\x9b\x89\xd6\ +\x6e\x74\xff\x97\xdb\x96\x66\x72\x19\x9e\x22\xda\x78\x91\xe8\xdc\ +\x53\xe6\x87\x7c\xd4\x08\xcd\x97\xcc\x45\x79\xc3\xfd\xa2\x1b\xfb\ +\x34\x3c\xf6\x1c\xd1\x6f\xfe\x17\xe7\xc6\xdc\xeb\x3c\xfc\x82\x11\ +\x7c\xf7\x9b\x09\x71\x33\xfa\xbe\x32\xdf\x53\xe3\xb9\xb2\x7e\xae\ +\xf3\x75\xd1\xf8\xb7\x9e\xf3\x57\xdd\xfe\x75\x44\x4b\xe6\x3b\x2e\ +\x76\xca\xf2\x1a\x1d\x43\x75\x8c\x65\xe6\xd8\x75\x73\x86\x50\x3a\ +\x3f\xa3\xab\x68\x1f\xc9\x31\x4c\x3c\xf7\x81\xe7\xdd\x72\xd9\xa0\ +\xdb\xd7\x83\xce\x6d\xf5\x94\xff\x53\xf7\xff\xd5\x0e\xfa\x05\xd5\ +\x76\xb4\x8a\xde\xa6\xe2\xd3\x17\xf1\x2b\xdb\x8e\x00\x2b\xce\xff\ +\x3d\xf4\xb6\x06\x10\x40\xf0\x81\x5d\x73\xd1\x57\x61\x52\x2d\xeb\ +\x09\xbe\x4b\xfc\xa9\x48\xfc\xb1\x4b\x96\x2d\x77\xcb\x57\x10\x1d\ +\xb8\x95\xe8\xe0\x2b\x8d\x38\x38\xe8\xd6\x4b\x51\xc9\xb1\x60\x23\ +\x73\xeb\xb6\xfe\x0d\xa2\xd3\x8f\x19\x21\xf3\xb8\x11\x83\x27\xdd\ +\x5d\xaf\xdc\xdf\x57\x79\x62\xfd\xec\xde\xfe\x5a\x36\x8c\xe0\xfd\ +\xb5\xbf\x31\x22\x79\x98\x16\x25\x5d\x42\x47\xd1\x0c\x22\xa8\x6b\ +\x9f\x09\x81\xc4\x71\x95\x83\x9b\xcc\x77\x7c\x87\x73\xcf\xef\xb8\ +\x20\x99\x55\xdc\xe5\xc6\xe2\x42\x44\x9f\x67\x3c\x21\xfa\xad\x27\ +\x88\x1e\x5f\xdf\xc5\x37\x79\x65\xb7\x68\x53\xfa\x3c\x05\x61\xea\ +\xf9\x72\xba\x08\x5c\xa8\x78\x2a\x67\x10\xa2\x42\x20\xce\x45\xd8\ +\x2d\xd8\x2c\xb5\x70\xd1\x0c\xa1\x07\x20\xf8\x2e\x6f\xc1\xd7\xba\ +\xb0\x97\x91\x55\x65\x1b\xe2\xcf\x5a\x83\x96\x9d\xc5\xef\xc0\xed\ +\x44\xab\xd7\x7b\x97\xee\xc0\x5f\x4b\xfc\x76\x2c\xf2\x58\xfc\x9d\ +\x7d\x92\xe8\xd4\x97\x8d\xb0\x79\x9a\x68\xeb\x65\x73\xbd\x34\x93\ +\xa9\x36\x22\xe7\x0f\x8c\xd8\xf9\xbb\xc7\xf7\xe6\x57\x32\x32\xc2\ +\xe2\xdf\x7f\x9a\xe8\xfe\x67\xf3\x56\xbb\xa4\xd0\x29\x33\x13\xb7\ +\x78\x2e\x29\x16\x67\x10\x3b\x61\x9f\x3d\x23\xe4\x57\xee\x72\x62\ +\x7e\xa1\x13\xf5\x34\xd1\x47\x53\x44\x5f\xd9\x21\x32\x62\xd1\xa7\ +\xa7\x88\x3e\x31\x46\x8f\x9d\x23\x7a\xdf\x53\x44\x2f\x6d\x5d\xa6\ +\xa2\x6f\x8a\xe8\x99\x2a\x18\xcb\x6e\x01\xa4\x4a\xea\xb6\x14\xee\ +\xf4\x8d\xc4\xb4\xe3\x28\x67\x18\xc3\x6d\xde\x5c\x6f\xe7\x5a\x3c\ +\x17\x11\x0a\x00\x04\x1f\x68\xdc\xd9\xea\xfc\x05\x5c\x95\xd3\xdd\ +\x89\x0d\xf1\x47\x4e\x10\xb0\xf0\x5b\xbd\xda\x08\xbf\x57\x18\x01\ +\x78\x93\x11\x0a\x87\x89\x06\x07\x8c\x68\xe8\x35\xb7\x65\xb7\x2f\ +\x8b\xbd\x73\xcf\x10\x9d\x79\xd2\x3d\x7e\xc5\x88\xc0\xdf\xfa\xb0\ +\x99\x58\x4f\xed\xbd\xef\xe4\xc1\xe7\x88\x3e\xf2\x30\xd1\xf1\x73\ +\xed\x71\x6d\x88\x34\x1d\x3d\x17\x89\x13\xa5\xf3\xc2\x65\xe6\x7d\ +\x26\x5c\xbc\xbd\x55\xf3\xdd\x0e\x16\x3b\x21\x37\x6e\x38\x12\xc7\ +\x90\x72\xf1\xb6\xc6\x22\xfa\x3c\xa9\x89\xb5\xb2\x6a\x27\x84\x5f\ +\x6a\x5f\x23\xb3\xed\x07\x9f\x27\xfa\xc3\xe7\x44\xb6\xeb\xe5\x24\ +\xfa\x74\xe6\x66\x2f\x27\xda\x32\xd7\x81\x4e\xd1\xb7\x83\x22\xaf\ +\xf5\x59\x3a\x84\x95\xca\x09\xb2\xdc\xf6\x65\x87\x48\xd4\x8b\x13\ +\x7d\x33\x5b\xf9\x16\x21\x4a\x01\x80\xe0\xdb\xa5\xc8\x09\x91\x84\ +\x35\x3f\x23\xfe\x1a\x62\x40\xc6\xff\x75\x6d\xcb\x27\x85\x11\x78\ +\xfd\x35\x23\xfe\x0e\xbb\x78\xbf\x15\xb3\x2c\x5f\xe9\x12\x39\xe4\ +\xc4\x6a\x63\xfe\xd8\xed\x7b\x9a\xe8\xd4\xb3\x44\x9f\xfb\x3c\xd1\ +\x7d\x0f\x3a\x57\xda\x5e\x61\x7d\x44\x74\xef\xd7\x88\x5e\x38\x43\ +\xd9\x78\xbb\xe4\x77\xa4\xf3\xe2\x5d\x75\x3c\x47\xb3\xc6\xf7\x45\ +\xeb\x77\xe2\xbc\xcc\xc5\x8d\x36\x8e\x4d\xa7\x8f\x7b\xea\x38\x44\ +\xfb\xcb\x9e\xcf\x89\x31\x78\x6e\x93\xe8\x4f\x5f\x20\x7a\x76\x73\ 
+\x97\x19\x5c\xb6\x11\xaf\xb8\x2d\xf7\xee\xb4\xb8\xd0\xdc\xeb\x77\ +\x6a\xf0\xca\xed\xc5\x2a\x67\xd7\x65\x44\xe2\xb6\x2c\x86\xf3\x10\ +\x7d\xba\x43\x9c\x9e\x87\x80\xcc\x1e\x1f\x84\x1f\x98\x02\xb2\x74\ +\x77\xf1\x04\x50\x5d\xc4\xc2\xd3\x2a\x73\xf1\x53\xed\x6d\xed\x53\ +\xd1\x73\xf1\x3e\xd9\xea\x57\x18\x91\xd7\x5f\x71\xdb\x72\x1d\xbf\ +\xc9\xd0\x65\x7f\x8e\xcf\x3a\x4b\x5f\xf5\x72\x23\xf2\x5e\x7a\x91\ +\xe8\xf7\xfe\x84\xe8\x9a\x43\x44\x6f\x7d\xcd\xe5\xff\x75\x94\x66\ +\x4c\x1e\x35\x9f\xf9\x23\x5f\x32\x63\x32\xaa\x85\x5d\xf5\x3d\x88\ +\x12\x0b\x8d\xea\x39\x62\xdc\x55\x87\x26\x93\xa2\x4f\xab\xcc\x73\ +\x14\x65\xbe\x8a\xc9\x5d\x9f\x67\x79\x87\xf3\x3e\x37\x53\xe7\x24\ +\x4d\x59\xaf\xdd\x6d\x67\xd7\x31\x67\xcf\x6f\xed\xce\xd1\x54\x36\ +\x69\xb5\x3f\xff\xcf\x17\x4e\x98\x73\xf3\x69\xa2\x7f\x7e\x84\xe8\ +\xba\xe5\xdd\x73\x7e\x25\x45\x9f\x12\x63\x91\x3b\xcf\xe4\x58\x29\ +\x61\xe1\x52\x89\x71\x54\x42\x34\xe4\xf6\x4d\x19\x21\xbf\xa0\xf3\ +\xc7\x8a\x3e\x8a\x32\xb4\x75\xfa\x07\xd0\xf8\xae\x55\x66\x9f\xa9\ +\xed\x55\xe6\xfa\x5a\x74\x88\xaa\x5c\x49\x98\xcc\x7b\xc5\xcf\x2b\ +\x9a\x92\xc8\xa1\xa3\xeb\xb6\x9a\x7d\xfc\x1a\x1f\x1d\x71\x7d\x20\ +\x01\x2c\x7c\xbb\x5d\xfc\xc5\x16\x91\x2e\x97\x58\xce\x15\x38\xe5\ +\x4e\xda\x5a\xfd\x94\x2b\xcc\xbc\xb4\x9f\x68\xed\x5a\xa2\x7d\x9c\ +\xe5\x7b\x3d\xd1\x60\x9f\x28\xdb\x62\xf6\xfd\xc0\x23\x44\x9f\xf8\ +\x2c\xd1\xfa\xd6\xe5\xff\x15\xbc\x78\x86\xe8\x8f\x8d\xd8\x7b\xf1\ +\x34\xcd\x94\x7d\xdb\x7a\x2e\x61\x19\x6b\xb9\x2b\xa5\x45\xab\xe3\ +\xb9\xf8\xbb\x6d\x58\xc1\x16\x4c\xc3\xda\x56\xb6\x3f\x57\xca\x82\ +\x97\x75\xd9\x76\x58\x02\x93\x16\xac\x19\xc6\x8d\x61\xd7\xee\x1f\ +\x3c\x4b\xf4\xf9\x93\xbb\xe7\xfc\x3a\x77\xdc\x1c\xf7\x73\xbe\x74\ +\x52\xca\x72\x9c\xb3\xe6\x76\x65\xe8\xea\x29\x89\x5e\xa9\xef\xac\ +\xdc\x21\x6b\x52\x47\x7c\xf2\xd4\x38\xc2\x9c\x15\xad\xc3\xa2\x97\ +\x74\x1f\x13\xcd\xe6\xe2\xd5\x53\xae\xc9\x53\xac\x71\x6a\xd6\xf8\ +\xbf\x59\xf6\x99\x78\x8d\x2a\x77\xee\xf7\x0f\x60\xe1\x03\x0b\x9c\ +\x5c\x93\x77\x71\x89\x3b\xcc\x99\xb6\xcd\xed\x57\xa5\xcd\x33\xbc\ +\xba\xd7\x77\x0b\xad\x11\xad\x5c\xed\x26\x24\xae\xeb\x37\x5a\x27\ +\x1a\x6e\x10\xfd\xe7\xbf\x20\x7a\xf3\x6b\x88\xfe\xab\x37\x5d\xde\ +\xdf\xc5\xfb\x1f\x20\xfa\x8f\x9f\x13\x96\x52\x5d\xdf\x46\xb5\x2c\ +\x56\xba\x39\x86\x2d\xcb\x9d\xb8\xa3\x0f\x93\x81\x8e\xf6\xd9\xb2\ +\x66\xa4\xde\x8f\x32\xfb\xdb\x01\xeb\x9e\xd2\x89\xcf\x5b\x08\xcb\ +\x67\xd1\xde\x9e\x28\x5a\xaf\x12\x16\x3a\xf1\x5c\x97\x35\x5b\x4d\ +\x84\x05\x46\xb5\xc7\xe6\x85\x4d\xa2\x9f\x7f\x98\xe8\x8d\x87\x88\ +\xae\xdd\x05\x56\xbe\xa1\xf9\x3d\x8d\xcc\x39\x36\x31\xc7\x5f\xdc\ +\x62\x3e\xc7\x75\x66\x31\xbf\x39\x1a\x44\xc6\xb9\xd4\x98\xcb\x73\ +\x28\x3e\xc7\xa8\x16\x7d\xba\x88\xc4\x5d\x91\xb0\x14\x92\x10\x7d\ +\x3b\x61\x22\x88\x2c\x93\x2d\xeb\x5a\xe2\xf3\xc6\x82\xaa\x55\xa3\ +\x30\x77\xfc\x3a\x71\x1e\x52\xc7\xfe\xa5\x35\x4d\x67\xca\xa1\x74\ +\xbd\x36\xb2\xf6\x65\xf7\x91\xba\x56\xeb\x6d\x7e\x07\xa9\x1b\x25\ +\x4a\x8c\x0d\x80\x85\x0f\x5c\xa2\x56\xbd\xb2\x6d\x59\xe9\xb4\xd2\ +\x65\xac\x7f\x39\x4b\x93\xb4\x26\xcd\x52\xf6\x85\xb7\x65\xeb\xdf\ +\xf2\x41\xa2\x7d\xd7\xbb\x6c\xdf\xe1\x2a\xd1\x1f\x7f\xc6\x77\x99\ +\xb8\x8c\xad\x7b\x1f\x78\xa0\x2e\xcc\xda\x18\xd3\xb2\xb6\x12\x24\ +\xc7\xbb\xab\xe6\xde\x2c\x99\xac\x34\x3d\xb9\x23\x67\x61\x5c\xe8\ +\x79\xa9\xa7\x24\x9e\xc8\xf3\x49\x77\xac\x4f\x9c\xe3\xf1\x73\x5d\ +\x16\xae\x86\xb5\x2f\x1a\x1b\x8e\xe3\xfb\xa3\x6f\xb8\xa2\xc6\xbb\ +\xe6\x67\x7f\xc2\xfc\x96\xbe\x68\x96\x87\xcc\xf2\xb4\xf9\x3f\x5b\ +\x94\xc7\x89\xcf\x1b\x8b\x1b\xf1\x5b\xce\xdd\xec\xb5\xac\x5c\x71\ +\xf6\x78\x6a\xdf\x0b\x38\x9f\x66\x4a\xd6\xd0\x19\x8b\x5f\xf4\xdb\ 
+\x90\xe7\x54\x32\x21\x25\xf3\x19\x3a\x2d\x7e\x65\xfe\x46\xb9\x75\ +\x9d\x9c\xf1\xb5\x33\xef\xe3\x3c\xf6\x39\x75\xac\xcb\xcc\x58\x02\ +\x58\xf8\xc0\x25\x64\xe1\x13\x17\xf4\x5c\xec\x56\xab\x3b\x83\x6e\ +\x6f\xdb\x12\x0d\x1d\xdb\x36\x2e\x44\x2a\x61\x9d\x92\xd6\xbf\x81\ +\x5b\xee\x7b\x82\xe8\x53\x0f\x13\xdd\x73\x19\x76\xe0\xe0\x32\x2c\ +\xff\xfb\x5f\x18\xf1\xf0\xb2\xb0\xb0\xc8\xef\x40\x58\x21\xb4\xb4\ +\xec\x45\x31\x93\xc9\xe7\x94\xfb\x7e\x35\x45\xd6\xb9\xdc\x73\xd3\ +\xf6\xb7\x13\xf8\x58\x4e\x2d\xe2\xbe\x92\xc7\xa6\x84\x25\xae\x68\ +\x9f\xc3\x4a\x76\x5a\x98\x88\x71\x8d\xc6\x41\x8b\xed\x94\x78\x7d\ +\xe3\xf3\x0b\x8b\x8e\x8e\x2c\x87\xff\xe1\x49\xa2\x9b\x57\x88\x7e\ +\xe8\x86\x4b\xfc\x6e\xbc\x27\xca\xe9\xf0\xf7\xfe\xbc\x59\x5e\x34\ +\xcb\x21\xb3\xfe\x5a\x67\xf1\x2b\xae\x74\x16\x3f\x25\x2c\x78\x5a\ +\x8a\xbe\x28\x7e\x2f\x75\xdd\x90\x16\xb4\xca\xaa\x17\x8f\xb1\xa6\ +\xee\x80\xd3\x79\x58\xf4\xe2\xe3\x20\x6a\x5a\xce\x53\xc7\x2b\x3b\ +\x4c\xc4\x9d\x3a\xd4\x94\x7d\xea\x6d\x6c\x1f\x0b\xd0\x44\xe7\x1c\ +\x15\xbd\xff\xd4\xd7\x52\xfb\x37\x9a\xbc\xd6\x52\x47\xdc\x60\x2e\ +\x48\x36\xf7\xff\x8c\xe0\x9c\xd6\x8b\x7d\xf6\xc9\x09\x40\xf0\x81\ +\xf9\x5d\x18\xa3\xd8\x3a\x79\x41\xd4\xb9\x0b\xa9\x9a\xb2\xad\xca\ +\xef\x77\xea\x76\x89\x09\x9d\x1f\xb7\x46\x44\xef\xf9\x18\xd1\xeb\ +\x5f\x49\x74\xe0\x32\xeb\xb3\xfb\xe0\x33\x44\x1f\x7f\x94\x5a\xae\ +\x99\xc6\x85\x59\x37\xc7\x3d\xd5\xa2\x29\x4c\xe6\x3a\xb6\xce\xaa\ +\xcc\xdc\xda\xf5\x5c\xd7\xf7\xb6\x60\x36\xcc\x78\x8c\xb9\x98\xf7\ +\x15\xb5\xf8\x98\x9a\xac\x51\x46\x9f\x25\x21\x50\x2b\x11\x59\xb4\ +\xc7\x4d\xe7\x92\x97\x22\xe1\x67\x83\xff\x23\xa1\xf3\xcc\x3a\xd1\ +\xef\x3f\x4d\xf4\xa6\xab\x2e\xed\x04\x8e\xd5\x83\x66\x38\x8d\x28\ +\x55\x67\x5c\xad\xcb\x4a\xf8\x99\x1b\x0d\x7d\xd2\xac\x3f\x6e\x1e\ +\x59\xf8\x19\xd1\xa7\xae\x36\xcb\x52\xe2\x77\xd9\x31\x2f\xe7\x7e\ +\xdb\x0d\x37\xf1\x45\xba\xb6\x35\x8e\x53\xb5\xaf\x67\xf2\x35\x2a\ +\x08\xfb\x2e\x21\xa2\x13\x63\xa3\x3a\x06\x28\xb7\xfd\xb4\x81\xcd\ +\x5d\x1f\xa7\xbd\x36\xf9\xa2\xbc\x96\xca\xb6\x51\x9b\xe5\x3b\x53\ +\xdb\xfc\x5e\x66\xdd\x25\x04\x1f\x04\x1f\x58\xec\x45\x31\x17\x1b\ +\xd6\xba\xa8\xa9\xcc\xb6\x29\xb1\x22\xd6\x29\xd5\xb1\x5d\xc7\xfe\ +\x78\x9b\x07\x1e\x27\xfa\xc0\x7d\x44\x3f\xf5\xf6\xcb\xe7\x2b\x38\ +\xb9\x41\xf4\xdb\x7f\x4d\xb4\xb9\x95\x9e\x39\xab\xb8\x1c\x6a\x5a\ +\xfa\x54\x6c\x19\x95\x16\xaf\x9c\xd5\x54\x77\x3c\x97\x12\x9a\x2a\ +\x12\x85\xa9\x98\xba\x05\xb0\xf9\x94\x11\xf8\x5b\x2e\xa3\xbb\x77\ +\xc8\x2c\x57\xb9\xc7\xe4\x67\x95\x43\x26\x2d\x6f\x29\xe1\x17\x3e\ +\x7f\xd9\x3e\xef\x66\x19\xb3\x56\xfc\x9f\x5f\xcf\xf5\xf8\xee\x33\ +\x62\xe9\x43\xcf\x12\xfd\xf7\xb7\x99\x8b\xe0\x25\x3a\x51\xad\xec\ +\x33\x82\xef\xef\x99\xe3\x33\xc2\xaf\x7c\xd1\x2c\xec\xce\xdd\x12\ +\xc2\xef\x25\xb3\x98\xcf\x41\xdc\xef\xfa\x80\x13\x7d\x85\xf9\x3c\ +\x6a\x1f\xb5\x2c\xad\x71\x76\x38\x4d\x31\xd8\xb5\xac\x5c\xe7\x21\ +\x02\x2e\xf8\xda\x96\xb2\xb8\xe5\xee\x26\xca\xa6\xe8\x68\xc5\x26\ +\xaa\x84\x98\xcb\xed\x77\x96\xed\x13\x02\xaf\x4b\x55\xb7\x2c\xde\ +\xb3\x58\x4b\x3b\x44\x62\x52\xeb\x9d\xaf\x00\x84\xa5\x0e\x82\x0f\ +\x5c\x9a\xb4\x02\x70\xa7\xd4\xbf\x88\x4b\x29\x24\x2f\x98\x45\x7b\ +\xdf\xba\xa3\x14\x82\xd2\x99\xbb\xe4\xd8\xa5\x68\xf8\x90\x11\x7c\ +\xdf\xfe\x6a\xa2\x57\xde\x78\x79\xe8\xed\x8f\x3d\x4c\xf4\xf9\xc7\ +\xa9\x95\x69\xda\x4a\x48\x90\x63\x5e\x50\x77\xd2\x46\x22\xd8\xbe\ +\x35\xb6\xf2\xb9\xd8\x8a\x95\x4b\xf4\x50\x3b\x74\xed\xe6\xf7\xdb\ +\x72\x35\x19\x27\x1c\x5f\xf6\x94\x13\x7f\x83\x5b\xcc\x05\xe6\x1a\ +\x73\x0c\x83\x28\xc9\x44\x25\x84\x1f\x09\x51\x96\x1a\x37\x8a\x82\ +\xeb\x73\x09\x0a\x61\x7f\x45\xe4\x06\x8e\x5c\x6a\xe7\xcc\xb1\xfe\ 
+\xee\x51\xa2\xbf\x6f\x8e\xef\xd5\x07\x2e\xe1\xdf\x7b\xcf\x7c\x14\ +\x16\x72\x87\xcd\xe1\xdf\x6e\xc6\xf8\x39\xb3\x7c\xc5\x8d\x75\x10\ +\x7e\x64\x6e\x42\xf4\x86\x73\xf7\x96\x8f\x99\xd7\x18\xc1\x5d\xdc\ +\xe1\x5e\xc3\x62\xb0\x71\xae\xca\xb2\x22\x2a\xba\x39\x28\x32\xe2\ +\x22\x7e\xed\x82\x04\x5f\x32\x51\x27\x5c\x9f\x74\xe6\x58\x62\xcb\ +\x5f\xd8\x6e\x52\xbf\x36\x77\x8d\x6a\x89\xb9\xd4\x67\x8c\x13\x25\ +\x12\xbf\xef\xd4\xb9\xda\xba\x6e\x2a\xca\xd7\x54\xa4\xcc\xd8\x67\ +\xae\xd3\x59\x11\x98\xf9\x5e\x74\xb1\x4d\x11\xb8\x9d\xef\x17\xad\ +\xdd\x20\xf8\xc0\x82\x2e\xfe\x44\xcd\xe0\x64\xd5\x71\xa7\x37\x4d\ +\x00\xca\x6d\xcb\xf6\xb6\xb3\xde\x2c\x4e\xdb\xee\xa9\x17\x88\x3e\ +\xfc\x69\xa2\x77\x7f\x2f\xd1\x55\x07\x76\xef\xd8\xb3\x60\xf8\xf4\ +\xd7\x89\x3e\x72\x3f\xd1\xd9\x4d\xea\x8c\xbd\x89\x2f\xb0\xad\xe7\ +\x12\xd9\xb5\xf6\xb9\xb2\x79\x71\x6f\x64\xfa\xc6\x31\x59\x39\xc1\ +\x14\x7f\x0f\x3b\xe0\x96\xeb\x2d\x99\xf7\xe5\x89\x75\x52\xbf\x7f\ +\x69\xc6\x68\xeb\xab\x44\x43\x23\x8e\x0b\xf3\xbd\xf7\x0f\xbb\x36\ +\x6f\x8a\x05\xc8\xc0\x4f\x5e\x5d\xc7\x5b\x44\x99\xa4\xf2\xdc\x14\ +\x63\xa1\xcb\x29\xe3\x15\x9e\x4b\xdc\xc4\x7c\xcd\x88\xd3\xdf\xfe\ +\x1a\xd1\xbf\x78\x15\xd1\xad\x6b\x97\xe6\xcd\x9d\xbc\x61\x63\x97\ +\x6d\xef\x88\xf9\x38\x37\x9b\xff\x1a\xe1\x37\x79\xd6\x3c\x06\x77\ +\x6f\x10\x38\x63\x27\xfc\x26\xbc\x18\xd1\xad\xae\x37\xdb\xdf\xe8\ +\x2d\x80\xab\x6e\xec\x55\x46\x2c\x65\xcf\xd3\xc8\xda\xb6\xe8\xac\ +\xef\xd6\x0d\x65\x99\x10\x3d\x3a\x93\xe5\x1d\x5f\x98\xca\xe8\x1c\ +\x2b\x12\x17\xaa\x22\x2d\x3c\x1b\xdb\x97\xdd\xe2\xab\x31\x76\x2a\ +\x61\x9d\xd3\x1d\xc2\x8d\xda\x09\x1b\xc9\xcc\xdd\xf2\xfc\x04\x57\ +\x57\x32\xc8\x05\x67\xec\x76\xd5\x2e\x04\x10\x7c\xe0\xc2\x04\x5f\ +\xb6\xdc\x47\x6c\xed\x99\xe5\xa6\xad\x6b\xdb\x19\xf6\x9b\x13\x96\ +\x2a\x6a\x60\x7f\xdf\xc3\x44\xdf\x7c\x0b\xd1\xf7\xbd\xde\x5c\x17\ +\x76\xe9\x85\x61\xdd\x4c\xaa\x9f\x78\x84\xe8\xa1\xa7\xa6\x8c\x43\ +\x2c\x48\x32\xc2\xb0\x35\x91\xc5\xe2\xa7\x9c\x41\xc4\xc5\x82\x31\ +\xf3\xdc\xa2\x8b\xb0\xee\x7b\x05\xd1\x69\xf3\x7e\x93\xb3\xce\xe5\ +\xa8\x65\xb6\x23\x17\xeb\x3e\xe1\x96\xc2\x88\xaa\x9e\x17\x7e\xfc\ +\xb7\x15\x7f\x3d\x7f\x7e\xe5\x04\x5b\x11\x8d\x59\x42\x1c\xc4\x82\ +\x58\xa5\x6e\x68\x54\x3a\x7e\xeb\x83\xcf\x10\xbd\xee\x2a\x23\xf8\ +\x6e\xbd\x84\x7f\xf7\xd1\x77\xcc\x56\x3f\x75\xb3\x13\x73\xe5\x37\ +\x9c\x6b\xb7\x34\xe2\x55\x6f\x52\x9d\xbd\xcb\x70\x67\x91\xa3\x66\ +\x15\x5b\x5c\xd9\xd2\xca\xf1\x80\x57\x98\x65\xbf\x8f\xf7\x1b\x44\ +\xe2\x4e\x4c\xdc\x8d\x31\xdf\x89\x78\x3e\x4d\xc9\xd2\x29\xad\x73\ +\xb8\x4c\x5c\x97\x52\x62\x2b\x77\xc3\x9b\xfa\x6d\x4e\xa8\xe9\xc2\ +\x55\x33\x08\xb9\xb2\x43\xfc\xe9\x84\xf5\xaf\x4b\xb8\x25\x44\x9b\ +\x4a\x5c\xdb\x67\x4e\xdc\xd8\x86\xe5\x6d\x96\xcc\xe0\x99\x44\x61\ +\x2a\x09\x06\x40\xf0\x81\x0b\xb4\xf0\x69\xea\xae\xad\x16\x0b\x80\ +\x94\x65\x2f\x71\x51\xd4\x53\x44\x60\x6a\xbf\x5d\xfb\x94\xdb\x3c\ +\x6e\x26\xa5\x8f\xdc\x47\xf4\xda\xdb\x89\xae\xbf\x6a\xf7\x8d\x3b\ +\x67\xe5\x3e\xf0\x24\xd1\xc7\xbe\x68\xfe\x1e\xd5\x6e\xc7\x64\x26\ +\x6d\x24\xbe\x54\x22\x23\x4f\xab\xe8\x82\x2e\xc7\x4d\x8a\x6b\x11\ +\xff\x56\x6d\x2a\x27\xa6\x32\xf3\x7d\x14\xcd\xe7\xd4\x82\x2f\xc2\ +\x4b\x46\xc4\xad\xac\x9a\xb7\xdc\x30\x73\xe7\x19\xf3\xb8\xee\x1f\ +\xcf\x39\xcb\x68\x35\x69\xaf\x3b\xf1\xc1\x09\x1e\x1c\x67\xd6\x3b\ +\xe0\x84\x5f\x71\xd0\xfd\x9f\x5d\xbf\xad\xf1\x2c\xa3\x31\x8b\xce\ +\x35\x1d\x67\x04\xab\x19\xc6\x51\xac\xe7\xd6\x78\xbf\xfe\x15\xa2\ +\xef\x30\x9f\xe1\xd6\x7d\x97\xe0\xc9\x57\x66\xc4\x2c\xff\x6d\xae\ +\xde\xca\xdc\x48\xd1\xcd\xce\xd2\x57\x9e\xaa\xc5\x1f\x9d\x13\x25\ +\x83\x38\xde\xef\x05\x67\xf9\xb3\x16\x3e\x4e\xae\x39\xe8\x5c\xc5\ 
+\xea\xa0\xb3\xfe\x91\x1f\x7b\x9b\x1d\x2d\x44\x4c\x6a\x6c\x17\x91\ +\xa5\xdb\x28\x89\x32\x89\x62\x60\x3b\x8e\x43\xc7\xdd\x44\x52\xdb\ +\x51\x47\x66\x72\xc2\x72\xa8\x33\x59\xb4\x0d\x37\xae\xdc\x66\x92\ +\x7f\x4d\x52\x54\xa9\x19\x6e\xb0\xa7\x08\xff\xae\xed\x92\xfb\xa4\ +\xd9\x5f\xdb\xfa\xdc\xb3\x94\x8b\x81\x85\x0f\x82\x0f\xcc\x5f\xf0\ +\x0d\x12\x3f\xac\x51\x99\xbe\x10\xab\xcc\x9d\xae\xce\x6d\x1b\x0b\ +\xbb\x4c\x1c\x9f\x9a\x61\xbb\xf8\xbd\xbf\xf0\x55\xa2\x4f\x3c\x40\ +\xf4\x83\x6f\x31\x73\xcd\xda\xee\x1a\xf7\x27\xcd\x24\xfa\x3b\xf7\ +\x12\xbd\x74\xb2\x29\xce\x54\x2c\xd4\x32\x89\x18\xb1\x15\x50\xa9\ +\xcc\x64\x44\xf9\x44\x0c\x95\x11\x86\xc9\xe3\x28\xa7\x9c\x03\x0b\ +\xb0\xce\xf0\x64\xc4\xbd\x96\x8b\x55\x67\xe1\x63\x37\x23\x0b\xc0\ +\x31\x5b\xf7\x4e\x39\x17\xaf\x14\x2e\x2c\x50\xc6\x67\x9c\x85\x8f\ +\xad\x4d\x6c\xed\xb3\x96\xbf\x2b\x7c\xc2\xc7\xa0\x39\x38\x52\xb4\ +\x4a\x77\xa3\x8a\xc4\x5f\xaa\x2c\x8c\x8e\x93\x67\xa2\x31\x7c\xc2\ +\x1c\xc7\xbf\xbc\x9f\xe8\x77\xdf\x6a\xc4\xeb\xa5\x34\x71\xc5\x42\ +\x48\xb7\x2d\x58\xe1\xf3\x5b\xe1\x66\xc6\x4e\x5f\x6f\xc6\x6f\xd3\ +\x5b\xfc\x8e\x9b\xc7\xe7\xc9\xc6\x57\x56\x63\x33\x74\xa2\x90\x5e\ +\x72\x35\xfd\x6c\xc2\xc7\x3e\x6f\xf9\xe3\x8c\xdf\x6b\x9c\x90\x6c\ +\x24\xca\x44\x49\x5f\x0b\xe9\xb4\xa1\x33\xc2\x56\x67\x5a\xa0\x75\ +\x6d\x97\x4a\x52\xd3\x89\xdf\x0f\x35\x45\x5b\xeb\xb7\x9b\xb2\x1a\ +\x66\x32\x86\x53\x49\x6b\x4d\xb3\x6c\xfe\xff\x2a\xf1\x7c\xf2\x33\ +\x77\x8c\x43\x52\x1c\x4e\x13\x84\xd3\xbe\x8f\x05\x64\xf3\x02\x08\ +\x3e\x30\x23\xec\x0e\x5d\xea\x35\xd7\x8d\x75\xa3\xf0\x7e\x53\x04\ +\xea\xd9\xee\x14\xb5\x9a\xfd\x26\x50\x27\x36\x6a\xb9\x30\x54\xfb\ +\x82\x31\x34\x13\xcd\x07\x3f\x45\x74\xd7\x11\xa2\xd7\x7c\xd3\xee\ +\x1a\xf7\x4f\x3e\x42\xf4\xc0\x51\xca\xc7\xf7\x50\x94\x21\xad\xda\ +\x25\x2f\x66\xe9\xbc\xa1\x53\x81\xdd\x2a\xfd\x1d\xa9\xcc\x7b\x25\ +\x4b\xc4\x2c\xfa\x46\x44\x16\x51\x0e\x13\xa6\x11\x12\x05\x8b\x38\ +\x23\x26\x4a\x23\x24\x26\xe7\xcc\x72\xdc\x25\x75\x34\x62\x15\x27\ +\x3e\xe1\x60\xd3\x59\x04\xd5\x31\xb3\x70\xb6\xef\x95\x6e\x29\xf6\ +\x39\x51\xd8\x18\xcb\x44\xd6\x6e\xb2\x5c\x4d\x2e\x63\x39\xaa\x2f\ +\xc7\xeb\x3f\x6d\x04\xd0\xc7\x9e\x33\x37\x24\x37\x5d\x42\x37\x78\ +\x9a\xda\x85\xa8\x55\xe6\x9c\xf0\xe7\x98\xb5\x92\x0e\x9c\x88\xd3\ +\x57\xfb\x78\xbf\xe3\x2e\xd9\x83\x0b\x36\x37\xce\xcb\xa1\x17\x80\ +\xe7\x5c\xa9\x17\xc5\xee\xe1\x35\xe7\xfe\x2d\xae\xf5\x22\x72\x29\ +\x21\x94\x16\x54\x78\xb9\x55\x47\x2f\x21\xea\xb3\xe7\xb5\x6a\x17\ +\x9f\xef\x12\x4d\xc9\x92\x29\x3a\x23\xe6\xe2\x0c\xdb\x8e\xee\x19\ +\xb9\xd7\x24\x8f\x87\xb6\x21\x02\x67\xd9\xc7\x36\x8d\x07\xb3\x88\ +\x38\xbd\x8d\x5e\xbe\x00\x82\x0f\xcc\x11\xb6\xee\xad\x2e\x09\x51\ +\x67\x26\xcb\x9e\x8e\x26\x44\x2f\xf6\x7a\x19\x4b\x60\x5c\xbf\xac\ +\xd4\x6d\x8b\x5d\xf6\xc2\x9e\x48\xe6\x48\xd6\x4f\x8b\xef\xb0\xfd\ +\xfe\x1e\x7b\x9a\xe8\x37\x3e\x40\xf4\x6b\x3f\x63\x84\xeb\x60\x77\ +\x8c\xf9\xc3\x5f\x27\xfa\xdf\x7e\xdd\x08\xeb\x9e\x13\x2f\x2c\x62\ +\x8a\xbe\x77\xa7\xf5\x5c\xb2\x42\x52\x90\x15\x69\x41\x9d\xb4\xee\ +\xe9\x8c\x65\x21\x95\x9d\x18\x2c\x64\x6a\x86\xf7\x2a\x76\xe6\x42\ +\x2c\xbb\x04\x68\xd9\xd2\x8c\x9c\x00\x29\xb8\x18\xf7\x7e\xf3\x9f\ +\xeb\xcc\xf3\x63\x17\xeb\x67\xe3\xfa\xce\x3a\x4b\xa0\x1e\x79\x11\ +\x38\xf2\x7f\xaf\xbb\xe7\xed\xbe\xcd\x38\x17\x87\xcc\x85\xca\x8b\ +\x3f\x16\x83\x56\xd0\xf4\x9b\x9f\x2f\x37\xc6\x2d\x6b\x60\xc6\x4a\ +\x38\x34\x7f\xff\x9b\x2f\x3a\xb7\xee\x5d\x87\x2e\x9d\x50\x24\xd9\ +\x29\x43\x67\xda\xf7\xb5\xe2\xbb\x0a\x37\x36\x3c\x56\x9a\x13\x35\ +\x8c\xf0\xeb\xdd\xe1\xc6\x95\xeb\xf7\xb1\xd5\xcf\x8a\xbc\x4d\x37\ +\xde\x56\x68\x0d\x7d\xf2\x07\xbb\x86\x5f\x30\x8b\x72\xb3\x83\x3a\ 
+\xe4\xeb\xfc\x5d\xef\xc7\x9e\xe3\x2e\xe7\xed\xfa\x4e\xf5\x5d\xa6\ +\x4c\x32\x46\xd1\x9d\x1d\x5f\xdf\x75\x24\x2c\xa3\xd3\xb6\x4b\x59\ +\xf4\x52\x62\x71\x92\xf8\x9d\x76\x29\xaa\x22\x7d\xa3\xdd\x78\xaf\ +\x2e\xd1\x54\xe4\x6f\xd6\x3b\x8f\x63\x96\xdf\x7e\x31\xc3\x4d\xc7\ +\x2c\xe7\x29\x5c\xba\x10\x7c\x60\xbe\xf4\x84\x85\x2f\xa1\xa9\xea\ +\x1f\xdf\x24\x2f\xec\xf4\x76\x44\x60\xea\xfa\x91\x73\x05\x4f\x11\ +\x8b\x61\xb3\xbf\x7b\x94\xe8\xcb\x47\x89\xee\xbe\xe3\xd2\x1f\xef\ +\xb3\x66\x82\xfc\xdd\x8f\x12\x9d\x3b\xe5\x57\x9c\x70\x17\xb6\xfe\ +\xaa\x13\x7f\xbd\x55\x9f\xa1\xda\xab\x05\x60\x25\x34\xca\xcc\xc5\ +\x50\x77\x88\xb5\xf8\x2e\x3f\x97\xf9\xab\x13\xc9\x1a\x09\x41\xae\ +\x77\xaa\x7d\x98\xb0\xce\xa8\xd4\x7b\xcb\xe3\x32\xe3\xd4\x67\x01\ +\x77\x85\x2b\x2d\x52\xb2\x6b\xf7\x65\xe7\xfe\x2d\x59\x7c\x8c\xfd\ +\xb9\x2d\x93\x3e\x8e\xb9\x85\xc5\x23\x17\x77\xb6\x96\xbf\x03\xde\ +\x15\x3c\x70\x37\x3c\xaa\x97\x11\xd1\x52\x24\x53\x66\x6c\xfd\xa4\ +\xfb\xb4\x11\xa0\x7f\xf6\x0c\xd1\xcd\x6b\x44\x87\x96\x2e\x91\x93\ +\x30\x8c\x6d\xdc\x2f\xb9\x4c\x58\x94\xc2\x79\x92\x49\x60\xe1\x3e\ +\xbc\x7a\x9f\xcb\xda\x65\xc1\x67\x4b\xbc\xb0\xb0\x3e\xe7\x85\xf6\ +\x30\xb2\x72\x8d\x7c\xad\x3f\x5e\x9e\x70\xb1\x7e\x5c\xea\xc5\x6a\ +\xc4\x57\x2c\x40\xf4\xc5\x99\xd5\xa9\x84\x89\xf8\xbc\x2a\x13\x37\ +\x42\x71\x7c\x6c\x57\xc2\x59\x4e\x00\xea\xcc\x7e\x53\x96\xc5\x72\ +\x8a\xf8\x9a\x44\x17\xc1\x58\xc0\xa5\xc2\x6e\x54\x74\x0e\xcc\x20\ +\xd6\x3a\x63\xed\x72\x49\x1f\xdb\xb9\x46\xcc\xda\xf3\x17\x40\xf0\ +\x81\xf9\x08\xbe\x15\x6f\x19\xe3\x3e\xb5\xbd\xc4\x8f\x6f\xe2\x7b\ +\xdb\xc6\xbf\x3f\xce\x96\x4d\x59\xfd\x7a\x93\xf6\x6f\x7e\x9c\x58\ +\xc7\x62\xa4\x2c\x69\xaa\x9f\x37\x99\x24\x10\xb5\xd8\x7a\xdf\x9f\ +\x12\x5d\xf3\xdf\x12\xdd\x74\xf8\xd2\x1e\xef\x3f\xf8\x0b\xa2\xff\ +\xef\xbe\x68\xb2\x30\x63\x33\x36\xc2\x60\x74\xce\x5f\x73\x7b\xde\ +\xea\xb7\xec\x2d\x59\x2b\xee\x91\xeb\xd0\xf1\x73\x32\x81\x22\x3b\ +\x3e\x72\x9c\x72\xe5\x1b\x32\x81\xdf\x9a\x9a\x99\x87\x72\x42\x53\ +\x3b\x65\xa6\x0a\x16\xbe\xa8\xde\x60\x72\xa2\x10\xeb\x7b\xcb\x6e\ +\xbc\x06\xd7\x98\x61\xf5\x2e\x5d\x4e\xf8\xb0\x49\x1f\xc1\x02\x25\ +\x5f\x32\xac\xc5\x1f\x5b\xf9\x38\x5e\xd0\x5a\xfd\x56\x9d\x00\xe4\ +\xff\xb3\x15\xaa\x12\xd1\x51\x62\x8b\x4a\x64\x62\x6a\xe1\xf2\xe5\ +\x82\xcc\x7f\x64\x6e\x46\xae\x35\xfb\xf8\x27\xdf\x94\x8e\x97\xdd\ +\x51\xb4\x18\x5b\x1d\x7d\xaf\x3a\xf1\x59\x28\x91\xd8\x12\x09\xa6\ +\xf0\x3c\xbb\x7c\xb9\x56\x9f\x8d\xb7\x3c\xe7\x97\x53\xbe\xcc\xcb\ +\x69\x67\x0d\x94\x1d\x5b\x14\xab\xbc\x97\x9d\x40\xdc\x30\x63\x3f\ +\xe6\x96\x89\x73\x12\xc5\x8a\xa2\x5e\xb2\xaa\x16\x49\x3a\x25\xd8\ +\x32\x09\x62\x2d\x0f\x44\x42\xa0\x4c\xdd\x9f\x6e\xdf\x44\x91\x6e\ +\xde\x38\x34\x1e\xe3\x63\xe8\x10\x81\xd5\xfb\x4c\xa2\xe3\xa1\xf6\ +\x3e\x55\xea\xfa\x99\x8a\x3b\xa4\x29\xfb\xa2\xcc\x3e\x29\x73\x2d\ +\x9f\xe6\x76\xee\xb2\x14\x22\x4b\x17\x82\x0f\xcc\x57\xf0\x0d\xfc\ +\x37\x56\xb0\xe0\x4b\x5c\x54\xac\x9b\x57\xa5\x7f\x8f\xa9\x6b\x10\ +\x8b\xc3\x52\x4f\xdf\x96\x05\xe6\x44\x27\x44\xe0\x2c\x15\xe0\xa3\ +\x0b\xc1\x97\x1f\x27\xfa\xd8\x67\x88\xfe\xe9\x3f\xb8\x74\xcb\xb4\ +\xbc\x78\x92\xe8\x2f\xbf\x44\x74\xc6\x7c\xee\xbe\x11\x00\xe5\xd8\ +\x2d\xad\x6b\x33\xbb\x28\xcd\x32\x3e\xe7\x56\xf6\xbc\xd5\xa9\x12\ +\x7e\x4b\x5e\xfc\x2d\x45\xbd\x51\x63\x4b\x0d\x45\xae\xc8\xf8\xb9\ +\x22\x7d\xdd\xcd\x15\xe3\xdf\xc9\x5e\xba\xc9\x58\xb3\x8c\xe5\xb2\ +\xf5\x39\xfc\x41\xf3\x78\xf1\xa2\xaf\x74\x09\x1e\x2c\xfe\xd8\xe5\ +\x6b\xff\x5e\xf7\xe5\x5e\xe4\x9b\x0e\x9d\x45\xd0\x66\xa4\xb2\xe8\ +\xde\xef\x85\xdf\x3e\xbf\xac\x3a\x8b\x56\x2a\x10\xbf\x75\x3c\x62\ +\xd2\x7e\xda\x08\x9e\x3f\x7b\x8a\xe8\xed\x37\x10\xdd\x7c\x09\x64\ 
+\xed\x2a\x4d\xcd\xbe\xb6\xa9\x18\x3e\xdd\xfd\x19\x5b\x9f\x33\x12\ +\x16\x5c\xa6\x85\x0e\xb8\x36\x6d\x74\xd6\x59\x5d\x83\xe8\xe3\xbf\ +\x69\x9d\xea\xcc\xd9\x45\x59\x72\x32\x89\x20\x2a\x71\x43\x49\x71\ +\x41\xee\x39\x27\x9c\xc9\xae\x2c\xc9\x4a\x06\xb9\xfd\x52\xb7\x08\ +\x4c\xb6\x8f\xcb\xb8\x74\x67\xea\x9a\xa1\x3a\xfe\x3b\x63\x9c\xf6\ +\xcc\xa2\x4e\x9d\xe7\xeb\x00\x04\x1f\xb8\x00\x0b\x5f\x5f\x08\xb0\ +\x84\x82\x2b\x8a\x74\x9f\x51\x7e\x6d\x99\x58\x3f\x4a\xf4\x25\x1d\ +\x25\xf6\xa1\xbd\xe5\x4f\x47\x17\xc0\x71\xe4\x12\x9e\x04\x4b\x60\ +\x7c\x51\x13\xdb\xbc\x6c\xc4\xd4\x47\xff\x9a\xe8\x8d\xaf\x26\xba\ +\xeb\x12\x4c\xe0\xe0\x71\xfd\xd8\xfd\x44\x5f\x7b\xd9\x95\x1c\x61\ +\x17\x24\x5b\x42\x58\x60\x4c\x8c\xf0\x18\x73\x79\x11\x76\x41\x4e\ +\xda\x9f\xd1\x66\xa3\x6e\x39\x2b\xa0\x9d\x08\xbc\xbb\xd7\x5a\x02\ +\x59\x08\x1a\x11\xd2\x5f\xf3\x16\xc1\xa5\xc4\x45\x5e\x06\x95\x77\ +\x65\xf2\x8a\x89\x44\xc7\x13\xbe\x9a\x32\x41\x2d\x40\x94\x24\x1b\ +\xc7\x27\x8e\x57\x53\x62\xa2\x8e\x0c\x56\xd6\x55\x6e\x96\xfe\xd5\ +\x4e\x50\xb3\xbb\x71\xc2\xe2\xc3\x8c\xe9\xf8\x94\xb7\x46\x45\xdd\ +\x10\x4a\xce\xa0\x36\xcb\xa4\x70\x96\x27\xeb\x62\x5f\x76\x19\xbf\ +\x9c\xf9\x5b\xc8\xb2\x2f\xf2\x78\x12\x81\xfc\x9f\x79\x81\xe8\xfd\ +\x4f\x10\xfd\xe4\x2b\x89\xae\x5e\xb9\x88\x27\xa2\x77\xe7\xb6\xba\ +\xaf\xc8\xe3\x2d\xce\xe3\xfc\x89\xad\x7f\x85\xb0\x00\x1d\x70\xe5\ +\x72\xe8\x86\xda\xcd\x6b\x97\x63\xbe\xec\xcb\x09\x5a\x5c\x59\x96\ +\x92\x92\xb1\xbf\xad\x6b\x49\x39\x47\x61\xa7\x13\xe3\x94\x72\x5d\ +\xaa\x8e\x58\xe7\x6d\x88\xc0\xce\xd7\x10\x65\x33\x85\x1b\xbb\x92\ +\x71\x8b\x33\xec\x2b\x7b\x03\x98\xeb\xcc\xa1\xce\xef\x3b\x04\x10\ +\x7c\x60\x8e\xb0\x35\x6e\x79\xd0\x2d\xf8\xac\x8b\x76\x1b\x82\xaf\ +\x97\x10\x77\x2b\xde\x52\x18\x6f\x3f\x4e\xec\x63\x9c\xd8\x8e\x45\ +\x9f\x3c\xb6\xf8\xff\xfc\x7e\x47\x9f\x25\xfa\xd3\xbf\x21\xba\xf1\ +\x1a\xa2\x2b\x2f\xb1\x0e\x1c\x4f\xbf\x44\xf4\x91\x4f\x13\x6d\x6c\ +\xba\xf9\xb4\x08\xbf\x12\x23\x20\xb4\x38\xd6\xb1\x17\x7f\xa3\x33\ +\x66\x59\x17\x71\x6b\xb2\xfc\x47\xe9\xad\x83\xe1\x45\x27\xeb\x0b\ +\x6f\xdf\x8b\xbf\xfe\x01\x27\x70\x8a\x01\xb5\x5c\xa1\xc9\xca\x0b\ +\x51\x6b\xb5\x64\x9f\x5e\xda\x39\x97\xae\x9c\xac\xa7\x1d\x6f\x2e\ +\xf3\x38\xb5\xde\xce\xbb\x3c\xf6\x5e\x28\xeb\x6b\x5c\xce\x00\x8b\ +\xea\x31\xbb\x76\x4f\xba\x7a\x7f\x8d\xf3\x97\x8f\xc3\xbb\x82\x35\ +\x27\x87\xbc\xec\xf7\xd5\xab\x4b\xbe\xf4\xaf\xf3\x09\x08\xd4\x4e\ +\x6c\xe1\x7d\x0d\xcd\x39\xfd\xef\x1e\x22\xba\xdd\x7c\x2f\x3f\x74\ +\x11\x0b\x32\x2b\x69\xe1\xcb\x89\x35\x9d\xb6\xaa\xb6\x12\x7b\x72\ +\x13\xba\xea\xc8\x3c\x1f\xf8\x2c\x5d\xe6\xaa\xfa\xfd\xf6\x1f\x34\ +\x5f\xc9\xbc\x93\xae\xe2\x18\x3e\x95\x39\x36\x9d\xb1\xee\xc5\xdb\ +\x50\xc6\x62\x97\x19\x9f\xc6\x76\xb3\xc4\xef\xe5\xe2\xfd\x68\x4a\ +\x26\x73\x31\xa5\x05\x9a\xee\x10\x50\x99\xa4\x91\xd4\x7b\x6c\x2b\ +\x9b\x3a\x55\x38\x7a\xbb\xe7\x6a\x1f\xde\x5c\x08\x3e\x30\x57\x06\ +\x3d\x11\xc3\xc7\x22\x2a\xf5\xc3\x1f\x38\xb1\x96\xb2\xf2\xa5\xd6\ +\x8f\x8b\xd9\x05\x22\x5b\xf3\x62\xeb\x5d\xea\xf5\x29\x31\x1a\xbb\ +\x84\xf9\x35\x9f\xf8\x1c\xd1\x4d\x66\x12\xff\xf1\xef\x31\x27\x62\ +\xef\xd2\x18\xe3\x63\xa7\x5c\x91\xe8\xaf\x3c\x15\xb5\xe3\x92\x42\ +\x2e\xfc\x78\xcc\x64\xd8\x37\x0a\x64\xe5\xca\xda\x02\xc8\xb1\x7d\ +\x6c\x05\xe4\x85\x2d\x80\x3a\x2c\x89\x9b\x61\x76\x03\x5b\x57\xf0\ +\x31\xaa\xdc\xc1\x95\x05\x70\xc9\x2d\xaa\x57\x2f\xc9\x4c\xd4\xc8\ +\x4d\x17\xbb\x88\x76\xa4\x34\x8b\x70\x3b\x56\x96\xa5\x49\xb3\x67\ +\xab\xac\x23\xa8\x55\xc2\x22\xa9\x9b\x42\x2f\x55\x08\x37\xac\xb7\ +\xb1\x7f\x37\x99\xf5\x37\xfb\x7a\x7f\xe7\x7c\xe2\xc7\xba\xfb\x0e\ +\xac\x65\x6a\x12\x09\x9d\xb1\x13\x7f\xbc\x8c\x9e\x70\x82\xcf\x5a\ 
+\xfe\xae\xf0\x5d\x3f\xbc\x2b\x5e\xf9\xb6\x6f\x43\xb3\x8f\xff\xe3\ +\x7e\xa2\xef\xb8\xde\xdc\x90\x2c\x5f\xa4\x93\x51\xbb\x71\xec\x2a\ +\x37\xd2\x70\x91\x47\x1d\x23\xd4\x2c\x16\x3e\x9d\x17\x86\xc9\x4e\ +\x31\x85\x3b\xef\xe7\x99\x95\xc9\x37\x4d\x9b\x5f\x71\xd7\x04\x5b\ +\x86\x67\xc9\x2f\x03\x3f\x43\xa5\x6a\xdd\x65\x2c\x51\xa9\x22\xc7\ +\x49\x57\x74\x91\xe9\x67\x9b\x72\x2b\x17\x53\xc4\x19\x75\x58\xf6\ +\xe2\xe3\xed\x48\xc0\x98\xda\x02\x6d\x9a\x10\xcb\xb4\x93\xeb\xb4\ +\x12\x12\xb5\x62\x01\x3b\xf7\x9d\x11\x89\xb0\xf0\x41\xf0\x81\x39\ +\x33\x1e\xbb\x8b\xba\xad\xc7\xc7\xad\xac\x54\x3a\x4e\x96\x9f\x2f\ +\x33\xee\xde\x78\xfd\x52\x46\x08\xa6\x62\xfb\x78\xdb\xe1\x78\x86\ +\xd7\xf7\xda\xd9\xbf\x29\x11\x78\xee\x2c\xd1\x67\x1e\x22\x7a\xdb\ +\xeb\x89\x6e\xb8\xe6\xd2\x18\xe3\x87\x8f\x12\xfd\xc9\xdf\x52\xb3\ +\x1c\x4a\xa6\x9e\x96\x8a\xdc\x81\xb6\xd7\xa9\x6f\x56\xcf\x62\xaf\ +\x34\x62\x64\xbc\xe9\x26\xb4\x89\x2f\x3d\x92\x15\x80\xda\x25\x2e\ +\xb0\x50\x1c\x9e\x70\x93\xb2\xad\x63\xb7\xe6\x62\xdb\x58\x04\x56\ +\xd9\xc0\x05\xd5\x2d\xc9\x12\x02\xb0\xab\x0c\xcc\xc2\x2c\x7c\xde\ +\xd2\xd1\xe8\xee\x22\x8f\x69\x3b\xeb\x29\xca\x72\xee\x58\x6f\xcb\ +\xbe\x1c\x72\x96\x3b\x1e\x57\x2e\xf2\xcc\xb5\xfe\x6c\xdc\x9f\x8f\ +\xf3\xb3\x42\x48\x37\x6b\x25\xda\xac\x60\xb3\xd0\xf3\xde\xe5\x7e\ +\xd0\x65\x00\xdb\xec\xdf\x65\xb7\xdf\xaf\x9b\x73\xfd\xcf\x9f\x24\ +\xfa\xb1\x57\x9a\xf1\xbf\x48\xb1\xa6\x4a\x47\x13\xb2\x8a\x44\x5e\ +\x99\xb1\x76\xe9\x29\x16\xbe\x9c\x68\x2c\xbb\x2d\x63\x8b\x88\xe3\ +\xe3\xdf\xc9\xf0\x6b\x4e\x8c\x97\x4b\xbe\x08\x34\x17\xdf\x3e\xe0\ +\x7b\x00\x2f\xf9\x99\x6a\xc5\x9f\xf7\x45\xe6\xdc\x4e\x24\xab\xb5\ +\xba\xdd\x4c\x37\xa0\xb5\x85\x51\x39\xdb\xb6\x5d\x16\xc6\xae\xee\ +\x48\x9d\x42\x70\x16\x31\xb8\x9d\xf6\x77\x2a\x23\x8c\xa7\xbc\xa6\ +\xb5\xeb\x58\x24\x96\x10\x7d\x10\x7c\x60\xae\x1c\x7f\x89\xe8\xb9\ +\xa7\xcd\x97\x66\xbe\xb5\x5e\xcf\x27\x3c\x14\xf5\xff\xfb\xec\xfa\ +\xea\x3b\x6b\x59\x59\xf8\xac\x5c\x71\x65\x5a\x4e\x08\x36\xbb\x7e\ +\xd0\x5e\xbf\x9c\xb1\xe8\xf1\xbe\x63\xe1\x36\x48\xad\x4b\x58\x20\ +\x83\x6b\xb7\xf2\x5a\x98\x3f\x1e\x33\x02\xeb\x53\x5f\x20\xfa\xf1\ +\xef\xbb\xf8\xe3\xbb\x61\xc4\xd6\xfb\xfe\x9c\xe8\xa5\x97\xa3\x0b\ +\x7a\xea\x6e\x56\xb5\xff\x56\x62\x92\xe5\xb1\xb7\x19\xa8\x5c\x07\ +\xed\x4a\x2f\x00\xc7\x4e\xf8\x59\xeb\xdf\xd8\x3f\x8e\xbc\x28\x49\ +\x74\x07\x08\xa2\x64\xe8\x27\x65\x9b\x0c\xb2\x9c\x48\x06\x19\x78\ +\xb7\xb3\x98\x08\xf5\x2c\xb3\xd3\xdc\x66\xec\x7a\x02\x69\x59\x45\ +\x8b\x6d\xac\x8f\xba\x87\x68\x9a\x7d\x7d\x10\x42\x85\x19\xeb\xc1\ +\x55\x3e\xa1\x68\xc3\x5b\xfd\x36\x7c\x09\x98\x75\xf7\x68\xfb\xcd\ +\xca\xef\x90\xbf\x83\x97\xdd\x62\x27\xd7\x25\x97\xf0\xc1\x56\xc0\ +\xdf\x32\xdb\x5f\x69\xbe\xbb\xef\x7f\xf5\x45\xb2\xf0\x95\xde\x85\ +\x17\x97\xe9\x50\xe9\x31\x68\x64\xec\xaa\x19\xac\x35\x5d\x99\xe1\ +\xa9\x7d\x86\x78\xbb\x05\x7c\x56\x15\xdc\xf1\x9b\xae\x15\x9c\x0b\ +\xe8\x34\x8b\xcf\xbe\xb6\x02\x70\xc5\x27\xe3\xf8\x0e\x21\xe4\x2d\ +\x82\xe1\x3c\x4a\x56\x08\x88\x2d\x9d\x45\xa6\xdd\x59\x22\xeb\x55\ +\x65\xc4\x5b\x32\x43\x56\x65\xdc\xad\x45\xb7\x50\x53\x5d\x89\x12\ +\x1d\x62\x30\xd9\xe2\xb2\x6b\x3f\x33\x88\x75\x9d\x39\x3f\xa6\xbe\ +\x07\xc4\x1e\x04\x1f\x98\xb3\x85\xcf\xcc\xfc\x5b\xe7\x6c\x4e\x40\ +\x5b\x7f\x14\x4e\x00\xf2\xa2\x54\x7d\xf7\x3b\x58\x76\x42\x70\xb0\ +\xe4\x32\x7c\x07\xde\x1d\xc3\xeb\xe4\xc5\x31\x25\xda\xec\x7c\x93\ +\x73\xcf\x96\x09\x31\x17\x5d\xec\xca\xbe\x13\x8d\x8d\xcf\x90\x88\ +\xf9\xfb\xe3\x7b\x89\xee\x79\x23\xd1\xb5\x17\xb9\xcf\xee\x7f\xf8\ +\x2f\x44\x0f\x3d\xd6\x4e\x40\x48\x5a\x4b\xe2\x92\x17\x29\x2b\x8b\ +\x78\x8e\x2d\xa6\x85\x77\x01\xf3\x24\x15\xca\xdc\x68\x1f\xe3\x37\ 
+\x19\x3a\x0b\x1f\x27\x83\xb0\x10\x6c\x59\x6e\xbc\x65\x70\xb4\x25\ +\x04\x8e\xb0\xf8\xf1\xbe\x6d\xb2\xc3\xb2\xb7\x06\x2e\xd5\xc7\xb2\ +\x68\xb7\x2e\x4f\x72\x6a\x5c\xbb\x89\xb2\x19\xb1\xd3\xd6\xab\x84\ +\xc5\x2f\xb5\x5e\x4c\x62\xd5\xa4\x97\x98\xdc\x42\xf2\x87\x7d\xbf\ +\x49\xed\xea\xb5\xd9\xbf\xec\x06\x3e\xed\xc4\x45\xe3\xfb\x0d\x56\ +\xd8\x0d\xf7\xdf\x47\xcc\x4d\xd6\xaf\x9e\x22\xba\xe3\x6a\xa2\xdb\ +\xaf\xbd\x38\x16\xbe\x56\x42\x4c\x57\x21\xef\x5e\xfa\x1c\x6d\xb9\ +\xc6\x55\xda\xe2\x57\x95\xd7\x49\x3d\x47\xdb\xb0\x24\x6d\xe7\x33\ +\x16\xbe\x98\x79\x24\x1c\xec\x9f\x2c\xce\xcf\xb8\x72\x31\x1c\xfa\ +\x60\xd7\xf5\xfc\xcc\xe5\x5d\xbf\xca\xff\xa6\xac\x65\x30\xf4\x65\ +\x5e\xcd\x58\x33\xbb\xac\x64\x89\xe3\x4a\x7d\xd6\x54\xa1\x65\xd5\ +\x25\xec\x72\x9f\xbb\xc3\xfa\xae\x67\x10\x68\xb3\xb4\x4d\xd4\xdb\ +\xe9\x7f\x9c\x72\x47\xeb\xd9\xcf\x53\x00\xc1\x07\xe6\x08\x8b\xb9\ +\x5e\x22\xd6\xcd\x0a\x2d\x6f\x8d\x2b\xa3\xdf\xea\xe6\x86\x17\x81\ +\x85\x17\x1d\xc1\xea\xe7\x2d\x83\x4b\xe6\x62\xb9\xb4\xe4\x16\xbe\ +\xe8\x16\xde\x42\x18\x26\x5b\x16\x67\xe3\x49\xfb\x1a\x30\x89\xac\ +\x7f\x56\x1c\xf6\xa6\x0b\x41\x16\x96\xa3\x68\x7f\x5b\x66\xd2\xfd\ +\xe0\xc7\x88\xfe\xd9\x7f\x73\xf1\x02\x7f\x9f\x31\x13\xfb\xfb\xff\ +\xd2\x4d\xf6\x2a\x16\x79\x3a\x13\x1c\x2e\x2f\x76\x91\xc5\x4f\x67\ +\x9e\x93\x93\x6e\x2f\x4c\x5c\x46\x7c\x6b\xf3\x3d\x94\xfb\xbd\x20\ +\x19\x09\x77\xf0\x86\xb0\x00\x52\xd4\xc5\xa4\x74\x59\xac\x3c\xe4\ +\x93\x0d\x97\x19\x6c\x93\x13\xfa\xde\x1a\xb8\x56\x0b\xc0\x45\x8b\ +\x12\xd2\xe9\x42\xd1\x71\x33\x7b\xad\x3a\xd6\xeb\x44\x06\x72\x6e\ +\x3d\xcd\xbe\x5e\xab\x3a\x0e\xd2\xfe\x8e\x56\x5d\xf9\x97\xc1\xc8\ +\xd7\xff\x5b\xf7\x6e\xe0\x73\x54\x97\x1f\x11\x37\x59\x5f\x7e\x82\ +\xe8\xe3\x0f\x11\xdd\xfa\xdd\x3b\xec\xda\x95\xe3\x9a\x1b\xbb\x69\ +\xae\xdb\x22\x7d\x0e\x37\x84\x5c\x2a\x53\x35\x27\x28\xd5\xfc\x27\ +\x77\x2e\x64\xbe\xef\x35\xe6\x5c\x7d\xd9\xd5\x02\xa4\x53\xae\x2c\ +\x8c\xbd\xb3\x4d\x95\x45\x19\xfb\x65\x53\x8c\x0b\xc7\xb8\x0e\xbc\ +\x08\xec\x7b\x0b\xed\x3e\x6f\x15\xdc\xef\x5d\xc3\x2b\x42\xd4\x17\ +\x09\x71\x17\x0f\x7f\xa6\x96\x9e\x2a\xb7\x29\xec\xa8\xdb\xc2\xb7\ +\x5d\x31\x38\xf3\x4d\x9c\x9a\xbd\xef\x71\x67\xe6\xf0\xac\xe7\x2a\ +\x80\xe0\x03\xf3\xe3\xc6\x1b\x89\x6e\xbf\xdd\x09\xa4\xd1\xc8\x97\ +\x49\x29\x5d\x6c\x1f\xff\xcd\xc2\xaa\xf4\x42\x2c\x08\x84\xaa\x60\ +\xb2\x77\xa5\x06\xf1\x55\xfa\x42\xb3\xe7\xe2\xdf\xad\xbf\xf8\xb3\ +\x6b\x78\x65\xc5\x05\x68\xb3\x18\xe4\xff\x5b\x77\xb1\xb7\x28\x0d\ +\x7c\x1c\x59\xe9\xad\x57\xca\xb7\xd2\xaa\x32\x72\xa3\x52\x2d\x72\ +\x5d\x19\x5c\xbb\x62\x9b\x4f\xff\x1d\xd1\xb7\xdc\x41\xf4\xd6\xd7\ +\xee\x60\xc1\x60\x0f\x77\xd4\xf8\xb7\xff\x91\xe8\xe5\x97\x13\xd6\ +\x80\x5c\x72\xc4\xa4\x7d\x41\xce\x59\xa7\x64\xac\x5a\xa3\x30\x6e\ +\x54\x4c\xb7\xe7\x27\x2e\x1e\x73\xeb\xaa\xf2\x02\x86\xc7\xc9\xc6\ +\x01\x6e\x39\x11\x68\xdd\xc0\x21\x16\x50\xf6\x20\x2d\x9d\x15\x6b\ +\xec\xdf\x7b\x24\x32\x82\x47\xec\x17\x5e\x50\x89\x91\x56\x96\x6e\ +\xec\xda\x52\x09\xeb\xd2\x0c\x56\xbb\xce\xf5\xdb\xb0\x0a\xc6\xa2\ +\xd0\x3e\xf8\x9e\xb3\xdc\x31\x85\x5b\x8f\x85\x4c\xe2\xaa\xfe\xdf\ +\x19\xe7\xfe\xe5\x38\xc0\x75\xb3\xfc\xfa\x47\x89\xbe\xf9\x66\xa2\ +\xb7\xdc\xe1\x6e\x5a\x76\x4a\xf0\x29\x7f\x33\x17\x8b\x80\xec\xb9\ +\x36\x89\x3e\xf7\x24\x23\xb8\x83\xa5\x2c\x31\xe9\x57\x16\xc5\xdc\ +\x7b\xcd\x39\x5e\x8b\x85\x78\xff\xa0\x3f\xef\x6f\x10\x42\x76\x5c\ +\xd7\x04\xe4\x0b\x15\x97\x85\x09\x62\x8f\x9f\xa3\x70\x23\x14\x92\ +\x86\xb8\x38\xf4\x7a\x43\x2b\x37\x3f\xf3\xc0\xd7\x1c\x5c\xf3\xd6\ +\xc0\xfd\x5e\x04\x2e\xd5\x37\x5e\x95\xf5\xb0\xc8\x5b\xed\x74\xb1\ +\x3d\x2b\x97\xd6\xe7\x69\xe1\x53\xdd\x56\xd1\xa9\x02\x6e\xd6\xeb\ 
+[... long run of hex-escaped binary bytes (embedded binary resource) elided ...]
+\x4e\x63\x37\x44\x5c\x89\xc3\x2c\x22\x02\xed\x77\x77\x77\x67\x1b\ +\x20\x36\xb5\xfe\xd5\x9f\xd5\x66\x5d\x5d\xf1\xea\xf7\xda\x26\x55\ +\xb3\x88\x73\x0c\x6b\xb3\x6d\xda\x68\xa0\x73\x0e\x8f\xac\x08\xd4\ +\x08\xa4\x7e\x5e\x4f\x1d\x7f\xa8\xd6\x17\x82\x6a\xe0\xd6\x82\x38\ +\x5a\x5e\xb9\x3a\x1c\x6d\xb3\x55\x62\x70\xd1\x46\x05\x97\x6c\xf2\ +\x61\x9b\x3e\xb2\x4a\x03\x29\xf5\xfb\x2e\xf0\xac\xbf\xb3\x24\xde\ +\x9c\xc0\x09\xc2\xce\x45\x2a\xc3\xef\x2c\x79\x82\x35\xe6\x2c\x0e\ +\x45\x5e\xb8\x5c\xff\xb5\x68\xae\xf6\x3c\x7e\x4a\x99\x29\x05\x61\ +\x2b\xf8\x0e\x08\x3e\x38\x20\xaa\x3c\x7c\x53\x88\x9f\x9b\x56\x95\ +\xd3\x2e\xbf\x98\xdc\xb1\x9e\x7f\xb5\xc8\x6b\x3e\x28\xf2\xfb\x8f\ +\x95\x57\xe9\xcb\xe9\x2b\x4a\x2a\x02\x57\x4c\x58\xcf\x62\xaf\xdf\ +\xdd\x83\xa3\xb7\x25\x2c\x4a\xee\x7e\xad\xc8\x1b\xfe\xa3\xb2\x43\ +\xbe\xb3\xbf\xf9\xb2\xa2\x3d\xa7\x2d\xfc\x40\xde\x27\x2e\x83\xf5\ +\x0b\x93\x2c\x57\x73\x08\xcb\x7f\x4e\x0d\x3c\x51\x65\xc5\xdd\x8e\ +\x9f\x98\x3a\xf7\xfe\x9f\xb7\x87\xf7\x66\xcd\xe6\x76\xdd\x3b\x57\ +\x55\x48\x6c\x0f\x1a\x1b\x51\xcd\x53\xed\x5b\xf4\xd4\x29\x2e\xd2\ +\x89\xa6\x63\xaf\xbb\x9e\x30\xdb\x43\x29\x88\x37\xfe\x80\xc8\x0f\ +\x7d\x58\xe4\x37\x7f\x39\xe8\x15\x5d\x2d\x2c\x89\xb8\x85\x9d\x63\ +\x3a\xab\x6b\x82\xd5\xf5\xd3\x9a\x24\xe0\xae\x26\xf4\xee\xc8\x4b\ +\x0e\x9e\x37\x62\xc9\x04\x43\xc4\xbb\xa6\x56\x21\x9a\x08\xcf\x45\ +\x01\x07\x83\xb6\x49\x64\xd1\x4e\x54\x73\x8e\x61\x57\x67\x38\x0b\ +\xab\x7f\x0c\xba\xaf\x19\x3f\x71\xb4\x97\x0e\xc6\x5f\x07\xf7\x9b\ +\xbe\x39\xc4\x37\x9f\x18\x2b\x5e\x37\x4e\xcf\x74\x4e\x68\x15\xf0\ +\xd4\xf9\x7b\xa7\xda\xb9\x03\x9d\xe8\x53\x61\xa4\xce\xe1\xdb\xce\ +\xb6\x4d\x18\x9a\x8b\x50\x1f\x95\x7b\x78\xcb\xfe\xed\x4a\xcd\x6d\ +\x35\xef\xa9\x90\xac\xf2\x78\x17\x4d\x72\xea\x22\x88\xc2\x55\x91\ +\xb5\xad\x3a\x18\xbb\xfe\x92\x17\x7c\xb6\xe6\x69\xa7\xab\x87\xd6\ +\x40\xe2\xf2\x7b\xbb\xe6\x5a\xb4\x86\xf1\xc2\x5b\x66\xd4\xf4\x51\ +\xb4\x3f\xe3\x6f\x8f\xbf\x3e\x29\x43\x89\x8b\x08\xfa\xcb\x75\x62\ +\xd2\x1f\x1c\x70\xcb\x2c\x24\xed\x34\xf6\xaf\x79\xb1\xd7\xfb\xea\ +\x18\x93\x96\x05\xc1\x07\x07\x11\xe1\xdb\x8b\xe0\xdb\xd7\x49\x39\ +\x69\xb8\x75\x8f\x45\x15\x87\x65\x67\xf5\x70\x29\xf8\x6e\x7c\x4b\ +\xe4\x8f\xfe\x75\xb7\xd7\x2f\x26\x2c\x28\x29\xc6\x8a\x74\xb4\xae\ +\xf7\xbb\x45\xff\xef\x16\x91\xcf\xeb\xdf\x8b\x9a\x6e\xe6\x03\x22\ +\x0f\xfc\xc0\xe4\x09\x4c\x99\x5f\x69\x23\x5c\x76\x11\x49\x9c\xec\ +\x8b\xa2\x22\x7e\xfb\x9d\x2c\xa9\xe6\x45\x5a\x35\x02\x35\x5e\xa6\ +\x27\x08\x47\x32\x5b\x5b\x65\xc8\x53\x57\xea\xf0\x4b\x66\xcb\xcf\ +\xe9\x63\x79\xd0\x24\x9d\x1e\x78\xa5\xe8\xc2\x48\xac\x1f\xf1\xf3\ +\xc5\x75\x51\xc4\xa3\xb7\x9d\xed\x8f\x7c\xa7\x90\xbd\x8d\x39\x69\ +\x8f\xfd\xa3\x7f\x53\xe4\x4f\xbf\x2a\xf2\x8d\x27\x22\x42\x33\x65\ +\x14\xb1\x61\xdc\xdc\x34\x85\x57\xf5\xff\x3a\xce\xb8\xe8\xc2\x28\ +\x4b\xcd\x30\xf3\x38\x42\x6b\x93\x49\x57\x91\xc1\xed\x46\x40\x8a\ +\xb4\xf3\x69\xe8\xf2\x34\x6f\x9f\x0e\x07\x87\xa6\x0c\xa7\x38\x9c\ +\x08\x5c\xf1\xa2\x83\x8b\x0b\x6d\x83\xc7\x60\xc1\x2b\xef\xe6\xcd\ +\xc2\x77\x43\xbb\x2d\xb3\x88\xf1\x5e\x97\x76\x14\x71\xec\x22\xde\ +\x9a\xa9\xe0\x5b\xce\xb6\xe4\xae\xe5\x2d\x79\x56\xce\x8c\x05\x99\ +\x6f\x12\x19\x8d\x1a\xd1\xe2\x9c\xc2\xfa\xac\x91\xc0\xdd\x51\x23\ +\xe0\xdc\x3c\xb9\xdc\x46\xfa\xb6\xed\x10\x70\x6e\x75\xb6\x96\x98\ +\xd3\xd4\x86\x2a\x02\x75\xc8\xb8\x1a\x2a\xde\x0e\x4c\x1d\x6e\x4f\ +\x7b\x43\xb7\x85\xf5\xf2\x74\x8c\xfd\x56\x23\x17\xa6\x1d\x48\xad\ +\x02\xb2\x36\x52\xa8\x7a\xbd\xca\xd3\x6d\xda\xd1\xc6\x91\x27\xc6\ +\x44\xda\xf9\x04\xfd\x72\x72\xfe\x67\x46\x45\x57\xbc\xe5\x41\xb4\ +\x2f\xf7\xd6\x3d\xf7\x0e\xe3\x51\xde\xbe\xbc\x14\xd2\xae\x96\x12\ 
+\x35\x9a\x78\x87\x65\x5e\x34\x9f\x67\x48\x17\xc1\x07\x07\x22\xf8\ +\x82\x88\xda\x24\xb1\x35\x29\x3a\xb6\x2f\x3d\xb8\x8f\x33\x7d\xed\ +\x82\xc8\xc5\x77\x95\xc2\xe0\x73\xe5\x15\xf7\xd9\xc9\xcb\x4d\x95\ +\x46\xeb\x13\x87\xb1\xef\x77\xbe\x1b\x13\x77\x45\xfc\x3b\x61\xcd\ +\xe0\xf3\x0f\x88\x3c\xf8\xce\x5a\xc0\x4e\x13\x89\xcd\x52\x82\xcf\ +\xf4\x08\x37\x49\x57\xd1\x48\x25\xfd\x4a\xcd\x37\xf4\xdf\x1f\xcc\ +\xf9\xb8\xdc\xb6\xf3\xda\xf4\xb7\x36\xad\xf8\xcc\x3c\xe7\xf0\xc0\ +\xe6\x3d\x74\x02\xd0\xd5\x28\x36\x89\x68\x5e\xa7\x4a\x47\x64\x1f\ +\x8d\xe7\xe9\x15\xf1\xf1\xaa\xbd\x6e\xf3\xca\x9a\xc8\xa3\xe5\x8d\ +\xc9\x53\x7f\x58\xbb\x07\xfc\xf5\x8a\xcd\x11\x1c\xaf\x6f\x24\x2e\ +\x92\xe7\xc1\x04\x29\x6f\x2e\xdd\xd0\x86\x83\x56\x8a\xda\xc8\xa3\ +\x35\x74\xb5\xb7\xaf\xe6\x08\x16\xcd\xf0\x70\x1e\x88\xc0\x91\x97\ +\x90\xdb\x39\x89\x5b\x25\xdc\x7c\xa7\xb0\x4d\x24\xad\x51\x40\xad\ +\x2b\xac\x22\xd0\xcd\x0b\x6c\x95\x7f\xd3\x7d\xb0\xd5\x94\xbd\x73\ +\xc3\xba\x95\xd8\xdc\x6e\x44\x60\x35\xfc\xeb\x6d\x83\x3a\x25\x66\ +\x18\xe2\x19\x14\x23\xb9\x50\x3c\x25\x77\xa8\xd2\x18\xd4\xe9\x75\ +\xb4\x8a\x9e\x26\x90\xd6\xc4\xd2\x31\x11\x98\xe7\xed\x24\xd2\x4e\ +\x04\x8e\xff\x5e\x6e\x0f\xa5\xea\x6b\xb7\xdf\x6e\x6b\x0e\x6f\x37\ +\x43\xc0\x1a\x19\xd4\x21\xe2\x2a\x4a\xb8\x61\xe7\x0d\x6e\xd4\xff\ +\x77\x91\x2f\x3f\x87\x7b\x4b\x08\xe6\x4d\x01\x99\x91\x3d\x4c\x76\ +\x36\xda\x46\x6b\x37\xa2\xaf\x7a\x79\xc9\xe5\xf2\xb6\xae\xe2\xca\ +\xfc\x62\x7b\xe0\x7c\xd8\x16\x71\x79\xd1\x15\x60\x61\xe5\x93\x96\ +\x60\xf4\x22\x7c\xce\x1f\x16\x9a\x5d\xdc\xe7\xc6\x42\xd2\x3b\x47\ +\x5a\x11\xc5\x22\xf2\x7d\xef\xf3\xfa\x7a\xc6\xf0\x2e\x82\x0f\xe6\ +\x8c\x3a\x06\x2f\xef\x7a\x8e\xbe\x40\x04\xb4\x1e\xa6\xc9\xdb\xe7\ +\x5e\x73\x69\x3c\xdc\xb0\x5b\x2b\xb2\xb4\x87\x28\x63\x47\x84\x4e\ +\x12\x40\xe5\x61\xf6\x40\x29\xf8\xfe\xec\xdf\x94\x8f\x7f\x5b\x5e\ +\x75\x36\xbb\xbf\x37\x95\x1b\x37\x12\xa5\xbb\x19\x37\xef\xc4\xef\ +\xd8\xcf\x9f\xbe\x5f\xe4\x4d\x1f\x13\x39\xfb\xf2\xe9\xda\xc7\x48\ +\xdc\xc4\x90\x5a\xf7\x30\xba\x15\x8a\x1f\x27\x22\x63\x42\x70\x94\ +\x10\x82\xc9\xba\xbd\x73\xc0\x3f\xc6\xc4\x33\x93\xf8\x9b\x7c\x2d\ +\xd8\x05\x1a\x05\x5c\x19\xd6\x91\x40\x35\x47\xe8\xdf\x43\x13\x69\ +\xb3\xa2\x71\x52\xb7\xa2\x9e\x91\x76\xf4\x9d\xbf\xfb\x31\x37\xbd\ +\xe1\x07\x45\xde\x5c\x8a\xfa\xdf\xfe\x8d\xb4\xd8\x0e\x9d\xc7\xfe\ +\xeb\x63\xf1\x3d\xea\x46\x07\xfd\x4a\x20\x95\xca\x59\x6c\x36\x45\ +\x4d\x3b\x2b\xd6\x00\x31\x5c\x6b\xc6\x32\x37\xed\xfc\xc0\xeb\x9b\ +\x75\x54\x30\x0f\x1c\xc1\x63\xa7\xf0\x76\xfb\xf5\x6a\xb9\x59\xbb\ +\x98\xeb\xca\x6a\xe3\x14\x3e\x63\x9d\xc2\x0b\x0b\xcd\x5c\xc0\x6a\ +\x9d\xbc\x68\x9f\xff\xba\xef\x16\xae\xea\x0a\xcf\x36\xc2\xa7\x0a\ +\x67\x69\xf3\xaa\x2c\x6d\x7b\xd5\x51\x4a\x55\x74\x56\x5e\x90\x7b\ +\xe5\x5b\x36\x29\x9f\xa9\x84\xd7\x35\x39\x23\x5f\x93\xd7\x34\xf7\ +\x1a\xdb\xed\x45\xb9\xb2\x71\x4e\xe4\xec\x78\xd3\x0d\x77\x3c\xa3\ +\x7a\x65\xfa\x18\xb5\xef\xbd\xfc\xcf\xaa\xe8\x53\x21\xa8\x02\x50\ +\x03\xac\x57\xaf\xd6\x51\xc1\xcd\xcd\x46\x70\xb6\x7e\x77\xd4\xd9\ +\xa4\x2a\x32\x58\xbd\xe7\xe6\x21\x5e\xf7\xa2\x6b\xf6\xfb\x95\x59\ +\x64\xa9\x6e\x62\x75\x12\x3b\x67\xb1\x56\x33\x09\x2b\x3a\xba\xe1\ +\x6d\x9f\x49\x6e\xe3\xf1\x6b\x79\xf7\xb5\xdd\x51\x7c\x80\x61\x67\ +\x14\xbf\x9f\xf6\x97\x31\x24\xf1\x32\x82\x0f\xe6\xcc\xb7\x9f\x17\ +\xf9\xbd\xcd\x19\x45\x0b\x3d\x11\xe8\x84\x61\xf8\xb7\xff\x5c\x0d\ +\x27\x9b\xa6\x9e\xaf\x7b\xfd\xe1\xcd\x60\x0c\x20\xf5\x7b\xe5\x97\ +\x1e\xf9\x44\x79\xc5\x7e\xba\xdc\x8e\x2f\x74\x85\x69\x74\x98\x74\ +\x2f\xc3\xd7\x13\x92\x39\xc7\x44\xca\xb4\x4e\xde\x87\xde\x2b\xf2\ +\xea\x1f\xdd\x4b\xc8\xc2\x0e\xe9\x4e\x70\xe8\xc6\x4c\x09\xd1\xcf\ 
+\xf5\xcc\x3b\xf4\xb7\xa3\x08\x44\xca\xa4\x7c\x0c\xb3\xe2\x6e\xcd\ +\x79\x71\xbd\xee\x11\x5c\xaa\x18\x57\x83\x38\x4c\xbe\x6c\xbc\x9e\ +\xea\xda\x76\x23\x04\x9d\x08\x74\xf5\x87\x55\x08\x2e\x0e\x9a\xb2\ +\x73\x03\xd3\x0c\x39\x76\x22\x81\x91\x84\xce\xfb\xd9\xe4\x53\x67\ +\x45\xde\xfe\xc1\x52\x60\x3d\x5f\xa7\x6b\x71\xbd\xeb\xa0\x27\xda\ +\xd8\xe7\x14\x96\x20\x02\xd9\xfa\xbc\x97\x3a\x27\x77\x82\x6d\xa1\ +\x9e\x44\xe6\xc4\xda\xb2\xa9\xdb\x41\x27\xaf\x55\x8a\xc5\x46\x01\ +\x77\x6c\xca\x98\x1d\x2f\xcc\x25\x13\x9c\xc2\x95\x49\xe4\x5a\xf3\ +\x5e\xcb\x29\xbc\xda\x44\x04\x07\x2e\x14\x65\xe7\x0f\x6a\x48\x4a\ +\x85\x98\xf1\x2a\x7e\x68\x96\xe4\x59\x0a\x3e\x0d\x95\xe9\x58\x6b\ +\x95\xe4\x5a\xea\xdf\xd4\xca\x27\x7e\xf9\x3a\x1b\xdd\x3c\x3d\xd8\ +\x94\xef\x35\xcf\xd9\xc8\xa6\x4d\x42\x5e\x7e\xce\x45\x03\xeb\xa1\ +\x74\x53\x1b\x39\xa4\x5d\x66\x4e\x9d\xb4\x1d\x73\x85\x67\x0e\x71\ +\x06\x11\x7d\xff\x5c\xd9\xe4\x0f\x3e\xd0\x15\x58\x3a\x17\x70\xc7\ +\xce\x1d\xd4\x21\xe3\x71\xd9\xb9\x9d\x6e\xfd\xe1\x22\x12\x00\xce\ +\xbb\x5a\xb7\x9b\x6b\xbc\x68\x02\xb8\x43\x3b\x3a\xbf\xe8\xb9\x8b\ +\xf5\x39\x76\xc5\x0d\x2f\xc3\x79\xc4\x9c\x9f\x27\x0c\xfb\xb1\xd7\ +\x8b\x22\xfd\xd9\x57\xdc\x4f\x77\x8c\xe0\x83\xf9\x32\x55\xa5\x8d\ +\xbd\xde\x5d\xfb\x91\xa2\x29\x2f\xe2\xfe\xc7\x5e\xbc\xd1\x9e\x0c\ +\xd2\xc7\xd9\x57\x8a\xbc\xf2\xa3\x22\xff\xf7\xe7\xea\x4b\x9f\x2f\ +\x1c\x07\x66\x7c\x17\x3f\x16\x9a\x03\x69\xca\x8c\x39\xa1\xe9\xff\ +\x3f\x1c\xb3\x9d\xd6\xec\x11\x8b\xa4\xa5\xbe\xb7\x56\x5e\xd9\xde\ +\xf0\x37\x9a\x1c\x68\x53\x09\xe9\xa2\xc9\x97\x58\xa4\xd6\xc7\xa4\ +\x8d\x19\xa9\x28\x97\xf4\x45\x05\x83\xcf\x64\x91\x88\xd4\xbc\x78\ +\x40\x67\xd4\x5f\xaf\xe7\xa2\x39\xb3\xc8\x8e\x4d\x2b\xb3\xed\x92\ +\x4c\x7b\x46\x92\x3c\x22\xca\xaa\x32\x6a\x79\xfd\xd8\xd8\x6d\xc4\ +\x9c\xab\x8d\xbc\x60\x3b\x77\x27\x0a\x5d\x3d\xe2\x05\x2f\x89\xb1\ +\xbf\x99\xfb\x1d\xc6\x7e\xc5\x1b\x4b\xd1\xf7\x21\x91\xa7\xbf\x21\ +\x72\xf5\x85\x09\xfb\xaa\xaf\x72\x48\x64\x7f\x15\x13\x44\xa3\xab\ +\x2d\x3c\x6e\x9f\xc0\x24\x92\x79\xf3\x23\x4f\x5b\x03\xc6\xae\x9b\ +\xbc\x66\x8d\x21\x2e\x6d\x4f\x95\xbb\xa4\xa8\xeb\x59\xbb\x8c\xc5\ +\xe2\x0d\x0b\x57\xdf\x2d\xdf\xbb\x5e\xfe\xfd\x82\x97\xd8\x59\x87\ +\x9b\x97\x96\x1b\x43\x88\x73\x0a\xab\x20\x5c\xb0\xff\x9f\x43\x84\ +\xaf\x12\x7c\x1b\xd7\x1b\xd3\x89\x91\xc0\x3c\x62\x1a\xd1\x19\xa9\ +\x42\xb2\x5a\x4a\xbb\xd7\x0c\x2e\xb7\x5e\xdb\x92\xc5\x56\x99\x39\ +\xbd\x4c\x6d\xc9\x52\xf9\xda\x1d\x2d\x91\x33\xf2\xdc\xc0\x4e\xb4\ +\x9d\x2e\x0f\xe9\xd7\xbe\x86\x2e\x07\x10\x7c\x70\x68\x7b\xcb\xd4\ +\x25\xbf\xfa\xea\xdd\x16\x89\x5b\xc9\x79\x8a\xd0\x3d\x75\xa8\x6f\ +\x17\x59\x7d\x48\xe4\xc9\x3f\x88\x2c\xcb\x13\x7c\x03\x4f\x08\xc6\ +\x04\xdf\xf8\x6f\xef\xd9\x45\x83\x86\xfe\xf7\x22\xc3\xba\xa9\xe2\ +\xe0\xa1\x88\xd2\xe8\xc8\xab\xde\x27\x72\xea\x9e\xbd\x84\x2b\x6c\ +\x44\x34\x95\x46\xc4\xfb\x5c\x3e\xe1\xf7\x5b\x26\x84\x29\xc4\xe2\ +\x24\xf3\xc3\xbc\x6f\x44\xf4\xd8\x5c\x08\xd2\xc8\xb8\xda\xc3\x5b\ +\x79\x23\x02\x77\x6d\x6e\xc1\x6d\x4f\x1c\xc6\x86\xf8\x9d\xe5\x50\ +\xa3\x59\xfe\xb4\xb1\x50\xf0\x0d\x6d\x12\xb5\xa1\x27\x0e\xf7\x3b\ +\xab\x5c\xa3\x4b\x0f\x69\x52\xb8\x37\x89\xfc\xc1\xbf\xaf\xa3\x5b\ +\x31\xc1\x5d\x78\x91\xc6\x58\xf9\xb8\x7c\x8a\x34\x32\xa9\x3c\x8d\ +\x63\x01\x98\xb7\xdb\x72\x64\x45\xa0\x3f\xb4\x2b\x76\xbb\x17\xb3\ +\xfa\x72\x5e\x64\x5e\x4d\x61\x57\x07\xda\x77\x0a\x5b\x21\xe8\x97\ +\x93\x73\x22\x50\x87\x9d\x35\xc2\xa8\xe3\x98\x2d\xa7\xf0\x62\xe3\ +\x0a\x56\xc1\x77\xe7\x85\xe9\x6f\xf2\xa6\x3a\x65\x8a\xba\x9d\x37\ +\x37\xbc\x48\xa2\x04\xb9\x05\xad\xe8\xeb\x88\x40\xcf\x6d\x3c\x9e\ +\x97\x58\x0b\xbe\x25\xd9\x90\xfb\x06\xd7\xbd\x72\x74\xb5\x08\x5c\ 
+\x95\x8d\xe6\x7a\xaa\x42\x30\x6b\x84\xa0\x13\x7c\x8b\x8b\x74\x37\ +\x80\xe0\x83\xc3\xe4\x65\xe5\x9d\xe9\x5b\x96\xa6\x48\x54\x2c\x5e\ +\x36\x52\x4f\x00\x8e\x2d\x60\xde\xff\x73\xaf\x63\x75\x09\x7c\xc7\ +\x45\x25\x83\xd7\xf5\xff\xa3\x20\xcf\xdb\x5e\x23\x8e\x4b\xa7\x45\ +\xde\xf7\xf7\x45\xfe\x65\xf9\xb8\x7e\xb5\xbb\xf2\xee\xb7\x76\x8b\ +\x74\xa4\xc4\x31\x2e\x2b\x26\xcd\xb0\x70\x26\xed\x61\x69\x15\x7d\ +\x0b\x4e\x2c\xeb\xdf\xf6\xb1\xe4\xfd\x6d\xfc\x44\xbd\xde\xef\xbe\ +\xfc\x5d\x65\xe7\xff\xa1\xbd\xef\xa7\xa8\x4b\xb7\xe8\x46\x86\xa2\ +\x86\x94\x22\x6e\xbc\x88\x7e\x26\x74\xbc\xf6\xbc\x3f\x4f\x06\x5e\ +\x1a\x9a\xa2\x68\x0b\x1c\xed\x38\xb5\xdd\x5d\x3e\xc1\x22\x3c\x96\ +\xbc\x1a\xc2\xeb\x36\xd1\xb4\xce\x5d\xdb\x1a\x05\x22\xd0\xdb\x47\ +\x2e\x12\xd8\x6a\x73\x6f\xe8\xb7\x8a\x0a\xbe\xb8\xff\xed\x59\x3b\ +\x2f\xf2\x57\x7e\x5a\xe4\xa5\x67\x44\xbe\xf9\xd5\x20\xda\x2a\xf1\ +\xd2\x71\xa1\xc0\x36\x37\xe1\x46\xee\x08\x43\x7f\x59\xde\xfc\xc0\ +\x2a\x99\xb9\x8d\x6e\xee\x7a\x51\x41\x8d\x46\xeb\x31\xbf\x60\x93\ +\x33\x17\xc3\xae\x53\x78\xd7\x0a\xe9\x1d\x97\x43\x30\x6f\x7e\x67\ +\xec\xe2\xcd\xea\x74\x31\x9a\xeb\xe4\x86\x69\x9c\xc2\x17\x1e\x68\ +\x4f\x88\x9b\x45\x84\xaf\x2a\xb3\x71\xc3\x4b\x28\x2d\xad\xe4\xcf\ +\x8d\x00\x0d\x8d\x27\xde\x7a\x8d\xcb\xe0\x89\x97\x77\xb0\x5d\x62\ +\x6e\xa9\xdc\xa6\xfb\x06\xd7\xbc\x68\x7c\x2d\x18\x47\x65\x57\x78\ +\x8f\x3c\x3d\xfe\xac\xd1\xf2\x1c\x55\x72\x69\x00\x04\x1f\x1c\x06\ +\x3a\xb1\xfb\xdc\x5e\x3a\xf1\x29\x3f\x7b\x33\xb5\x72\x5e\x79\xbe\ +\x9e\x60\xb2\x17\xfe\xdc\x3b\x45\x7e\xe4\xef\x89\xfc\xaf\xff\xd8\ +\xce\x68\xee\x59\x07\x33\x61\x53\x0a\xbf\xbc\x58\x31\x39\xb2\x19\ +\x4b\x36\x55\x55\xbb\x70\x73\xa6\xac\x18\xbc\xed\x76\x91\xd7\xfd\ +\xed\xf2\xf9\xa1\xbd\x47\xbb\x4c\xd1\x63\xda\x48\x44\x19\xa7\xfe\ +\x9c\xe9\x8a\xc8\xd0\x80\xd2\x57\x86\x6c\x2e\x11\x3e\xeb\x1e\x37\ +\x11\x05\x9b\x47\xd6\x6b\x60\xaf\x3c\x85\x5f\x05\x7e\xd0\x15\x48\ +\xeb\x56\x04\x5e\x2f\x9f\x6f\x8c\x6c\x72\xe9\x22\x3e\x37\xd1\x25\ +\x3a\xdb\xb5\xdf\x7f\x69\xfd\xe6\xb6\xe9\xbe\x87\x4b\xd1\xf7\x33\ +\x22\xff\xdd\xdf\x91\x4e\x32\x69\x13\x44\xf9\xc2\x36\x0e\xcb\xc6\ +\xc5\xa2\x7c\x9d\x72\x72\xde\xeb\x66\xc2\x7e\x6c\x19\x44\x8a\x60\ +\x9d\xf2\xba\x94\x61\x78\xdc\x38\x93\x48\x95\x4c\x6e\x50\x1f\xe3\ +\x6b\x3a\x57\xef\x54\xfd\xfa\xc8\x89\xed\x52\xc8\xad\xab\x65\xd5\ +\x73\x3f\x84\x66\x10\x1d\x0a\x96\x59\xcf\xe1\xbb\x5e\xbb\x1a\x32\ +\x4f\x6c\x0e\xbd\xeb\x4a\xe6\xd5\x01\x76\xc9\xef\x1c\xae\x6c\x9c\ +\x78\x42\x4f\x45\xaa\x1f\xfd\x0b\x3f\x57\x7d\xb6\x59\xe6\xa0\x14\ +\x79\xa7\x87\x37\xbc\x1b\xd3\xf3\xf4\x37\x80\xe0\x83\x43\x64\x1e\ +\x73\xf8\x62\x8a\x60\x2f\xd7\xf2\x69\x5c\xba\x21\xab\xe5\xc5\xf4\ +\xfb\xff\x5a\x79\xf4\x7d\x41\xe4\xd9\x2f\x7a\x55\x0b\xa4\x1e\xea\ +\xdb\xb5\x11\xbe\x6a\x4e\x98\xff\x5c\x34\x02\x51\xd3\x54\xe4\x9e\ +\x83\xd3\x8f\x4c\xe6\xd2\x5f\x53\xb8\x33\x93\xd9\x89\x0b\xfb\x9e\ +\x76\x12\x0f\xfe\xd5\x52\x5c\x3f\xbc\xf7\x28\x59\x15\x55\xcd\x03\ +\x01\x34\x8d\x69\xa3\xef\x73\x53\x54\xe5\x28\x26\x08\xbd\x79\x06\ +\xfb\xaa\x52\x72\x12\xe4\x70\x90\x46\xdc\x25\xab\x62\x24\x22\x5a\ +\xee\x3d\x9d\xa7\xb6\x56\xfe\x7d\x97\x7d\x6d\x54\x34\x35\x87\x37\ +\x46\x4d\xe9\x39\x27\xf4\xfc\xea\x22\xb3\xd8\xde\xd7\xfe\x80\xc8\ +\x1b\xff\xb2\xc8\x1f\xfc\x5f\x71\xa1\x59\x14\xe9\x6a\x22\x61\xbb\ +\xfb\x43\xc0\xfe\x3e\x0b\x93\x48\xef\xc7\x20\x92\xfa\xed\xd6\xf1\ +\x65\x8d\x21\xe3\x48\xa0\x1d\xfe\xd4\x97\xdd\x7c\xb8\x05\x3b\x47\ +\xf2\xdc\xe9\xfa\x7b\x55\x79\x3f\x5b\x52\x4e\x85\xa0\x4b\x1e\x9d\ +\xcd\xf8\x0e\x42\x87\x87\xd7\xaf\xd7\xa5\x2f\x9c\x53\x41\xd7\x47\ +\xa3\x7e\x99\x67\x16\x71\x02\x6e\xd7\x78\xdb\x60\xd3\xc4\x18\x2f\ 
+\xe2\x27\x5e\x65\x10\x97\x7b\xb0\x2a\x49\xb7\xdd\xae\x2c\xe2\xce\ +\xcf\x85\x85\x26\x2a\xeb\x8c\x22\x4b\x4b\xf4\x37\x80\xe0\x83\xc3\ +\x16\x7c\x07\x98\xf1\x32\x5a\xc7\x76\x46\xac\xde\x2b\x72\xe1\xdd\ +\x22\xd7\xbe\x5e\x5e\xd8\x9f\xab\x3b\x9b\x56\x14\x24\x51\x8d\xa3\ +\x70\x95\x24\x9c\x38\x2c\xea\x4e\x4b\x3b\xb2\x2d\xf7\x7f\xfb\x3c\ +\x32\xcd\xf0\x73\x28\x0c\x73\x89\x47\x03\x75\x38\xf0\xfc\xdd\x22\ +\x8f\xbc\x47\x64\xe5\xcc\xde\xb7\x4b\x3b\xc5\x17\xca\x8e\xea\xc5\ +\x5d\x5b\x77\xd4\xb4\xcd\x29\xce\x8c\x22\x26\x5d\xfe\x2d\xdc\x7e\ +\x33\x85\x10\x34\xd2\x9d\xaf\x17\x26\x26\x9e\x17\x99\x8d\x68\xc6\ +\xd6\xaf\x90\x74\x74\xcc\xf4\x6c\x7b\x34\x47\xa1\xed\xd4\x57\xb5\ +\x01\x17\x1a\xa3\x87\x8a\xbe\x1b\x2e\x42\x65\x9d\xac\xcb\x83\xd9\ +\x9c\x70\x7f\xfe\x43\x22\x4f\xfe\xbe\xc8\xe5\xef\x34\x55\x27\x3a\ +\xd1\xbb\x40\x6c\xc5\xd2\xb5\x98\x48\x94\xce\xf4\x89\xe0\x50\x34\ +\xf6\x95\x92\x33\xe9\xdf\x0e\xe7\x06\x8e\x97\x35\x6a\x27\x91\x73\ +\x02\x4a\x0f\x52\x4d\xf7\xe2\xa2\x7a\xd5\xf4\x87\xb2\x9b\x38\x6d\ +\xf3\xf3\xa9\xf8\x3b\xbf\xe6\x25\x88\x9e\x01\x6a\x73\x7d\xea\xdb\ +\x22\x2f\x5d\xa9\xb3\x29\x6b\xe4\x4d\x05\x57\x66\xd3\xc3\x0c\xfd\ +\xe4\xd0\xdb\x6d\x01\xe8\x47\xf5\x76\x3d\x11\xe8\x6a\x07\xfb\x9f\ +\x8d\x89\xc0\xea\x7b\x3b\x5d\x21\x78\x6a\x8d\xfe\x06\x10\x7c\x70\ +\x2b\x46\xf8\xa6\x8c\xfe\xc5\xe6\xd3\xed\xbb\xb6\x6f\x79\xe8\xdd\ +\xff\x23\x22\x1b\x4f\x89\xfc\xe9\x63\xe5\x32\x77\xe2\xbf\x1b\x8b\ +\xd4\x55\x43\xb0\xe1\x8f\x9b\x6e\x35\x0a\x17\xf5\xd9\xb1\x9d\xda\ +\x8e\xff\x9a\x8b\x24\xda\x68\xe2\x8e\x7d\x7f\xe1\xac\xc8\x5b\xcb\ +\xf5\x7a\xf8\xed\xfb\xec\xbc\xca\x05\x3e\x75\x4d\xe4\x8f\x37\xed\ +\x5c\x22\xdb\x69\x0e\xbd\x39\x83\xe3\x39\x85\x59\xf0\xba\x15\x88\ +\x12\x74\xec\x93\xdc\xbb\x7d\xe5\xe0\x52\xb5\x7c\xe7\x21\xf8\x52\ +\xa2\xa5\x88\xac\x57\x51\xc4\x87\xaa\x8b\xc4\x3a\x17\x89\x1a\xbd\ +\x03\x6b\xd8\x58\x0b\x6e\x08\x2e\x9c\x9b\xcd\xb6\xbd\xfc\x8d\xa5\ +\xe8\xfb\x88\xc8\xbf\xfc\x2f\xcb\xe3\x66\xbd\x31\x8c\xe8\x6f\x2e\ +\xd8\x2a\x22\x6e\x77\x84\x73\x0e\x5d\x09\xbd\xa8\x10\x4e\x88\xf3\ +\x64\x1d\xe2\x54\x52\xea\x1e\xd1\x38\x8d\x41\xa4\xf5\xdb\x56\x04\ +\xba\x9b\xa1\x8e\x4b\xd8\x46\x05\x17\x64\xb6\x99\x77\x55\x9c\x7d\ +\xb7\x14\xd4\xcf\x3c\x63\x9d\xc8\xb6\x7a\xc8\xc2\xb0\x29\x25\xa7\ +\xc2\x4f\xc5\xa0\x7b\xad\xaa\xfe\x61\x1d\xbd\x5a\xf9\xc3\x77\xed\ +\x56\xc7\x85\xad\x0e\xb2\x23\xed\xe8\xa0\x48\x2b\xcd\x4b\x53\x56\ +\xce\xab\x3e\xa2\xbf\xb5\xb5\x2e\x00\x08\x3e\x38\x44\xc1\x57\xb4\ +\x4b\xab\x1d\x16\x51\x67\xe8\x3e\x58\xb9\x20\xf2\xc0\x7f\x2c\xf2\ +\xdc\x67\x45\xae\x7f\x63\xba\xdf\xeb\xfb\x50\x18\x91\x1c\x84\x91\ +\x42\xd3\x8d\x8e\xf8\x51\x3f\x8d\x08\x5e\x78\x54\xe4\xfb\x3e\x3e\ +\x5d\x45\x8d\x94\x28\x37\x45\x23\xcc\x47\x45\x13\x61\x0c\x85\xf2\ +\x20\xc8\x75\x38\xb0\xa2\xcf\xcd\x23\xd4\x79\x85\x2b\x56\x18\x2e\ +\x84\x29\x68\x4c\x4f\xfb\x1c\xb4\x69\x43\xda\xb5\x83\x8b\x20\x19\ +\x9e\x2f\xe4\x62\x11\xcc\xd0\x04\xd1\x2a\x5f\xe6\x1d\xfb\x51\x13\ +\x4b\xc4\x3c\xa1\xed\x79\x6a\x61\x36\xdb\xb6\x7a\x46\xe4\x6d\x7f\ +\x45\xe4\x3f\x7c\x4e\xe4\xdf\xff\x66\xfd\x03\x83\xac\x31\x89\xa8\ +\x60\x77\x15\x44\x34\x81\xf4\xa2\x97\x2b\x30\x5a\x4a\xce\x8b\xf6\ +\xf9\x22\xd7\x8f\xf6\x99\x1e\x83\x48\xdf\xb6\x8f\xa3\x8e\x91\xf6\ +\x6f\x09\xf0\xbe\x72\x7d\xfe\x7b\x41\x56\x6f\x9d\x1b\xe8\x22\x80\ +\x33\x9d\xc3\x57\xd4\xa2\x6f\xc7\x0e\xcd\xea\xf3\xfa\x8d\xa0\x3e\ +\xb0\x15\x82\xae\x80\xad\xce\x1b\xd6\x52\x72\x2a\x02\xab\x1c\x82\ +\xcb\x4d\xa4\xcf\x77\xeb\x56\xdf\xb7\x62\x2e\xb3\x25\xe3\xfc\xa8\ +\x69\xab\xbe\xb0\x27\x1a\x35\x37\x21\x00\x82\x0f\x0e\x35\xc2\x77\ +\x14\x8a\x18\x9a\x20\xba\x73\x33\x9c\x7e\xb5\xc8\xbd\x3f\x24\xf2\ 
+\x27\x5f\x9f\x52\x64\x9a\x09\x9f\x49\x0c\x7f\x46\x4b\xa6\x49\x30\ +\x67\xaa\x7c\x7a\xfd\x5f\x2f\xd7\xe9\x9e\x9b\x6b\x9b\xa1\x49\xec\ +\xbb\xe0\x35\xe7\x50\x95\x20\x61\xf0\x20\x18\xfe\xcd\xac\xc3\x78\ +\xc9\x0a\x40\x15\x83\x2b\x59\x2d\x08\xb3\x9e\x6d\x2b\x0e\xd0\xb4\ +\x91\x15\xf1\xdf\x6f\xa5\x2d\x89\xcc\xd9\x0b\x53\x9a\xc4\xca\xa8\ +\xc5\x92\x4a\xfb\xef\x85\xdb\x3a\xab\x39\x7c\x8e\xb3\xb7\x8b\xfc\ +\x85\xf7\x8a\xfc\xfe\x6f\xd7\xb9\xf9\xaa\xbc\x77\xf6\x3d\x4d\x17\ +\xa3\x79\x03\xc3\x32\x72\xcb\xc3\x5a\x04\xae\x0c\x9a\x68\x58\x21\ +\xdd\xaa\x2b\xf9\x84\x88\xdd\x34\x65\xe4\x8a\xa2\x5b\x91\xc5\x35\ +\xac\xbf\x7c\x93\x88\xbc\x16\x5e\x94\xb2\x98\x34\xf4\x5c\x6e\xb8\ +\xc9\x67\x7f\x3d\xd1\xe3\xc7\x99\x4d\xfc\xe1\xe2\xdc\x8b\xda\xa9\ +\x28\xcc\x32\x2f\x12\x67\x13\x43\x0f\xb2\x26\x61\xb4\x8a\x40\xcd\ +\xb0\xac\xe5\xe4\xb4\x74\xc5\xd0\xd5\x13\x1e\x34\x43\xc2\xce\x09\ +\x5c\x2d\x67\xbb\x31\x87\xf8\x22\x70\x77\x9b\xfe\x06\x10\x7c\x70\ +\xc8\x42\x2b\x3b\x82\xeb\x74\x33\x64\xe5\xc5\xf9\xd5\x7f\x5f\xe4\ +\xa9\xff\xa9\xbc\xa0\x5f\x99\x2e\xbc\x38\x4d\x7f\x53\xf4\xcd\x5d\ +\x8b\x38\x48\x95\x8b\x3f\x2e\x72\xcf\x3b\x6f\x6e\x7b\xd4\xf9\xf8\ +\xb6\xbb\x4a\x11\xbb\x2c\xb2\x59\xfe\xce\xf5\xbc\x7e\xac\x97\x7f\ +\x6f\xd8\xc7\x6e\xa4\xfd\x4c\x20\x04\x73\x49\xa7\xba\xf7\x5f\x53\ +\xe1\xb7\xaa\x43\x9a\xe5\xe3\xf4\xa0\x16\x84\xcb\x41\xaa\x99\x62\ +\xce\x91\xbe\xcc\x8b\x68\xc6\xaa\x60\x14\x3d\xed\x5e\x44\x04\x62\ +\xe7\xbd\x48\x99\xb9\xc2\xc4\x8f\x07\x27\x0e\x66\x39\xd7\x55\x87\ +\xfb\xde\xf1\x41\x91\x2f\xfd\x96\xc8\x17\xff\xad\xad\x25\x9b\x70\ +\x08\xab\x20\xba\x61\x95\xa0\x5b\x05\x15\x7e\x6b\xb6\x7c\x9c\x3e\ +\xfb\xb9\x0a\xb3\xa0\x7d\x46\x11\x31\x1b\xba\x7a\x8b\xc4\xfc\xcf\ +\x58\x5b\x0e\xfc\xf7\x12\xf3\xf9\x52\xb5\x98\x53\xc3\xc5\xb3\x3e\ +\x9c\x54\xa0\x3d\x58\x9e\x33\xcb\x5b\x75\x3a\x9e\x0d\xcd\x05\xb8\ +\x63\xf3\x34\xda\xc4\xd1\x9d\xb2\x71\x76\x9e\x5e\x75\xd3\x36\x6c\ +\xef\x2b\xb7\x82\x55\x35\x11\x8d\x00\x2e\xd5\x02\x50\x1f\xa7\xcf\ +\xd8\xba\xc1\xd2\x44\x04\xb7\x5d\xfe\x3f\x6f\x39\x1b\xcc\xe1\x03\ +\x04\x1f\x1c\x2a\xc5\xc1\x9a\x36\xa6\x15\x7c\x37\xdb\x01\x0c\xcb\ +\x8b\xf0\x23\xff\xb5\xc8\x57\x7f\xba\xec\x34\x37\xa6\xe8\x80\x65\ +\x0a\x43\xc2\x14\xd1\x2d\x5f\x38\xac\xbd\x52\xe4\x0d\xff\x78\x76\ +\xfb\x68\x45\x6a\xf1\x75\xdb\xa0\xc9\x83\xe6\x3a\x62\x35\x97\xa8\ +\x18\x5c\xb7\x7f\x6f\xd8\xe7\xed\x5a\x2f\x34\x4e\x64\xaf\x3c\x59\ +\x4a\xf0\xb9\xbc\x75\x2f\x7a\xaf\x0f\xac\xe8\x5b\xb1\xa9\x66\x56\ +\xb3\xf9\x86\xf9\xfc\x3c\x7c\x1d\xc1\x9d\x72\x0c\xf7\x98\x10\x92\ +\x65\xc9\x12\x73\x13\x33\xe9\x1a\x44\x66\x1d\x09\x57\xf1\xf0\x33\ +\xff\xa8\xdc\x9f\xe5\x72\xbf\xfc\x7f\xd4\x2e\xe1\xad\xbc\xbb\xbf\ +\xfc\xa8\xa5\xdb\x0c\x75\xba\xbe\x38\xb2\xe1\x40\xbb\x7f\x56\xbc\ +\x08\xe0\xe2\xc0\x8b\x10\x1a\xaf\x06\xae\x27\x74\x43\xe1\x16\x6b\ +\x33\x13\x4b\xd4\xdd\x23\xdc\xc2\xfd\x33\xad\x39\x64\xd6\xd7\xa0\ +\x6a\xbe\xed\x56\x5d\x63\x5b\xb7\xf3\x6c\x29\x00\xcf\x2d\x37\x42\ +\x77\xc7\x16\xba\x55\xa7\xf0\x8e\xad\x2c\xa2\x39\x08\xdd\x36\xec\ +\x6c\xd6\xeb\x56\x09\xc1\x9d\x46\xf8\x8d\xec\xf0\xf0\xb5\xac\x5d\ +\x61\xc4\x55\x0e\x59\xb1\xc9\xa4\xd7\xd6\xea\x48\x60\x95\xb2\x66\ +\x58\xbf\xb6\xbb\x23\x00\x08\x3e\x38\x3c\xb2\xa3\x18\xe1\x9b\xd1\ +\xc5\xff\xae\xf7\x88\xdc\xfb\x63\x22\xdf\xf9\x5f\xa6\xd7\x26\xc5\ +\x94\x86\x84\xde\x61\x60\x15\x2b\xe5\x05\xff\xa1\x9f\xaf\x85\xe7\ +\x2c\x04\x70\x2b\xe2\x15\x89\x42\xad\x9a\xfa\x71\x5e\xda\x06\x07\ +\xd5\x04\xdb\xae\x0c\x59\xf0\xf7\xa6\x2b\x57\xe6\xbd\x17\x56\xea\ +\xf0\x45\x65\x15\x4d\xf4\x0a\x64\x6a\x87\xb9\x38\xcf\xe3\xb2\xe8\ +\x46\xe5\xf2\x48\xe3\x14\x93\xa2\x79\x7b\x88\xf4\xf9\xcb\x0c\x87\ 
+\x22\xe7\x31\xd7\xf5\xdc\x05\x91\x1f\xfe\xbb\x22\x97\xbf\x5a\x8a\ +\xf5\x2b\xb6\xd2\xbc\xab\x22\x62\x85\xb7\xfb\xff\x96\x57\x46\x2e\ +\x36\x94\x7f\x63\xa7\xae\x6f\xeb\xa2\x7c\xae\x5a\x48\xf5\xb0\x35\ +\x84\xab\x67\x6b\x12\x31\x3d\xa9\x60\x5a\x09\xaf\x13\x6d\x16\x0a\ +\xb7\x9b\x31\x87\xcc\x5a\x4c\x6b\x1a\xa3\x51\x29\xcc\x46\x9b\xd6\ +\x79\xed\x45\xea\x8c\xad\xa4\xa2\xed\x71\x6a\x45\xc6\x66\x18\xbf\ +\x9a\xc8\x8e\xad\x1e\x32\xca\x9b\xd4\x31\xd5\x90\xac\xab\x29\xec\ +\x45\xfc\x2a\x13\xca\x56\x9d\x06\xc6\x37\x6a\xe8\xfc\x40\x15\x7a\ +\x95\x61\x64\xf1\xe0\x2a\x15\x01\x20\xf8\xa0\x57\x4c\xdc\xaa\x1b\ +\x77\xef\x07\xcb\xce\xf4\xb3\x65\x67\xf9\xf4\x74\x17\xdc\x69\x87\ +\x2c\xfb\x84\xa1\x7e\xf5\xb6\x37\x88\xdc\xfe\x17\x67\x2f\x80\xc2\ +\xda\xae\xb1\xf5\xf4\xe7\xb9\xe9\xf7\xd4\x9c\x71\xca\x74\x85\xa0\ +\x13\x7c\x5b\x36\xe2\xe1\x22\x82\x95\xc8\xf0\xc4\xe1\x28\x12\x4d\ +\x2b\x26\x85\x3a\x67\x71\x5c\x46\xda\x7a\x10\x88\xbc\x58\x39\xb9\ +\x22\x22\x64\x4c\x44\x1c\x86\x91\xab\x3e\x11\x38\x8f\x61\x47\xc7\ +\x85\xd7\x89\xbc\xb9\x3c\x4e\xbf\xf0\x3f\x34\xa6\x8d\xe5\xac\x11\ +\x72\xbb\x36\x7a\xeb\xea\x08\x57\xb9\x02\xbd\x32\x72\xbb\x45\x77\ +\x18\xbf\xb0\xa2\x45\x1f\xd7\xa5\x59\xae\x13\x7c\x4b\xae\x7e\xf0\ +\xa0\x71\x09\x4f\x12\x80\x26\x32\x0f\x70\xfc\x77\x11\x1c\x1b\x3d\ +\x73\xfb\x62\xfb\xcd\xcc\xe1\xf8\x31\xae\xfc\x8a\x34\x05\x6e\x9d\ +\xe0\x73\xc3\xdc\xd9\xb0\x89\x7e\x1a\x5b\x53\xd8\xd5\x5c\xd4\x92\ +\x72\x55\x3e\xcf\x51\x53\x49\xa4\xaa\x27\xec\xa2\xb0\xa3\x5a\x04\ +\xfa\xdf\xd7\x28\xe1\xb8\xac\xdc\x4e\x53\xda\xad\x12\x80\xcb\x02\ +\x80\xe0\x03\x04\x5f\x47\x74\xcd\x68\x9d\xce\xbd\x4d\xe4\x9e\x1f\ +\x15\xf9\xf6\x3f\x2f\x2f\xc6\x9b\x7b\x8c\xf4\x4d\x12\x7d\xc1\xb3\ +\xe3\xd4\xab\x44\xee\xff\x71\x91\x95\xfb\x66\x2b\x7e\x06\x45\x3c\ +\x87\x60\x54\xf4\xc5\xd6\xad\x68\x57\xa6\x58\x34\x6d\x07\xa5\x73\ +\x18\xbb\x7c\x83\x23\x2f\x41\xb5\x0e\x15\x6b\x5e\xba\x1b\x45\x2d\ +\x08\x37\x0b\x99\xfb\x90\x6e\xe6\x85\xb2\xc2\x32\x6f\x12\x08\x8e\ +\xd6\x36\x4f\xe9\xbe\x8d\x2d\xb7\x48\xbc\x57\x14\xf3\x8b\x84\xab\ +\xe3\xf3\xed\x3f\x5b\x1e\xa3\x5f\x11\xf9\xd6\xef\xb6\x7f\xdf\x19\ +\x6e\xd4\x54\xe3\x2a\x87\xb8\xb9\x7d\x2e\x1a\xb5\x65\xf7\xc9\xf5\ +\xdd\xfa\x59\x85\xa0\x1f\x49\x73\xe7\x53\x35\x8c\xe9\xdd\xf5\xf8\ +\x65\xe3\x74\x0e\x9a\x0e\x05\xab\xe0\xd1\xa1\xe1\x85\xcc\x4b\x93\ +\x52\x74\xcd\x21\x29\xe7\x6e\xcb\x1d\x6c\xba\xfb\xc6\xdf\x07\xfe\ +\x7b\xfb\x49\xb6\x3e\xf9\x24\x6e\x2f\x77\x7c\x93\xe2\x95\x7c\xab\ +\x4a\xc9\x65\xcd\xfa\x69\x02\xe6\xcc\x73\x63\x0f\x06\x5e\xfe\x45\ +\xdd\x07\x0b\xcd\x8d\x83\x96\x6f\xd3\x7d\xb2\xb5\x63\xcb\xc9\xe5\ +\xb6\xa4\x9c\x15\x96\x59\xd6\x3e\x89\x47\x0c\xe9\x02\x82\x0f\x0e\ +\x9b\xa3\x36\xa4\x3b\x4b\x01\xaa\x43\xab\xf7\xfe\x44\x79\x51\x7e\ +\x52\xe4\xf9\xdf\xd8\x9b\xd9\x60\x9a\xe1\xdd\xd0\xc8\xa1\x9d\xc5\ +\x85\x0f\x95\xbf\xf9\xfe\x26\x75\xc3\xac\xda\x24\x2b\x24\x3e\xc7\ +\xb0\xa7\xac\xda\xa4\xcf\x86\x91\xaf\x81\x34\xe9\x5a\xf2\x20\x6c\ +\x54\x78\xa7\xb6\x0a\x8d\xe1\xc2\xfc\x8e\x81\x97\x6c\x9a\x8e\x85\ +\x54\x14\x4e\xda\x91\x4c\x49\x44\xfe\xa2\x11\xdb\x9e\xa8\x60\xb2\ +\xdc\x99\xb6\xcd\x1c\xc7\xe4\xb4\x1e\xf4\xa3\x1f\x17\xf9\xd7\x3f\ +\x57\x1e\xab\xd7\x02\xb1\x19\xac\x9b\xb1\xfb\x49\xe7\xeb\xe9\xff\ +\x4f\xbb\xb7\x6d\x25\x07\x15\x1e\xd7\x6c\xe9\xb8\x6b\xbb\x75\xc5\ +\x17\x7f\x5f\x8f\x45\x5b\xd1\x94\x3a\xd3\x5c\x79\x37\x3c\x41\x52\ +\xa5\x11\xb1\x69\x61\xce\x2c\xd6\xa2\x67\x20\x93\x4d\x2e\xa1\x31\ +\x24\x66\xb6\x89\x25\x87\x1e\xcc\xa1\x4d\x63\xf3\x40\xdd\x71\xdd\ +\x4a\x5a\x3d\x6a\xaf\x5f\x3e\x6a\x44\xed\xae\x77\x5e\x8f\x87\x71\ +\xed\xdc\xbc\x81\xfd\xd0\x72\x55\xdc\xb9\xf9\x9c\x2e\x43\x4b\xc9\ 
+\xa9\x10\xdc\xb2\x75\x85\xab\xb9\x82\x5b\xf4\x35\x80\xe0\x83\xc3\ +\x8c\xa6\x1d\x91\x3c\x7c\xf3\x64\xed\xf5\xb5\xe8\xbb\xfe\xe5\xf2\ +\x02\xfc\xcc\xde\xbe\x9b\x4f\x12\x88\x7e\x87\x52\xf6\x00\x67\xde\ +\x20\x72\xcf\x07\xcb\x0b\xff\xf2\xec\xf7\x53\x16\xe9\x28\x7b\x05\ +\x5e\xe2\xb3\xf9\x94\x75\x78\x4d\x44\x28\xf9\x1d\x7e\x36\x47\xa7\ +\xee\xef\xbd\x54\xee\xaf\x1b\xf5\xd0\xe3\xa9\xac\x36\x8b\xac\xd9\ +\x14\x32\x03\xaf\xda\xc8\x20\x4b\x6f\x6f\x9f\x61\xc0\x44\xc4\x63\ +\xaa\x2d\x8b\x03\x3a\x3f\xee\xfd\x1e\x91\x37\x96\xc7\xce\x13\xff\ +\xaa\x14\x07\x1b\xe9\x44\xd7\x45\x4f\xb2\xe3\xc2\xa6\xda\xd1\xc7\ +\xed\x0b\x8d\x38\xbf\x61\x2b\x86\xac\x8f\x9a\xb9\x80\xce\xc0\x93\ +\xc7\xf6\xbf\x7e\x67\xb7\x7e\x3c\x67\x6b\xc8\x56\xcb\xb5\x11\x40\ +\x8d\x06\x56\xa9\x4b\xbc\xfd\x21\xa9\x2a\x28\x93\x4c\x1b\x32\xfb\ +\x39\x7c\x6e\x98\xd8\x9f\x07\xea\xcf\xc5\x9c\x66\x3e\x67\x6b\xca\ +\xc4\x4e\x23\x6a\x5d\x02\x69\xa7\x52\x75\x58\x77\xec\xf8\xb5\xa2\ +\x4e\x2b\x89\x38\x21\x68\x6c\x9e\xa3\x7b\xef\x11\x00\x04\x1f\x1c\ +\xb2\xe8\xbb\xd5\xd7\xa7\x5c\xe0\xd9\xbf\x58\x8a\xb1\x37\xd5\xf3\ +\xf9\xf2\xbd\xdc\x69\x4f\xd1\x11\xb9\xce\x64\xf1\x76\x91\x3b\xdf\ +\x2b\xb2\xfa\xe0\x7c\xda\xc4\x8f\x7c\x46\x85\x68\x62\x58\x2c\xfc\ +\xec\x60\x4a\x21\x18\x9d\xb8\x1f\x46\xd2\xe6\x84\xcb\x05\xe8\x6a\ +\xdb\xba\xdf\x5b\xb0\x0e\xe1\x4a\x00\x9a\x5a\x0c\xea\x9c\x34\xed\ +\x7b\x87\x9e\xf0\x10\x49\x97\x19\x8b\x95\x8d\xeb\x6c\x57\x30\x14\ +\x38\xcf\x39\x7c\x8e\xa5\x35\x91\x87\xdf\x2d\xf2\xfc\xd7\x45\x9e\ +\xfc\x62\x23\xc4\x42\x81\x6a\xa6\xdd\x26\xfb\xba\xb6\xcb\x59\xbd\ +\x19\x19\x34\x02\x56\x23\x80\x3a\xfc\x1b\x13\x80\xa3\x44\xde\x1e\ +\x15\x8c\xfa\xb8\x6a\xcd\x0b\x2b\xd6\x11\xbc\x64\x87\x3d\x7d\x01\ +\x38\xce\x13\xe8\xe5\xee\x0b\x45\xb8\x99\x70\x03\x73\xd3\xc7\x90\ +\x3f\xb7\xb1\xc7\x9d\x9d\x9a\x9b\xe8\xda\x2a\xfa\x5e\x5e\x1b\x43\ +\xdc\xf1\x11\x8a\xc0\x71\x29\x39\x7b\x43\xa2\x7f\x2f\x1c\xb5\x0b\ +\x2d\x00\x82\xef\xe4\x89\x3d\xd3\x23\x70\xcc\x21\xad\xd3\xcc\x8f\ +\xca\x73\x22\x2f\xfb\x78\xd9\xb1\x3d\x23\x72\xfd\x77\x67\xbb\x32\ +\xae\x53\xb8\xf3\x3d\x22\xf7\x7d\x74\xf6\x1b\x90\x72\xe9\xa6\xc4\ +\x67\xe7\xfb\xa9\xdc\x7b\x66\xf2\xe7\x8a\xc8\xe7\x8a\x03\x28\xad\ +\xe6\xa5\x3e\x6b\x25\x52\x1e\x59\xb1\xf2\x52\xf9\x70\xc1\xda\x65\ +\x5b\x39\x64\xc9\x34\x7f\x2f\xdb\xf4\x31\xcb\x4e\x7c\x98\xf4\xba\ +\x17\x3d\x66\x05\x7f\x9b\x0f\x22\x12\x7e\xdf\x9b\x45\xde\xf4\x13\ +\x22\x2f\x96\xa2\xef\xc6\x73\xc1\xbc\xb9\x44\x14\xd2\xec\xc1\x81\ +\x5c\xd8\xb6\x38\x5b\x5e\xa6\xcf\x7a\x11\x2b\x27\xac\xdd\x63\xdb\ +\x73\x0a\x6f\x15\xdd\x53\x41\xdb\x63\x73\xb7\x7e\xb8\x4a\x1e\x95\ +\xd9\xc4\x9a\x42\xd4\x1c\xb2\xe8\x4a\xc6\x59\x87\xb0\xf4\xb8\xad\ +\x67\x3e\x8f\xb8\xe8\x1a\x9d\x5a\x62\xb3\x98\x5c\x5e\x30\x7a\x0e\ +\xf4\x7d\xcf\x8a\xc0\xf1\x7c\x58\x3f\x1d\x8e\xb6\x31\xa5\xd5\x00\ +\xc1\x07\x87\x49\xb6\x8f\x8b\xed\x3c\xab\x73\xcc\x33\xa1\xef\xda\ +\xdb\x44\x2e\xfc\x2d\x91\x3f\xfe\x3b\xb3\x15\x9f\xda\x79\x65\xa7\ +\x6b\x41\x39\x3c\x37\xbf\xfd\xe4\x0b\x8e\x54\x1a\xbc\x94\x89\x23\ +\x8f\xf7\x89\xfb\xfa\xdc\x9c\xfd\x1a\x15\x6f\x5c\x13\x79\xe1\x7a\ +\x29\xee\xd4\x8c\x60\xd3\xc1\x6c\x24\xf6\x4b\x25\x4e\x54\x04\x7a\ +\x6d\xe3\xa2\x7d\x0b\xd6\xf0\xa0\xc3\xc1\x6b\x83\xfa\x79\x21\x52\ +\x0e\x2f\xba\x6d\x81\xc9\xe3\x20\xe6\xba\xaa\x6b\xf4\x15\xef\x10\ +\x79\xf2\x87\x44\xfe\xe8\x7f\x6b\xe6\xf3\x89\x69\xf6\x79\x91\xd8\ +\x0f\xb1\x92\x71\x12\xf9\x4e\xf8\xac\xe2\xba\x4a\xae\x3d\x6c\x96\ +\xe3\xdc\xc1\xa3\xa2\xc9\xcb\xa8\xd1\xc0\x0d\xfb\xb7\xaf\xa2\x5c\ +\xa4\x6e\xd7\x26\x05\x1f\x6f\x8b\x69\x4a\xc6\x2d\x66\x4d\x34\x50\ +\xe7\x05\x0e\xb3\xee\xf1\x3d\xf3\x1b\x47\x6f\x0e\xdf\x34\xc6\x9c\ 
+\x54\xa9\x3d\x93\xf8\x9e\x7f\xce\xb9\x6b\x57\x9f\x41\x84\xbc\x2c\ +\x80\xe0\x83\xc3\x65\xce\x89\x97\xb3\x29\x04\x45\x78\x91\x9e\x97\ +\xe6\xd3\xbb\xed\xdb\xde\x5b\x76\x3c\xaf\x28\x3b\xb1\x3f\x9d\x4e\ +\xc8\x4d\x23\x6c\xb5\x43\xbb\xf3\x47\xcb\x0e\xf3\xe5\x73\x5a\xef\ +\xa2\x2b\xb2\x8d\xa4\xdd\xb9\xd1\x39\x67\x3d\xe9\x5b\x3a\xfb\x2b\ +\xf8\x6c\x1e\x89\xfa\xce\xbb\xd2\xc6\x6d\x36\x79\xf0\x1d\x03\x2b\ +\x3c\xa4\x8e\x34\xbd\xe4\x55\x19\xb9\xd1\x93\xda\x63\xd7\xe6\x17\ +\xdc\xb0\xdb\x74\x45\x23\x50\xbb\x5d\x01\xa8\xc3\xc3\x4b\xd2\x33\ +\xa4\xe7\xb7\xe7\x01\xcd\xe5\x5b\x58\x2d\x05\xef\x4f\x89\x3c\xff\ +\x35\x91\xa7\xbf\x12\x29\x1b\x97\x48\x79\x62\x24\x6d\x3e\x09\x53\ +\xae\xf4\x99\x60\x06\xf6\xe1\x44\x99\x0a\x35\x57\xbf\x79\x64\xdb\ +\x75\x7d\xd4\x3c\x76\xac\xf9\xa3\x55\x96\xce\x1e\x37\xce\x00\xa1\ +\xb3\x28\xd6\xb3\xc6\x19\xac\x91\xbf\x53\x5e\xa2\x68\x99\xf1\x79\ +\xef\x9c\xed\xee\xdc\x49\x95\x8d\x0b\xdf\x4b\x95\x8e\x8b\x7d\xaf\ +\x23\xf2\x82\x29\x02\xe1\x10\xfc\xf5\x2b\x22\x9f\xff\x37\x89\x73\ +\x36\x78\x6d\x90\x45\x36\xc6\x3f\xd6\xb3\xee\xb5\x4d\x6c\xda\x98\ +\x9d\x9d\xda\xc1\xad\xcf\xdb\x3b\xb5\x9b\xd8\x98\xf6\xb6\xc4\x72\ +\xe1\x64\xe1\x32\x4d\xfb\x6f\x4d\x20\xbd\xb0\x58\xe7\x16\x5c\x1c\ +\xda\x3a\xc4\xc3\x60\xb9\x12\xf9\x2d\x77\x63\x10\x51\xf5\xe1\x6f\ +\xfa\xee\x66\xad\x60\x72\xc7\x5d\xf5\xef\x00\x82\x0f\x66\x78\x61\ +\x3c\x28\xb2\x29\x3f\x33\x2f\x3d\xa1\x11\xb8\xfb\x7f\x51\xe4\xd2\ +\xcf\x95\xd7\xc7\xdd\x09\x6d\x33\xe5\x5c\xb5\x95\xd7\x8b\x5c\xf8\ +\xe9\xd9\xba\x72\x63\xa2\xaf\x23\x9e\x8b\xf8\xfe\x8c\x89\xb1\x2c\ +\x51\x3e\x2e\x36\x17\x30\x5c\x6e\x2c\xdf\x5f\x51\xcc\x77\xb8\xbf\ +\x4a\x1b\x23\xd6\xa5\xeb\x8d\xed\xde\xe5\xb5\xb1\x8a\x8f\xcb\xe5\ +\x46\x5d\xc9\x6b\x21\x78\x35\xaf\xf3\x08\xc6\x8e\xb5\x91\x4d\x4b\ +\xe2\xa2\x84\xcf\x79\x9d\xac\x0e\xfb\xde\x56\x5e\xb6\xce\x97\xcb\ +\x3e\x6d\x87\x83\x63\xa6\x83\xc1\x01\x9e\x27\x77\xbd\x56\xe4\xcf\ +\xff\xbc\xc8\x67\x3e\x16\x2f\x27\x37\x8d\x03\x39\x14\x73\xb9\xdf\ +\xcf\x27\x84\x61\x9f\x00\x2c\x6c\x83\x9e\x2a\x1f\xe7\x86\x6d\x71\ +\x5d\xb9\x82\xcb\xf3\xe9\xf2\x4e\x1d\x11\x8c\xdd\x64\xb8\xfc\x75\ +\x6a\x06\xd6\xfd\xf0\xd2\x76\x73\x9e\x9d\xbf\x5c\x8b\x93\x53\x73\ +\x38\x67\xc2\xf9\x82\x7d\x62\xb7\x73\xbc\x87\xc2\x39\xd8\xae\x3c\ +\x71\x13\x16\x9b\x9f\xf8\xc2\xb7\x45\xfe\xd9\x7f\x1e\x8f\x12\x4a\ +\xe4\x1c\xf3\xcf\x57\xdf\x25\x5c\xfd\x7f\xd0\x24\x79\x76\x82\x6f\ +\x10\x74\xbd\x03\xeb\x1c\xde\x71\x4e\xe1\x51\xed\x1c\xde\xd8\xae\ +\x5f\x33\x81\x08\x33\x11\x51\x36\x88\x74\xe7\xe3\x9a\xc1\x52\x57\ +\x13\x39\x55\xee\xb4\xd5\xf2\x26\x65\x75\xa5\x4e\x30\xad\xd5\x46\ +\x16\x96\xba\xa2\x6f\x10\xac\x73\x6c\x3b\xfc\xcf\xbd\xfa\x75\xe5\ +\x39\xf0\x83\xe5\xb2\xce\x09\x20\xf8\x60\x56\x82\xcf\x1c\x81\x79\ +\x7b\x9d\x5b\xde\x39\x72\xf6\x1d\x22\xf7\xfc\x6d\x91\x67\xff\xdb\ +\x3a\x1d\x45\x6a\x15\x26\x45\xf7\xaa\xce\x50\xe7\x06\xfe\x83\xf2\ +\x22\xf7\xf0\x9c\x85\x72\x64\x0e\x99\xd9\x43\x84\xaf\x90\xb8\xab\ +\x36\xb6\x8d\x66\x0a\x11\x38\xf7\x1b\x83\xa2\x99\x7f\xd5\xe7\x14\ +\xbe\xc3\x46\x01\x5d\xa7\x7d\xc3\xe6\x0c\xd4\xa1\xe0\x6b\x76\x18\ +\x78\x64\x2b\x89\xec\x06\xa2\xd7\x2d\x47\x23\x56\x4f\x97\x1f\xf8\ +\xee\x4e\x7d\xec\x2f\x59\x63\xc8\x39\x17\x01\xb4\x66\x84\xd1\x01\ +\x0f\xc9\x5d\x2c\x8f\xd3\x57\xbd\x5b\xe4\x9b\xbf\xd5\x8d\xc6\xc6\ +\x22\x7d\xe3\x63\x32\xe1\x8a\xed\x4b\xa5\x32\x4d\xe9\xb9\x98\x21\ +\xa4\x8a\x48\x5a\xc1\xac\x8f\xfb\x97\xad\xb0\xb6\xae\xe0\xeb\xd6\ +\x14\xb2\xeb\x45\x08\xf3\xc8\xc1\x39\xda\x6d\x4c\x10\x33\xbb\x91\ +\x2d\xba\xc9\xca\x63\x91\x37\x91\x74\x7a\x9e\xd8\xf6\x4a\x4a\x1c\ +\xc6\xbe\xe7\xdf\x30\x14\xfd\x22\xaf\x53\xd9\x45\xe2\x2e\x61\x27\ 
+\x06\x7d\x93\x88\x3b\x6f\x2b\x81\x66\xbf\xa3\x69\x60\x06\x36\xb1\ +\xb4\x1e\xd3\xea\x1c\xd6\xec\x01\xc6\x96\xb3\x53\x11\xb8\x63\xab\ +\x8b\xa8\xd8\xd6\xff\xe7\x56\xd9\x8e\x6b\x39\x5b\x83\x8e\xbf\x89\ +\xce\x90\xa2\x2f\x6a\xb9\xb8\xcd\xcd\x52\xe8\x5f\xf1\xea\x12\x6b\ +\x04\x57\xa3\xb7\xa5\xe8\x5b\x5e\xae\x45\xa0\x3e\x16\x6c\x84\x50\ +\xd3\x39\x65\x36\x92\xa7\x0f\x8d\xe0\x65\xde\x5d\xbe\xcb\x0e\xa4\ +\xeb\x5f\xdc\xe2\x19\x24\x10\x7c\x70\x28\xa2\xaf\x4f\xec\x1d\xb4\ +\x00\x34\x73\x3e\xc9\x17\xee\x29\x45\xc2\x7f\x52\x8a\x83\xff\xa7\ +\x14\x07\x4f\x44\xb4\xe6\x14\x8e\xcc\xea\xba\x58\x5e\xf4\xce\xbf\ +\x4f\xe4\xf4\x5f\x28\x2f\x58\x4b\xf3\xdd\x3f\x61\x09\xbc\x42\xd2\ +\x43\xf1\xa9\x54\x32\x31\xd1\x56\x98\x29\x05\xf7\x01\x1f\x04\x6e\ +\x7b\x7b\x4b\x7c\x49\x77\xf2\xfd\x69\x53\x47\xe9\xee\x1e\xd8\x1a\ +\xc3\xd2\xd4\x16\xde\xb4\xe9\x49\xdc\xff\xd7\x8b\xa6\xe2\x82\x09\ +\x04\xa0\xd6\xaa\xbd\x6c\xdf\x5c\xb2\x95\x2f\x46\x2f\x1d\xfc\x89\ +\xf9\xd6\x9f\x29\xd7\xfb\xf9\x52\x90\x7e\xb9\xbd\xed\x85\xc4\x5d\ +\xc6\x61\xb4\xaa\xef\x3b\x26\x11\xa9\x0a\x4d\x0a\x26\xd6\xf6\x31\ +\xf1\x6c\xdf\x5b\xb4\x15\x3d\xd4\x19\x7c\x97\xd8\xe1\xf8\xbc\x9e\ +\x67\xb9\x65\xe7\x04\x6e\xbb\xb2\x71\x36\xea\x67\xe6\x75\xde\x14\ +\xed\x73\x22\xe6\x64\x4e\xdd\xe4\xc5\x86\x7a\x63\xc7\x61\x9f\x18\ +\x4f\x39\xc5\xfb\x04\x62\xef\xfb\xa6\xad\xfa\x8a\x3c\x98\xd3\xb9\ +\xdb\x6e\xcc\xdd\x48\xe4\x6f\xb8\xd8\x08\xca\x81\xbd\x8e\x0d\x96\ +\x9a\x6a\x24\xb9\x4d\xbc\xae\x42\x70\x77\xd7\xd6\x19\x16\xfb\xff\ +\xbc\xfe\x4c\x25\x04\xbd\xea\x2c\x2a\xd8\x33\xef\xff\xfa\xfe\xc6\ +\xba\xad\x32\x92\x35\xc7\x9b\x46\x04\xab\x72\x73\x8b\xf5\xdf\x95\ +\x08\xb4\xc3\xc3\x8b\x4b\x76\xd8\x78\x58\x47\xf9\x76\x11\x7c\x08\ +\x3e\x98\x8f\x98\xd8\x8f\x00\x33\x73\x5c\xa7\x79\x6f\xf4\xe2\xcb\ +\xcb\x0e\xe9\xdd\x65\x87\xf3\x67\xe5\xc5\xea\x72\x37\x07\xd9\x54\ +\x47\xfa\x1d\x22\xe7\xde\x3b\x3f\xa3\x46\xd8\x26\x26\xe8\x55\x52\ +\x5a\x2d\xdb\x43\x84\x2f\x26\x02\xb3\x69\x45\xe0\x61\x44\xf8\x24\ +\x3d\x8c\xe6\x22\x1f\xe2\xb9\x4a\x35\x1d\xa2\x8a\x35\xe7\x48\xdd\ +\x2c\x1a\xc1\x57\x45\xa1\xa4\x16\x7e\x95\x29\xc4\x13\x80\x3e\xce\ +\x14\xf2\xc2\xc6\xc1\x9f\x9f\xe7\x1f\x12\xf9\x73\x1f\x2e\xc5\xe7\ +\x1f\x95\xc7\xea\xf5\x66\xdb\x3b\xf3\xd2\x24\x28\x6f\x16\x11\x73\ +\x7d\x73\xd9\x06\x3d\xc2\x30\x4f\x08\xec\xd4\x77\x42\xc1\x53\x0d\ +\xfd\xd9\x7c\x8a\xfa\x11\x57\xf1\x63\x23\x6f\xda\xf6\xf4\x42\x64\ +\x3e\xd7\x2c\x8e\xa1\xc4\xf1\x9b\x4f\x13\xe5\x0b\xbe\x97\x25\x8e\ +\xc3\xd8\xf7\xf2\x22\x7d\x51\xeb\x38\x84\x4d\x70\x3c\xa7\x44\x67\ +\xe4\xbb\x26\x22\x02\x5b\xc2\x34\xef\x46\xa6\x55\x24\x86\x43\xb7\ +\xc5\x62\xfb\x66\xab\xaa\xbc\xb2\x50\x57\x16\x29\xbc\xc8\xac\x13\ +\x7c\xae\xdc\x5c\x55\xa7\x78\xd4\x3c\xf4\xb5\xd6\xb2\x47\x6d\x21\ +\xa8\xd1\xc1\xad\xcd\x5a\x64\xfa\xf5\x88\xf5\xf7\x9c\x10\x74\x82\ +\xef\xc2\x03\xf5\x3c\x44\x40\xf0\xc1\x0c\xa3\x69\x7d\xe9\x26\xdc\ +\xe4\xeb\xaa\x76\xa4\x7d\x8c\xec\xc3\x95\xaa\x74\x17\x03\x67\xde\ +\x73\xb5\x57\x73\x69\xe6\x0e\xa5\x92\xc8\x86\x77\xcc\x7a\x0d\x58\ +\x28\x2f\x08\x67\xf2\xf9\xce\x99\xca\x56\x45\x6e\xfb\x89\xf2\x82\ +\xf2\xad\xb2\x33\xfd\x9f\xf7\x16\x55\xd4\xce\x62\x70\xba\x14\x7b\ +\xef\x2a\xd7\xf3\x2f\x1d\x4c\xf4\x2b\xdc\x4f\xa9\xd1\xaf\x3c\xb1\ +\x3a\x85\xe9\x76\x7a\x45\xe2\xb3\xf9\x11\x39\x2e\xb3\xa2\xdb\x57\ +\xc7\xd6\xb9\x6f\x7d\xc3\xed\xae\x04\xa0\xa6\x25\xb1\x77\x3a\x6e\ +\x4e\xd9\x8e\xad\x23\xbc\x61\xe7\x03\xaa\x33\xf8\x6a\xde\x8e\x00\ +\x1e\xc6\x54\x87\xc5\xf2\x38\x7b\xb0\xbc\x31\xf9\xce\x17\x44\xfe\ +\xf8\xd7\xdb\xdb\x12\x6e\x5b\x9e\xd0\xe7\xfe\xc7\x92\x8e\xe4\x9e\ +\xf7\x4c\x9f\xb3\x55\x26\xff\xbe\xff\x9e\xf1\x22\x80\xa7\x6c\xa5\ 
+\x10\xbd\x7e\xdc\xb3\x56\xd7\xf5\x9d\x15\x2a\x72\xb6\xb6\x6b\xc3\ +\x82\x4b\xd2\x9d\x6a\x93\x70\xdd\x4d\x5f\x3b\x78\x1f\x18\xf4\xb4\ +\x5f\xb8\xfc\xa2\xc7\x39\x6d\x22\x8d\xd9\x9a\x06\xd9\xe3\x10\x1e\ +\x47\x1f\xbd\xf7\x07\xc1\xf5\xb6\xe5\x14\x96\x6e\x39\xb9\xaa\xbd\ +\xb6\xdb\x25\xe5\xfc\x48\xe0\x78\xb9\xa6\xde\x47\x66\xd1\x8b\x78\ +\x66\x9e\x41\x27\xf7\xe6\x69\x8e\x9a\xb9\x83\xfa\x28\x42\x21\xe8\ +\xcd\x21\x74\xe7\xa0\xab\x3b\xec\x84\xff\xb3\xcf\x95\xaf\x6f\xd3\ +\x47\x23\xf8\x60\x66\x5c\x5e\x17\xf9\xc3\x51\xdd\xd9\xed\xd8\xe8\ +\x47\x75\xf2\x59\x71\xb7\x9d\x88\x7a\xcc\x93\xfb\xcb\x1f\x7f\xe0\ +\x00\x7e\x74\xe9\x95\xe5\xe3\xc3\x22\x4f\x7f\xb6\xdc\xc6\x6f\xd7\ +\x6e\xcd\x6a\xae\x8b\x7d\x8e\x14\xa4\x18\x47\xd0\x4e\xbd\x4e\xe4\ +\xde\x7f\x58\x0b\xc7\x83\x88\xee\xf9\x43\xba\xc9\x28\x9e\x49\x47\ +\x6b\xf3\xc4\x30\xad\x91\x29\xa3\x79\xe6\x60\x03\x7e\x03\x7f\x7b\ +\x7b\x12\xe3\x76\xa2\x30\xd2\x9d\x77\x56\xf4\x44\x2f\x5d\x99\xb2\ +\x65\x57\x29\xa2\x5c\x80\x2b\x81\xac\xe7\x83\x0a\xbf\x17\x35\xba\ +\x97\xdb\x5c\x72\x87\xc0\xca\x79\x91\xd7\xfd\x54\xd9\x01\x7e\x59\ +\xe4\xda\xb7\xd2\x79\xf6\xb2\x69\x12\x06\x4b\xe0\xd4\x8d\x98\x41\ +\xaa\x97\x03\xc3\x52\x9e\x1a\x06\xf5\xbe\xd3\x72\x78\xef\x61\xdd\ +\x8c\x75\xed\x9a\x19\xb6\xaf\x8a\xbd\x3f\x79\xb1\x6c\xb3\x2b\xb5\ +\x0b\x58\x23\x88\xfa\x58\xb2\x91\x25\x13\x39\x2e\xfc\x76\x18\x05\ +\xeb\x97\x3c\x97\x12\xd1\xb7\x49\x51\xbf\xd8\x71\x9c\x07\xc7\xb1\ +\xf4\x39\x84\x83\xfd\x93\x2a\x21\x97\x8c\x2a\x8e\x22\xfb\x78\xd4\ +\x16\xbe\xbb\x9b\x8d\xf1\x63\xfc\x3b\x0b\x11\x33\xc6\x82\x77\x6e\ +\xb8\x44\xd3\x81\x69\x63\x64\xa3\x83\x9b\xdb\xcd\xa3\x8a\x12\x46\ +\xee\xd6\x4c\xd6\xec\x23\x21\x59\x35\x82\x0f\x66\xc7\x77\x2f\x8b\ +\xfc\xfe\xa8\x5f\x6c\x1c\xf8\x1c\xbe\x03\xfc\xad\x3b\x1e\x2d\x2f\ +\x3a\xef\x16\xf9\xed\x7f\xd6\x5d\x87\xa1\x15\x7e\x43\x2b\x02\xf5\ +\x06\x58\x9f\x57\xcb\x8b\xe0\xdb\xff\x5e\xf9\xff\x0b\x07\x1b\xf1\ +\x32\x45\xbf\x50\x4b\x99\x35\x0a\xd3\x8d\x96\xa6\xcc\x1d\x79\x44\ +\x34\xba\x09\xe7\xa9\xce\x64\x9e\x11\xbe\xd8\x9c\xca\xa2\x98\x20\ +\x46\x53\xf3\xd1\x24\x9d\xbc\xd8\x7d\xcf\x5f\x54\x65\x46\xd0\x47\ +\xd9\x20\xe5\xbd\x81\xdc\x76\xc7\xe1\x9d\xa7\xf7\x7c\xaf\xc8\xc3\ +\x1f\x12\xf9\xda\x2f\x8b\xac\xbf\x90\xd8\xbe\x7d\x18\x2e\x3a\x6e\ +\xeb\xc4\xb2\xfa\x4c\x22\xa6\xa7\x7d\xcd\x94\xeb\x36\x97\x79\xbb\ +\x85\x35\xf2\xd8\x12\x71\x4f\x6f\xd4\xc2\x6e\xd5\x96\x86\xd3\x24\ +\xd1\x6a\x64\x70\x25\xfa\x7c\x51\x3a\x48\x88\xad\x94\xb0\x8d\x6d\ +\x57\x78\x33\x32\x90\xfe\x72\x7e\x7d\xe5\xfe\xfa\xca\x1c\xfa\xfb\ +\xa1\x4f\x04\x16\x66\xf2\xf5\x22\xbc\x21\x2a\xb6\x03\x51\x58\xfe\ +\xdf\x04\x91\x40\x35\x57\x84\xd1\xc0\x6c\xb7\x6d\xc4\xa8\x94\x41\ +\x56\xd7\x65\x3e\xb3\xd4\xbe\xc1\x50\x43\x8f\x1f\x11\xd4\xbf\xf5\ +\x37\x97\x17\xe6\x5b\xbe\x11\x10\x7c\x27\x8e\xbd\x0a\x3a\xe3\x3d\ +\xfc\xf3\xd9\x04\xcb\x32\xb1\xe5\x4f\x59\x9e\x6a\x71\xc6\x77\xfb\ +\x7d\x68\x8a\x81\x37\x7f\xb4\x14\xbd\xbf\x52\x76\xa4\xc1\xa4\x7c\ +\x37\xd4\x17\xde\x3a\x5f\x7c\x83\xc8\xf9\x77\x1d\xec\x7e\x6a\xb5\ +\x75\x11\x17\x75\xa6\x47\x3c\x75\xfa\xa0\x44\x04\x33\x4b\x7c\xb6\ +\xb3\x3e\xc5\xfc\xb7\x37\x66\xda\x88\xad\x4f\x56\xa4\x3b\xb5\xce\ +\xf6\x47\xa2\x2a\x26\x21\x0e\xc3\x6d\x1c\x1c\x62\xe7\xa3\xe7\xc3\ +\x6b\x7e\x5c\xe4\xc5\xdf\x13\xb9\xf4\x7f\xc6\xb7\xaf\xd3\x36\xa9\ +\x6d\x9b\x72\x9b\x5b\xe5\xc7\x24\x3e\xaf\x2f\x7c\xdd\x17\x1d\x9d\ +\x9b\x8a\x9e\x75\x33\x07\x74\x6d\xd3\xfd\x7d\x7d\xa7\x7e\x28\xae\ +\x2c\xdc\xaa\x4d\x0a\x3d\x1c\x34\x49\xbb\x8d\x89\x47\xd4\x8a\x9e\ +\x6d\x36\x53\x98\x30\x24\x11\xa9\x33\x13\x6e\x56\xfa\x22\xef\xa9\ +\x32\x75\xf9\x5e\xe6\xff\x49\xa2\x8a\x4e\xf8\x99\xc8\xbc\xba\x58\ 
+\x86\x2b\xe7\x10\x9e\xf4\x9a\xce\xd9\x5b\x5c\xb0\x77\xd4\x4e\x08\ +\xea\x8d\xd6\xe9\x7a\x7f\x00\x82\x0f\x66\xb5\xb7\x06\xb5\xb3\x71\ +\x28\x8d\x6b\x6b\x60\xcf\xbd\x81\xbd\xe3\x73\xc3\x6b\xee\xbd\x2c\ +\x78\x18\xfb\xb9\x2c\x10\x83\xbe\x50\x31\x91\x6b\x47\xea\x5a\x76\ +\x61\xf5\x60\x4f\xf4\xbb\xdf\x2a\xf2\xce\x7f\x20\xf2\x5b\xff\x59\ +\x3d\x99\xbc\x2f\xd2\x78\xfa\x4e\x91\xf7\xfd\x37\xb5\xab\xed\xa0\ +\x45\x79\x96\x0e\x64\x24\xa3\x7b\x26\x31\x14\x1b\x8b\xa8\xe4\x91\ +\xcf\xe7\x09\xc1\x68\xe6\x5c\x5b\xd6\x2f\x8b\xd5\x37\xf4\x15\x8b\ +\x30\x99\x3e\xc1\xda\x13\x55\x29\x24\xed\xe0\x3c\x08\x91\x3b\x89\ +\x53\xf7\x8a\x3c\xf2\xf3\x22\xcf\xfc\x8e\xc8\xd6\xe5\xee\xf6\xf5\ +\x3a\x77\x13\xdb\xd6\xb7\xcd\x79\xca\xa1\x1b\xb4\xe3\x7e\x1c\xc2\ +\x79\x20\x56\x66\x7d\x2c\x69\x62\xe0\x7b\x4e\x95\x42\x64\xa1\x76\ +\x03\xbb\xd4\x30\xe1\xef\xa8\x1b\x7b\xdb\x55\x69\x71\xd1\xfc\x41\ +\x9d\x8a\x67\x68\x9f\xf5\xff\x0b\x59\x23\xf8\xc3\x63\x2a\xdc\xe6\ +\x70\xdb\xfa\x5c\xe6\x45\xcf\xf1\x3a\xc9\xa1\x1e\x7b\x3f\x8c\xf2\ +\x65\x3d\xdf\x8f\x45\xf9\x62\xe7\x9b\x14\x93\x73\x06\xaa\x08\x0c\ +\x3f\xb3\x3b\x8a\x5c\x4f\x12\x43\xc2\x9d\xe4\xcd\xd9\xfc\xb3\x35\ +\x00\x82\xef\xc4\x71\xf1\x7c\x1d\x6a\xcf\xfc\xa8\x9d\x69\x6a\x99\ +\x9a\x40\xb4\x1d\x44\x3d\x51\x9d\xcc\x7d\xd0\xa1\xfc\x47\x7e\xb6\ +\xec\x24\xbe\x20\x72\xf5\x37\x6d\x95\x06\x69\xe6\x32\xee\xd8\xf4\ +\x1e\x9a\xe2\xe0\x4d\x65\x87\x7b\xc7\x1b\x0f\x76\xdd\x74\x88\xe3\ +\xc5\xad\x3a\x95\xc5\x8a\xd4\x9d\x92\x9b\x14\x1e\x8b\x8e\xb4\x22\ +\x7e\x45\x44\xec\x25\x2e\xde\x83\xc8\xe7\x63\x9f\x2d\x52\xd1\x86\ +\x59\x0a\x3e\x37\xa4\x1b\x31\x26\x0c\xc2\x08\x46\x9f\xe9\xa8\xe7\ +\xfd\xb0\x8d\x52\xa5\xca\x52\xc2\xf7\x30\xb8\xe3\x11\x91\xef\x2d\ +\x6f\x4e\x3e\xff\x8b\xdd\xed\x8b\xd6\xcd\x4d\xbd\x37\xe1\x39\x7a\ +\xf3\x30\x45\x1b\xf7\xfd\x4e\x11\x0a\x13\xef\x3b\xb3\x6e\x5b\x35\ +\x85\xdc\xbb\x5a\xe7\x9d\x73\x39\x18\xd5\x0d\xac\xe2\xef\xfa\xae\ +\xc8\xba\x35\x18\x84\x95\x34\x76\x6d\x5e\xba\x1b\x4e\x8c\x58\xa1\ +\xa7\x8f\x95\x61\x13\x0d\x5c\xf4\xae\x51\xa6\x67\xdb\xa2\xdb\x2e\ +\xe9\xf7\xfc\x85\xc5\x0e\xdd\x94\x19\x26\xb5\x9f\x3a\xbf\xdd\x63\ +\x1e\xe9\xfb\xcd\xce\x75\x20\x30\x92\x44\xbf\x17\xc9\xb7\x93\xe7\ +\x5d\xb1\xa8\x86\x91\xca\xb5\x9b\x75\x5f\x17\x44\x1f\x82\x0f\x66\ +\x87\x5a\xef\xcf\xcb\x84\xab\xca\x3e\xa2\x51\x37\x1b\xd1\x3a\xf0\ +\xa3\xb6\x54\x52\xaf\xf8\xeb\x22\xcf\x7e\xb1\xbc\xd0\x5c\x69\x9c\ +\xc6\x95\x0b\xd9\xd4\x17\xd6\xc5\xb7\x88\x3c\xf0\xc3\x07\xbf\x6e\ +\x9a\xc0\xf6\x4f\xca\x1e\xe8\xe9\xdd\xfa\xec\xd2\xba\xa7\xa7\xb4\ +\x3c\x98\xa9\xff\x3e\x2d\xb5\xe9\xc0\xf4\x44\xf0\xfa\x86\x71\x93\ +\xfb\xdb\x44\xa2\x0f\xc5\xc1\xec\x9f\xf1\x71\x14\xac\x5b\x16\x38\ +\x19\xc3\x21\xc8\xac\x67\xdb\xc2\xf7\x63\xa9\x33\x4c\xea\x3c\x30\ +\x47\x63\xfe\xb8\x8a\x89\x8b\xef\x13\xf9\xc6\xbf\x12\x79\xfe\x77\ +\x22\x39\xe0\x52\xf3\xe9\x82\xf5\xcf\x03\xf5\x6f\x22\xdf\xc9\x7a\ +\xa2\x49\x59\x4f\x84\x2b\xb5\x0e\xa9\x48\x6c\x9f\xd9\xe8\xa6\x6e\ +\x1a\x6c\x2d\xe5\xa1\xdd\x40\x4d\x0b\xa3\x8e\xe0\xf3\x0b\x56\x00\ +\xaa\x23\xdb\x56\xa0\xd8\xcc\xe3\xe2\x69\xe4\x39\xb5\xf5\x73\x03\ +\x1b\x05\x54\x21\xa8\xe2\x4f\xcb\xc3\xa9\x10\x1c\x9a\xf6\x7c\x45\ +\x13\xbb\x49\x8a\xb5\x89\xa4\xcb\xb7\x8d\x85\x54\x4f\x84\x3a\x9b\ +\x62\x48\xd6\x4c\x48\x6b\x64\x26\x2c\x23\x55\xbd\xc7\x4c\x98\xd7\ +\x1b\x8b\x6e\xa6\xee\x1c\xf2\x48\x7a\x01\x35\x91\x90\x87\x0f\xc1\ +\x07\x07\x40\x76\x02\xb7\x79\x4d\xc5\xdc\x7f\x25\x72\xe5\x9f\x94\ +\x9d\xc1\xff\xe7\xdd\x69\x97\x1d\xc4\xa9\x1f\x11\x39\xf7\x09\x91\ +\xa5\xd7\x1f\xfc\x7a\xe5\xb6\x76\xe9\x95\xa2\xb9\x20\x87\x54\xf5\ +\x61\x35\x02\x94\xd5\x43\xf4\xe7\xad\x28\x1c\x4c\x88\x50\xe5\x26\ 
+\x9d\x9a\xc5\x4c\x12\x44\x73\x3e\xfe\xc2\x9b\x87\x70\x9d\x26\x75\ +\x76\x79\x4a\xe4\x24\xbe\x9f\xf7\x88\x43\x53\x1c\x9d\x21\xa6\xc5\ +\x33\xe5\xcd\xc9\xdf\x15\xf9\xd2\xdf\x10\x59\xdd\x12\xb9\x7d\x58\ +\x0b\x9b\x5e\xc1\xe6\xb5\x8d\xbf\x6d\x7d\x2e\xe6\x3e\xa7\x6f\x9e\ +\x98\xc3\xd6\xb7\x0e\x7d\xeb\x36\x8f\xb6\x1d\xef\x33\x2f\x82\x3d\ +\x30\x4d\x5b\xe9\x0b\xb7\x5b\xb3\x81\x0a\xc1\x1b\xe5\x39\x76\x75\ +\xa7\x11\x81\x61\x19\xb5\x71\x04\x50\xea\x3b\x41\x9d\x07\xf8\xbc\ +\x34\x79\x06\x9d\x13\x58\x47\x28\x06\x83\xf6\x7a\x14\x3d\x02\x6c\ +\x14\xb4\x65\x6c\xde\x61\xf2\x6e\x78\xc2\x90\xee\x34\xc7\x7d\x4c\ +\xb0\x9b\x09\xcb\x88\x89\xc0\x2c\x71\x7d\x91\x69\x03\x0a\xe1\x4d\ +\xea\x48\x00\xc1\x07\xb3\x8e\xa4\xec\xb5\x76\xed\x34\x17\x67\x73\ +\x93\xeb\x74\x58\x87\xee\xda\x4f\x96\x8f\xbf\x66\x13\x32\x3f\x5d\ +\xae\x8b\x66\x7f\xbf\xbf\xbc\x80\xdf\x7d\x78\x2b\xa6\xfb\xc7\x1f\ +\xc6\x8d\xad\xc6\x96\x1d\x76\x7e\xc1\xbb\x48\x6a\x9f\xa3\xa2\xef\ +\xb4\x7d\x68\x9d\x52\x8d\x0a\x2e\xda\xa8\x87\x2e\x73\x58\xb4\xd3\ +\xbd\x38\x41\x30\x08\xae\xc3\xd1\x2a\x04\x73\x66\x10\x98\x04\x3a\ +\xeb\x14\xe6\xe9\x0b\x3a\x2a\x93\xf8\x7e\x6a\x7b\xb2\x84\x43\xd2\ +\x45\x47\x8f\xd2\xfc\xf1\x07\xde\x2a\xf2\xaa\x0f\x88\x7c\xfe\x57\ +\x44\xbe\xbe\x5e\x0a\x3f\x53\x0b\x3f\xad\x32\xb2\x92\xd9\x7d\x2b\ +\x8d\xf1\xc5\x6f\x9b\x3e\x63\x40\xca\xcd\xdb\xb9\xd1\x48\xcc\x43\ +\xf3\xdb\xd1\x6f\xe3\x49\xed\x6b\xe6\x71\x5d\x73\xb5\x74\x7b\x4c\ +\x3f\x4e\xa4\xe8\xe7\xb4\x32\xc8\x59\xdb\x7d\xe9\xf4\x09\x1d\xf6\ +\x5d\xdf\xad\x85\xa0\x0e\xf3\xee\x48\x53\x1e\x2e\x44\xe7\xfe\xea\ +\xb4\x0b\x7d\xa8\x60\x72\x2e\x60\x75\x04\xab\x09\x6d\xe8\x0d\x0d\ +\x87\xdb\xda\xe7\x08\xee\x08\xb4\x3d\xb8\x82\xfb\x04\x9e\xc8\x94\ +\xfb\x59\xd2\x49\xdb\x8b\x29\x9c\xfb\xd9\x94\xd5\x7d\xa2\x91\x49\ +\xba\x67\x04\x1f\xcc\xfe\xc2\x68\x7a\x86\xe9\xcc\x3e\x97\x79\xb3\ +\xeb\x74\xd8\x87\xf0\xc2\x2b\xeb\xc7\x51\x60\xb9\x5c\x9f\x87\x4e\ +\xd5\x43\x52\x2a\xea\xb4\x5a\x84\xab\x10\xe1\xe6\x1a\xc6\x46\x7d\ +\x34\x22\x76\xad\xa8\x1f\xbe\x78\xd4\xe1\xdf\x65\x2b\xfe\xf4\xef\ +\x55\xfb\x9a\x0e\x0f\xab\x70\x58\x8a\xcd\xfb\xeb\x29\xec\x3e\x17\ +\x91\x5b\xf4\xe7\x1d\xec\xa4\x5b\x09\x05\x9c\x09\x44\x48\x4f\xe7\ +\x14\x0e\x83\xfb\x46\x10\x27\x46\x8a\xe2\x68\x75\x40\xab\xe7\x44\ +\xde\xfe\x11\x91\x97\x9e\x15\xf9\xca\xbf\xab\xc5\xc9\x8d\x6d\x5b\ +\x5d\xc4\x56\xb5\xd0\x7d\xba\x36\xb0\x7f\x67\x6d\xc1\x3b\x8d\x99\ +\xa3\xe3\xa4\x35\xdd\xf6\xec\x98\x66\x24\xee\x1a\x9e\x64\x28\x99\ +\x87\x71\xc3\x37\xfe\xc8\x14\x26\x15\x7f\x0a\x80\x46\xcc\xd5\xf8\ +\x71\x6e\xd8\x44\xf6\xd6\x6d\x49\x38\x9d\x0b\xa8\xf9\x49\x9d\x19\ +\x64\x54\x74\xd3\x06\x6d\xec\xd6\x8f\xcb\x56\x00\x2e\x66\x8d\x19\ +\x64\xc1\x33\x83\x0c\x4d\xbf\xc9\x28\x56\xf5\x24\x9c\x76\x10\x7e\ +\xdf\xf4\x94\x8b\x8b\x09\xae\x49\x46\xa7\x94\x29\x4c\xa6\x88\xf0\ +\x15\x53\x08\xc5\xf1\x71\x13\xcb\xa8\xce\x90\x2e\x82\x0f\x66\x1f\ +\xe1\xdb\xaf\xf8\x9a\x4b\x27\xc8\x49\xde\x42\x3b\x8b\x97\x2d\xd5\ +\xc3\xb4\x2a\xee\x36\x4c\x23\xf8\x2a\x01\x28\x75\x59\x30\xfd\xfb\ +\x86\x15\x83\xdb\x89\x26\xcd\xed\xf7\xd6\xcb\xe7\x17\xed\x8b\x8b\ +\x56\xf0\xad\xda\xc7\x29\xff\xff\xb6\xf3\x0b\xab\x5e\xcc\xdb\x54\ +\x93\x05\xc3\x7c\xbe\xd8\x90\x88\x40\xcb\x12\x1d\x77\xea\xfb\x59\ +\x4f\x27\x1a\x96\x67\x73\xe2\xf1\xa8\xb9\x06\xef\x7f\xbd\xc8\x5b\ +\x7f\x4c\xe4\x8f\xbf\x54\x8a\x7a\x2f\x37\xdf\xa6\x2d\x5b\xa6\xac\ +\x94\xe2\x64\xb5\x3c\x7e\xd6\x6c\xe4\xcf\x09\xc1\x41\xaa\x6d\xfa\ +\xda\x3c\xf1\x5e\x31\xc5\xbc\xbf\xbe\x12\x78\x22\xf3\xcb\xc3\x67\ +\x82\x75\xef\x1b\x9a\x0e\x2f\x68\xe1\xf1\x57\x45\xff\x34\xc2\xb7\ +\xd0\x94\x84\xdb\xb2\x35\x81\xb7\xbd\xe7\xce\x8d\x89\xfb\xac\x8d\ 
+# [hex-escaped binary resource payload omitted: the tail end of one embedded
+# PNG image (through its IEND chunk), six 18px / 24x24-viewBox SVG icons
+# (history/restore, add-a-photo, help, camera, visibility, play-circle), and
+# the start of a 512x341 PNG whose tEXt chunk reads "Matplotlib version
+# 3.6.0.dev1651+g30d6161406, https://matplotlib.org/".]
+\xee\x5f\xc6\xfc\x2a\x48\xe9\x1a\x22\x12\x64\x9a\x43\x1f\xb6\xe3\ +\x74\xc6\xf8\x79\xe7\xd4\x2f\x0f\x40\xd4\x9b\x21\x0a\xcc\x93\x09\ +\x03\xdb\x14\x45\xa6\x9e\x32\xaf\x81\x2d\x5f\x14\x00\x88\x04\x14\ +\x00\x02\x08\x80\x73\x08\x00\x40\xdc\xb3\x07\x68\x34\xdc\x22\xb1\ +\x20\xf2\x12\xf0\x84\x82\x30\x2d\x55\xa6\xc8\x93\xa0\xf0\x22\xd4\ +\xd7\x03\x4c\x99\x72\x6b\xaf\x77\xdf\x7d\xae\xa8\xb8\x78\xb7\xeb\ +\xbe\x4e\x76\x76\xae\x71\xce\x39\xd3\x7b\xa7\x73\x56\x49\x64\xbf\ +\x7b\xf6\x0c\x34\xa3\x30\x60\x87\x11\x48\x75\xd8\xff\x59\x00\xd4\ +\xb4\x36\xa5\xb1\xd6\x15\x05\xa2\xfd\xcd\x21\x0a\x98\xfc\x37\x19\ +\x27\x75\x18\x74\xfc\xf9\x39\x6b\xbf\x5e\x58\x02\xf1\x42\x07\x4d\ +\x52\x65\x0a\x5d\xf7\x6e\xe2\x29\x64\x2e\x7f\x36\x8d\x8f\x02\x80\ +\xd0\x75\xa4\xbd\x14\x92\xfa\x71\x7b\xf7\x9a\x5e\x03\x9b\x38\xc0\ +\x21\x00\x44\x02\x0a\x00\x01\x06\x98\x04\x6a\xab\x9a\x36\x04\xa2\ +\x5d\x57\x91\xe1\x07\x90\x78\x03\x34\xc5\x82\x5b\xa1\xe0\x48\xa3\ +\x31\xdc\x10\x4c\x05\x08\x42\xea\x3f\xef\xba\xad\xef\x05\xd3\xa7\ +\xaf\x77\x16\x26\x67\xda\x9f\xe6\xf6\xed\xd8\x21\x27\x56\x38\x5d\ +\x2d\x51\x60\xa2\xf5\x7f\xdc\x9e\x01\x97\xc2\x40\xfa\x7f\xd4\xe8\ +\xb3\xa2\xa0\xd6\xe7\xb0\x69\x12\x31\xed\x6e\x58\x37\x06\x9e\x70\ +\xf6\x7b\xe9\xb9\xeb\x96\xe3\x71\xf8\x20\x0c\x06\xf4\x38\xfb\xf9\ +\xc3\xd6\x2d\x7f\xe7\x2b\x33\x1c\xe7\xa4\x00\x7a\xda\x24\x5d\xbe\ +\xce\x39\x50\x05\xff\xb9\xf0\x16\xf8\x05\x09\x73\x0c\xb9\x55\xac\ +\xc1\xb4\x55\x24\x0c\x74\x5c\xff\x34\x36\xa1\x83\x0a\x00\x11\x83\ +\x02\x40\x40\x00\x4c\x80\xea\x8a\x26\xe3\x1e\xe2\x18\xe6\xd4\xa8\ +\x81\x0f\x73\x7a\xfd\x81\x54\xf7\x62\x21\x1e\xa1\xa0\xe3\x49\xa0\ +\xca\x38\xf7\x88\x11\x5d\xc6\x1c\x7d\x74\xe1\x67\xdf\x7f\xcf\x71\ +\x73\xf0\x39\xfc\xf0\x51\x99\x63\x4f\x3e\xef\x90\x80\xd5\x63\x66\ +\x8c\xba\x68\xb1\x24\x00\xfb\x4a\x8a\xd6\xb1\xa2\xff\x5d\x7b\x06\ +\x34\x63\x0c\x94\xff\x07\x00\xea\xfd\x5e\x0a\x8e\x35\x68\x22\x17\ +\x3f\x95\xde\x91\x96\x67\xac\xe9\xff\xe3\x11\x05\xaa\x72\x34\x45\ +\xc1\x8e\xb2\x9c\xd4\xdf\xfd\xe1\xad\x81\x8b\x1e\x9f\xb8\x0e\xe2\ +\xc1\xab\x00\x50\xb9\xf4\x05\xdb\x85\xde\x02\xbf\xa0\x83\x00\xd9\ +\xfa\xc8\x96\xc4\x15\xa5\x63\xd3\xaa\xc4\x01\xda\x7f\x44\x02\x0a\ +\x00\x31\x04\x1a\xea\x01\x1a\x04\xef\xd1\x49\x09\xf2\x0d\x36\x40\ +\xa3\xd1\x16\x09\x06\xb7\x62\x41\x37\x66\x80\x27\x12\x1c\xc7\x35\ +\xa5\x09\x00\x90\xb9\x37\xdd\x30\x60\xd0\x95\x2b\xb4\x02\x02\x09\ +\x21\xf0\xca\x1d\x37\xf5\x1f\x96\xfe\x63\xec\xf9\xf2\x5d\xf0\x18\ +\x5b\x9a\x7a\xaa\x5a\x5e\xc5\x81\x74\x48\xa1\x19\x84\x41\x19\xf8\ +\x3b\x04\x10\x80\xb0\x19\xa4\x1e\xd8\xdc\x80\x43\x2f\x3d\x73\x3a\ +\xad\xa6\x28\x20\x00\x4d\xeb\xab\x7b\x19\x02\xb0\xb6\x51\x41\x8d\ +\x74\xd9\x3b\xd3\xcf\xee\xd2\x7b\xc8\xc9\x05\x5b\x7f\x5d\x5a\x09\ +\x5e\x69\x66\x01\x20\x4c\xeb\xe7\x10\x80\x95\x17\x1b\x60\x28\x28\ +\x9b\x1b\xd9\x6f\xed\x93\x79\x0d\x98\xb4\x60\x00\x06\x01\x22\x52\ +\x50\x00\x08\x30\xc0\x04\xa8\xe1\x3c\xc7\x52\x2c\x8f\x80\xa4\x77\ +\xcf\x13\x0d\x22\xc1\xe0\x46\x2c\xf0\x84\x82\xae\x48\xe0\x08\x84\ +\x01\x6d\xb3\xb3\xee\x9f\x7a\x59\xb7\x99\x2f\xbf\xa2\x0c\x08\xbc\ +\xfd\x92\x8b\x0f\x19\x76\x48\x97\x76\x50\xdd\x34\x2c\x72\x4c\x2a\ +\x67\x78\x21\x8a\x57\x71\xc0\x3a\x32\x44\xe2\x20\x51\xc2\x80\xf8\ +\xec\x33\x4d\x21\x11\x33\x37\xa3\xc9\xf0\x2b\x67\x22\xb8\x31\xf6\ +\x12\x01\x61\x45\x9e\xbb\x39\x86\x5b\xb6\x28\x80\x91\xb3\xad\xae\ +\x2e\x00\x87\x5d\xf8\xc6\xd0\xad\xab\xbb\x7d\xe3\x79\xd9\x69\x93\ +\x31\x5a\xba\x51\xfd\xa2\x7a\x7a\x16\x00\xfe\x5c\x06\x36\x01\xc0\ +\x66\xa9\x53\x4f\x76\x9f\xc4\xe0\x73\xc5\x01\x7a\x00\x10\x09\x28\ +\x00\x04\x18\x00\x00\xb5\x94\x00\x48\x91\x18\x77\x6b\xbf\x5b\x6f\ 
+\x41\xaa\xe0\x18\x59\x7a\x96\x70\x9a\x73\x9b\xce\xb1\xd1\xf8\x84\ +\x3f\x9e\x7c\x42\xef\xe7\xde\x5b\xb4\xbb\xb0\xb8\x58\x38\x78\xdb\ +\xb1\x7d\xfb\xc0\x5d\xbf\x3b\xe3\xd0\xd8\xb4\xc8\x58\x9e\x94\xe5\ +\x66\x44\x47\x3c\xe2\x40\x54\x84\x8e\xd7\x20\x5e\x61\x60\x10\x7f\ +\x87\x00\x02\x24\x0c\x59\x69\x54\xd0\xa1\xc6\x4c\x04\xde\x7e\x6d\ +\x63\xaf\xdb\x8b\x4f\x50\x5c\xc1\x6f\xa5\x5d\xd2\xc6\x5d\xfd\xfc\ +\xa1\xff\x7b\xe1\xea\xcd\xe0\x09\x93\x3b\x64\x42\xa8\xb7\xff\x29\ +\x83\xe8\x5c\x08\x00\xe1\xb9\x4b\x84\x07\x40\xd6\x5b\xd7\x15\x00\ +\x6e\xc5\x01\x7a\x00\x10\x09\x28\x00\x04\x18\xc4\x24\x50\x57\x09\ +\x10\xd0\x30\xfc\xaa\xfd\x22\x23\xef\xd6\xf8\xb3\xe9\x79\x69\x79\ +\x1e\x05\x9e\x37\x21\x7a\x5c\x5b\x80\x94\x7f\x5d\x7f\x4d\xff\xf1\ +\xb3\x1e\x11\xae\x10\xf8\xfc\xcd\xd7\xf7\x6b\x17\x69\x48\x81\x6a\ +\x46\x00\xa4\x48\xba\xee\xbc\x20\xc5\x28\x7e\x88\x83\xb8\xbc\x06\ +\xe9\x82\x61\x80\x74\x00\x83\xf8\x3b\x04\x90\x42\xc2\x66\x5a\x5a\ +\xd3\x6c\x84\xcc\xb4\x26\xc3\xae\x9a\xa2\xe8\xc9\x5b\xe0\xc2\x65\ +\xef\xc7\x10\x02\xcf\x80\x96\xe6\x4c\xcd\xef\xd2\xf3\xc9\x9d\x45\ +\xdb\x7e\x71\xff\xf6\x35\x76\x08\xc0\xc3\xd4\x44\xe5\x50\x87\x68\ +\x3b\xed\x45\xf0\x09\xc7\x10\x00\x8d\x97\x78\x00\xb7\xe2\xc0\xef\ +\x69\xad\xc8\x41\x05\x0a\x00\x01\x04\xa0\xd1\x48\x8a\xc6\xf9\x01\ +\x1a\x8d\x6d\x58\xb2\x3f\xe0\xd2\xf8\x5b\xfb\x74\xd3\xf3\xd2\x36\ +\x70\xb6\x29\x3c\x02\xa7\x75\x6e\xdf\xf1\x77\x47\x1d\xd1\x6e\xf1\ +\x8a\x1f\xcb\xd8\x64\xe3\x8e\x18\x91\x7d\x56\x8f\xae\x9d\xa1\xbe\ +\xce\x3e\xd4\xc0\x0e\x33\xa8\x62\x10\x42\x02\xeb\x6d\xe5\x15\x45\ +\x57\x1c\xd4\x33\x4d\xd2\x11\x07\x3a\x5e\x03\xc3\xe7\x21\x80\x80\ +\x11\x31\xb3\xda\x08\x02\x0f\xd3\x9a\xfe\xe7\xae\x5d\x90\xd6\x38\ +\x4d\x11\xc0\x85\xb7\xc0\x8d\xb1\xe7\x19\xf1\x78\x86\x10\xa2\x65\ +\x57\xd7\xa4\x90\x63\x2f\xfb\xef\xb0\x77\x1e\xec\xf3\x1d\xb8\xc5\ +\x04\x30\x1a\x14\x1e\x0d\xba\x4c\x99\x01\xb4\xea\xc4\x1e\x23\x3b\ +\x2e\x11\xd3\x00\xad\xbc\x58\x81\xa1\x9a\x95\x60\x11\x8f\x38\x40\ +\x0f\x00\x22\x01\x05\x80\x80\x00\x31\x09\xd4\x0b\x62\x99\x2c\xaf\ +\x80\xc8\xf8\x5b\xfb\x45\x86\x57\x76\x9c\xae\xa1\x17\x89\x02\x9e\ +\x00\x60\x8f\x67\xbc\x09\x06\x00\x79\xe6\xbc\xdf\x0d\x7c\xff\x87\ +\x9f\x96\xd3\x01\x81\x84\x10\x98\x7b\xe1\x84\x81\x81\xda\x2a\x02\ +\xec\xf4\x2e\xc1\xe2\x45\x31\x82\xcc\xd0\x04\x2d\x10\xfc\x10\x07\ +\xcc\x6a\x47\x22\x71\xe0\x56\x18\xf8\x3d\x04\x90\x62\x84\xcd\x2c\ +\x91\xc7\x41\xf0\xbf\x72\xed\x82\x38\xbc\x05\x3c\x63\x1f\xdb\x41\ +\xa5\xb3\x6d\xa3\xd3\x6a\x7a\x0b\xb6\x94\xf4\xce\x18\x33\xf9\x91\ +\x1e\x9f\xcd\x73\xf9\x06\x4a\x2b\x06\x40\xe5\xbd\x50\xb5\x87\xe7\ +\xc1\x70\x33\x8c\xe0\x93\x00\x30\x1a\x18\x1b\xec\x36\x58\x11\x20\ +\x3e\x71\x80\x02\x00\x91\x80\x02\x40\x80\x23\x06\xc0\x42\xd6\x7b\ +\xb7\xf6\xcb\x0c\xbc\x6c\x9f\x1b\xc1\x20\x32\xf4\xbc\x61\x02\x76\ +\x1b\xe7\xd8\x9e\x29\xc1\xf4\x39\x17\x9d\xdf\xf3\xb6\x7f\x2f\xd8\ +\x66\x6d\x9b\x3d\xe5\xbc\xee\xbd\x49\x28\x13\x6a\x42\x6a\x83\xcf\ +\xc6\x22\xb8\x11\x08\x32\xef\x41\x9c\xe2\xc0\xad\x30\x20\x09\x18\ +\x02\xc8\x4a\x07\xe1\xb0\x83\x4a\x18\xf0\x16\x35\x6a\x49\x6f\x81\ +\x6e\xc0\x21\x89\x00\x34\x74\xbb\xa5\x57\xfb\x0e\xcf\xec\x2a\xdd\ +\xb7\x8d\x33\xf5\x45\x80\x35\x04\xe0\x25\xd6\x41\x15\xb0\x28\x3a\ +\x8e\xc6\x32\x98\x3e\xbd\x16\xda\xb1\x0e\x80\x86\x00\xb0\xb5\x53\ +\x76\x0c\x80\x52\x1c\xa4\x64\x75\x4c\xed\x3b\xf2\x8c\xb6\xdc\x63\ +\xdd\xa2\x73\x3c\x89\xb3\x90\x78\xeb\x48\xb1\x77\xf3\xea\x9a\xb2\ +\x5d\x2e\xae\xbd\x24\x04\x05\x80\x08\x62\x02\x98\x94\xc5\xb0\x1e\ +\x0c\x5e\x5c\xfe\x00\x6a\xb7\xbf\x28\xdf\x30\xc7\xd0\xcb\xc4\x02\ +\x8b\x8e\x47\x20\xba\xed\x86\x43\xbb\xf4\x78\x22\x37\x67\xe7\xce\ 
+\xe2\x92\x86\x6e\xb9\x39\x29\x37\x0f\xec\xd9\x2b\x36\x13\x82\x3d\ +\x86\xcd\x37\x1e\x81\xe0\xb7\x38\x10\x09\x03\x42\xa5\x0b\x36\xbd\ +\x94\x09\xa0\x49\x1c\x04\x8c\x06\x7f\x87\x00\x02\x61\xc8\xb2\x56\ +\x4d\xe4\x05\x1f\x52\xc2\x40\x35\x55\xd1\xcb\x30\x82\xdf\xde\x02\ +\xa5\x11\xa6\xd2\x96\x57\xa6\x19\x63\xaf\x78\x6f\xe8\xdb\x8f\x0e\ +\xfb\x11\x5c\x60\x19\x4d\xed\x61\x00\x95\x67\x40\x21\x0c\xac\xb7\ +\x0f\x72\xcb\x88\x13\x99\x00\x10\xd6\x91\xad\x83\xa6\x38\x70\xec\ +\x33\x00\x7e\xcd\x18\xdd\x01\x2e\x1d\xdd\x01\xd8\x7d\x6c\x19\x40\ +\x79\x47\xa8\xe3\x6d\xfb\x79\xe7\xc5\x43\x1a\x37\xe9\xb8\xf5\xa2\ +\x91\xfc\x56\xc7\x6e\x9e\x57\xb8\x64\xc6\x94\x0d\x92\xa3\x93\x1e\ +\x14\x00\x32\x0c\xb3\xe9\x06\x0b\x48\x53\x36\x5e\xd1\xa6\xc0\x88\ +\x47\x40\x2e\x1c\x78\x46\x1a\xc0\xdd\xd8\x3f\x80\x58\x2c\xb0\xb0\ +\x06\x37\x2a\x5c\x32\x00\x02\x6f\x4e\x9d\x78\xd8\x89\x7f\xfb\xd7\ +\x2f\x6f\x5c\x7e\xfe\xa0\x8c\x9a\xb2\xa6\x56\xab\x04\x40\x3c\x02\ +\xa1\x16\x9a\x16\x55\x62\xd3\xea\x8a\x03\x5a\x18\x54\x55\xda\xd3\ +\xe9\x88\x83\xa8\xd7\xe0\x6d\xb2\x1b\xfc\x24\x18\x68\x88\x58\x02\ +\x40\x68\xec\x39\xc2\x40\xb5\x86\x81\xee\x30\x02\x37\xe8\x50\xe1\ +\x2d\xa0\xb7\xc7\x1b\x5b\xb0\xa5\x64\x68\xdb\xe3\xc6\xdf\xd6\xe5\ +\x9b\x0f\x1f\x2d\x02\x1d\xe8\x20\x40\x26\xb6\xc0\xca\x93\x15\x06\ +\x3c\xc3\x27\xab\x93\x8e\xb0\xf0\x2b\x76\x8e\x84\xa3\x8f\x11\x97\ +\x86\xde\xb6\x5d\xf7\x18\x37\xc7\x01\xf0\x87\x0c\x68\x64\xb3\x16\ +\x74\xca\x17\xa5\x71\x93\x8e\x57\x0f\x59\x7d\xac\xac\x0c\x00\x88\ +\x60\x04\xa4\x0a\x14\x00\x22\x08\x34\xde\xb9\x2a\x22\xd6\x93\x49\ +\x90\xd6\x04\xb9\x78\x10\x09\x07\x99\x68\xd0\x75\xff\x5b\xdb\x59\ +\x58\xa1\x40\x89\x84\x51\x84\xe4\x3e\x35\xf9\xec\x5e\xa3\xa0\x32\ +\x0f\xea\xa8\x36\xa9\x0c\x3e\xbb\xad\x96\xf9\xee\x46\x20\x24\x52\ +\x1c\x28\xbc\x06\x01\xaf\xf3\xd7\x05\x04\x02\x61\xc8\xca\x68\xfa\ +\xce\x0b\x3e\x74\xe5\x19\xe0\x89\x05\xdd\x61\x84\x44\x04\x1d\x6a\ +\x18\xe1\xcc\x7e\x7f\xee\x97\xfe\xc5\xff\xdb\x5b\x53\x55\xa2\x0e\ +\xb0\xa0\xd6\x01\xb0\xf5\xcc\x25\x41\x87\x8e\xfd\xba\xf1\x0e\x22\ +\x61\x11\xf6\x57\x00\x90\x08\xe8\x0d\x43\x78\x19\x06\x50\x19\x79\ +\xdd\x3c\x79\xc7\xaa\x04\x02\x80\x37\x91\xc0\x2b\x4b\x94\x4e\x94\ +\x56\x96\x1e\x00\xd7\x40\xd0\x04\x05\x80\x08\x1d\x01\x60\x12\x80\ +\x80\x24\x8d\xea\x02\x8c\x10\xb1\x70\x10\x5e\xf4\x02\xc1\xc0\xae\ +\xb5\x0b\xc0\x1f\x92\x10\xc5\x09\x50\xc5\xfe\xbe\x1d\xf4\x82\x5a\ +\xe6\xce\xd6\xf1\x24\xa8\x44\x02\xfd\x5d\x96\xd6\x6f\x71\x40\x0b\ +\x83\x9a\x2a\xfb\x3e\x4b\x1c\x44\xbd\x06\x81\x48\x44\x74\xe6\x3d\ +\x91\x62\x44\xcc\xdc\xf6\x4d\x46\xda\xcd\xac\x04\x6d\x61\x10\xe7\ +\x30\x82\x5f\x41\x87\xbc\x75\x0b\x08\x00\x14\x97\x65\x04\xce\xb8\ +\x62\xf1\x90\xb7\x9f\x39\xfe\x67\x50\x61\x42\xe3\x1b\xf0\xfc\x5e\ +\xe5\x90\x32\x94\x4a\x61\xe0\x23\x24\xd2\xf4\xe1\x05\x20\xda\xea\ +\x9d\x88\x61\x00\x17\x1e\x02\xc7\xb1\xaa\xfd\x00\x0e\x83\xef\x18\ +\x4e\x01\xe0\xf7\xe0\x75\x85\x82\x55\x30\x0f\xd9\xf3\x95\x08\xca\ +\x45\x6c\xa0\x00\x10\x40\x00\x88\x58\x00\xf0\x9e\x44\x0c\x74\xef\ +\x84\x87\x4c\x3c\xa8\x94\xad\xac\x4a\xb6\x7c\x34\x85\x82\x4e\x9c\ +\x80\x4b\x4f\x02\x00\xc8\x05\x82\x2c\x7f\x99\xf7\xa0\x86\xc9\x57\ +\x47\x1c\xd4\x56\x35\xbd\xcc\x89\x4d\xc7\x78\x0d\x02\x26\x77\x74\ +\xd2\x33\x81\x40\xd8\xcc\xca\xe4\x07\x20\x66\x66\xe8\x0b\x03\xd5\ +\x02\x47\x5e\xe2\x0b\x74\x83\x0e\xfd\x18\x46\xd8\x5e\x3e\x32\x67\ +\xc4\xa8\x2b\x3a\xfc\xf4\xd5\x4b\xfb\x40\x86\xe5\x01\xd0\x8d\x43\ +\xb0\x36\x00\xf8\x2b\x0c\x12\x31\x0d\x50\x15\x80\x08\xe0\x4d\x1c\ +\x78\xdd\xc7\xd6\xc9\x8f\xfd\xbc\x34\x00\x7a\xde\x04\x36\xf8\x53\ +\x74\xac\x2c\x0f\x80\xd8\xfb\x10\x70\x19\x64\x35\x28\x00\x44\x10\ 
+\x70\x1a\x68\xdd\xa7\x03\xeb\xf2\xe3\x66\x2e\xc8\x43\x26\x1c\x44\ +\xa2\xc1\xcd\xd8\x99\x57\xa1\xe0\x45\x24\xb8\x11\x08\x5e\xbd\x07\ +\x5e\xc4\x41\x58\xec\x35\xf0\x7b\x08\xa0\x97\xf9\x1b\x1c\x13\xfa\ +\xca\x16\x80\xa8\x9a\x99\xe0\x66\x1d\x03\xde\xe2\x46\x5e\xe2\x0b\ +\xfc\x1a\x46\x10\xaf\x74\x48\x20\xef\xa8\x27\x06\xac\xfa\xee\xcd\ +\xfd\x0d\xa1\x5a\xe9\x0d\xe4\x88\x01\x50\xb9\xec\x13\x21\x0c\x12\ +\x20\x00\x44\xc3\x24\xda\x41\x88\x22\x71\x00\xe0\xdd\x43\xe0\x52\ +\x20\x28\x8f\xe7\xe5\xa1\x93\x8f\x85\x1b\xcf\x80\x2a\x36\x00\x87\ +\x00\x94\xa0\x00\x10\x41\x0b\x00\x37\x86\x1f\x40\x3d\xe6\xef\xf7\ +\x95\xe9\xca\x2b\xa0\x79\xbc\xe3\x41\xe0\x41\x24\xc4\x23\x10\xbc\ +\x7a\x0f\x74\xc4\x41\x4d\xa5\x7d\x9f\xb5\xbd\x16\xc0\x00\x7f\x87\ +\x00\x02\xe1\x06\x02\x55\xd1\x17\x2e\x46\xbd\x0d\xc7\xc0\x67\x4d\ +\x65\xfa\x28\x0c\x54\xab\x1e\xea\x0e\x23\xd0\xdb\xdd\x0e\x23\xd0\ +\xde\x02\x00\xbb\x30\xa8\xab\x6f\x9f\x7a\xfe\x95\xf3\x07\xcc\xfb\ +\xe7\x39\xc2\x37\x06\x12\x33\x62\xc6\x7a\x6e\x1e\xc6\xf2\xb9\x51\ +\xfe\xe0\x5e\x18\x10\xbf\xee\xd1\x06\xe6\x56\xd2\x19\xd7\xa7\xdd\ +\xee\x1e\xc4\x01\x9b\x47\x5c\x71\x02\x3a\x06\x5e\xc7\xb8\xc7\x11\ +\x07\xe0\x4a\x2c\x00\x75\xce\xd0\x03\xa0\x04\x05\x80\x8c\xd8\x43\ +\x45\xe3\x61\xa0\x0a\xf6\x03\x50\x1b\xff\x88\x24\x0f\xd1\xb1\x42\ +\x6f\x83\x24\x3d\x2f\xa9\xa3\x3c\x0f\xc7\xb1\x22\x21\x1e\x81\x50\ +\x0f\x76\x41\xa0\xeb\x3d\xf0\x22\x0e\xa8\xed\x01\xd3\xe7\x18\x80\ +\x86\x10\x89\xbd\x43\x81\x9e\x9d\xc0\x09\x40\xf4\x53\x18\x34\x57\ +\x7c\x81\xdb\xd9\x08\x06\x39\xb3\xf3\xa0\x61\xe3\x0b\xd7\xfe\xf2\ +\x61\x39\x70\x20\x66\x04\xb2\xe8\xcb\x40\x32\x1b\x41\xe9\x05\x88\ +\x47\x18\xf8\xa5\xd1\x2d\x0f\x80\xa2\x7e\xb6\xed\x9a\x2b\x06\xca\ +\xc4\x81\x2d\xe6\xc0\x2a\x8b\x4a\x66\x7a\x14\x08\xdc\xfd\x3a\x22\ +\xc1\xa3\xa7\x40\x9a\x56\xb6\x74\x33\x01\xff\x22\x39\x0f\x62\x50\ +\x00\x88\xe0\x0d\x01\xf0\xb0\x6e\x2c\x95\xd9\x30\x89\x5c\x48\xc8\ +\x04\x84\x4c\x38\x08\x87\x0b\x14\xf5\x51\xe1\xe8\x51\x98\x4e\x19\ +\xae\x12\x09\x8e\xba\xd5\xd9\xf3\x70\x23\x10\x64\xde\x03\x2f\xe2\ +\x40\x10\x73\x90\x62\x86\xfd\xf5\x00\x44\x1a\x08\xd4\x34\xbd\x41\ +\x91\x3b\x3b\xc1\x8d\x30\x50\x2c\x72\xc4\x8b\x2f\x00\x90\x0b\x03\ +\x2f\xf1\x05\x5e\xa6\x29\x36\xfe\x0d\x90\xb1\x67\xbf\x30\x68\xdd\ +\xaf\x3d\x97\x9b\x11\xce\xa4\x00\x33\x62\xe6\x65\xc6\x37\x4d\xd1\ +\x0f\x61\xe0\xe7\x10\x80\x43\x50\x08\x0c\xba\x6d\xbb\x17\x71\xc0\ +\xe6\xa7\x33\x45\x8f\x2e\x8f\x4a\x26\x13\x08\x3a\x3d\x7c\xcf\x69\ +\x44\x46\xdd\xed\x73\x0e\x3d\x00\x5a\xa0\x00\x10\xa1\x12\x00\xec\ +\x38\xa5\x34\x23\x80\xf8\x02\x06\x35\xb2\xd7\x39\x46\xe4\x61\xf0\ +\xfa\xb0\x53\x1a\x7c\x4e\xde\xec\xd2\xa5\xf4\x7e\x99\x07\xa1\x0e\ +\xec\x86\x5e\xd6\xd3\xd7\x11\x07\x82\xfc\x02\x3e\x0f\xcf\x04\xc2\ +\x0d\x04\xca\x8b\x9b\x82\x10\x25\x01\x88\x4d\x75\x89\x43\x18\x08\ +\x16\x38\x8a\x2b\xf0\xd0\xcd\x90\x81\x56\x7c\x41\xb7\xf4\xc9\x53\ +\xe7\xf6\x79\xf3\xa5\xeb\xb6\x00\x03\x81\x30\x64\xa6\x79\x9b\xa6\ +\x48\x6f\x8f\x5b\x18\xf8\x04\x51\xd5\x43\x14\x84\x48\xd5\x31\xb6\ +\xd3\x42\x94\x17\x80\xdc\x9d\x2f\xf3\x1e\xb0\x65\xb0\xa8\xa6\xfb\ +\x25\xd8\x0b\x20\x1c\x06\x90\x08\x00\x82\x1e\x00\x25\x28\x00\x64\ +\xb0\xb3\x00\xe8\xaf\x3a\x67\x4e\x67\xbc\x5f\xf9\x06\x32\x49\x1e\ +\xa2\x07\x96\xc8\xdb\xe0\x46\x2c\x38\xd4\xbb\x46\x5b\xc0\x74\x1e\ +\xa8\x12\x09\x26\xf3\x85\xf6\xc0\xcb\x86\x18\x42\xf5\xf6\xbc\x75\ +\x87\x01\x44\xe2\x20\x6a\x48\xfd\x8e\x01\x30\x22\x61\x02\xd5\x15\ +\xb6\x38\x03\xe5\xec\x04\x95\x30\x50\xac\x65\xc0\x2e\x70\x54\x54\ +\x52\x52\xdb\xb1\x5d\xbb\xb4\x1f\x8c\x91\x04\xc0\xdb\x8c\x04\xbf\ +\xe3\x0b\xb2\x06\x4e\xcd\xef\xd9\xfb\xd9\x5d\xdb\xb6\xfe\x58\x03\ 
+\x36\x4c\x33\xab\x0d\x73\xbc\xe7\xc0\x43\xce\x36\x4d\x61\xe0\x9b\ +\x07\x80\x9e\x8f\x2e\x32\xee\x71\x8a\x03\xa9\x88\x91\x8c\xe7\xc7\ +\x23\x10\x1c\x43\x0c\x56\xd9\x4c\x72\x47\x1a\xbf\xbd\x00\x82\xb4\ +\x84\x53\x1f\xc4\x09\x0a\x00\x01\xc4\xf2\x00\xd0\x17\x91\xae\x69\ +\xb0\xae\x68\x55\xec\x80\x09\xf2\x5f\x40\x66\x74\xbd\xbc\xba\x94\ +\x77\x43\xe8\x8a\x85\x78\x17\xf3\xd0\xad\x93\xac\x5c\x59\xda\x78\ +\xc5\x41\x34\xe6\x20\xe0\x77\x10\xa0\xd9\x40\xa0\xa6\xd2\x1e\x84\ +\xa8\x9a\x9d\xa0\x12\x06\xa2\xb5\x0c\x04\xc2\x60\xc7\x8e\x1d\x35\ +\x4b\xbf\xdb\x55\x3c\x65\x6c\xb8\x2b\x00\xd8\xe2\x0b\x68\x8f\x81\ +\x5f\x81\x87\x7a\x43\x06\x69\xc6\xd4\x6b\x5e\x19\xf4\xe0\xbd\x43\ +\x7f\xb4\xbd\x80\x0a\xc2\x60\x09\x00\xdb\xf1\x1e\x03\x0f\x63\xc7\ +\xb8\xf5\x18\xf8\x24\x00\x8c\x30\x73\xd9\x5a\x8f\x06\x59\x70\x9f\ +\x4e\x74\xbf\xe6\x50\x80\xc3\xc0\xc6\x23\x10\x98\xba\xb9\xf6\x22\ +\x58\xe5\xb3\xc4\xb3\x78\x90\x28\xad\x6c\x3b\x12\x03\x05\x80\x08\ +\x02\x8d\x86\x51\x15\xd8\x47\xe3\x66\x12\xb1\xce\x3a\x01\x32\x01\ +\x21\xbc\x19\x04\x4f\x2f\x37\x82\x81\x57\x37\xaf\x02\x40\x27\x9f\ +\xd8\x2e\xe2\x8c\xbe\xb6\x0d\x19\x30\x71\x08\xba\xde\x03\x17\xe2\ +\x20\x00\xe9\x3e\xc7\x00\x84\x9b\x5e\x2a\xc5\x13\x22\xa2\xd9\x09\ +\x2a\x61\xc0\x1b\x4a\x10\x08\x03\xa3\xbe\x8e\x5c\xfb\xd0\xec\x4d\ +\x63\xfa\x1e\x9a\xd7\x39\x2f\x2f\xcd\x76\x0c\x50\x1e\x03\x4a\x18\ +\xf8\x15\x78\xc8\xf3\x16\x00\x44\xbd\x00\xe9\x83\xdb\x5e\x30\xf9\ +\xfe\x6e\x6f\xbd\x39\x73\xa7\xb5\x9d\x98\x11\x33\xb7\xad\xbf\x81\ +\x87\xb1\x7a\xba\x10\x06\x55\x3e\x59\x0f\xc7\x52\xc0\x00\xca\x20\ +\xc4\xb8\xc5\x81\x6a\x9f\xcc\xc8\xc7\xbb\x2e\x80\xce\xc2\x40\x84\ +\x93\x46\xe1\x4d\x88\xd5\xd5\x8f\x69\x82\x48\x0c\x14\x00\x32\x74\ +\x96\x02\x06\xd9\xdd\xc3\xc1\xc5\x54\xc1\xf5\x19\xfd\x2b\x07\x54\ +\x6f\xc8\xe2\xe6\xe1\x25\x28\xc6\x4d\x7a\x1d\xb8\x1e\x05\x45\xb9\ +\x92\xfd\xeb\xda\xf6\xa9\x18\x54\xb6\x25\xdb\x73\xf9\xc2\x9f\x42\ +\x5f\x1c\x18\xc0\xbc\xb4\x28\x4e\x52\x22\xf5\x06\xd4\xd2\x6e\xfc\ +\xa0\xfd\x2f\x80\x37\x61\x20\x1a\x4a\xe0\x08\x83\x40\x6d\x35\xa9\ +\xac\xa9\x89\xdc\xf6\xff\xfe\xb5\xe1\xd5\x9b\x6e\x18\x0a\x00\xe2\ +\xf7\x25\xf8\x14\x78\xa8\x1b\x5f\x70\xda\xf8\x5b\xfb\x7c\xfe\xd9\ +\x4b\x7b\x76\xef\xde\x1a\x02\x00\x20\xa4\xf1\xe5\x49\xcd\xf5\x7e\ +\x04\x00\x7e\x8c\xc1\x6e\x33\xe2\x8b\x02\x88\x0d\x01\x88\xae\x4d\ +\x5d\xc3\xec\x56\x1c\xb8\xe8\xe9\xcb\x8c\xb8\xca\x83\xe0\xd8\xaf\ +\x3b\xce\xaf\xb3\x30\x90\xae\x14\x17\x19\x7a\x14\x00\x4a\x50\x00\ +\x88\x20\x00\x90\x22\x1a\x7b\x77\x69\xf4\x01\x9a\x2e\x46\xad\xa0\ +\xc1\xc6\x3c\xbf\xdf\x53\x57\x1a\x38\xa4\x1f\xe9\x5b\xbd\x21\xd3\ +\x96\x8f\x97\xd9\x02\xf1\xb8\xff\x41\x9c\xad\x12\xb6\x5c\x81\x00\ +\x28\x69\x9b\x1f\x7a\x61\xe5\xae\x82\xc7\x7a\x9b\x83\xa4\x75\xa1\ +\x8f\x37\x38\xf9\x8b\xca\xd6\x15\x07\x6e\x87\x55\x14\x04\xcc\x30\ +\x40\x6d\x6d\xd3\x06\x45\x10\xa2\x2b\x61\xc0\x1b\x4a\xe0\x08\x83\ +\x40\xa8\x8e\x00\x00\xbc\xf6\xc9\xa7\xfb\x2f\x3f\xe1\xb8\xbd\xa7\ +\xf6\xef\xd7\xb1\x31\x2f\xca\x63\xe0\x53\xe0\xa1\xfb\xf8\x82\xac\ +\xc0\x1f\x6e\x7d\x65\xe0\xdd\x77\x9c\xb8\xaa\x71\x6b\xc4\xf1\xfa\ +\x64\x2f\x81\x87\x74\xf9\xba\xc2\x00\xa0\x29\xc6\x80\x00\xa5\x10\ +\xe2\xc0\x9a\x05\x20\xea\x85\x6b\x8f\xdf\xcb\x0c\xb6\x86\x38\x70\ +\xf4\x9e\xdd\xcc\xfd\x57\xf4\xf2\x55\x22\x9f\x3b\x94\xa0\x93\x86\ +\x53\x16\xd7\xa3\xc0\xab\x03\x60\x10\xa0\x0e\x28\x00\x44\x10\x68\ +\xf2\x00\xb0\x57\x9c\xce\xf4\x40\x0b\x2b\xa9\xca\xb0\xc4\x8c\x90\ +\xfd\xce\xbc\xf0\x5f\xdf\xff\xfa\xfd\x15\x87\x1c\x13\xa8\xaf\x22\ +\xf2\x15\x06\x15\xc6\xdf\x4d\xf4\xbf\x4e\xcf\xde\x2a\x52\x37\x4f\ +\x09\xa6\x69\xc0\xb4\xaf\x76\xad\x6b\x97\x1e\x4c\x81\x00\x73\x87\ 
+\xbb\x71\x41\xba\x29\x5b\x20\x0e\x88\xce\x9a\x0f\x2e\x30\xcc\x06\ +\x02\xd5\xc5\x4d\x06\x5c\x11\x84\x98\x08\x61\x60\x50\xef\x7e\xb8\ +\xec\xa9\x67\xd6\xaf\xfb\xeb\xcc\xf6\xed\x52\xa2\x16\x9f\x37\x94\ +\xe0\x65\x46\x02\x67\x18\x41\x37\xbe\x60\xc4\xf0\x13\xf2\xce\x39\ +\xe7\xfa\x4e\xef\xbe\xfb\xec\x1e\x02\x61\xd3\x7a\x79\x52\x22\x5e\ +\x9c\x64\xcb\x4b\x26\x0c\x7c\x32\x1e\x59\xa9\x4d\x5e\x05\x00\x8d\ +\x20\x44\x00\x77\x6b\x06\x50\xe9\xb9\xfb\x34\xdd\xfb\xf1\x08\x04\ +\x55\xac\x80\xa3\x1e\x00\x7c\x2f\xa6\xee\xf3\x44\xe4\x19\xd0\x18\ +\x46\x40\xec\xa0\x00\x90\x11\xbb\xa0\x5c\x3e\x0c\xdc\x04\x0e\x5a\ +\x77\x0e\x47\x54\x10\x03\xc8\x4f\x5b\x76\xd5\xbc\x57\x7e\x44\xd1\ +\xb9\x19\x1b\x0f\x11\x1a\xff\x88\xc2\xf8\xbb\xe9\xd5\xba\x11\x0b\ +\x8e\xde\x3d\xc7\xa3\xa0\x11\x03\xf0\x73\x5a\x9f\xb2\x97\x3f\xff\ +\xef\xfe\x5b\xc6\x1f\xd3\x99\x3b\xec\x42\x97\x23\x1d\x62\x30\x25\ +\xe2\x81\xb3\x8e\x01\x0f\x5f\x23\x00\x00\x02\x66\x98\x40\x5d\xa5\ +\x7a\x3d\x02\x7a\xe1\x23\x9f\x85\x81\xd1\x40\x62\xad\x2a\x2a\x29\ +\x6d\xf8\xf3\xe2\xff\x6d\x7e\x7c\xdc\xe8\x81\x00\x20\x5d\x16\x19\ +\x00\xbc\xcd\x48\xf0\x10\x5f\xf0\x7f\xd7\x3f\xd8\x77\xe9\xd2\xf9\ +\xfb\x09\x44\x20\xab\xc9\xdf\xe5\xff\xc2\x46\xa2\xb4\x60\x4f\x4b\ +\xc0\x9f\x21\x80\xac\x34\x80\x54\x3a\xe6\x80\x27\x06\x98\xff\x5d\ +\x0f\x11\xc8\x3c\x07\xd4\x71\x5c\x43\xed\x83\x40\xe0\x96\xe9\xe5\ +\x45\x42\xbc\x34\x1c\xa1\x20\xf4\x16\xb0\x75\x32\x31\x0c\x50\x05\ +\x0a\x00\x01\xb1\x59\x00\x3a\xb0\x86\x50\x27\x70\x50\x27\x60\x30\ +\x9a\x64\xf2\xdf\x3e\x58\xbf\xef\xa1\xd1\x1d\xb3\xab\x0a\x9c\xbf\ +\x97\xea\xa5\x42\x5e\x44\x03\xb7\x22\x9c\xe0\x3c\xb6\x9d\x5a\x22\ +\xc1\xfe\x35\xd4\xa6\xad\x79\xfe\xf3\xcb\x56\x03\x44\xcf\x79\x8a\ +\xe9\xbd\x27\xcf\xe6\x2f\x13\x1f\x22\x71\xe0\xf3\x10\x00\x18\xd0\ +\xf8\xfb\xd0\xb1\x06\xb4\x35\x6b\x06\x61\x60\x90\x34\x5b\x6b\x9f\ +\x58\xfc\x61\xd1\xc5\x23\x0e\xeb\x72\x54\x76\x7a\x7b\xe1\xb2\xc8\ +\x5e\x66\x24\xf0\xbc\x05\x00\x7a\xf1\x05\x79\x10\xbc\xef\xbe\x17\ +\xfa\x2e\x5c\x38\x77\x57\x6e\x7b\x79\x7c\x01\x40\xe2\x85\x81\x5f\ +\xdd\xc7\xac\x34\x13\x32\xd3\x08\x37\xce\x00\x40\x3d\x3b\xc1\xf6\ +\xbf\x57\x43\xef\x35\x20\xd0\x85\xf7\xcd\x21\x10\x14\xe9\x01\xc0\ +\x7b\xbc\x80\x28\x1d\x7b\x7e\xd0\xfc\x2b\x41\x01\x20\x42\x26\x00\ +\xd8\xe1\x41\x57\x46\x43\xc3\xf0\x33\x49\xeb\x1b\x22\x70\xd7\xb2\ +\x8a\x0d\x7f\x1f\x09\x87\xd9\xc6\xb5\x74\x96\x16\xe6\x21\x7b\xa9\ +\x10\xaf\x2d\xf1\x3c\x0b\x15\x0f\x91\x57\xf6\xb4\xdd\xb1\x65\x77\ +\x49\x7d\x2c\xad\xc1\x11\x00\x5e\x7a\xf2\x16\x22\xef\x81\xa0\x5e\ +\x84\xf8\xeb\x03\x20\x86\x19\x3d\xd7\x54\xac\x01\xbd\x00\x92\x57\ +\x61\xe0\x62\x28\xc1\xe0\xdc\xe5\x53\x9e\x7f\x75\xcd\xaa\x1b\x2f\ +\x1d\x99\x6e\x46\x0c\xae\x17\x21\xde\x19\x09\x1e\x86\x11\xa6\x8d\ +\x68\x7b\x48\x78\xdb\xc0\xea\x63\x42\x5f\xf9\x14\x5f\x60\x4f\xab\ +\x5a\xf1\x90\x16\x06\xc4\xa7\x20\xc0\xcc\x36\x00\x60\xf2\xe3\x0c\ +\x00\xa8\x00\x44\xd0\x9b\x9d\xe0\xd8\xa7\xe3\x39\x00\xd0\x5a\x54\ +\x48\x75\x9c\xca\x83\xa0\x5c\x3b\x00\xd4\x22\x81\x5b\x06\x47\x28\ +\x68\xc5\x0c\xe0\x10\x80\x12\x14\x00\x32\x44\xe3\xc1\x6e\xcf\x9a\ +\x74\x9e\x0d\x07\xeb\xa6\xa6\xca\x7f\x66\xd1\x8a\x3d\x37\x8f\x3e\ +\xbb\xfb\xc0\xfa\x35\xd1\x28\x79\x0d\xe3\xcf\x75\xe5\x2b\x86\x0b\ +\x78\xe9\x79\x62\x41\x47\x28\x28\x86\x1f\xf6\x65\xf5\xaa\xbf\xee\ +\x91\x0f\x9b\x56\x83\xb3\x44\x17\x5b\x9c\xd7\x5e\xbe\x0a\x9e\x38\ +\xf0\x79\x08\xc0\x16\x4b\xc2\xbd\x0c\x12\x2f\x0c\x02\x6d\x02\x8e\ +\x56\x6d\xde\xbd\xb7\xfe\xc9\x55\x1b\xb7\xde\xd9\xb7\xf3\xa1\xb6\ +\xe3\xe9\x57\x31\x7b\x09\x3c\x14\xad\x71\x20\x13\x06\x51\xcb\x6d\ +\x00\xc0\xef\x4f\x3f\xb5\xb7\xe3\xe5\x49\x1e\xe2\x0b\xe2\x17\x06\ 
+\x7e\x79\x00\xa8\xbc\x79\x31\x07\xd4\x22\x47\x3a\xb3\x13\x00\xe2\ +\x17\x07\xae\xbc\x00\x2e\x04\x02\x77\xbf\xce\xfd\xa9\xca\x53\xf7\ +\x38\x36\x1d\x06\x01\x2a\x41\x01\x20\xc2\xcd\x10\x00\x8b\xd6\xab\ +\xb0\x18\xe8\xe7\x8d\xf5\xab\x30\x37\xc2\xf9\x4f\x2f\xff\xf5\xe7\ +\x5b\x0e\x19\x99\x12\xaa\x24\xc2\x3c\xa5\x4b\x14\x2b\x8c\xbf\x9b\ +\xb1\x7f\x47\x13\x39\x42\x41\x22\x12\x4c\xc3\x80\x9b\x3f\xd8\xb1\ +\x36\xc2\x1a\x73\x83\x53\xa8\xcd\x70\x32\x6d\xd0\x9c\x65\xc0\xdd\ +\xc7\x23\x91\x02\x80\x46\x39\x43\xc1\x07\x61\x10\xfd\x6b\x04\xf9\ +\x6b\x1b\xdc\xb5\x60\xd1\x8e\xf3\xa7\x5f\xdd\xb9\x5f\x43\x65\x96\ +\x52\x58\xf0\xf6\xd3\xdb\x78\x86\x5f\x37\xbe\x80\x1a\x46\x48\x07\ +\x30\x62\x69\x7d\x8a\x2f\x00\x90\xaf\x5f\x00\xc0\x13\x06\x3e\xc5\ +\x00\xa4\x3b\xeb\x00\xa0\x9e\x99\x00\x60\xf7\x1a\x24\x54\x1c\xc4\ +\xe3\x05\x50\x4d\x0b\x54\x1d\xcf\x29\x4f\x37\xd2\x9f\xeb\x51\xa0\ +\xeb\x83\xf6\x5f\x09\x0a\x00\x11\x2a\x01\x20\x75\x43\x7b\x88\x1d\ +\xe0\x18\x5f\xc2\x14\xb1\x66\xdb\x9e\xba\x85\x45\x47\xec\x9a\xdc\ +\x69\x43\x57\x61\x7e\xa2\x5f\xd4\xd3\xaa\x82\x82\x63\x74\x02\x70\ +\xac\xfa\xb0\x75\x88\x9e\xd3\x1f\x02\xfd\x4a\xe7\x7d\xf1\x6e\x89\ +\x2d\x5b\x9e\xb1\x54\x05\x31\xaa\xb4\x96\x6e\x00\x61\xa2\x3c\x00\ +\x01\xa0\x5e\x2b\xad\x91\xde\x8d\x30\xb0\xed\xe7\x08\x83\xe8\x30\ +\x40\xa0\x4d\x3b\x61\xab\x2e\x7a\x6b\xc9\x9a\x6f\xce\x3e\xea\xe8\ +\xd4\xda\x4a\x12\x1b\x4a\x50\xbd\x4c\x89\xe7\x2d\xa0\xb7\xfb\x35\ +\x8c\x10\x6f\x7c\x41\x1c\xc2\x80\xf8\xe4\x01\xc8\xc9\x02\x33\x14\ +\x8e\xfe\x52\x82\x69\x8c\x74\x3d\x1c\xdb\x35\xc4\x41\x2a\x73\xcf\ +\xeb\x88\x03\x47\x7e\xec\x3e\xd1\xd0\x02\x80\x7b\x2f\x80\xea\x78\ +\x9d\x3c\x44\xdb\x64\x9e\x01\x1c\x02\x50\x82\x02\x40\x86\x74\x6c\ +\xdf\x83\xbc\x74\x1b\x2c\xc8\x29\xff\xd2\xc7\xff\xb7\x61\xfc\xd3\ +\x27\x77\x6a\x57\xf3\x5b\xd3\x6f\xa7\x5a\x5c\x48\xe5\xf6\xe7\x5d\ +\x05\xae\x56\x14\x74\x17\x24\x58\xdf\x26\xc7\x3c\xf7\xe1\xa5\xab\ +\x1d\x79\xf3\x44\x57\xbc\x53\x7b\x64\x1e\x00\x9e\x38\xf0\x3d\x08\ +\xd0\xa4\xda\x44\xc5\x2f\x04\xa8\xf2\x13\x2c\x0c\x08\x61\x83\x56\ +\x9a\xf8\xe1\xb7\x1d\xd5\xaf\x97\x0c\x2f\xb8\x22\x58\xd9\xdd\x53\ +\xe0\x21\x4f\x00\xb4\xc4\x30\x82\x8f\x6f\x54\x6c\xc4\x9f\xee\x63\ +\x56\x3a\x40\x88\xee\x79\x8b\x66\x20\x28\x66\x26\x70\xf7\xc5\x23\ +\x0e\xc0\x1e\x73\x00\xa0\xef\x3d\x70\x7c\x57\x79\x10\x00\xfc\x79\ +\x9b\xa0\x6e\x30\xa0\x75\x1b\xe0\x10\x80\x12\x14\x00\x22\xe2\x19\ +\x02\x00\xe0\x1b\x2a\xdd\x65\x85\x25\xde\x85\x86\xb0\x09\xb7\xbe\ +\x5b\xb2\xee\x85\xf1\x64\x08\x89\x44\xe4\xf9\xaa\x66\x1a\xb8\x8d\ +\x13\x10\x8a\x05\x61\x75\xb9\x3c\xbf\x39\x7b\xfb\xce\xfd\xe5\x21\ +\xc7\x0e\xde\x39\x57\xbd\x2b\x40\x26\x10\xdc\x78\x0f\x12\xe5\x01\ +\x30\xc0\xee\xd5\xe0\xc6\x2f\x24\x50\x18\x68\x4c\x03\xbd\xfa\xf5\ +\xf7\x36\x9f\x71\xf3\x84\x4e\x9d\xcb\xf6\x34\x75\xad\xbd\x04\x1e\ +\xb6\xe4\x30\x02\x80\x2f\x2f\x4e\xb2\x20\xc4\xa7\x20\xc0\x0c\x00\ +\x83\x50\x86\x5c\xe4\x05\x70\xbb\x1d\xc4\xdb\x1d\xe9\xdc\x0e\x2b\ +\x00\xb8\xf2\x1e\x70\xf7\xb3\x9a\xd3\xad\x17\x81\x97\xc6\xcd\x94\ +\x40\xd5\x22\x61\x08\x00\xa0\x00\x10\x42\x88\x29\x16\x00\x3a\x17\ +\x96\xa7\x77\x08\xc4\x36\x34\xfe\x11\x3c\xb8\xff\xf5\xc1\xca\x7d\ +\xd3\xce\x38\xab\x62\x48\x60\x95\x78\xd9\x5c\x9d\x60\x3f\xb7\xc6\ +\x5f\x37\x3d\x4f\x28\x44\xd3\xed\x4d\xef\x5b\x77\xcb\x3f\xff\xb7\ +\x55\x54\x6d\xc7\x39\x77\xe3\xe2\x67\xd3\x7b\xf0\x1e\xf8\xbd\x10\ +\x10\x21\xa6\x78\x49\x69\xe5\x0c\x05\x4a\x18\xb8\x8d\x65\x88\xad\ +\x3c\x29\x29\xdf\x4a\x6a\x02\x5c\xf2\xf1\xaa\x5f\xff\x77\x5c\xe7\ +\x23\x03\x96\x91\xd5\x8c\x2f\x00\x00\xbe\x30\xe0\x0d\x23\xe8\x7a\ +\x0b\xe8\x63\x12\x31\x8c\x00\xa0\x14\x06\x99\x66\x15\xf8\x41\x76\ 
+\xa6\x09\x06\x51\x4f\x57\x04\xf0\xe0\x1d\x60\xee\xb1\x78\xc5\x01\ +\x1d\x90\x08\xe0\xdd\x7b\xc0\x2d\x53\x25\x10\x74\x17\x06\xd2\xf5\ +\x02\x44\x27\xde\x20\x72\x50\x00\xc8\x10\x19\x03\x37\xc6\x9d\xc5\ +\xcd\x4b\xad\x25\xc6\x68\xc2\xec\xaf\x56\xad\xbf\x3f\xff\xb8\x94\ +\x50\x59\x53\x86\xaa\xd9\x06\x5e\x87\x0a\xdc\x1a\x7f\x87\xeb\xbf\ +\x31\x9d\x49\x52\xe0\xda\x79\xbf\xad\x15\xf6\xad\x58\x0f\x80\x63\ +\xb8\x81\x13\x00\x48\x97\xa5\x0a\x10\x64\x4f\x0f\x6f\xca\x50\x22\ +\xd6\x01\xa0\x97\x94\x76\xdb\xab\xa7\x87\x26\x4c\x66\x1b\x80\x9e\ +\x30\xd0\x78\xa7\xc5\x27\x6b\xb7\x54\x2c\x3a\x61\x48\xd1\x84\x70\ +\x41\x17\x67\xbe\x9c\xf8\x82\x78\xa7\x29\x7a\xf1\x16\x24\x7a\x18\ +\x01\x80\x9e\x91\xe0\x8b\x2f\x28\x33\x1d\xc0\x30\xdc\xaf\x63\x60\ +\xfb\xee\xc5\x3b\xe0\xb7\x38\x00\xb9\xf7\x00\x40\x2e\x10\x00\x7c\ +\x10\x09\x00\xfa\xb3\x01\xe8\xfb\x05\x11\x82\x02\x40\x84\xd7\x21\ +\x00\xe5\x1c\x75\x17\x01\x82\x92\xac\xb6\x14\x96\xd4\xff\x7b\xf3\ +\x51\x3b\xa7\xf6\x2e\xcd\xd7\x72\xf5\x03\x78\x1b\x2a\xf0\xc9\xf8\ +\x03\x00\x7c\x5b\x3f\xa0\xe4\x9d\xaf\xdf\x2b\x15\xd4\xc2\x79\xce\ +\x3d\x4c\xa6\x70\xe4\xc7\xa9\x47\xac\xae\xbc\xb4\xfe\x0f\x01\x10\ +\xf5\x10\x00\xb8\x14\x06\x1a\x9e\x01\x99\x27\x44\xc0\xa4\x7f\x2d\ +\x5a\x57\xf4\x87\x33\xf2\x72\x4b\x0b\x53\x6d\xf9\x71\x87\x19\xea\ +\x9a\xea\x90\x08\x6f\x81\xe8\xf8\x66\x1a\x46\x30\xfc\x8a\x01\xc8\ +\x6c\x14\x00\x00\xea\xe9\x8a\x00\xea\x29\x8b\x8e\xff\x05\xde\x01\ +\x3f\xc4\x81\x63\x9f\x44\x2c\x00\xb8\x13\x08\x00\x6a\x2f\x02\xb7\ +\x0c\x5e\x1a\x5d\xa1\x80\x38\x40\x01\x20\x22\x36\x25\xcd\x2d\x3e\ +\x04\x07\x02\x34\x1a\x52\x45\xf9\x57\x3e\xf2\xd1\xa6\xb3\x5f\x1a\ +\xd3\x39\xa7\x6e\x53\x2a\x37\x41\xbc\x86\x9f\x77\xac\xe8\x18\xc5\ +\x34\xc2\xba\xb4\x8e\x91\x09\x7f\xfe\xe4\x57\x41\x4d\x00\x80\x19\ +\x76\xd1\xe9\xdd\xa7\x48\xf6\xab\x5e\x7f\x2c\x10\x07\xec\xcc\x8b\ +\xb8\x61\x45\x0d\x7d\xda\x74\x16\x29\x52\x5d\x4e\x3a\x79\x68\x5e\ +\xc7\xa1\x70\x04\xae\xfb\x62\xeb\x9a\xf9\x47\x64\x1c\x6e\x44\x1a\ +\x9a\xf2\x16\xd5\x87\xbb\xbf\x95\x78\x0b\xe8\xed\x1e\x86\x11\xfc\ +\xf2\x00\x1c\xdd\xf0\x35\x04\x48\xf4\x07\xd0\x08\x3e\xac\x17\x8c\ +\x4c\xe8\x08\x03\xf6\xbb\xa7\xa1\x03\xc9\x3e\x47\xfe\x2a\x41\x20\ +\x11\x08\x00\xf2\x21\x06\x0b\x2f\x22\xc1\x4a\x67\x84\x31\x08\x40\ +\x05\x0a\x00\x19\xf1\x04\x01\xd2\xa8\xae\x43\x81\x81\x56\x19\xa3\ +\x88\x09\x70\xf3\xeb\x7b\xd7\xbe\x36\x25\x30\x8c\x98\x0d\xce\xb2\ +\xfc\x34\xfc\xb1\xe3\x34\xbd\x04\x4c\x19\xcf\xac\xcc\xdc\xb6\xb7\ +\xac\x5a\xfe\x8a\x35\xcb\x58\xea\x2c\x33\xec\x76\xaa\x11\xbd\x5f\ +\x26\x0e\x7c\x16\x00\xd2\x18\x00\x95\x61\x65\xb7\x7b\x14\x06\x44\ +\xeb\xb5\xd6\x8d\x2c\xfc\x7e\x4d\xc9\xd2\xe3\x26\xee\x1b\x5b\xb7\ +\xa5\x83\x2a\x5f\x61\xd0\xa1\x63\xff\x01\xe4\x2d\x88\x0e\x23\xf8\ +\x76\x19\x54\x56\x34\xb9\x00\x78\x6b\x18\x00\xc4\x25\x0c\x00\xdc\ +\x7b\x0d\x64\xde\x01\xe9\x3e\x90\x78\x1c\xd8\x32\x78\xfb\x5d\x0a\ +\x04\x00\x6f\x22\x01\x20\x2a\x00\xaa\xfd\x09\xe4\x3c\x98\x41\x01\ +\x20\x82\x80\x74\x0c\xde\x15\x5e\x03\x02\x35\x9e\x42\xff\x5e\xf2\ +\x6b\xf1\xf4\x89\xbf\x2b\x1b\x9e\xb6\xb2\x9d\xb4\xac\x78\xe2\x03\ +\x64\xbd\x7e\xde\x31\x8c\x50\x28\x0a\x0e\xac\xfd\xd3\xb3\x4b\xb6\ +\x09\x6a\xd6\x84\x75\xce\x15\xf9\x69\x79\x07\x64\xfb\x65\xe2\x20\ +\x11\xb3\x00\xac\xeb\x88\x7e\xff\x82\x2c\x3a\x5f\x64\x58\x63\x3f\ +\x03\x69\xca\xd3\xa7\x18\x00\x9a\x73\x9e\x5b\xb4\x66\xd7\x1f\xc7\ +\x1c\x9f\x5d\x56\x94\x22\xcd\x97\xe6\x40\xf1\x16\xa8\x82\x0e\x01\ +\x00\x42\xf5\xbe\x0d\x01\x90\xf2\x92\xa6\xb8\x04\xc5\x54\x45\x00\ +\xb1\x30\xa0\x57\x3e\x04\xd0\xf7\x1a\x14\x14\xfc\x5c\xbe\x73\xe7\ +\xba\xca\x48\xc4\x7e\xb1\x45\xa8\x57\x60\x47\x4c\x7b\x57\x23\x42\ 
+\xad\xae\xe3\xdc\x47\x1d\xc7\xe6\x29\xc9\x87\xb7\xdf\x60\x86\x4b\ +\x83\x8e\x55\x7d\x00\x52\x99\x6e\x50\x84\x73\x83\x86\x03\x86\x63\ +\xa3\x69\x10\x58\xb7\x75\x69\xa9\x23\x31\x62\x03\x05\x80\x08\x02\ +\xf6\xe0\x2d\x3f\x89\xc8\xac\x0c\x55\xa6\xa6\xeb\xf6\xec\x99\x5f\ +\xfc\xba\xe5\x6f\x3d\x8f\x4f\x0d\xed\xb7\x67\xac\xb3\x04\xb1\x17\ +\xc3\x6f\x1d\xa7\xe1\x25\x88\x18\x69\x30\xf5\xd9\x4d\x6b\xf8\x85\ +\x33\xf0\xe2\x2e\xdc\x1a\x77\x2b\x1f\x5b\x1e\x92\x7a\x3b\xc4\x81\ +\xcf\xbf\x39\x2d\x00\x44\x6e\x79\xa1\x30\xa0\xb6\xc7\xe3\x2d\x70\ +\x39\x94\x55\x55\x17\x8a\x4c\x5b\xbe\x77\xdd\xf3\x83\x61\x08\x01\ +\xd3\x9f\xe1\x09\xde\xa5\xc4\xdd\xaf\x58\xe9\x90\x67\xd4\x01\x7c\ +\xf7\x16\x18\x7e\xbd\x13\xa2\xaa\x12\x20\x8d\xea\xb6\xaa\xd6\x30\ +\x00\x70\x2d\x0e\x64\x5e\x83\x5f\x56\xbe\xbf\xf7\xd9\x67\xef\xde\ +\xe1\xb9\xfe\xc8\x41\x0d\x0a\x00\x19\x7e\xf7\x06\x2d\xfc\x1a\x5a\ +\x88\x52\xb0\xa7\x3c\xf4\xe2\xcf\x1d\x77\x5c\x37\x6c\x5f\x0f\xbb\ +\xb8\xd0\x98\x7e\xe6\xc5\xf0\xf3\x8e\x13\x1c\xf3\x65\xe9\xc0\xfd\ +\x1f\x7d\xbf\xa8\x9c\x5f\x11\x06\x02\xc4\x3e\x5e\xae\x30\xfe\xbc\ +\x2a\xba\xe9\xfd\xf3\xf2\xf7\x7b\x16\x00\x2d\x6a\x54\xc6\x0f\x80\ +\x3f\x33\x41\x95\xbf\x85\x28\x7f\xb6\xab\xa5\xc1\xff\x5b\xfa\xd3\ +\xbe\x6b\x8f\x3e\xb7\xe4\xd8\xda\x4d\x39\x5a\x02\x40\x11\x87\xca\ +\xad\x97\xf6\x10\x02\x34\x09\x03\x3f\x5f\x98\x24\x39\xde\x2f\x0f\ +\x00\xd4\x56\xd9\x8d\x3e\x6f\x46\x02\x80\x72\x56\x42\xe3\xb1\xee\ +\x85\x81\x61\x84\xd1\x0d\x8e\x08\x41\x01\x20\x20\xf6\x6a\xda\x44\ +\xa2\x7a\xc0\xbb\x30\x46\x37\x3c\xf2\xe9\x96\xf3\xe7\x8d\xe9\x92\ +\xd7\xb0\x2e\xc8\x4d\x40\x37\x85\xbb\x46\xbf\xc7\x21\x02\xc9\xf0\ +\x40\x6d\xda\x21\x91\x09\xf7\x7f\xec\x5c\xf1\x4f\x84\xcd\x58\xea\ +\x18\xff\x78\x87\x06\xc0\xb9\xcf\xef\x18\x80\xd8\x3c\x7c\x99\x10\ +\x61\xea\xc1\x4b\xe3\x55\x18\x98\xc4\xb3\xa8\xf9\xdd\xb3\x4b\x7e\ +\xdd\xf1\xfb\x63\x46\xa5\x57\xed\xa3\x7c\xc2\x54\x02\x7a\x5a\x62\ +\x80\xd9\x46\xd7\x81\xdd\xce\xc3\xaf\x21\x04\x00\x7d\x6f\x01\x4f\ +\x14\x44\x8f\xf7\x2b\x08\x10\xaa\x2b\xec\x86\x5e\x35\x23\x01\xc0\ +\x57\x61\xf0\x91\x59\xe8\xa1\xd2\x48\xb2\x80\x02\x40\x84\x9f\x31\ +\x00\x22\x3c\x2c\x05\x2c\x22\x62\x02\x5c\xfb\xec\xae\x35\x0b\x6f\ +\x08\x0c\x27\x66\x83\x9e\x91\xa0\x8d\xbe\xe8\xf5\xc0\xa2\xba\x44\ +\xc4\x86\xbf\x31\x3f\x80\xc7\x96\xb5\xd9\x52\x5a\x51\xab\x1f\x8a\ +\x6b\x9b\x79\xa1\x32\xec\x3a\x69\xdc\xc7\x05\x10\xbf\x3d\x00\xd6\ +\x3a\x00\xc2\x20\x44\xc6\xfd\x1f\x33\xa4\xcc\x76\xda\xa0\xd3\x1e\ +\x05\x2b\x1f\x59\x7c\x81\xc7\x75\x2b\xf6\x57\x54\x87\x67\xae\xaa\ +\xdb\xf0\x48\x3f\x73\xa0\x73\x2f\x53\x9e\x05\xd7\x2b\xa0\x18\xca\ +\x10\x79\x5e\x3c\x0d\x21\x10\x7d\x6f\x81\x64\x08\xc1\xb7\xd9\x20\ +\x75\x55\x04\xc2\xf4\x3c\x3f\x81\xd1\x4f\x90\x30\x48\x89\x84\x13\ +\xe5\xc7\x44\x0e\x02\x50\x00\xc8\xf0\xd9\x55\x6f\xc3\xbd\x57\x56\ +\xc9\x7f\x3e\x5d\x57\xba\xe2\xe2\x33\x4b\x8f\x6e\xf7\x43\x7b\xb9\ +\x6b\x3f\x8a\xea\x2d\x75\x8e\x7d\x12\xc1\x60\x7d\x8d\x3e\xc8\x77\ +\x92\xa1\xb5\xf7\xfe\x73\x49\x81\xa2\xca\x4e\x78\x4b\x01\xab\xc6\ +\xfc\xe3\x35\xfe\xf4\xfe\x44\x04\x01\xd2\x6f\x38\x74\x4c\x5f\xd4\ +\xc8\x23\x4e\x6f\x81\x9b\x59\x00\x2c\x73\xde\xfb\xa6\x68\xea\x8c\ +\x73\xbb\x0e\xa9\xd9\xd0\x16\x4c\x68\xaa\xbb\x4e\x8f\x9e\x67\xb4\ +\x79\x6d\x51\x7a\x0a\x04\x62\xc3\xcb\x10\x42\x04\x00\xc2\xea\x80\ +\x43\xdf\x86\x00\xe8\x29\x88\x29\xc1\x26\xe3\x9e\x1a\x04\x68\xa8\ +\xe7\x6f\x0f\xd5\x45\xff\x4f\x83\x98\x78\x08\xa4\x7a\x12\x06\x86\ +\x29\x0d\x38\x42\x92\x1c\x14\x00\x22\x6c\xbd\xd1\x44\xa0\x7e\xc0\ +\x10\x0f\x63\xb7\xe7\xdc\xf9\xf9\xaf\xdb\x5f\xec\x35\x2a\x35\xb4\ +\x87\x68\xbd\x96\x58\xf5\x82\x22\x3a\x0f\x4d\xc1\x10\x31\xd2\xe1\ 
+\xe2\xd9\xeb\xa5\x73\xfe\xb9\xd0\x43\x00\xbc\x21\x07\xd1\x62\x3e\ +\x7e\x19\x7f\xab\x0e\x7e\x62\x80\x78\x71\x23\xb6\x97\x9f\xc2\xd9\ +\xee\x87\xb7\x20\x0e\x01\x00\x00\xf0\xbb\x67\x97\xae\xda\x78\xe3\ +\x61\xc7\x05\xab\x4b\x9a\xee\x08\x55\x4f\x5f\x44\xec\x3d\x07\x8a\ +\x99\x0c\xb6\xf6\x29\x2a\x48\xb7\xd9\xa7\x21\x04\xdf\x82\x00\x1b\ +\xea\xed\x86\x9e\x2a\x23\x86\x64\x36\x42\x0c\xda\xe0\xbb\x10\x06\ +\x81\x88\x7c\xe6\x2d\x92\xdc\xa0\x00\x90\x41\x3f\x38\xfd\x10\x03\ +\x6e\xd7\xa5\xf0\xf0\x08\x2a\xda\x57\xd9\x30\xf7\xf3\x0e\xbf\xdd\ +\x32\x6a\x4f\x6f\x4f\x06\x1f\x40\x3d\x7b\x40\x14\x44\x18\x3d\xee\ +\xb3\xa2\x41\x7b\x3f\xff\xf1\xfd\x4a\xbd\x1a\x53\x10\x68\x3a\xe7\ +\x8e\x28\x79\xa6\x2e\x89\x1a\x16\xf0\x5b\x00\xb0\xa2\x86\x67\xb4\ +\xad\x74\xb1\x7a\x80\x7a\xbb\xac\x3c\x36\x7d\x9c\xd7\xee\xf6\xbd\ +\xa5\xa1\x47\x37\xa4\x6c\xbd\xbb\xa7\x79\x68\x63\xbe\x22\xc1\xa1\ +\xa8\x87\x68\xfd\x05\x5d\x51\x40\x6f\x77\x2b\x36\x74\x03\x31\xa3\ +\x43\x08\xbe\x5d\x06\x75\x55\x4d\x85\xaa\x82\x16\x65\x69\xdc\x0a\ +\x83\x68\xba\x40\x04\x3d\x00\x88\x18\x14\x00\x22\xe8\x07\xb7\x5f\ +\xb8\x1d\x8b\xf5\x78\xeb\xfe\x61\xce\xd2\x6d\x53\x4e\x1a\xd3\xb5\ +\x93\xf9\x6b\x9a\x63\xa7\x32\x00\x10\x40\x38\xae\x6f\x21\x09\x04\ +\xac\x49\xed\x1e\x39\xf7\xce\x8f\xd6\xea\xd7\xb6\x09\xc2\x9e\x73\ +\xab\x5e\xbc\x95\xf4\x44\xd3\xe8\xe8\xfa\xda\x8c\x2d\x75\xb0\x64\ +\x3f\x21\xfe\x8e\xcd\xc4\x56\x37\xd4\x09\x40\x8c\xd5\x45\xa3\x97\ +\xef\xc6\x5b\xe0\x43\x8b\xee\x79\xe3\xf3\x1d\x93\xff\x7a\x4e\x97\ +\x43\xab\xd6\x67\xda\x76\x88\x7a\xef\xb4\x41\x95\x19\x75\x95\x28\ +\xa0\xd3\xaa\x82\x0d\xd9\x73\xa4\x12\x1b\x32\x51\xe0\xd3\x55\x40\ +\xea\x2a\x01\xac\xd7\xd2\x4a\x82\x0e\x63\xf8\x29\x0c\x42\x75\x10\ +\x30\x31\x06\x00\x11\x83\x02\x40\x46\x42\x87\x00\x34\x88\xe3\xd6\ +\xbd\xe2\x91\xed\xab\x17\xdf\x95\x7a\x04\xa1\xdd\x9c\x00\xfc\xd8\ +\x03\xd6\x45\xcc\x1a\x7c\x61\x00\x60\x2c\xd3\xd8\x7f\xb3\xde\x4b\ +\xd9\x54\x59\x1d\xf2\xb6\x06\xa7\x25\x00\xac\x3a\x1a\x1c\xa3\xce\ +\xab\x07\x2b\x10\x54\xe2\xc0\xcd\x22\x41\xf1\x62\xad\x03\x20\x2a\ +\xd3\x8b\xf1\x57\x79\x11\x1c\xa2\xc0\x1f\x21\x7b\xce\x73\xdf\xfe\ +\xb2\xf2\xda\x9e\x23\x53\xeb\xca\x49\xac\x2e\xba\x6b\x16\x78\x16\ +\x05\x8a\x60\x43\x95\x28\x60\xeb\xa0\x1b\x6c\xe8\x17\x96\x00\x48\ +\x09\x36\xc5\x1e\x04\x98\xff\xe9\xf1\x7f\x7a\x3b\x3d\x74\xa0\xfa\ +\xdf\xfa\x6e\x11\x15\x06\x18\x03\x80\xc8\x40\x01\x20\x80\x10\x20\ +\xb6\x59\x00\x89\x16\x03\x1c\x03\x17\x4f\x24\xf2\x07\x5f\x6c\x2e\ +\xff\xe6\xaa\x33\x8a\x8f\xef\xf2\x7d\xae\x7d\x4f\x9c\x71\x00\x00\ +\x42\xc1\xb0\x2d\x34\xbc\xe6\xc1\xe7\x3f\x8a\x6f\xde\x11\x1d\x30\ +\x47\xd7\x8d\xe7\xee\x97\x09\x04\x56\x1c\xe8\x7a\x0e\x7c\x1f\x02\ +\x30\x41\xb8\xb6\x81\x8e\x91\x07\x50\x1b\x36\x47\x5e\xf6\xf4\xc4\ +\x27\x4f\xd6\x9a\xed\x7b\xea\x5e\xd8\x31\x6c\xfb\x0d\x87\x94\xf5\ +\x04\x00\x8d\x19\x08\xad\x41\x14\x68\xd4\x21\x41\x5e\x13\x00\x68\ +\x12\x00\x3a\xbd\x7c\xd5\xba\x05\x00\xf2\x7c\x38\xc2\x20\x60\x7a\ +\xd3\xe2\x48\x72\x80\x02\x40\x04\x81\xc4\xaf\x03\x40\xc3\x13\x18\ +\x71\x8a\x8e\xb3\xa7\x2d\x5d\xbd\xeb\xed\xbe\xa3\x82\x0d\x85\x8d\ +\x39\x89\x9e\x05\x2a\x83\xaf\x31\x24\x10\x31\x32\x61\xf2\xbd\x6b\ +\xdc\x07\xfe\xd1\x68\x05\x01\x32\xf5\x11\xb9\xf3\xbd\x8a\x83\x44\ +\x4c\x03\xe4\xad\x6d\xa0\x6d\xfc\x7d\xf0\x16\xf8\xd8\xa6\x9b\x9f\ +\xff\x78\xeb\x39\x73\xce\xec\xdc\xad\x72\x63\x1b\x6e\xe0\x9d\x17\ +\x51\x20\x0b\x62\x64\x45\x81\x6e\x80\x24\x7b\xbc\x5b\x51\xe0\x17\ +\xd6\xf0\x0f\x6f\x16\x82\xac\x97\xef\xc6\x5b\x00\x20\x14\x06\x9c\ +\x48\x60\x04\x89\x81\x02\x40\x04\x81\x96\x1f\x02\x88\x93\xe2\xd2\ +\x9a\xf0\x13\x8b\xda\xfd\x76\xc7\x99\x3b\xfb\x00\x80\x86\x2b\xdf\ 
+\xc2\x7d\x0c\xc0\x87\x9b\x07\xed\x5e\xfe\xcb\x87\x55\x1e\xab\xda\ +\x88\xdb\x20\x40\xba\x2e\x5e\xc4\x01\x4f\x18\xf8\x3f\x04\x40\x1c\ +\x6b\x1b\x68\x1b\x72\x2f\xc7\x70\xb6\xfb\xd8\xa6\x88\x09\x70\xfe\ +\x0b\xbf\xfc\xf2\xd5\x15\xb9\xc7\x04\x42\xd5\x62\xe3\x4d\x57\x5f\ +\x67\xad\x02\x0b\xa5\x28\x50\xa4\x75\x13\x68\x28\x13\x05\x7e\x9d\ +\x33\xd6\x5b\x66\x13\x1c\x8a\x85\x8c\x00\xf4\xbd\x05\x82\x61\x84\ +\x00\x60\x0c\x00\x22\x06\x05\x80\x0c\x37\x0b\x01\xc5\x2b\x16\x78\ +\xbd\x0e\x1f\xc6\x6e\xef\x7c\xfc\xcb\xed\x53\xc7\x8d\xe9\x7a\x48\ +\xea\xcf\x6d\x94\xf9\x8b\x7a\x3e\x5c\x63\xdc\x44\x75\xa0\x67\x78\ +\xd2\x6d\x1f\xaf\xf7\x5e\xcb\x46\x6c\xaf\x03\xe6\x4d\x3f\xa4\xeb\ +\x67\x33\x2a\x00\xae\xc4\x81\xc8\xbb\x60\x98\x89\x9f\x05\xe0\xa8\ +\x97\xc6\xd8\x7e\xbc\x71\x02\x3e\x07\xb3\x2e\x5f\x57\x50\xfd\xd6\ +\x9e\x21\x3b\xa7\xe4\x6d\xea\xa6\x55\x17\xb6\x1e\x6e\x44\x81\xd6\ +\x74\x47\xea\xaf\x32\x38\x92\xc9\xdb\x4a\xcb\x8a\x02\xdf\x04\x00\ +\xd8\xf3\x8e\x95\x2d\x11\x22\x7e\x78\x0b\xa2\xdf\x03\xe8\x00\x40\ +\x24\xa0\x00\x90\xc1\x3e\x34\x7c\x0a\xa6\xd2\x2a\x0b\xc0\xb7\x87\ +\xd0\x25\xf7\x6f\x5d\xfd\xf1\x23\x6d\x8e\x34\x22\x35\x72\xf7\xa6\ +\x72\x28\x00\xb8\xae\xf5\xfb\x5e\x23\x1b\xab\x6b\x1b\xe2\x77\x9c\ +\xda\xbc\x2e\x1c\xa3\x2f\x8b\x03\xd0\x11\x07\x3a\xc3\x0a\x89\x08\ +\x02\x64\x67\x01\xb8\xe9\xdd\x5b\xf5\x8d\xc7\x23\x90\x00\x4f\xd6\ +\x25\x4f\x7c\xb8\xf1\xd4\x27\x4f\xef\xd8\xa1\x7a\x73\x50\x6f\xad\ +\x02\x68\xda\x46\xf7\xdc\x55\xa2\x20\x86\xa6\xf0\xb1\xb6\xd1\xc6\ +\x55\x57\x14\x24\x22\x06\x24\x60\x36\xc6\x00\xc8\xda\x1d\x2b\x3f\ +\x0e\x51\x00\xe0\xf4\x16\x84\xeb\x21\x00\x19\xa8\x00\x10\x21\x28\ +\x00\x44\x34\x77\x0c\x80\xa8\x0e\x3e\xf0\xd9\x37\xbf\x55\x7c\xb1\ +\x65\xdc\xbe\xd1\x7d\x96\x77\xd0\x1f\x06\x00\xe0\xf6\xaa\x69\x02\ +\x00\x5b\xaa\x8e\xaa\x7a\xec\xc5\x8f\x8a\x7c\xa8\x66\x53\x6f\x59\ +\x65\xf4\xe9\xba\xe9\x04\x09\x5a\x69\x64\xc3\x0a\xd1\x34\xbe\x2d\ +\x01\x0b\x56\x7e\x66\x63\x3d\x44\x3d\x5e\x00\xf7\xbd\x7b\xeb\x18\ +\x5d\x51\x90\x00\x13\x10\x31\x01\x2e\x7b\x75\xcb\xaf\x8b\x27\x07\ +\x8f\x30\x48\x9d\xb3\x4c\x9e\x28\x70\xb3\x78\x11\x40\x93\x28\xd0\ +\x89\x27\xd0\x11\x1b\x2a\x51\x10\x83\x34\xfe\x6e\x7e\x60\x98\x00\ +\x61\x53\x5c\x67\xaf\xa2\x00\xc0\x3e\x84\x20\x10\x06\x04\xd2\xfd\ +\x69\x07\x72\x50\x72\x80\x8f\x72\x27\x10\x02\x8d\x37\x5e\x8b\x7e\ +\xfc\x6b\xce\x39\x37\x7d\xb6\xa6\x2e\xa5\x47\x24\x16\xe8\x66\x7d\ +\x52\x4c\xfb\xc7\x10\x7c\x6c\x69\x1a\x8f\x0d\xa7\xb4\x83\xf3\xa6\ +\xff\x12\x5f\xe0\x1f\x8b\xa0\xac\x58\x4f\x3a\xc0\x9c\x23\x6b\x9b\ +\x2c\x1d\x2f\x0d\xdb\x5e\xeb\x7b\xa2\x3c\x00\xd6\xb8\x72\xac\x2e\ +\x9c\x36\x8a\xea\x4c\xb7\x47\xb4\x9d\x30\x6d\xa4\xb7\x27\xc8\x73\ +\xf5\xe1\xf7\x9b\xca\x3f\x28\xeb\xb3\x9b\x5b\x1f\xab\x6d\x81\xe8\ +\x76\xa0\xaf\x27\xcd\x36\x59\x43\x32\xc2\x73\x66\xb5\x8d\x93\xb7\ +\x2a\x5f\x2b\xef\xd8\xff\x6c\x7d\x7d\x22\xc0\xd4\x43\x56\x67\x5e\ +\xfd\xe8\xe3\xe9\x0f\x5b\x77\xba\x0c\xb3\xbe\xf1\x13\xae\x87\x00\ +\x4e\x03\x44\x24\xa0\x00\x90\xc1\x1a\xcb\xe6\xfe\xf8\x48\x79\x65\ +\x7d\x64\xd6\x1b\x19\x9b\x95\xa2\xc3\xf1\xc0\xe2\x7c\xa2\x0f\xe1\ +\xf7\x7e\xe9\x57\xf4\xf3\xda\xdd\x35\xbe\x55\xd2\x1a\x02\x90\x19\ +\x6a\x9e\xe1\xd4\x15\x07\xa2\x34\x74\x3e\x89\x88\x01\xe0\xb5\xa3\ +\x39\x44\x01\xbd\x3d\x41\x9c\xf7\xd0\x07\x6b\xcb\x32\x7b\x35\x38\ +\xca\x65\xeb\xcf\x0a\x02\x1d\x51\xa0\x63\x18\x79\xa2\x20\x1e\xb1\ +\x11\x33\xd0\x7e\xa1\xf1\xdb\xa8\x44\x81\xaa\xfd\x32\x51\x80\x4f\ +\x78\x44\x02\x0e\x01\xc8\xf0\xfa\x20\xf0\xe9\xa6\xf3\xfb\xcd\x74\ +\x0f\x3c\xf5\xed\xce\x6b\xcf\x1b\x9b\x9f\x9f\xb1\xa2\xd1\x2f\xa8\ +\xec\x1c\x50\xed\x67\x86\x00\x2a\x49\xbf\xf0\xa4\x3f\x7c\xbc\xce\ 
+\xd7\x0a\xf2\x02\xe6\x2c\x1c\xe3\xe7\xd0\xf4\xfb\x48\xc7\xff\x01\ +\x62\xee\x67\x61\x5e\x54\x3e\x89\x8c\x01\x60\xcb\xe6\xd5\x9f\x37\ +\x45\x91\x17\xd3\xe0\x62\x28\x20\x9e\x97\x01\xa9\xa8\x6f\x88\xc0\ +\x75\xf3\x76\xaf\x7e\xf3\xdc\xc0\xe1\xc4\x6c\xa0\xea\xc3\xaf\x8b\ +\x6d\x3b\xeb\x86\xb7\x96\xad\x37\x00\xb4\xc7\xfc\x09\x34\xb5\x3f\ +\x60\x6a\x06\x18\x72\xf2\x16\x0d\x49\xc4\x8b\xf5\xdb\xab\x66\x1e\ +\xa8\x86\x7c\x62\xe7\x04\x80\x5b\x4f\xc1\x10\xc2\xd9\x59\x0d\x9d\ +\x77\x4e\x3b\x37\x8f\xdd\xde\xf8\x95\x6a\x24\x75\x0d\x9a\xcc\xc2\ +\x11\xa6\x11\xa0\xf7\xd9\xf3\x30\xec\xe3\xa4\x26\x35\x46\x41\x1f\ +\x17\xfd\x6e\x4f\xeb\xcc\x4b\x9e\xde\x99\x1f\xb0\x38\xea\x13\x68\ +\x4a\x73\xeb\x82\x77\x36\x7e\xf8\xe3\xcf\xe5\x8e\x83\x92\x18\x14\ +\x00\x22\x6c\x01\x69\xaa\xb4\x09\x7a\xc0\x26\xc0\x79\x37\xf9\xb6\ +\x0d\xbf\x7e\xf1\x5c\xd6\xd1\x86\x59\x69\x17\x38\xaa\x10\x3e\xea\ +\xa1\x6a\x02\x81\x3b\x9e\x69\x58\xd7\xd0\x90\x80\x76\xb3\xd3\x00\ +\x45\xab\x0e\x4a\x0d\x3a\x38\x8d\xab\x48\x18\xd0\x69\x02\xe0\x73\ +\xef\x0f\x9a\x86\x92\x78\xa2\xc3\x37\x51\xc0\x69\x07\x5d\x4e\x82\ +\x9d\xc0\xf3\x97\xae\x29\xb9\x69\xfc\x59\xfb\x4f\xca\x58\x95\xe7\ +\x6a\x96\x02\x7b\x2e\x74\x97\x37\x76\xb4\x53\xc3\x20\xb2\xe7\xcb\ +\x61\x58\x99\xba\x25\x62\x16\x80\x2c\x26\xc1\xb3\x28\xe0\xd4\x9f\ +\x2a\x23\xab\x6a\x77\x4a\x16\x40\x8a\x23\xae\x80\x86\xde\xc7\x06\ +\xfe\xda\xee\xb3\xa0\x7d\x9f\x68\x8a\x22\x2f\xad\x68\x61\x23\x59\ +\xbe\x61\xc1\x77\x6b\x9b\x19\x72\x1e\x03\xd4\x74\x48\xe6\xa0\x6e\ +\xed\xdb\x33\x05\x21\x28\x00\x04\x10\xeb\xc1\x2d\x23\xd1\xee\xb5\ +\x04\x3c\xb8\xbf\x5e\x51\x50\xf5\xf1\xea\x71\x7b\x4f\x1f\xfc\x75\ +\x47\xdb\x0e\x9e\x11\xe5\x61\x98\xb0\xb1\xf8\x98\xca\x7f\xbc\xfa\ +\xd1\x5e\xbf\xeb\x46\x78\x43\x1f\xa2\x39\xfb\xb1\xe0\x30\x1a\xd3\ +\x99\xce\x66\x68\x04\xf9\xd1\x79\xf9\x1d\x04\x68\xb9\xa8\x45\xbd\ +\x77\x00\xa7\x21\x64\xeb\xad\x12\x05\xd6\x65\x6a\x3b\x27\xb4\x11\ +\x49\x9c\x07\xc0\xe2\xcc\xfb\xff\xb7\x7a\xf7\xd3\xc7\x8c\xca\xac\ +\x2d\x08\x08\x85\x88\xcc\x70\xcb\xbc\x04\x74\x1e\x6e\xde\x8e\x18\ +\xa1\x93\xb9\xf5\x12\xf8\x84\xe5\xda\x97\xd5\x3b\x2e\x51\x10\x4d\ +\xec\x42\x14\x70\x83\x0d\x01\x9c\x01\x87\xc0\x94\x45\xcf\x44\x00\ +\x10\x4f\x51\x04\x70\x7e\x97\x4c\x55\x74\xbc\xc3\x80\xdd\xcf\x9b\ +\xe9\x20\x2a\xcf\xda\xc6\x39\xc6\xf0\xf9\x3d\x1f\x07\x03\x28\x00\ +\x64\xc8\x1e\x06\x89\x9c\x12\x68\x91\xa0\xcb\x75\xe2\xff\x7d\xb2\ +\x76\xdf\xd7\x43\xf2\xd2\x61\x33\xa7\x85\x02\x83\x1b\x25\x6c\xe4\ +\xc0\x84\x1b\x7f\x5a\x95\x98\x9a\x41\xe2\x86\x00\xd8\x74\x22\x61\ +\xe0\x37\xb1\xb8\x06\xce\x43\x5a\x29\x0a\x24\x3d\x7b\x3a\x3f\x8b\ +\x08\xe7\xe1\x9e\x88\xb8\x06\x0e\x55\xb5\xa1\xc8\x9f\xde\xad\x5e\ +\x37\x77\xbc\x31\x98\x90\x68\xaf\x4b\x65\xe4\xbd\x08\x05\xe5\xb0\ +\x01\x95\x87\xe7\x19\x07\xe0\xdf\x39\x4b\x81\xc6\x59\x00\x6e\x5f\ +\xf5\xdc\xdc\xa2\x00\x80\x2f\x0c\x62\xe7\x43\x54\x07\x00\xe1\x14\ +\x45\x00\x77\xc2\x80\xf7\x3d\xc4\xf4\xe8\x45\xf9\xcb\xca\x88\x7e\ +\x37\x1c\x8a\x07\x41\x01\x20\xc2\x0a\xa8\x11\xd1\x1c\xc1\x35\x09\ +\x7a\x70\x57\xd7\x34\x44\x66\xfe\x33\x65\xc3\x23\xd7\x9b\x03\x85\ +\x89\x04\x2e\xf8\x05\x5f\xf5\x2f\x5c\xb7\xe9\xe3\xba\x84\x54\x8c\ +\x3e\xe7\xdc\x1e\x3e\x80\x56\x2f\x1f\x80\x6f\x20\x1d\x79\x4a\x04\ +\x84\x5f\xd0\x2e\x60\x9e\xe8\xa0\x1f\xd2\xa2\xb8\x05\x99\x81\x04\ +\x6a\x1f\xdb\x66\x2b\x3f\xa3\x39\x24\x00\xc0\x73\xef\xfe\xb4\xf7\ +\xda\x53\x7e\x57\x7a\x64\x70\x55\xfb\x68\xc5\xe4\x6d\x70\x33\x3c\ +\x20\xca\xc3\xf3\xb0\x01\x93\xb7\xe5\x25\x88\x79\x4b\x7c\x3a\x65\ +\xd6\x35\x6d\x52\x86\xd4\x2a\x82\xe7\xdd\x00\xe0\x8b\x19\xba\x8e\ +\x56\x3e\xd1\xdd\xbe\x8a\x02\xb6\x1c\x5d\x51\x00\x4c\x79\x32\x6f\ 
+\x01\x80\xff\xc2\x40\xa3\x0c\xa3\x59\xa4\xf0\x81\x05\x0a\x00\x19\ +\x2d\xee\x01\x48\x5c\x19\x73\xe6\xae\x28\xba\xf1\xe2\x53\x7a\xf4\ +\x6a\xbf\x3c\x43\x9a\x90\xea\x65\x57\x84\x07\x35\x5c\x7c\xcb\xc7\ +\x1b\x12\x56\x29\x00\xfb\x39\x77\xeb\xfe\x67\xd3\x8a\xbc\x06\x5a\ +\xf9\xf9\x04\x81\xa8\xdb\x99\xe3\x9a\xb7\xea\x28\x5b\xb7\x20\xde\ +\xa1\x83\x44\xb8\xb5\x25\x9c\x31\xf3\xb3\x55\xdb\x9f\x1c\x3c\xaa\ +\x4d\x4d\x91\xe1\xa8\x8f\xd7\x5e\xbf\x5b\xcf\x81\xdb\x61\x03\x4e\ +\x1e\x84\xf8\xb4\x22\x44\xec\x39\xa1\xd1\xbb\x77\xeb\x25\x88\x6d\ +\xe2\x88\x8b\x44\x88\x02\xeb\x38\xd9\x10\x82\xb4\x1e\x90\x38\x61\ +\x40\x6f\x13\x78\x0d\xd0\x03\xe0\x04\x05\x80\x08\x95\x07\xa0\x39\ +\xb4\x64\x82\xcb\x38\xef\xc6\xd5\xab\x56\xcc\xcb\x3e\xd6\x30\xd5\ +\x81\xb1\xa6\x11\x80\x5b\x66\xd5\xac\x8b\xa8\x82\x05\xe3\x81\x00\ +\x5f\xf4\xe8\xf6\xf0\x01\xf4\x62\x03\x24\xf9\xf9\xbd\x10\x50\x53\ +\x30\xa9\xac\x0e\x92\x7d\x7e\x79\x09\x9a\x89\xbd\xa5\xd5\xe1\x07\ +\x3e\x0e\x6c\x9c\x75\x92\x39\xa0\xb1\x2a\x71\xf4\xee\x45\xdb\xdd\ +\x7a\x0e\x94\x3d\x6d\x8e\x81\xf4\x0b\xcb\x03\xa4\xeb\x81\x00\xf0\ +\xe6\x25\xb0\xea\xcf\xba\xf5\x59\x63\xac\x5c\x3c\x8a\xa9\x97\x4d\ +\x14\x70\xea\x4c\x1f\xe7\xd5\x5b\x00\x10\xbf\x30\xe0\x1d\xc3\x7c\ +\xc7\x17\x23\x39\x41\x01\x20\x43\x75\xb9\x34\x87\x17\x20\x81\xfc\ +\xb4\xaa\xa8\x66\xf1\x77\xa7\xee\x3e\xfb\x98\x6f\x3a\xab\xd2\xae\ +\xd9\x75\x6c\xc5\x4b\xf3\x3e\xda\x97\xd0\x0a\xc5\x8c\x25\x0f\x4d\ +\xd7\x3f\x80\xbe\xfb\x9f\x97\x5f\x42\x04\x80\x68\x58\x43\x64\x18\ +\x24\xfb\xd8\xb6\xc9\xbc\x04\xec\xc3\xbe\x99\x78\xe8\xd5\x6f\x77\ +\x5d\x76\xe2\x99\x5d\x07\x05\x7e\xc9\xd6\xea\xdd\xc7\x2b\x08\xdc\ +\xe6\xad\x63\x54\x7d\xbb\x0e\xcc\xc6\x8f\xc8\xad\x2f\x13\x04\x00\ +\xfa\x5e\x02\xb6\xfe\x3a\x79\xf1\xca\x07\xf0\x26\x0a\xac\xe3\xac\ +\xcd\x32\x51\x00\x90\x38\x61\xc0\xdb\x16\xb6\x56\x45\x44\x58\x50\ +\x00\xc8\xd0\x31\xf0\x89\x74\xaf\x36\xc3\x15\x7b\xc1\x75\x9f\xae\ +\x2d\x59\x39\xac\x43\x86\xb1\x41\xd8\xf7\x09\x93\x8e\x70\xf6\x95\ +\xdf\x27\x2e\xf0\x8f\x26\x45\xd0\xb3\x07\xd0\x8f\xfc\x07\xd0\xf3\ +\x1a\xf0\xf2\xf3\xf9\xf7\x24\xf4\x32\xc0\xca\x3a\x68\x7a\x09\xb4\ +\x63\x09\x2c\xaf\x46\xf3\x0b\xd5\xdf\xdd\xff\xf5\xaa\xf5\x8f\xf4\ +\x3c\x2e\xb5\x7e\x1f\xf1\xc5\x68\xb7\x84\x20\xf0\x83\x14\x13\x1a\ +\xdf\x05\xc0\xd4\x45\x6b\x26\x03\x53\x4f\x4f\x5e\x02\xeb\x40\x45\ +\x5e\xd0\x54\x0d\xed\x98\x13\xd6\xf0\x8b\xbc\x12\xb6\xcd\x94\x30\ +\x10\x89\x02\xb6\x7d\x8e\x3a\x81\x5a\x18\x00\x38\x63\x00\x5a\xe0\ +\x3e\x68\xed\xe0\x3a\x51\x22\xac\xde\xa8\xea\x63\x21\x5a\x42\x37\ +\x8e\x8f\xef\xee\x68\x0e\xf5\xf5\x11\xb8\xe3\x31\xb2\xc1\x34\x0c\ +\x61\x1b\x5f\xfd\xb0\x4f\xc1\xd6\xed\xa5\xf5\xd2\x8c\x7c\x80\x58\ +\xbd\x65\x76\x95\x3f\xdb\x87\x73\xae\x44\xe9\x79\x2b\x1d\xb2\x69\ +\xd8\x55\x06\x7d\x6f\x14\x80\x72\xf5\x41\x59\x5b\x44\xc7\xc9\xf2\ +\x64\x97\x38\x6e\x81\xae\xcf\xd6\xc2\xd2\xfa\x27\xbe\x6e\xbb\x95\ +\x5b\x5f\xe1\x92\xc5\x2e\xd2\x7a\xdd\xae\x93\xd6\xcf\x77\x80\x10\ +\x68\xba\x06\xb8\xbf\xb1\x09\x4d\x5e\x02\x97\x6d\xb2\x56\x05\xb4\ +\xee\x1b\xba\xfe\xb1\x55\x17\xa3\xc7\x88\xca\xd0\xc9\x8f\xb7\xa2\ +\xa6\xf0\x7a\x36\xc5\x6d\xe2\xd5\x9f\x2e\x93\x5e\xdd\x90\x5d\xe1\ +\x90\x50\x79\xcb\xee\x25\x6a\x19\xe4\xd8\xa7\xae\x12\xa0\xa1\x1e\ +\x0c\x1c\x02\x70\x80\x02\x40\x86\x1b\x83\x1d\x3b\xc6\xc7\x4f\x33\ +\x5d\xae\x4f\xff\xbf\x9f\x76\xff\xe7\xcb\x63\x8b\x22\x81\x2c\x5b\ +\x9b\x4c\x23\x15\x56\x6c\x39\xa1\xf4\xca\x3f\x7c\xb6\xa9\x59\x2a\ +\x42\x80\xff\xc0\xd4\x31\xf6\xbc\xf7\x18\xf0\xd2\xb2\x0f\x2f\xf6\ +\x21\x92\xa8\x21\x00\x5d\xc3\xce\x33\xe0\xa2\xfa\x3b\x96\x31\x96\ +\xec\x6b\x01\xee\xf8\xc7\x17\xdb\x7f\x33\x86\x56\xf3\xcf\x33\x6d\ 
+\x08\x14\x6d\xf3\x7b\xbb\x4e\x5a\xbf\xae\x03\x91\x81\x4d\x94\x20\ +\x90\x89\x02\x9e\x20\x90\x95\x23\xcb\x2f\x6e\x51\x20\x39\x96\x2e\ +\xd3\x47\x61\xd0\x1c\x1d\xaa\x03\x0d\x1c\x02\x90\x11\xcf\x83\xd3\ +\x8f\xde\x64\x33\x5e\xb0\xe7\x5f\xf5\xc9\xba\x6e\x87\x64\x6f\xba\ +\xfa\xd2\x61\x9d\xfa\xf6\xca\xc8\xd8\xb5\xa7\xbe\xfe\xc5\x37\xd6\ +\xef\x5e\xb7\x61\x49\x62\xa6\xfc\x89\xa0\xcf\x9b\x28\x32\x5f\x78\ +\x6e\x79\xe9\x79\x69\x39\xe9\x22\x8d\xe9\x7c\x77\x97\xdb\xe2\x1a\ +\xa8\xbc\x0d\x90\xc7\x24\x18\x02\x17\x7f\x2c\x53\xce\x71\xac\x9b\ +\x34\xda\xa6\x96\x94\xf9\x67\xfd\xf9\x87\x5f\x56\xfe\xb5\xe3\xb1\ +\x29\xa1\x52\x62\xab\x93\xef\xae\x7e\xc9\xf6\x08\x7d\xde\x5d\xe4\ +\xe1\x07\x96\x31\x52\xb9\xf4\x65\x0b\x3a\x89\x5c\xec\x3a\xb3\x42\ +\x64\xc3\x06\xec\x31\xdc\x58\x02\x49\x7e\x6c\x9e\xb2\x7a\x58\xfb\ +\x45\x71\x2e\xb1\x6b\x54\x30\x84\xc0\x2b\x5b\x67\x18\x81\xca\x0a\ +\x71\x82\x02\x40\x04\x6b\x23\xe2\x31\xe8\x5e\x1f\xc0\xcd\xac\x58\ +\x77\xee\xaa\x68\x78\x60\xce\xf2\xc2\xe6\x2d\x95\xc2\x66\x2c\x01\ +\x84\x77\xae\x1b\x61\xe0\x30\xb4\xc0\x31\xa8\x54\x3a\xdf\x3d\x00\ +\x66\xd3\x83\x88\xf7\x00\xb4\x27\xb6\xef\xf3\x4b\x10\x24\x72\xe6\ +\x86\x82\xd5\x9b\xf7\xd6\xfe\x6b\xe5\xd0\x1d\xd7\x0d\x2d\xe9\x01\ +\x00\x62\xe3\xe6\x97\xe1\xa7\xcf\x53\x3c\x62\xc3\xaf\xeb\xc0\x80\ +\xe8\xcf\xac\x51\x6e\x5c\x82\xc0\x43\x19\x00\x4e\x51\x20\x8a\x85\ +\x10\x89\x0f\x55\x9e\x32\xa3\xaf\xb3\x5f\xd6\x46\x5e\xd9\xaa\xa0\ +\x43\xc4\x06\x0a\x00\x01\xb1\xf7\xb8\xb3\xf8\xd1\x9b\xd2\xed\x65\ +\x26\xe3\x35\x6b\x3b\xe7\xa2\x13\xe0\x83\x30\x10\x79\x0b\x12\x32\ +\x04\x40\x7d\xa7\x7b\xa3\xb2\x60\xbe\xd8\xc1\x54\xdd\xbc\x08\x82\ +\x88\xa9\x7f\xbd\x25\x88\x1b\x1e\xfe\x74\xcb\x39\x6f\x9e\xd2\xb9\ +\x4b\x78\x4d\x1a\x00\x68\x1a\x37\x8e\x21\xd7\xda\xee\x97\xa8\xf0\ +\x89\x80\x09\x60\xbd\x33\x43\xd6\x83\xf7\x43\x10\x00\xd8\xcf\x8f\ +\xae\x97\x43\xe6\x25\x60\x8d\xb3\x4e\x80\x21\x2f\xcf\x68\x12\x67\ +\x5d\x38\xc7\x7a\x11\x05\x00\x6a\x61\x80\x03\xde\x0e\x50\x00\xc8\ +\x50\x5d\x30\x7e\x3e\x58\xf1\xe2\xd4\xf7\x00\xf8\x21\x0c\xb4\x45\ +\x41\x9c\x58\x63\x94\xa2\x32\x22\x4c\x3d\xbc\xf4\xf4\xd9\xe3\xd8\ +\xa1\x86\x06\xfd\xea\x26\x82\x88\x09\x70\xc1\x43\x1b\x7e\xf9\xfc\ +\x9e\xcc\xa3\x8d\x70\x65\x74\x23\xd3\xce\x18\xcd\x34\x2c\xa0\x10\ +\x1b\xbe\x8d\x17\x13\xb3\xc9\x35\x0e\xd0\xb8\x2c\xb0\x85\x8e\xc8\ +\x89\x5b\x10\x58\x07\x32\xfb\x64\x46\x56\x24\x08\x00\x9c\xa2\x20\ +\xcc\x5c\x6b\xba\xbd\x75\x55\x94\xbf\x8e\x28\x00\x70\xe5\x2d\x48\ +\xc6\xfe\x94\x0a\x14\x00\x22\x08\xb8\x33\xf0\x68\xc0\xfd\xc1\x32\ +\x06\x11\x22\x39\xa7\x3e\x08\x03\x91\x28\xf0\xfb\x29\xc1\x06\xe1\ +\x49\x8d\x3a\xc8\x05\x81\x6c\xc8\x40\xe6\x59\x68\x05\xd3\x9f\xbe\ +\x5a\xb9\xa3\x6a\xc1\x86\xd3\x0b\x2f\xec\xf7\x4b\x57\x00\x70\x9e\ +\x07\xd5\xcb\x8d\x5c\x79\x01\xdc\x6e\x4f\xa0\x17\xc0\x0a\x78\x74\ +\xb3\x06\x81\x4c\xdc\xd1\xf5\xd7\x12\x04\xcc\x31\xba\xe5\x78\x1a\ +\x36\x60\xda\xe4\x28\x4f\x52\x7f\x36\x5f\x00\xb5\x28\xd0\x49\xe3\ +\x10\x05\x08\x0d\x0a\x00\x1d\xbc\x18\x77\x1f\x82\x00\x89\x91\x64\ +\xd3\x56\x6c\x1e\x00\xd9\xf9\x73\x69\xe8\xb9\xbf\x9f\x20\x6d\x22\ +\x86\x00\x58\x03\xac\x6b\xd4\x01\x24\x3d\x43\xf6\x58\xc9\x71\xad\ +\xe4\x2a\xba\xe8\xde\x25\x1b\x4e\x5d\x38\xba\x63\x6e\x78\x5d\x2a\ +\x00\x88\xcf\x03\xcf\x3b\xd0\xdc\xb1\x03\x7e\x7a\x00\xc0\x14\x1b\ +\x71\xb7\x82\x40\x14\xe8\x09\x74\x7a\x48\xac\x20\x60\xcb\x4a\x94\ +\x97\x80\x97\x37\x4f\xa0\xb1\xa2\x59\x24\x0a\x5a\xc9\x7d\xd0\x9a\ +\x40\x01\x20\x83\x67\x38\x9a\x33\x18\x30\x19\x2f\x58\x62\x36\xde\ +\xbc\xa2\x73\x15\x01\xfe\x6f\xe0\xd6\xd0\xb3\x69\xad\x7c\x89\xcf\ +\x67\xdd\x31\xac\x01\x2e\x8c\xba\x95\x01\xb5\xcf\x4b\x1c\x40\x2b\ 
[binary resource data elided: hex-escaped, size-prefixed blobs from a generated resource file — the tail of an embedded PNG, an 18px "visibility off"-style SVG icon, an Inkscape-authored SVG (sodipodi:docname="movie-black-18dp.svg", Inkscape 0.92.5), and a 256x256 PNG whose tEXt chunk reads "Software: Matplotlib version 3.3.0rc1.post559+gdf50f1277, https://matplotlib.org/"]
+\x02\xa8\xe5\xad\x00\xbe\xb2\xfa\xe2\x3b\x26\x35\x26\x64\xf3\x5a\ +\x37\x3b\x36\x49\xec\xdd\x29\xf1\xdc\x3e\x09\x96\x2c\x58\xa2\x50\ +\x09\xe2\xad\xb7\x6e\x92\xe8\xe8\xaa\xdc\xeb\x58\xbd\xe6\x8c\x26\ +\x7b\x60\x95\x15\xf2\x9e\x83\xa8\x18\xe4\x65\x44\xaf\x11\x62\x3d\ +\x89\xcc\x31\x3f\xf1\x9b\x8d\x9c\x46\x23\x37\x53\x30\x90\x42\xc7\ +\xd9\x1f\x19\x5e\x4f\xbb\x9c\xe7\x4d\xad\x4b\x50\x2f\x54\x6a\x3d\ +\x75\x97\xa1\x00\xb8\x6f\x3b\xea\xed\x81\x40\xe5\xd0\x2a\xe7\xc9\ +\x48\x16\x40\x8a\x69\x01\xc0\x4f\x61\x3b\x9c\x02\xd4\x2d\xa9\x11\ +\x21\x4b\x60\x14\xae\x98\x2b\xb1\x66\x91\x92\x0d\x2b\x25\xf4\x64\ +\xc1\x12\x85\x4c\x12\x47\x16\xcc\x63\x5e\x43\x76\xb7\x6e\xcc\x0c\ +\x82\x55\x5a\xc8\xfb\x0e\xde\x88\x61\xc0\xa8\x63\x62\xeb\x10\x46\ +\xff\xc8\x6c\x38\x72\x9b\x8d\xc3\x95\xd9\x82\xd9\xf2\xa1\x49\xfb\ +\xd1\xfa\x02\x98\x78\xc0\xd4\xcc\x83\x7a\xf5\x62\xeb\xc9\x3b\xf5\ +\x05\x70\x4f\x1c\x7f\x15\x23\xa3\xbf\xa0\x96\x43\xab\x89\x11\x2d\ +\x80\x2c\x6f\x04\x50\xde\x0e\x17\xdf\x23\xa9\x21\x21\x33\x47\x4a\ +\xcc\x19\xaf\x64\xfe\x14\x09\xa7\x28\x3c\xb0\x84\xa1\x27\x0a\x2a\ +\x89\x4e\x0f\x3d\xa8\x1c\x78\xa5\x4b\x93\x1f\xf6\xef\x63\x67\x10\ +\x8c\xf2\x42\xdd\x7b\xd0\xcb\x1a\xd4\xe5\x84\x53\x0a\x23\xc6\xbf\ +\x20\x74\x5f\xfa\x4d\xfa\x5e\xd3\x74\x64\x36\x1c\x59\x53\x93\x9c\ +\x19\x08\xd6\x3a\x05\x8f\x14\x5a\xe5\xfd\x4a\x4a\xde\x55\x9e\x9f\ +\x91\x8c\x3d\xa0\x59\xd6\xcc\x7a\x03\x52\xb3\x49\x0a\xcd\x16\xda\ +\x4c\x30\x16\x00\x77\x69\x33\x15\x03\xb3\xc7\x40\xe5\xd0\x6a\x7c\ +\x44\x0b\x60\xa2\x37\x02\xa8\x67\x87\x8b\xef\x95\xdc\x80\x90\x31\ +\x8f\x48\x4c\xcc\x92\xc8\xe9\xaf\x24\x77\x88\x04\x4b\x18\x2c\x59\ +\xa8\x24\xf1\x8f\x65\xf3\x49\xb1\xa2\x45\x0a\x7e\xff\xee\x71\xa3\ +\xd9\x59\x84\x3c\x83\xf0\x42\x0e\xea\xac\xc1\x23\x86\x09\x39\x07\ +\x84\xee\xcb\xf8\x59\xdf\x72\xcb\x08\xee\x4c\x04\x67\x16\x82\x5b\ +\x42\xa8\xfa\x0a\x9d\x26\xbc\xc5\x17\xc0\xa8\xe7\xbc\x6b\x36\x52\ +\x31\xb4\x19\xbb\x43\x5f\x00\x95\xe3\xb8\xdb\xa8\x69\x1a\x8f\x0c\ +\x39\xb4\x1a\xb3\x2a\x92\x05\xb0\xd4\x1b\x01\x64\xd8\xe1\xe2\xfb\ +\x3c\x98\x08\x79\x71\x33\x37\x83\x53\x95\x0c\xcb\x90\x18\xd9\x49\ +\x42\x44\x18\x3c\x51\x80\x1c\x56\xf4\xee\xe1\x7e\xe3\xae\x4e\x2d\ +\x72\x7b\x71\x9e\x52\x12\x06\x19\x84\x59\x39\xa8\xc5\x90\x9b\xb7\ +\x47\xe8\xbe\xcc\x59\xf2\x8d\x66\x25\xa4\xa7\x8c\x90\xaf\x7c\xe4\ +\x4d\x51\x9a\xce\x16\x40\x0a\x8d\x46\x38\x48\x42\xd3\x2c\xe6\xf5\ +\xb4\x1f\xfe\x2c\xf3\xbd\x07\xd1\xd7\xa4\xdb\x8e\x34\x16\x80\xf0\ +\x46\xab\x0c\x39\xa4\x8c\x8a\xe8\x0c\x60\x8f\x37\x02\xe8\x61\x87\ +\x8b\xef\xdb\xb4\x3e\x21\x03\x1b\x83\x00\x54\xf4\x6f\xa2\xa4\xaf\ +\x0c\x8f\x30\xd4\xd2\x60\x09\x83\x21\x8a\x3f\xa6\x66\x93\x9a\x95\ +\xa3\xc9\x97\x53\x87\x71\x25\xa1\xc9\x22\xd4\x65\x86\x59\x39\x50\ +\x31\xec\x98\x33\x47\xe8\xbe\x38\xa7\x2e\xe5\xa5\x84\xba\xc7\xc0\ +\xea\x2f\xb0\x66\x23\xcc\x66\x0b\x19\xe3\xaf\x93\x62\x25\x4a\x6a\ +\x05\x30\xe4\x59\xb1\x59\x08\x8e\x18\xda\x0e\xdb\xae\xfb\x7d\xa3\ +\x2a\xc5\x9a\xdb\x66\x8d\x21\x07\xb5\x20\xe4\xb3\x13\xea\xec\x41\ +\x31\x53\xa1\x2e\x31\xb6\x2b\x4b\x0d\x45\xb9\x21\xeb\x47\xb8\x4a\ +\x8f\x09\xb6\x98\x7d\x78\xd1\x1b\x01\x64\xd9\x42\x00\xcd\x40\x00\ +\x43\x1a\x2a\x19\xdc\x48\xc9\x20\x15\x4e\x61\x78\xe0\x49\x43\x4f\ +\x16\x20\x88\x9f\xc6\xf5\xe0\x4b\x42\x20\x8b\x30\x25\x07\x59\x59\ +\xb1\x27\x2f\x57\xe8\xbe\x3c\xbd\xf7\x73\x6e\x29\xa1\x6e\x3e\xb2\ +\x1a\x8f\xbc\xd9\x08\xd6\x14\xa5\x7a\xdd\x42\xa7\xec\xc3\xda\x5d\ +\x91\xb2\xf7\x9b\xea\x2d\xa8\xc5\x90\x31\x64\x9b\xbe\x00\xa2\x63\ +\x8d\x37\x56\xd1\x93\x03\x23\x73\x30\x12\x04\x4b\x12\x86\xa2\x90\ +\xc9\xc2\x49\xca\x58\x5b\x64\x1e\xaf\x7a\x23\x80\x7e\x76\x10\x40\ 
+\xbf\xe6\x20\x80\x11\x89\x12\xc3\x1b\x68\x19\xaa\xa6\xa1\x04\x4f\ +\x1c\x2c\x61\x88\x88\x42\x9d\x4d\x18\x09\xc2\x8c\x1c\x40\x0c\xcf\ +\x4f\x1a\x27\x74\x5f\xae\x3c\xbb\xbf\x20\x6b\x60\xcd\x4e\xb0\x9a\ +\x8f\xbc\xc6\xa3\x99\x32\xc2\x29\x85\xcc\xe1\xf9\xa4\x46\xdd\x36\ +\x4a\x01\xf4\x7f\xc6\xbb\x99\x08\x2a\x86\x8c\x6c\x63\x01\x98\xde\ +\x64\x45\x6f\x1b\x36\x8e\x20\x4c\x49\x42\x26\x0a\x9e\x2c\x6c\x52\ +\x7a\xbc\xe6\x8d\x00\xb2\x6d\x21\x80\x96\x20\x80\xb1\xf5\xdc\x8c\ +\xa9\xaf\x65\xb4\x8a\x51\x89\x4a\x78\xf2\x60\x09\x83\x25\x0b\x23\ +\x49\xe8\x09\x82\x95\x41\xf0\xe4\x40\xcb\x8a\x17\xc7\x0d\x13\xba\ +\x2f\x7f\xdf\xb8\x4e\xbb\xd6\x41\xb5\xbe\x81\xd7\x7c\x64\x4d\x57\ +\x9a\x2d\x23\xb2\x73\xbe\x21\x85\x8b\x14\x2d\xb8\x9e\x2e\xfd\x9e\ +\x16\x2e\x23\x58\xb3\x11\x19\xfd\xb7\xea\x0b\xa0\x62\xac\xe1\xab\ +\xd4\xa2\x72\xe0\x0a\x42\x3d\x53\x21\x22\x09\x96\x28\x64\xb2\x48\ +\x19\x6e\x8b\xe6\xe3\x09\x6f\x04\xf0\xb8\x1d\x04\xd0\xbf\x15\x04\ +\xf5\xe4\x07\x08\x99\x94\xc0\x66\xbc\x9a\x7a\x4a\xc6\xd6\x63\x0b\ +\x84\x27\x0d\xb5\x2c\xf4\x44\xc1\x92\x84\x9e\x20\x04\xe4\x70\x7c\ +\x54\x3f\xa1\xfb\xe2\x9c\xad\x50\x94\x13\xea\x3e\x03\x4b\x0c\x8c\ +\xe6\xa3\x7a\xba\x92\x35\x55\xa9\x5e\xf5\xe8\x11\xc3\x23\xbd\xa5\ +\xc1\xfd\x68\xff\x2d\x3e\xcd\x46\xb4\xeb\x63\x2c\x00\xaf\x37\x5e\ +\x61\x64\x0e\x6a\x41\x18\x65\x10\xdc\x32\x83\x27\x0a\x2a\x8b\x94\ +\xa1\xb6\x10\xc0\x3b\xde\x08\x60\x84\x2d\x04\x90\x02\x01\x3a\xad\ +\xae\x96\x29\x0c\x72\x1e\xd0\x32\x59\x06\x57\x1c\x0c\x61\xb0\x64\ +\x61\x28\x09\x1d\x41\xb0\x32\x08\x86\x1c\x4e\x0e\x7f\x54\xe8\xbe\ +\x7c\x3b\x8b\x53\x4e\xa8\xfb\x0c\x0c\x31\xe4\x1f\x7d\x85\x1c\x5b\ +\xb2\x58\x21\x06\xf5\x74\xa5\x48\x7f\x61\xd9\xba\x3f\x48\xb5\x18\ +\xf7\x1b\x83\x7d\xb2\xd7\x79\xd5\x5f\xf0\x88\xa1\x5d\x96\x81\x00\ +\x2a\xc4\x2a\x77\x7a\x1e\xce\x9f\xae\x64\xca\x81\xd1\x88\xf4\x45\ +\x10\x42\xa2\x98\xe7\xfc\x7e\xb6\x10\xc0\xfb\xa1\x2b\x80\x34\x08\ +\xbc\x99\x75\x94\xe4\xf1\x88\x57\x92\x1b\x4f\x7e\x7c\xa2\xb5\xeb\ +\x3f\x5d\xf0\x04\x62\x24\x0c\xb5\x28\x58\x59\x05\x95\xc4\xcd\x9c\ +\xd6\xe4\xf6\x98\x24\xbe\x20\x0c\xe4\xf0\xee\xf0\xae\x42\xf7\xe5\ +\x3f\x39\x03\xa5\x92\x42\xde\x8c\x9c\x63\x2c\x86\xfc\xed\x1b\x48\ +\x6c\x74\x34\xb9\xf9\xf4\x0e\x65\xc6\xc0\x98\xae\x34\xea\x2f\x2c\ +\xa6\x27\x19\x0d\x1c\xbc\xd2\xb7\xfe\x42\xd6\x3a\x7d\x01\x94\x8f\ +\x35\x7c\xbd\xda\x94\x1c\xf4\xb2\x07\x8e\x20\x14\x92\xd0\x11\x85\ +\x5c\x16\xa9\x03\x57\x86\xac\x00\x6c\x51\x02\x0c\x68\x0d\x81\xf5\ +\x44\x2d\x18\xd8\x3a\xcc\xae\xad\x65\x96\x9b\x27\x1e\x6b\x41\x7e\ +\x5f\xd0\xd4\x40\x22\x4a\x69\x68\x84\xc1\x92\x05\x47\x12\xa3\x32\ +\x9a\x90\x5b\x39\xc9\x6c\x41\xb0\x32\x08\x95\x1c\xde\x1d\x2d\xb6\ +\x33\xef\x8f\xa3\x3a\xeb\x37\x23\x79\x7d\x06\x90\x82\x03\x70\x7e\ +\xc6\xd4\x6e\x9d\xb4\x6b\x1a\x78\xd3\x95\x3a\xfd\x85\x1e\x3d\x46\ +\x93\xa1\xc3\x16\x9a\xee\x2f\xc8\xc5\xd0\x2d\x6b\xa5\x81\x00\x62\ +\x84\xde\xa6\x64\x6e\x09\xcf\x93\x83\x19\x41\xb0\x24\xa1\x23\x0a\ +\x8f\x2c\x52\xfb\xad\x08\xd9\x12\xc0\x16\x4d\xc0\x01\x6d\x20\x70\ +\x16\xd4\xe0\x50\x93\xcd\x3c\x89\x29\x8f\x3c\x48\x66\x65\x35\x73\ +\x4b\x44\x0e\x4f\x20\xb3\x64\xe8\xc9\x82\x21\x89\xcb\x7f\xe9\xe8\ +\xba\xe6\x5b\xd3\x93\xf4\xb3\x08\x1d\x39\xbc\x37\x4e\x4c\x00\x3f\ +\x8d\xc9\x94\x4a\x0a\x56\x39\xa1\x16\x83\x6c\x86\xc2\x01\x62\xf0\ +\x7c\xce\xc7\xb9\x63\xa4\x8c\x41\xbd\xe0\x89\x35\x5d\xc9\xe8\x2f\ +\x5c\x3f\x7e\x9c\x6c\xcc\xcb\xf3\xba\xbf\xe0\xa4\x67\xef\x45\xfa\ +\x02\x28\x77\x2f\x73\x0d\x83\x99\xfd\x17\x98\x99\xc3\x10\x83\x63\ +\xe6\x74\xce\x9c\x94\x4b\x82\x25\x0a\x27\xa9\x7d\x56\x84\x6c\x13\ +\xd0\x16\xd3\x80\x03\x32\x20\x38\x96\xdf\x27\xb1\xec\x7e\x7d\x96\ 
+\x28\x99\xd2\xc3\xbd\xb6\xff\x1f\x2b\xda\x41\xbe\x5a\xc3\x8d\x9e\ +\x48\x64\xf2\xd0\x95\x85\x4a\x12\xb7\x67\x35\x20\x89\xd5\x2b\xbb\ +\x05\x30\xab\x31\x3f\x8b\x90\x67\x10\x0c\x39\x9c\x9e\xd4\x59\xe8\ +\xbe\x5c\x9b\x98\xa1\x2d\x29\xd4\xbd\x06\x8e\x18\x1c\x40\xc1\xc1\ +\x2b\x31\x55\xc9\x9f\x53\xb3\x95\x19\x83\x7a\xca\x92\x31\x5d\xa9\ +\xee\x2f\xfc\x79\xe0\x19\xee\xe2\x26\x91\xfe\x42\xef\xbe\x73\x0d\ +\x04\x50\x45\x91\x31\xf0\xca\x09\x33\x72\x60\xee\xda\x34\x48\xec\ +\x94\x29\x96\x24\x58\xa2\x48\xcb\x5a\x11\xb2\xd3\x80\xb6\x58\x08\ +\x34\xa0\x1d\x08\x60\x75\x75\x7d\x56\x71\x58\x79\x1f\x99\xd2\x33\ +\xd9\xbd\xac\xb7\x5e\x2c\xc9\x5f\x55\x5b\x29\x13\xb5\x50\xe4\xf2\ +\xf0\xc8\x62\x31\x47\x16\x2a\x49\xec\x1b\xdf\xae\xe0\x9a\x6f\xcd\ +\x6f\xa8\x95\x84\x5e\x06\x21\x93\xc3\xe9\xa9\x9d\xc4\x04\x30\xb5\ +\xad\xb2\xa4\xd0\xeb\x35\xa8\xc4\xe0\x18\x90\xa4\xf8\xac\xa7\x7a\ +\x65\x6a\x67\x26\xcc\xf6\x17\x58\x65\x84\x89\xfe\xc2\x80\x01\x79\ +\x06\x02\xa8\xcc\xec\x31\x98\xd9\x87\x81\x25\x07\x66\x59\xd1\x4f\ +\xe0\xe4\x29\xc1\xf3\x29\xd3\x7a\x2e\x0f\xd9\x85\x40\xb6\x58\x0a\ +\x3c\x20\x13\x04\xb0\x21\xd6\xcd\x7a\x41\xd6\xc5\x15\x30\x25\x2b\ +\xb9\xe0\xb3\x5e\x9d\x03\x59\xc0\x9a\x38\x09\x3d\x89\x80\x3c\x0a\ +\xd0\x93\x05\x08\xe2\xe6\xaa\x96\xe4\x8e\xe2\xd2\x9c\xf8\xad\x25\ +\x89\x4c\x49\xe8\x65\x10\x1e\x39\x9c\xce\x13\x14\x40\x5e\x6b\x65\ +\x49\xc1\xeb\x35\x30\xc4\xe0\x18\xd2\x58\xf1\x59\x45\x0a\x17\x22\ +\xff\x1a\xd3\x49\xca\x18\x58\x53\x96\xea\x85\x4e\x7a\xd9\x82\x48\ +\x19\xa1\x12\x43\xee\xc0\x81\xfa\x02\x88\x8a\x16\x7a\xe5\xda\xcc\ +\x26\x2d\xac\xcc\x41\x2f\x7b\x30\x7d\x3c\x1d\xfc\x7b\x69\xdd\x97\ +\x85\xec\x52\x60\x5b\xbc\x0c\x34\xb0\x03\x0c\xee\x2d\xd5\xf8\x6c\ +\x8e\xd1\x65\x6a\x6f\x49\x00\x77\x97\x2a\x41\x7e\xdb\x9e\x24\x09\ +\x85\x27\x16\x99\x40\x98\xc2\x50\x89\xe2\xf1\x0e\x0d\x14\xd7\x7c\ +\x7b\x55\x3d\x8d\x24\xa4\x4c\xa2\xa6\xae\x1c\xde\xa7\x7d\x04\x23\ +\xae\xcf\x4e\x97\x4a\x0a\x79\xbf\x41\xdd\x6b\x50\x8b\x01\xa4\xe0\ +\x18\xd9\x40\xbb\xd9\x69\x7c\x75\x92\x3f\x34\x59\xbb\xa6\x81\xd7\ +\x5f\x60\x65\x0b\x6a\x31\xc8\xdf\x99\x30\x98\xa6\x9c\xdd\x4f\x7f\ +\xdb\xec\x8a\x51\x51\xba\xfb\x31\x88\xbe\x72\xcd\x6a\x42\xea\x95\ +\x15\x46\x82\x60\x49\x42\x2e\x8a\xf4\x87\x6d\x21\x80\x3d\x21\xfb\ +\x3a\xf0\xc0\x8e\x30\x88\x77\xde\x2b\xce\x76\x25\x53\xfb\x26\x2b\ +\x3e\x6f\x6a\x1f\x18\xe4\xdb\xaa\xb9\x31\x92\xc9\x46\x19\x2c\x61\ +\x80\x20\x3e\x5d\x9d\xa9\xb9\xe6\xdb\xeb\x1e\xd0\x66\x12\xea\x2c\ +\x82\x23\x87\xf7\xe7\x76\x10\x13\xc0\x82\x34\xa9\xa4\xe0\x94\x13\ +\x5a\x31\xb8\xa7\x31\x1d\xe3\xea\x33\x3f\x73\xff\xe0\x0e\xb2\x8c\ +\x81\x33\x5d\xa9\x5e\xc7\x60\x26\x5b\x50\x97\x11\xb2\x6c\x61\x41\ +\x9f\x5e\xba\xdf\x37\x1a\x04\xa0\x37\x23\x21\xf2\xca\x35\x2b\x6b\ +\xd0\xcb\x1c\x58\x3d\x07\xb3\x87\xd0\xb4\xe9\xb6\x28\x64\x5f\x07\ +\xb6\xc5\x86\x20\x03\x3b\xc1\xa0\xdd\x5b\xc5\x3c\x4f\x57\x75\x31\ +\xad\x5f\xb2\xe6\x33\xbf\xda\xd1\x86\x90\x5d\x55\x25\x0c\x24\x52\ +\x20\x8c\x6d\x4a\x59\xdc\xde\x16\x4f\xe2\xe3\x2a\x6a\x05\xb0\x39\ +\x5e\x9b\x4d\xf0\xb2\x08\x95\x1c\x3e\x58\x20\x28\x80\xa5\x29\xda\ +\x92\x42\xdd\x6b\x50\x8b\x81\x36\x20\x1d\x93\xd9\xdb\x7d\x17\x2f\ +\x5a\x84\xfc\x98\x9b\xa1\xc8\x16\x94\x65\x84\x60\xd3\x51\xbe\x2c\ +\x5a\xb0\xb7\xb0\xec\xb1\x47\x74\xbf\x6f\x65\xa7\x00\x78\xfd\x05\ +\x01\x31\x3c\xfe\xf8\x6c\x12\x1b\x5b\x9b\xc4\x14\x50\x07\x88\x57\ +\x51\x17\x78\x80\x92\xa0\xa2\x1e\xa5\xbe\x8b\x6a\x6a\xe2\x12\x29\ +\x0d\x28\x0d\x5d\x94\x2a\x5d\x2e\x64\x37\x04\x71\x6e\x09\xf6\x87\ +\xd5\x17\x9f\xdd\x05\x04\xf0\xdc\x3d\x5e\x33\x6d\x80\x56\x00\x49\ +\x0f\x54\x21\xf9\xcf\x41\xf0\xed\xab\xa2\x84\x23\x11\x17\x0c\x61\ 
+\xec\x86\x3a\x9c\x75\xcd\xb7\xb7\xd7\x51\x66\x14\xea\x2c\x42\x47\ +\x0e\x67\x96\xb4\x17\xba\x2f\x37\x56\xb4\x92\x4a\x0a\x79\x39\x21\ +\x9f\xad\xd0\x34\x21\xdd\x0d\x48\x47\x6e\x5d\xee\xe7\x76\x69\x52\ +\x9b\x38\x26\xd4\xd7\x4e\x57\x0a\xf4\x16\x98\xcb\xa2\xe5\x4b\xa2\ +\x75\xa6\x28\xd7\x64\xe9\xcf\x7e\x54\x29\x17\xc5\x9f\x8d\x10\x10\ +\xc3\xc8\x1e\xdd\x71\x4b\xb0\x50\xdc\x14\x34\xbb\x2b\x0c\xc0\x43\ +\x95\xbc\x26\x37\x3b\x99\xf9\xb9\x2f\x2c\x6d\x4b\xc8\xc1\xca\x90\ +\x1f\xaa\x50\x4b\x64\xbf\x0a\x2a\x8b\xeb\xfb\x9b\x92\x62\x45\x0b\ +\xb3\x05\xb0\xa7\x16\x3b\x9b\x60\x64\x11\x6a\x39\x9c\x59\x9e\x29\ +\x26\x80\xb5\x2d\xa4\x92\x42\x5e\x4e\xc8\x67\x2b\x38\x0d\x48\xc7\ +\xac\x3a\xba\x9f\x7d\x74\x62\x47\xdd\x12\x42\x21\x05\x79\xb6\xc0\ +\x93\x82\x40\x09\xb1\x29\x4b\x5f\x7c\xf7\x3a\x05\x60\xf4\x56\xa5\ +\xce\xeb\xd6\xe3\x1f\xe9\x16\xc9\x02\x48\x09\xd9\x6d\xc1\xb3\xbb\ +\xc1\x60\x7b\x39\xda\x6b\x72\x07\xb3\x05\x50\xea\xce\x62\xe4\xd7\ +\x97\x61\x50\xbe\x08\xff\xdc\x91\x4a\x4a\xd4\x22\x71\x8a\xc2\x03\ +\x48\xc2\x71\xe0\x1e\xd2\xbf\x13\xff\xd4\x9c\xdb\xcf\xd6\x52\x66\ +\x14\xac\x4c\x42\x2d\x08\x2a\x86\x33\x4f\x0a\x0a\x60\x63\x73\x65\ +\x39\x21\xef\x35\xc8\xfb\x0c\xea\x06\x24\x48\xc1\x31\xbf\x96\xee\ +\x67\x97\xbe\xb3\x38\xb9\x39\x2f\x95\x5b\x42\x98\x96\x82\x40\x5f\ +\x61\xeb\x63\xed\x74\xaf\x29\xa6\x7c\x94\x58\x6f\x81\x33\x1b\x31\ +\xa5\x5b\xe7\x48\x16\x40\xad\x90\x3d\x18\x64\xd0\x23\x30\xb0\x8e\ +\x55\xf0\x9a\xdc\xc7\x93\xb8\x9f\x3d\xae\x2f\x0c\xc0\xa3\x15\x09\ +\x79\x85\x81\x5a\x26\x2f\x4a\x5c\xda\xd5\x56\xf7\x9a\x6f\x1f\xae\ +\xa1\xcd\x28\x54\x19\x84\x46\x0e\x54\x0c\x67\xd6\x8a\x09\xe0\xe6\ +\xf6\x66\xda\x72\x42\xde\x67\xe0\xf4\x18\x9c\x52\x70\x2c\x36\x3e\ +\x0f\x71\x40\x7a\x7d\x57\xa6\xa0\x9e\xa6\x0c\x94\x14\xb6\xf6\xcd\ +\xd0\xbd\x9e\x58\xa7\x00\x78\x0d\x47\x81\x57\xad\x67\x74\xed\x10\ +\xc9\x02\xb8\x2b\x64\x8f\x06\x1b\xd4\x1d\x06\xd1\x89\xf2\x5e\x93\ +\x3b\x2c\x49\xf7\xf3\xbf\x3c\x92\x4e\xc8\x6b\x15\xb4\xa8\x65\xe2\ +\x14\x05\x70\xeb\x78\x0d\x72\x7f\x4c\x94\xbe\x00\x5e\xbe\x8f\x9d\ +\x51\xc8\xb2\x08\x9e\x1c\x3e\xdc\xd0\x4e\x4c\x00\xbb\x9b\xf2\x4b\ +\x09\x03\x29\x38\x56\xde\x2f\xf4\x3b\xde\x9b\xdd\x81\xdd\x57\x60\ +\x35\x1b\xe5\x2b\x1e\xbd\x28\x1f\xb6\xf6\x37\x10\x40\x85\x28\xfd\ +\xbe\x02\xaf\xe1\x48\xc5\x30\xab\x6b\x66\xa4\x06\xff\xcd\x90\x3e\ +\x1c\x74\x50\x0f\x18\x30\x6f\x97\x33\xe6\x54\x14\xf3\x7f\xcf\x1d\ +\xae\x2f\x80\xc4\xf8\x4a\x24\xff\x6d\x08\xa2\x93\xe5\x94\xa8\x65\ +\xf2\x86\x9b\x2d\x73\x53\x0c\xaf\xf9\xf6\xf1\xea\xec\x6c\xc2\x93\ +\x45\xe8\xc8\xe1\xec\xe6\x0c\x31\x01\xec\x7b\x48\x2a\x25\x58\x3d\ +\x06\x79\x03\x52\xde\x7c\x04\x29\x38\xd6\x54\x17\xfa\x1d\x15\xcb\ +\x96\x22\xbf\xad\x7a\x88\xdf\x6c\x34\x9a\x81\x60\x49\x41\xb1\xfc\ +\x59\x6a\x34\x6e\xcb\x16\x10\xc0\x10\x13\xcd\x46\x55\xc3\x71\x7e\ +\xd7\x36\x91\x2a\x80\x2b\x21\x7d\x3c\xf8\xa0\x47\x61\x80\x9c\x2e\ +\xeb\x35\xd3\x47\x26\x19\xfe\x8e\x03\xab\x21\x0b\x78\x2f\x4a\xe2\ +\x94\x0a\x2a\x93\x6b\x27\x93\x49\x91\x22\xc6\xd7\x7c\xfb\x44\x2c\ +\x3b\x9b\xa0\x59\x84\xa2\xec\x50\xc9\xe1\xec\x76\x41\x01\x1c\x78\ +\x50\x5b\x4a\xb0\xfa\x0b\x0c\x29\x38\x36\xc6\x0a\xdf\xff\x71\xdd\ +\x9a\x68\x7b\x0a\x46\x8d\x46\xce\xec\x03\x77\xad\x02\x08\x61\xdb\ +\x60\x03\x01\x54\x8c\xe2\xf7\x14\x78\x52\x90\x35\x1b\x97\x76\x4b\ +\x8b\x54\x01\xf8\x74\x3c\x78\xa6\xd5\x5f\x60\x70\x2f\x10\xc0\x87\ +\x65\xf4\x39\xc7\x67\xfa\x28\x63\x01\x94\x28\x51\x84\xfc\x72\x06\ +\x06\xd4\x07\x65\xb5\x50\x91\x38\x4e\x97\x23\xbd\x3a\xd5\x11\xba\ +\xe6\xfc\x53\x31\xec\x6c\x82\x66\x11\x7a\x72\x38\x6b\xd0\x5f\x28\ +\x10\xc0\x91\x64\x7e\x8f\x81\x27\x05\x9a\x29\x38\xb6\xc5\x98\x7a\ 
+\x06\x97\x56\xb4\xe7\xf6\x13\x58\x4d\x46\x6f\xb2\x84\xed\x8f\xb7\ +\x35\x16\x80\xde\x72\x67\x03\x29\x5c\x18\xd5\x95\x6c\xca\xca\x70\ +\xb1\xb1\x17\xa5\x67\x06\xd9\xd0\xb3\x9d\x8b\xf5\x3d\x33\x5d\xac\ +\xeb\xd9\x9e\xac\xe9\xd9\xc1\xc5\xea\x9e\x1d\x5d\x3c\xd9\xb3\x93\ +\x8b\x95\x3d\x3b\x93\x15\x3d\xbb\xb8\x58\xd6\xab\xab\x8b\xa5\xbd\ +\xba\xb9\x58\xdc\xeb\x61\x17\x8b\x7a\x3d\xe2\x62\x41\x56\xf7\x02\ +\xe6\x67\xf5\x20\x31\x15\x2b\x58\x15\x43\x7b\x7d\x11\x40\xa2\xe5\ +\x02\xc8\x02\x01\x9c\xbf\x5b\xc9\x47\xe2\x4c\x1f\x9d\x24\xf4\x7b\ +\x86\xf5\xad\xaf\xfc\x1d\x2a\x91\x5c\x38\xd2\x5a\xf8\x9a\xf3\x4f\ +\x57\x63\x67\x13\x9e\xd2\x84\x27\x06\xe0\xdc\x1e\x31\x01\xfc\xfc\ +\x72\x13\x7e\x7f\x41\x2e\x05\x79\xd3\x91\x36\x1c\x1d\xbb\xef\x35\ +\xf5\x0c\xaa\x45\x97\x21\x7f\x6e\x6a\xa8\xed\x27\xa8\x67\x1e\x34\ +\xd3\x91\x35\x95\x59\x02\xab\x97\x40\xa5\xb0\x7d\x98\x80\x00\x78\ +\xfd\x04\x91\x75\x0a\x43\xbd\xd8\xa5\x89\xb5\x6e\xc1\xcb\x5d\xa2\ +\xdb\x37\xa8\x67\x55\x0c\x2d\xf7\x45\x00\xa5\x80\x7c\xcb\x05\x70\ +\xe9\x2e\x37\x9f\x98\x27\x6f\x6c\x92\xf0\xef\xba\xf2\x7a\x9a\xf4\ +\xbb\x3c\x80\x44\x6e\x5d\x8c\x23\x31\x55\xcb\x88\x0b\xe0\x6c\x55\ +\x77\x66\xc2\xc8\x24\x98\xa5\x86\x4c\x0c\xe7\x9e\x11\xab\x55\x7f\ +\x3e\xda\x84\xdf\x5f\x90\x4b\x41\xde\x70\xa4\x59\x82\xe3\x99\xaa\ +\xa6\x9f\xc3\xec\x81\xcd\x34\xbd\x04\xdd\x59\x07\x93\x65\xc3\x8e\ +\x11\x06\x02\x88\x8e\xd2\x99\x75\x48\x60\x2f\x5c\xe2\xae\x68\xd4\ +\x91\x82\xd9\xbd\x1d\x45\xc4\x00\x74\x6c\x98\x60\x55\x0c\x0d\xf5\ +\x5a\x00\x54\x02\x9f\x58\x2a\x80\xde\xf0\x70\x3f\x2b\xed\xe6\xaf\ +\xe6\xc9\x1b\x27\x2e\x80\xf8\x9a\x15\x48\xfe\xe5\x2a\xd2\xef\xa3\ +\x3c\xb5\xb0\xa5\xa9\x6b\xce\xbf\x58\x95\x9d\x49\xc8\xcb\x16\x8e\ +\x18\xce\x3d\x27\x28\x80\xd7\x9b\x48\x25\x04\xab\xaf\xa0\x6e\x38\ +\xca\xb2\x04\xe7\x3a\x06\x6f\x9e\xc5\x97\x9b\x32\x94\xb3\x0e\x7e\ +\x14\xc2\x8e\xd1\x6d\x8c\x05\xe0\xc5\x7b\x0f\x7e\x95\x02\x4f\x0c\ +\x7a\x87\xd0\x50\x39\x74\xb5\x4e\x00\x8d\x7c\x15\xc0\x4e\x4b\x05\ +\xd0\x07\x1e\xe6\x95\x52\x6e\xbe\x30\x4f\xde\xf8\x24\x53\xbf\x6f\ +\xcf\xfa\x54\xf7\xbf\x4b\x7f\xe7\x4f\x97\x92\x4c\x5f\x73\xfe\xa7\ +\x55\xdc\x19\x88\x2a\x93\x70\xc1\x2a\x33\x64\x52\x38\x77\x50\x50\ +\x00\x27\x1b\xb1\x7b\x0b\xc7\x18\x52\x90\x67\x09\x20\x03\xc7\x91\ +\xca\x5e\x3d\x8b\xba\xd5\x2b\x92\x5b\xbb\xe3\x15\x0b\x97\xfc\x25\ +\x84\x1d\x63\x0c\x04\x50\x29\x8a\xf9\xfa\xb4\x6e\x73\x91\x35\xeb\ +\x30\xde\x8b\xad\xda\xbc\xdd\x09\x5a\x26\x87\x87\x1b\x3d\x60\x45\ +\xfc\x38\x4f\xf7\x2e\xe1\xab\x00\xc6\x5a\x2a\x80\xbe\xf0\xe0\xfe\ +\x56\x52\xe2\xaa\x39\xf2\x26\x98\x0b\xe0\x62\xc5\x0a\x93\x73\xaf\ +\xa5\x93\xdf\xbf\x8c\x27\xff\xb9\xf8\x10\x49\x6f\x19\x6b\x5e\x00\ +\x97\xef\x91\xb2\x10\x4f\x26\x21\x2f\x4d\x74\xc4\x70\xfe\xb0\x58\ +\xaf\xe1\x97\x77\x1a\x6a\xfb\x0a\x82\x42\x70\xbc\x5c\xc9\xeb\xe7\ +\xb1\x7a\x42\x2b\x66\x63\x91\x3b\xfd\xa8\xb7\x26\x41\xd6\x43\xd8\ +\x39\x5e\x40\x00\xbc\xc6\x22\x4f\x08\xd3\xc4\x76\x61\x12\xde\xbf\ +\x71\xb8\x09\x29\xa8\xc4\xd0\xbd\x71\x5d\x2b\xe2\xe7\xac\x51\xf0\ +\x8b\x08\xa0\xb9\xe5\x02\xf8\xfa\x4e\x2d\xdf\x88\x91\x37\xa9\x49\ +\xd0\xaf\x39\xff\xcb\xca\x52\x16\x72\x45\x86\xbc\x3c\x51\x8b\x81\ +\x0a\xe1\xfc\x4b\x82\x02\x78\xbf\x81\xb2\x7c\x60\x35\x1a\x39\x42\ +\x70\x1c\xaf\xe8\xd3\xf7\xfb\x76\x6f\x9a\x76\xa6\x41\x6f\xea\x51\ +\x6f\x91\x12\x5d\xce\xbc\x73\xa2\xfe\xf7\x8e\xab\x5c\xd6\xf0\x1d\ +\x07\x61\x21\x18\xed\xa3\xa0\x57\x3a\x78\xb9\x2d\x7c\xf7\x26\x96\ +\x08\x60\xa3\x3f\x04\x60\x69\x23\x70\x70\xbf\x04\x77\x30\xff\xf3\ +\x0e\x3e\xdf\xf2\xc9\x9b\x6c\x81\x00\xae\x56\x92\xb2\x10\x79\xf6\ +\x22\x2f\x4f\x38\x52\xb8\xf0\x4a\xba\x98\x00\x3e\x4c\xd4\xef\x27\ 
+\xe8\x08\xc1\xf1\x86\x6f\x53\x52\xae\xb7\x29\x9f\xbf\x8f\x3d\xcb\ +\x60\x26\x3b\x90\x95\x0b\x3b\x27\x0b\x08\x40\x6f\x73\x15\xb3\x42\ +\x60\x95\x0d\x66\xb3\x04\x13\x52\xe8\x9e\x54\xd7\x96\x0d\x40\x43\ +\x01\x50\x09\x7c\x6a\x99\x00\xfa\x27\x68\x03\xfb\xbb\x12\xfa\xfc\ +\x47\x22\x2f\xc7\x02\x01\x7c\x53\x49\xca\x42\xe4\x59\x8b\xbc\x3c\ +\xe1\x48\xe1\xc2\x31\x41\x01\x9c\x4f\xd4\xed\x25\x14\x08\x81\x31\ +\xe3\xe0\x38\x59\xde\xe7\xef\xb8\xcb\xb9\x23\x11\x63\x86\x41\x3d\ +\xe5\xc8\xcc\x0e\x18\x4b\x98\x77\x4e\x11\x10\x80\xe0\xcb\x4e\x7e\ +\x17\x82\x48\x96\x60\x20\x85\x1e\xc9\x96\x08\xa0\xb1\xbf\x04\x60\ +\x59\x23\x70\xc8\x80\x04\x66\x60\x2b\xf8\xa1\x38\x97\x19\x53\x2c\ +\x10\xc0\xb7\xd1\x6e\x51\xc9\xb3\x14\x79\x69\xa2\x23\x85\x0b\xaf\ +\x89\xad\x58\xfb\xe5\x62\x22\xbf\xc1\x28\x17\x02\x23\x3b\x70\x9c\ +\xf2\x7d\x93\x0a\xe7\x8a\xc8\x1f\x8e\xb4\xd0\x9d\x6e\x64\x2e\x4a\ +\xe2\x94\x0a\xbb\xa6\x19\x08\xe0\x9e\xb2\x86\x2f\x39\x71\xa7\x1c\ +\x8d\xf6\x4a\x98\x69\xbc\xe5\xbb\xd1\x79\x10\x46\x52\xb0\x40\x00\ +\x42\x0d\x40\x51\x01\x8c\xb5\x54\x00\xac\xe0\xfe\xd1\x80\x6b\xc5\ +\x5c\xcc\x98\x66\x81\x00\xfe\x53\x51\x99\x91\xc8\xb3\x17\x03\x29\ +\x5c\x78\x5d\x50\x00\x9f\xd4\xe7\xf6\x11\x14\x42\x60\x64\x07\x8e\ +\xd3\x51\x7e\xf9\x9e\xad\x1f\x8a\x23\x8e\x97\xaa\x6a\xa7\x1a\xbd\ +\x90\xc1\xae\x5c\x01\x01\x98\x78\xe3\xd1\xab\x72\xc1\x8c\x10\x74\ +\x67\x1b\xb4\x42\xe8\xf1\x60\x5d\x5b\x36\x00\x45\x05\x60\x59\x23\ +\x70\xc8\xc0\x04\x6e\x70\x6b\xb8\xa1\xc5\x12\x01\xfc\x50\x41\x12\ +\x95\x3c\x53\x11\x90\xc2\x85\x37\x05\x05\x70\xb9\x9e\xb2\x8f\x60\ +\x42\x06\x8e\x0f\xcb\xfa\xed\xbb\x1e\x59\xd9\x46\x33\xcd\x68\xb4\ +\x10\x89\x25\x03\xde\xee\x4a\x0a\x01\xa8\xa7\x19\xd7\x31\xb6\x59\ +\x13\xdd\x17\x41\x2f\x3b\x30\xb3\xa5\xfb\x34\xb1\x53\xa3\x7a\x3c\ +\x54\xd7\x96\x0d\x40\x51\x01\x58\xd6\x08\x1c\x92\x9d\x60\x18\xe4\ +\x2e\x7e\x2e\xca\x64\x46\xae\x05\x02\xb8\x56\x5e\x2b\x2d\x79\xf6\ +\xc2\x92\x02\x95\xc1\x85\xb7\xc4\x04\xf0\xeb\x95\x04\x65\x0f\x41\ +\xdd\x54\xd4\x91\x81\xe3\x7c\x19\xbf\x7d\xd7\x12\xc5\x8b\x90\x1b\ +\xc7\x92\xf9\x8b\x90\xf4\x64\x20\x6b\x20\xee\x9e\x21\x20\x00\x9d\ +\x26\xa2\x29\x19\xb0\x4a\x85\x05\x9c\xc3\x60\x66\xeb\x9d\x14\x25\ +\x2e\x84\x47\x9b\xd6\xb5\x65\x03\x50\x48\x00\x54\x02\x1f\x5b\x26\ +\x00\xa3\x40\xff\x95\x8f\x25\x02\xb8\x5e\xde\x7d\xad\xea\x0c\x45\ +\x40\x08\x1f\xbd\x9d\x2a\x26\x80\x2f\x13\xf8\x0d\x45\x83\xcc\xc0\ +\x71\xe9\x6e\xbf\x7e\xdf\x47\xda\xd6\x26\x8e\xe3\x95\x75\x17\x1f\ +\xa9\x57\x23\xaa\xdf\x59\xd8\xfd\x17\xfd\xe6\x67\x5c\x95\xb2\xfa\ +\xdb\xa9\xf1\xd6\x1b\xf8\x22\x03\x91\xec\x40\x50\x08\x16\x08\xa0\ +\xa1\xbf\x05\xb0\xd4\x0a\x01\xb4\x69\x1d\x4b\xae\x5e\x6e\x21\x71\ +\x45\xcb\x57\x9f\x37\xe7\xd2\xeb\xd1\xda\x41\xbf\xe6\xcb\x17\x5b\ +\x49\xd7\xfb\x99\x8a\x4f\x65\x7c\x2c\xe3\x92\x9b\x4d\xab\xc5\x96\ +\x1d\xbf\xb2\x3f\x9d\x7c\x74\x22\x8d\x7c\xf4\x26\xe5\x0d\xca\xeb\ +\x94\xe3\xf0\xff\x1f\xa3\x1c\xa5\xbc\xea\xe6\xfc\xcb\xad\xfd\xfe\ +\x9d\x37\xce\x4a\x21\xe7\xf6\xb6\x75\xbd\xcc\xe4\x62\x37\xb0\x8b\ +\xb2\x13\xd8\x41\xd9\x9e\x41\xce\x6d\xa3\x6c\x05\xb6\xb8\x19\xde\ +\xbd\x81\xfe\xb6\xe0\xe5\x4a\x91\xcf\xe1\x9f\xfb\x7c\x33\x65\x13\ +\xb0\xd1\xcd\x95\x0d\xed\xc8\x95\xa7\x28\xeb\x81\xb5\x94\x35\xc0\ +\x6a\xca\x93\x99\xe4\xca\x2a\xca\x4a\xca\x8a\x4c\x72\x79\x79\x7b\ +\x37\xcb\x28\x4b\x81\xc5\x94\x45\x94\x85\x94\x05\x1d\xdc\xcc\x77\ +\xf3\xd7\x79\xc0\x13\x94\xb9\x94\x39\x94\xd9\x1d\x25\x66\x75\x24\ +\xcd\xe3\x63\x82\x39\x06\xbf\x03\x8a\xf8\x5b\x00\x2d\x23\x78\x4b\ +\x25\x04\x09\x25\x36\x8b\x06\xbf\x19\x01\x14\x05\xfe\x8b\x37\x17\ +\x41\x6c\x4f\x67\xbf\x0b\xc0\x0e\x2f\x06\x21\x08\x62\xc8\x6f\x40\ 
+\xc9\x40\x09\xa0\x3b\xde\x60\x04\xb1\x35\x47\xcc\x04\xbf\x59\x01\ +\xdc\x45\x57\x18\xe1\x8d\x46\x10\x7b\x32\x24\x60\x02\xa0\x12\x78\ +\x15\x6f\x32\x82\xd8\x12\x07\x50\x39\xd0\x02\x18\x81\x37\x1a\x41\ +\x6c\xc9\xfb\x66\x83\xdf\x1b\x01\x54\xc3\x1b\x8d\x20\xb6\x64\x7a\ +\xc0\x05\x40\x25\x70\x1e\x6f\x36\x82\xd8\x8e\x7a\xc1\x12\xc0\x6c\ +\xbc\xd9\x08\x62\x2b\xae\x02\x85\x83\x25\x80\x5a\x78\xc3\x11\xc4\ +\x56\xcc\xf3\x26\xf8\xbd\x12\x00\x95\xc0\x71\xbc\xe9\x08\x62\x0b\ +\x6e\x3b\x7b\x73\xc1\x16\x40\x37\xbc\xf1\x08\x62\x0b\x0e\x79\x1b\ +\xfc\xbe\x08\xa0\x18\xf0\x0d\xde\x7c\x04\xb1\x9c\xb6\x41\x17\x00\ +\x95\xc0\x0c\xbc\xf9\x08\x62\x29\x9f\x9b\x79\xf5\xd7\xdf\x02\xb8\ +\x07\xf8\x1f\x3e\x04\x04\xb1\x8c\x09\xbe\x04\xbf\x4f\x02\xa0\x12\ +\x78\x06\x1f\x02\x82\x58\xf6\xe6\x5f\x39\xab\x05\xd0\x0a\x1f\x04\ +\x82\x58\xc2\x56\x5f\x83\xdf\x1f\x02\x28\x5c\xc8\xe2\x13\x84\x11\ +\x24\x42\x69\x6c\xb9\x00\xa8\x04\x46\xe2\xc3\x40\x90\xa0\xf2\x81\ +\x3f\x82\xdf\x5f\x02\xb8\x1b\xf8\x19\x1f\x0a\x82\x04\x8d\x81\xb6\ +\x11\x00\x95\xc0\x02\x7c\x28\x08\x12\x14\xbe\x2a\x24\x78\xec\x57\ +\x30\x05\x10\x05\xfc\x84\x0f\x07\x41\x02\x4e\x3f\x7f\x05\xbf\xdf\ +\x04\x40\x25\x90\x83\x0f\x07\x41\x02\xca\x25\x5f\x17\xfe\x04\x52\ +\x00\x25\x81\x7f\xe2\x43\x42\x90\x80\xd1\xd1\x9f\xc1\xef\x57\x01\ +\x50\x09\x0c\xc1\x87\x84\x20\x01\xe1\x1d\x6f\xdf\xf9\x0f\xa6\x00\ +\x9c\x2f\x09\x5d\xc6\x87\x85\x20\x7e\xa7\x99\xbf\x83\xdf\xef\x02\ +\xc0\xf3\x03\x10\x24\x20\x1c\x09\x44\xf0\x07\x4a\x00\xce\xd5\x81\ +\x67\xf0\xa1\x21\x88\x5f\xc8\x07\x12\x42\x46\x00\x54\x02\xe9\xf8\ +\xe0\x10\xc4\x2f\xec\x08\x54\xf0\x07\x4c\x00\x54\x02\xc7\xf0\xe1\ +\x21\x88\x4f\x38\x4f\xe2\x8a\x0b\x55\x01\x34\x28\xe4\xde\xaf\x0c\ +\x1f\x24\x82\x78\xc7\x92\x40\x06\x7f\x40\x05\x40\x25\x30\x1f\x1f\ +\x22\x82\x78\xbd\xdb\x4f\xc9\x50\x17\xc0\x1d\xf8\xba\x30\x82\x98\ +\xc6\x79\xce\x5f\xf3\x40\x07\x7f\xc0\x05\x40\x25\xd0\x04\x4b\x01\ +\x04\x31\xc5\xf2\x60\x04\x7f\x50\x04\x40\x25\x30\x0f\x1f\x2a\x82\ +\xd8\x27\xf5\x0f\xb6\x00\xb0\x14\x40\x10\x1b\xa5\xfe\x41\x15\x00\ +\x96\x02\x08\x62\xaf\xd4\x3f\xe8\x02\xc0\x52\x00\x41\x74\xb9\x12\ +\xcc\xd4\xdf\x2a\x01\x94\x00\x3e\xc6\x87\x8d\x20\x9a\xd4\xbf\x59\ +\xb0\x83\x3f\xe8\x02\xc0\x52\x00\x41\xec\x91\xfa\x5b\x26\x00\x3c\ +\x56\x0c\x41\x34\xbb\xfc\x94\x8c\x34\x01\x14\x01\x0e\xe0\xc3\x47\ +\x22\x9c\x1f\x80\xea\x56\x05\xbf\x65\x02\xa0\x12\x28\x0d\x5c\xc4\ +\x41\x80\x44\x28\xb7\x80\x14\x2b\x83\xdf\x52\x01\x50\x09\xc4\x01\ +\xdf\xe3\x60\x40\x22\x90\x61\x56\x07\xbf\xe5\x02\x90\x9d\x2f\x88\ +\xa7\x0c\x23\x91\xc4\x7a\x3b\x04\xbf\x2d\x04\x40\x25\x30\x14\x07\ +\x05\x12\x21\x9c\x04\x8a\xa3\x00\xb4\x12\x58\x8b\x83\x03\x09\x73\ +\xae\x02\x15\xed\x12\x73\x76\x13\x40\x71\xe0\x4d\x1c\x24\x48\x98\ +\xe2\x3c\x3f\xb3\x9e\x9d\x82\xdf\x56\x02\xa0\x12\xa8\x00\xfc\x1d\ +\x07\x0b\x12\x86\x74\xb3\x5b\xf0\xdb\x4e\x00\x54\x02\x09\xc0\x75\ +\x1c\x30\x48\x18\x91\x67\xc7\xe0\xb7\xa5\x00\xa8\x04\x9a\x01\xbf\ +\xe0\xc0\x41\xc2\x80\xa5\x85\x02\x70\xa2\x4f\x58\x0b\x80\x4a\x20\ +\x15\xf8\x0d\x07\x10\x12\xc2\xac\xb1\x73\xf0\xdb\x5a\x00\x54\x02\ +\x6d\x81\x3f\x70\x20\x21\x21\xc8\xe6\x42\x7e\x3e\xc9\x37\xe2\x04\ +\x40\x25\xd0\x09\x17\x0a\x21\x21\xc6\xee\x50\x08\xfe\x90\x10\x00\ +\x95\x40\xe7\x42\xee\x43\x12\x70\x70\x21\x76\x67\x27\x50\x34\x14\ +\xe2\x2a\x64\x04\x40\x25\xd0\x0e\x7b\x02\x88\xcd\xd9\x14\x2a\x7f\ +\xf9\x43\x4e\x00\x54\x02\x69\x38\x3b\x80\xd8\x94\xd5\x76\x6f\xf8\ +\x85\xbc\x00\xa8\x04\x9a\x03\x37\x70\xc0\x21\x36\x62\x49\x28\x06\ +\x7f\x48\x0a\x80\x4a\xa0\x21\xf0\x0d\x0e\x3c\xc4\x62\x9c\x47\x77\ +\xe7\x84\x6a\xf0\x87\xac\x00\xa8\x04\x2a\x03\xef\xe2\x20\x44\x2c\ 
+\xc2\x99\x85\x76\x08\xd5\xf8\x09\x79\x01\xc8\x0e\x1c\xd9\x8a\x83\ +\x11\x09\x32\xce\xd3\x7b\xe2\x43\x3d\xf8\x43\x5e\x00\x54\x02\x85\ +\x81\xb1\xb8\xd3\x30\x12\x24\x8e\x01\xe5\xc2\x21\xf8\xc3\x42\x00\ +\xaa\x55\x83\x3f\xe1\x00\x45\x02\xc8\x0a\xa0\x58\xb8\xc4\x4c\x58\ +\x09\x80\x4a\xa0\x26\xf0\x19\x0e\x54\xc4\xcf\x38\x17\xa1\x0d\x0c\ +\xa7\x58\x09\x4b\x01\x50\x09\x94\x01\x5e\xc2\x41\x8b\xf8\x89\xef\ +\x80\xa6\xe1\x18\xfc\x61\x29\x00\x2a\x81\xa2\xc0\x42\x1c\xbc\x88\ +\x8f\x7c\x08\x54\x0b\xd7\xe0\x0f\x5b\x01\xa8\x5e\x29\xc6\x1d\x86\ +\x10\xb3\x38\x5f\x3e\x9b\x69\xa7\xcd\x3b\x51\x00\xbe\x1d\x40\xb2\ +\x06\x07\x35\x22\xc8\x79\x20\x31\xdc\xe3\x22\x62\x04\x80\xd9\x00\ +\x82\x7f\xf5\x51\x00\x98\x0d\x20\xf8\x57\x3f\xd2\x05\x80\xd9\x00\ +\x82\x7f\xf5\x51\x00\x98\x0d\x20\x11\xfd\x57\x1f\x05\xa0\x14\x41\ +\x0a\x70\x0e\x03\x22\x62\xb8\x06\x4c\x89\xe4\xbf\xfa\x28\x00\xad\ +\x04\x8a\x00\xbd\x80\x2f\x31\x40\xc2\x96\xdf\x81\xc5\xe1\xb4\x8e\ +\x1f\x05\x10\x98\xe3\xc9\x86\x03\xff\xc2\x80\x09\x1b\x6e\xd3\xad\ +\xba\xee\xc5\x31\x8e\x02\x10\x15\x41\x29\x60\x3a\x9e\x50\x14\xf2\ +\x1c\x00\xea\xe0\x98\x46\x01\x78\x2b\x82\xf2\xf4\x64\x97\xdf\x31\ +\x98\x42\x0a\xe7\x21\xb3\xc9\x38\x86\x51\x00\xfe\x12\x41\x35\x60\ +\x0b\xdd\x02\x0a\x03\xcc\xde\x9d\xfd\x0c\x1c\xb3\x28\x80\x40\x89\ +\x20\x0e\x58\x00\x7c\x8f\xc1\x66\xab\x1a\xff\x79\xa0\x75\x28\xef\ +\xcd\x87\x02\x08\xbd\xad\xc8\x7a\x03\xa7\x30\x00\x2d\xc3\xd9\xa8\ +\x9d\x83\xcd\x3d\x14\x80\xd5\x32\x48\x04\x36\xe0\x79\x05\x41\xe3\ +\x04\xf0\x28\xce\xe3\xa3\x00\xec\xb8\x11\xc9\x68\xdc\x91\x28\x60\ +\x3b\xf0\x3a\x57\x6d\xd6\xc5\xb1\x86\x02\x08\x85\x4d\x4a\x53\xe9\ +\x19\x71\x3f\x60\xf0\x7a\xcd\x2d\xe0\x0d\x60\x18\x70\x17\x8e\x2d\ +\x14\x40\xa8\xee\x4c\xd4\x82\x9e\x1c\x73\x19\x83\xda\x10\xe7\xba\ +\x8b\x7d\xc0\x63\x40\x14\x8e\x21\x14\x40\xb8\x09\xa1\x36\x30\x19\ +\x78\x1b\xa7\x14\x0b\xb8\x0a\x3c\x49\xbb\xf8\x58\xd7\xa3\x00\x22\ +\x46\x06\x15\x80\x7e\xc0\x73\xc0\xcd\x08\x0a\x78\x07\xf0\x3e\x5d\ +\x69\x99\x80\x53\x77\x28\x00\x7c\x08\x85\x0a\x15\x03\xea\x01\xd9\ +\xc0\x5a\xe0\x74\x18\x1d\x85\xfe\x39\x4d\xeb\x27\xd3\xde\x48\x19\ +\x7c\xe6\x28\x00\x24\x3c\xa5\xe0\x09\xf6\x49\x18\xec\x28\x00\x24\ +\x70\x52\x70\x36\xc9\x26\x02\xcb\x80\x3d\x74\x6e\xfc\x72\x80\x5f\ +\x5e\x72\x1e\x8e\xf1\x35\x4d\xdf\x0f\x01\xeb\x80\x19\xc0\x20\x0c\ +\x76\x14\x00\x62\xaf\x37\x19\x6b\x00\xad\xe8\x1e\x07\xe3\x81\xb9\ +\x74\x09\xb3\xf3\xc5\xa6\x95\x74\x4e\x7d\x03\x7d\x4d\x76\x1d\x6d\ +\xc2\x39\x65\xb2\x08\x98\x47\xeb\xf3\x81\x40\x3b\xa0\x3e\xed\x57\ +\x60\xad\x8e\x02\x40\x10\x04\x05\x80\x20\x08\x0a\x00\x41\x10\x14\ +\x00\x82\x20\x28\x00\x04\x41\x50\x00\x08\x82\x84\x0c\xff\x0f\x4c\ +\x6f\x04\xae\x34\x0e\xca\xb6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\ +\x42\x60\x82\ +" + +qt_resource_name = b"\ +\x00\x09\ +\x0b\x85\x8e\x87\ +\x00\x63\ +\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x09\ +\x0c\x98\xb7\xc7\ +\x00\x70\ +\x00\x61\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x0b\ +\x06\x79\xcf\xa7\ +\x00\x72\ +\x00\x65\x00\x73\x00\x74\x00\x6f\x00\x72\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x13\ +\x06\x59\x10\xe7\ +\x00\x6d\ +\x00\x6e\x00\x65\x00\x2d\x00\x62\x00\x69\x00\x67\x00\x73\x00\x75\x00\x72\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\ +\x00\x6e\x00\x67\ +\x00\x09\ +\x09\xc7\xab\x47\ +\x00\x72\ +\x00\x65\x00\x73\x00\x65\x00\x74\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x0e\ +\x0a\x70\xcf\x87\ +\x00\x73\ +\x00\x63\x00\x72\x00\x65\x00\x65\x00\x6e\x00\x73\x00\x68\x00\x6f\x00\x74\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x08\ +\x0c\x33\x57\x07\ +\x00\x68\ 
+\x00\x65\x00\x6c\x00\x70\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x09\ +\x08\x28\xa9\xe7\ +\x00\x73\ +\x00\x63\x00\x61\x00\x6c\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x11\ +\x06\x53\x6a\xc7\ +\x00\x76\ +\x00\x69\x00\x73\x00\x69\x00\x62\x00\x69\x00\x6c\x00\x69\x00\x74\x00\x79\x00\x5f\x00\x6f\x00\x6e\x00\x2e\x00\x73\x00\x76\x00\x67\ +\ +\x00\x08\ +\x02\x8c\x54\x27\ +\x00\x70\ +\x00\x6c\x00\x61\x00\x79\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x0e\ +\x01\x92\x04\x07\ +\x00\x6d\ +\x00\x6e\x00\x65\x00\x2d\x00\x73\x00\x70\x00\x6c\x00\x61\x00\x73\x00\x68\x00\x2e\x00\x70\x00\x6e\x00\x67\ +\x00\x12\ +\x02\xea\x5a\x07\ +\x00\x76\ +\x00\x69\x00\x73\x00\x69\x00\x62\x00\x69\x00\x6c\x00\x69\x00\x74\x00\x79\x00\x5f\x00\x6f\x00\x66\x00\x66\x00\x2e\x00\x73\x00\x76\ +\x00\x67\ +\x00\x09\ +\x0c\xf8\xb2\x07\ +\x00\x6d\ +\x00\x6f\x00\x76\x00\x69\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\ +\x00\x0c\ +\x08\xe8\x50\xa7\ +\x00\x6d\ +\x00\x6e\x00\x65\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\ +" + +qt_resource_struct_v1 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x01\ +\x00\x00\x01\x1e\x00\x00\x00\x00\x00\x01\x00\x00\xc7\xef\ +\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\xc6\xbd\ +\x00\x00\x01\x40\x00\x00\x00\x00\x00\x01\x00\x01\x39\x4f\ +\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x01\x00\x00\xc4\xce\ +\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x03\xb2\ +\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x02\x37\ +\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\xc2\x1f\ +\x00\x00\x01\x82\x00\x00\x00\x00\x00\x01\x00\x01\x47\x71\ +\x00\x00\x00\x78\x00\x00\x00\x00\x00\x01\x00\x00\xbd\x87\ +\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\xbe\xfd\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x00\xb2\x00\x00\x00\x00\x00\x01\x00\x00\xc0\x8b\ +\x00\x00\x00\x18\x00\x00\x00\x00\x00\x01\x00\x00\x00\xfe\ +\x00\x00\x01\x6a\x00\x00\x00\x00\x00\x01\x00\x01\x3c\xd6\ +" + +qt_resource_struct_v2 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x01\x1e\x00\x00\x00\x00\x00\x01\x00\x00\xc7\xef\ +\x00\x00\x01\x7f\x55\xd8\x1b\xb6\ +\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\xc6\xbd\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x01\x40\x00\x00\x00\x00\x00\x01\x00\x01\x39\x4f\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x01\x00\x00\xc4\xce\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x03\xb2\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x02\x37\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\xc2\x1f\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x01\x82\x00\x00\x00\x00\x00\x01\x00\x01\x47\x71\ +\x00\x00\x01\x7f\x55\xd9\xdc\x7b\ +\x00\x00\x00\x78\x00\x00\x00\x00\x00\x01\x00\x00\xbd\x87\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\xbe\xfd\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\xb2\x00\x00\x00\x00\x00\x01\x00\x00\xc0\x8b\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x00\x18\x00\x00\x00\x00\x00\x01\x00\x00\x00\xfe\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +\x00\x00\x01\x6a\x00\x00\x00\x00\x00\x01\x00\x01\x3c\xd6\ +\x00\x00\x01\x7e\xb1\x1a\x00\x31\ +" + +qt_version = [int(v) for v in QtCore.qVersion().split('.')] +if qt_version < [5, 8, 0]: + rcc_version = 1 + qt_resource_struct = qt_resource_struct_v1 +else: + rcc_version 
= 2 + qt_resource_struct = qt_resource_struct_v2 + +def qInitResources(): + QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +def qCleanupResources(): + QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +qInitResources() diff --git a/python/libs/mne/icons/restore-black-18dp.svg b/python/libs/mne/icons/restore-black-18dp.svg new file mode 100644 index 0000000..c85636e --- /dev/null +++ b/python/libs/mne/icons/restore-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/scale-black-18dp.svg b/python/libs/mne/icons/scale-black-18dp.svg new file mode 100644 index 0000000..fb26ce4 --- /dev/null +++ b/python/libs/mne/icons/scale-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/screenshot-black-18dp.svg b/python/libs/mne/icons/screenshot-black-18dp.svg new file mode 100644 index 0000000..3a352a1 --- /dev/null +++ b/python/libs/mne/icons/screenshot-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/toolbar_move_horizontal@2x.png b/python/libs/mne/icons/toolbar_move_horizontal@2x.png new file mode 100644 index 0000000..143b62e Binary files /dev/null and b/python/libs/mne/icons/toolbar_move_horizontal@2x.png differ diff --git a/python/libs/mne/icons/toolbar_move_vertical@2x.png b/python/libs/mne/icons/toolbar_move_vertical@2x.png new file mode 100644 index 0000000..453d7b7 Binary files /dev/null and b/python/libs/mne/icons/toolbar_move_vertical@2x.png differ diff --git a/python/libs/mne/icons/toolbar_separator_horizontal.png b/python/libs/mne/icons/toolbar_separator_horizontal.png new file mode 100644 index 0000000..ecf2ab7 Binary files /dev/null and b/python/libs/mne/icons/toolbar_separator_horizontal.png differ diff --git a/python/libs/mne/icons/toolbar_separator_horizontal@2x.png b/python/libs/mne/icons/toolbar_separator_horizontal@2x.png new file mode 100644 index 0000000..ac2b343 Binary files /dev/null and b/python/libs/mne/icons/toolbar_separator_horizontal@2x.png differ diff --git a/python/libs/mne/icons/toolbar_separator_vertical@2x.png b/python/libs/mne/icons/toolbar_separator_vertical@2x.png new file mode 100644 index 0000000..2f66e93 Binary files /dev/null and b/python/libs/mne/icons/toolbar_separator_vertical@2x.png differ diff --git a/python/libs/mne/icons/visibility_off-black-18dp.svg b/python/libs/mne/icons/visibility_off-black-18dp.svg new file mode 100644 index 0000000..4c96fc1 --- /dev/null +++ b/python/libs/mne/icons/visibility_off-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/icons/visibility_on-black-18dp.svg b/python/libs/mne/icons/visibility_on-black-18dp.svg new file mode 100644 index 0000000..c9de690 --- /dev/null +++ b/python/libs/mne/icons/visibility_on-black-18dp.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/python/libs/mne/inverse_sparse/__init__.py b/python/libs/mne/inverse_sparse/__init__.py new file mode 100644 index 0000000..867becd --- /dev/null +++ b/python/libs/mne/inverse_sparse/__init__.py @@ -0,0 +1,9 @@ +"""Non-Linear sparse inverse solvers.""" + +# Author: Alexandre Gramfort +# +# License: Simplified BSD + +from .mxne_inverse import (mixed_norm, tf_mixed_norm, + make_stc_from_dipoles) +from ._gamma_map import gamma_map diff --git a/python/libs/mne/inverse_sparse/_gamma_map.py b/python/libs/mne/inverse_sparse/_gamma_map.py new file mode 100644 index 0000000..6f71cbe --- 
/dev/null
+++ b/python/libs/mne/inverse_sparse/_gamma_map.py
@@ -0,0 +1,293 @@
+# Authors: Alexandre Gramfort
+#          Martin Luessi
+# License: Simplified BSD
+
+import numpy as np
+
+from ..forward import is_fixed_orient
+from ..minimum_norm.inverse import _check_reference, _log_exp_var
+from ..utils import logger, verbose, warn
+from .mxne_inverse import (_check_ori, _make_sparse_stc, _prepare_gain,
+                           _reapply_source_weighting, _compute_residual,
+                           _make_dipoles_sparse)
+
+
+@verbose
+def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
+                   group_size=1, gammas=None, verbose=None):
+    """Hierarchical Bayes (Gamma-MAP).
+
+    Parameters
+    ----------
+    M : array, shape=(n_sensors, n_times)
+        Observation.
+    G : array, shape=(n_sensors, n_sources)
+        Forward operator.
+    alpha : float
+        Regularization parameter (noise variance).
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter for convergence.
+    group_size : int
+        Number of consecutive sources which use the same gamma.
+    update_mode : int
+        Update mode, 1: MacKay update (default), 2: Modified MacKay update.
+    gammas : array, shape=(n_sources,)
+        Initial values for posterior variances (gammas). If None, a
+        variance of 1.0 is used.
+    %(verbose)s
+
+    Returns
+    -------
+    X : array, shape=(n_active, n_times)
+        Estimated source time courses.
+    active_set : array, shape=(n_active,)
+        Indices of active sources.
+    """
+    from scipy import linalg
+    G = G.copy()
+    M = M.copy()
+
+    if gammas is None:
+        gammas = np.ones(G.shape[1], dtype=np.float64)
+
+    eps = np.finfo(float).eps
+
+    n_sources = G.shape[1]
+    n_sensors, n_times = M.shape
+
+    # apply normalization so the numerical values are sane
+    M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord='fro')
+    M /= np.sqrt(M_normalize_constant)
+    alpha /= M_normalize_constant
+    G_normalize_constant = np.linalg.norm(G, ord=np.inf)
+    G /= G_normalize_constant
+
+    if n_sources % group_size != 0:
+        raise ValueError('Number of sources has to be evenly divisible by '
+                         'the group size')
+
+    n_active = n_sources
+    active_set = np.arange(n_sources)
+
+    gammas_full_old = gammas.copy()
+
+    if update_mode == 2:
+        denom_fun = np.sqrt
+    else:
+        # do nothing
+        def denom_fun(x):
+            return x
+
+    last_size = -1
+    for itno in range(maxit):
+        gammas[np.isnan(gammas)] = 0.0
+
+        gidx = (np.abs(gammas) > eps)
+        active_set = active_set[gidx]
+        gammas = gammas[gidx]
+
+        # update only active gammas (once set to zero it stays at zero)
+        if n_active > len(active_set):
+            n_active = active_set.size
+            G = G[:, gidx]
+
+        CM = np.dot(G * gammas[np.newaxis, :], G.T)
+        CM.flat[::n_sensors + 1] += alpha
+        # Invert CM keeping symmetry
+        U, S, _ = linalg.svd(CM, full_matrices=False)
+        S = S[np.newaxis, :]
+        del CM
+        CMinv = np.dot(U / (S + eps), U.T)
+        CMinvG = np.dot(CMinv, G)
+        A = np.dot(CMinvG.T, M)  # mult. w. 
Diag(gamma) in gamma update + + if update_mode == 1: + # MacKay fixed point update (10) in [1] + numer = gammas ** 2 * np.mean((A * A.conj()).real, axis=1) + denom = gammas * np.sum(G * CMinvG, axis=0) + elif update_mode == 2: + # modified MacKay fixed point update (11) in [1] + numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1)) + denom = np.sum(G * CMinvG, axis=0) # sqrt is applied below + else: + raise ValueError('Invalid value for update_mode') + + if group_size == 1: + if denom is None: + gammas = numer + else: + gammas = numer / np.maximum(denom_fun(denom), + np.finfo('float').eps) + else: + numer_comb = np.sum(numer.reshape(-1, group_size), axis=1) + if denom is None: + gammas_comb = numer_comb + else: + denom_comb = np.sum(denom.reshape(-1, group_size), axis=1) + gammas_comb = numer_comb / denom_fun(denom_comb) + + gammas = np.repeat(gammas_comb / group_size, group_size) + + # compute convergence criterion + gammas_full = np.zeros(n_sources, dtype=np.float64) + gammas_full[active_set] = gammas + + err = (np.sum(np.abs(gammas_full - gammas_full_old)) / + np.sum(np.abs(gammas_full_old))) + + gammas_full_old = gammas_full + + breaking = (err < tol or n_active == 0) + if len(gammas) != last_size or breaking: + logger.info('Iteration: %d\t active set size: %d\t convergence: ' + '%0.3e' % (itno, len(gammas), err)) + last_size = len(gammas) + + if breaking: + break + + if itno < maxit - 1: + logger.info('\nConvergence reached !\n') + else: + warn('\nConvergence NOT reached !\n') + + # undo normalization and compute final posterior mean + n_const = np.sqrt(M_normalize_constant) / G_normalize_constant + x_active = n_const * gammas[:, None] * A + + return x_active, active_set + + +@verbose +def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, + xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1, + gammas=None, pca=True, return_residual=False, + return_as_dipoles=False, rank=None, pick_ori=None, verbose=None): + """Hierarchical Bayes (Gamma-MAP) sparse source localization method. + + Models each source time course using a zero-mean Gaussian prior with an + unknown variance (gamma) parameter. During estimation, most gammas are + driven to zero, resulting in a sparse source estimate, as in + :footcite:`WipfEtAl2007` and :footcite:`WipfNagarajan2009`. + + For fixed-orientation forward operators, a separate gamma is used for each + source time course, while for free-orientation forward operators, the same + gamma is used for the three source time courses at each source space point + (separate gammas can be used in this case by using xyz_same_gamma=False). + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to invert. + forward : dict + Forward operator. + noise_cov : instance of Covariance + Noise covariance to compute whitener. + alpha : float + Regularization parameter (noise variance). + %(loose)s + %(depth)s + xyz_same_gamma : bool + Use same gamma for xyz current components at each source space point. + Recommended for free-orientation forward solutions. + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter for convergence. + update_mode : int + Update mode, 1: MacKay update (default), 2: Modified MacKay update. + gammas : array, shape=(n_sources,) + Initial values for posterior variances (gammas). If None, a + variance of 1.0 is used. + pca : bool + If True the rank of the data is reduced to the true dimension. + return_residual : bool + If True, the residual is returned as an Evoked instance. 
+ return_as_dipoles : bool + If True, the sources are returned as a list of Dipole instances. + %(rank_none)s + + .. versionadded:: 0.18 + %(pick_ori)s + %(verbose)s + + Returns + ------- + stc : instance of SourceEstimate + Source time courses. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + References + ---------- + .. footbibliography:: + """ + _check_reference(evoked) + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, evoked.info, noise_cov, pca, depth, loose, rank) + _check_ori(pick_ori, forward) + + group_size = 1 if (is_fixed_orient(forward) or not xyz_same_gamma) else 3 + + # get the data + sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']] + M = evoked.data[sel] + + # whiten the data + logger.info('Whitening data matrix.') + M = np.dot(whitener, M) + + # run the optimization + X, active_set = _gamma_map_opt(M, gain, alpha, maxit=maxit, tol=tol, + update_mode=update_mode, gammas=gammas, + group_size=group_size, verbose=verbose) + + if len(active_set) == 0: + raise Exception("No active dipoles found. alpha is too big.") + + M_estimate = gain[:, active_set] @ X + + # Reapply weights to have correct unit + X = _reapply_source_weighting(X, source_weighting, active_set) + + if return_residual: + residual = _compute_residual(forward, evoked, X, active_set, + gain_info) + + if group_size == 1 and not is_fixed_orient(forward): + # make sure each source has 3 components + idx, offset = divmod(active_set, 3) + active_src = np.unique(idx) + if len(X) < 3 * len(active_src): + X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) + idx = np.searchsorted(active_src, idx) + X_xyz[idx, offset, :] = X + X_xyz.shape = (len(active_src) * 3, X.shape[1]) + X = X_xyz + active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() + source_weighting[source_weighting == 0] = 1 # zeros + gain_active = gain[:, active_set] / source_weighting[active_set] + del source_weighting + + tmin = evoked.times[0] + tstep = 1.0 / evoked.info['sfreq'] + + if return_as_dipoles: + out = _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, + gain_active, active_is_idx=True) + else: + out = _make_sparse_stc(X, active_set, forward, tmin, tstep, + active_is_idx=True, pick_ori=pick_ori, + verbose=verbose) + + _log_exp_var(M, M_estimate, prefix='') + logger.info('[done]') + + if return_residual: + out = out, residual + + return out diff --git a/python/libs/mne/inverse_sparse/mxne_debiasing.py b/python/libs/mne/inverse_sparse/mxne_debiasing.py new file mode 100644 index 0000000..1ea3ca6 --- /dev/null +++ b/python/libs/mne/inverse_sparse/mxne_debiasing.py @@ -0,0 +1,133 @@ +# Authors: Daniel Strohmeier +# Alexandre Gramfort +# +# License: BSD-3-Clause + +from math import sqrt +import numpy as np + +from ..utils import check_random_state, logger, verbose, fill_doc + + +@fill_doc +def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0): + """Find the largest singular value for the matrix kron(C.T, A). + + It uses power iterations. 
+ + Parameters + ---------- + A : array + An array + C : array + An array + max_iter : int + Maximum number of iterations + %(random_state)s + + Returns + ------- + L : float + largest singular value + + Notes + ----- + http://en.wikipedia.org/wiki/Power_iteration + """ + AS_size = C.shape[0] + rng = check_random_state(random_state) + B = rng.randn(AS_size, AS_size) + B /= np.linalg.norm(B, 'fro') + ATA = np.dot(A.T, A) + CCT = np.dot(C, C.T) + L0 = np.inf + for _ in range(max_iter): + Y = np.dot(np.dot(ATA, B), CCT) + L = np.linalg.norm(Y, 'fro') + + if abs(L - L0) < tol: + break + + B = Y / L + L0 = L + return L + + +@verbose +def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): + """Compute scaling to correct amplitude bias. + + It solves the following optimization problem using FISTA: + + min 1/2 * (|| M - GDX ||fro)^2 + s.t. D >= 1 and D is a diagonal matrix + + Reference for the FISTA algorithm: + Amir Beck and Marc Teboulle + A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse + Problems, SIAM J. Imaging Sci., 2(1), 183-202. (20 pages) + http://epubs.siam.org/doi/abs/10.1137/080716542 + + Parameters + ---------- + M : array + measurement data. + G : array + leadfield matrix. + X : array + reconstructed time courses with amplitude bias. + max_iter : int + Maximum number of iterations. + tol : float + The tolerance on convergence. + n_orient : int + The number of orientations (1 for fixed and 3 otherwise). + %(verbose)s + + Returns + ------- + D : array + Debiasing weights. + """ + n_sources = X.shape[0] + + lipschitz_constant = 1.1 * power_iteration_kron(G, X) + + # initializations + D = np.ones(n_sources) + Y = np.ones(n_sources) + t = 1.0 + + for i in range(max_iter): + D0 = D + + # gradient step + R = M - np.dot(G * Y, X) + D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant + # Equivalent but faster than: + # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant + + # prox ie projection on constraint + if n_orient != 1: # take care of orientations + # The scaling has to be the same for all orientations + D = np.mean(D.reshape(-1, n_orient), axis=1) + D = np.tile(D, [n_orient, 1]).T.ravel() + D = np.maximum(D, 1.0) + + t0 = t + t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2)) + Y.fill(0.0) + dt = (t0 - 1.0) / t + Y = D + dt * (D - D0) + + Ddiff = np.linalg.norm(D - D0, np.inf) + + if Ddiff < tol: + logger.info("Debiasing converged after %d iterations " + "max(|D - D0| = %e < %e)" % (i, Ddiff, tol)) + break + else: + Ddiff = np.linalg.norm(D - D0, np.inf) + logger.info("Debiasing did not converge after %d iterations! 
" + "max(|D - D0| = %e >= %e)" % (max_iter, Ddiff, tol)) + return D diff --git a/python/libs/mne/inverse_sparse/mxne_inverse.py b/python/libs/mne/inverse_sparse/mxne_inverse.py new file mode 100644 index 0000000..bc2e1d4 --- /dev/null +++ b/python/libs/mne/inverse_sparse/mxne_inverse.py @@ -0,0 +1,896 @@ +# Author: Alexandre Gramfort +# Daniel Strohmeier +# +# License: Simplified BSD + +import numpy as np + +from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc +from ..minimum_norm.inverse import (combine_xyz, _prepare_forward, + _check_reference, _log_exp_var) +from ..forward import is_fixed_orient +from ..io.pick import pick_channels_evoked +from ..io.proj import deactivate_proj +from ..utils import (logger, verbose, _check_depth, _check_option, sum_squared, + _validate_type, check_random_state, warn) +from ..dipole import Dipole + +from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi, + tf_mixed_norm_solver, iterative_tf_mixed_norm_solver, + norm_l2inf, norm_epsilon_inf, groups_norm2) + + +def _check_ori(pick_ori, forward): + """Check pick_ori.""" + _check_option('pick_ori', pick_ori, [None, 'vector']) + if pick_ori == 'vector' and is_fixed_orient(forward): + raise ValueError('pick_ori="vector" cannot be combined with a fixed ' + 'orientation forward solution.') + + +def _prepare_weights(forward, gain, source_weighting, weights, weights_min): + mask = None + if isinstance(weights, _BaseSourceEstimate): + weights = np.max(np.abs(weights.data), axis=1) + weights_max = np.max(weights) + if weights_min > weights_max: + raise ValueError('weights_min > weights_max (%s > %s)' % + (weights_min, weights_max)) + weights_min = weights_min / weights_max + weights = weights / weights_max + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T) + if len(weights) != gain.shape[1]: + raise ValueError('weights do not have the correct dimension ' + ' (%d != %d)' % (len(weights), gain.shape[1])) + if len(source_weighting.shape) == 1: + source_weighting *= weights + else: + source_weighting *= weights[:, None] + gain *= weights[None, :] + + if weights_min is not None: + mask = (weights > weights_min) + gain = gain[:, mask] + n_sources = np.sum(mask) // n_dip_per_pos + logger.info("Reducing source space to %d sources" % n_sources) + + return gain, source_weighting, mask + + +def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank, + weights=None, weights_min=None): + depth = _check_depth(depth, 'depth_sparse') + forward, gain_info, gain, _, _, source_weighting, _, _, whitener = \ + _prepare_forward(forward, info, noise_cov, 'auto', loose, rank, pca, + use_cps=True, **depth) + + if weights is None: + mask = None + else: + gain, source_weighting, mask = _prepare_weights( + forward, gain, source_weighting, weights, weights_min) + + return forward, gain, gain_info, whitener, source_weighting, mask + + +def _reapply_source_weighting(X, source_weighting, active_set): + X *= source_weighting[active_set][:, None] + return X + + +def _compute_residual(forward, evoked, X, active_set, info): + # OK, picking based on row_names is safe + sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']] + residual = evoked.copy() + residual = pick_channels_evoked(residual, include=info['ch_names']) + r_tmp = residual.copy() + + r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X) + + # Take care of proj + active_projs = list() + non_active_projs = list() + for p in 
evoked.info['projs']: + if p['active']: + active_projs.append(p) + else: + non_active_projs.append(p) + + if len(active_projs) > 0: + with r_tmp.info._unlock(): + r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True, + verbose=False) + r_tmp.apply_proj(verbose=False) + r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False) + + residual.data -= r_tmp.data + + return residual + + +@verbose +def _make_sparse_stc(X, active_set, forward, tmin, tstep, + active_is_idx=False, pick_ori=None, verbose=None): + source_nn = forward['source_nn'] + vector = False + if not is_fixed_orient(forward): + if pick_ori != 'vector': + logger.info('combining the current components...') + X = combine_xyz(X) + else: + vector = True + source_nn = np.reshape(source_nn, (-1, 3, 3)) + + if not active_is_idx: + active_idx = np.where(active_set)[0] + else: + active_idx = active_set + + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + if n_dip_per_pos > 1: + active_idx = np.unique(active_idx // n_dip_per_pos) + + src = forward['src'] + vertices = [] + n_points_so_far = 0 + for this_src in src: + this_n_points_so_far = n_points_so_far + len(this_src['vertno']) + this_active_idx = active_idx[(n_points_so_far <= active_idx) & + (active_idx < this_n_points_so_far)] + this_active_idx -= n_points_so_far + this_vertno = this_src['vertno'][this_active_idx] + n_points_so_far = this_n_points_so_far + vertices.append(this_vertno) + source_nn = source_nn[active_idx] + return _make_stc( + X, vertices, src.kind, tmin, tstep, src[0]['subject_his_id'], + vector=vector, source_nn=source_nn) + + +def _split_gof(M, X, gain): + # parse out the variance explained using an orthogonal basis + # assuming x is estimated using elements of gain, with residual res + # along the first axis + assert M.ndim == X.ndim == gain.ndim == 2, (M.ndim, X.ndim, gain.ndim) + assert gain.shape == (M.shape[0], X.shape[0]) + assert M.shape[1] == X.shape[1] + norm = (M * M.conj()).real.sum(0, keepdims=True) + norm[norm == 0] = np.inf + M_est = gain @ X + assert M.shape == M_est.shape + res = M - M_est + assert gain.shape[0] == M.shape[0], (gain.shape, M.shape) + # find an orthonormal basis for our matrices that spans the actual data + U, s, _ = np.linalg.svd(gain, full_matrices=False) + if U.shape[1] > 0: + U = U[:, s >= s[0] * 1e-6] + # the part that gets explained + fit_orth = U.T @ M + # the part that got over-explained (landed in residual) + res_orth = U.T @ res + # determine the weights by projecting each one onto this basis + w = (U.T @ gain)[:, :, np.newaxis] * X + w_norm = np.linalg.norm(w, axis=1, keepdims=True) + w_norm[w_norm == 0] = 1. 
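+    # normalize the weights (zero norms were set to 1. above, so this
+    # division cannot blow up for sources with no projection onto the basis)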
+    w /= w_norm
+    # our weights are now unit-norm positive (will preserve power)
+    fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2
+    res_back = np.linalg.norm(res_orth[:, np.newaxis] * w, axis=0) ** 2
+    # and the resulting goodness of fits
+    gof_back = 100 * (fit_back - res_back) / norm
+    assert gof_back.shape == X.shape, (gof_back.shape, X.shape)
+    return gof_back
+
+
+@verbose
+def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M,
+                         gain_active, active_is_idx=False,
+                         verbose=None):
+    times = tmin + tstep * np.arange(X.shape[1])
+
+    if not active_is_idx:
+        active_idx = np.where(active_set)[0]
+    else:
+        active_idx = active_set
+
+    # Compute the GOF split amongst the dipoles
+    assert M.shape == (gain_active.shape[0], len(times))
+    assert gain_active.shape[1] == len(active_idx) == X.shape[0]
+    gof_split = _split_gof(M, X, gain_active)
+    assert gof_split.shape == (len(active_idx), len(times))
+    assert X.shape[0] in (len(active_idx), 3 * len(active_idx))
+
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    if n_dip_per_pos > 1:
+        active_idx = active_idx // n_dip_per_pos
+        _, keep = np.unique(active_idx, return_index=True)
+        keep.sort()  # maintain old order
+        active_idx = active_idx[keep]
+        gof_split.shape = (len(active_idx), n_dip_per_pos, len(times))
+        gof_split = gof_split.sum(1)
+    assert (gof_split < 100).all()
+    assert gof_split.shape == (len(active_idx), len(times))
+
+    dipoles = []
+    for k, i_dip in enumerate(active_idx):
+        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
+        i_pos = i_pos.repeat(len(times), axis=0)
+        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
+        if n_dip_per_pos == 1:
+            amplitude = X_[0]
+            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
+            i_ori = i_ori.repeat(len(times), axis=0)
+        else:
+            if forward['surf_ori']:
+                X_ = np.dot(forward['source_nn'][
+                    i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
+            amplitude = np.linalg.norm(X_, axis=0)
+            i_ori = np.zeros((len(times), 3))
+            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
+                                     amplitude[amplitude > 0.]).T
+
+        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k]))
+
+    return dipoles
+
+
+@verbose
+def make_stc_from_dipoles(dipoles, src, verbose=None):
+    """Convert a list of spatio-temporal dipoles into a SourceEstimate.
+
+    Parameters
+    ----------
+    dipoles : Dipole | list of instances of Dipole
+        The dipoles to convert.
+    src : instance of SourceSpaces
+        The source space used to generate the forward operator.
+    %(verbose)s
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The source estimate.
+    """
+    logger.info('Converting dipoles into a SourceEstimate.')
+    if isinstance(dipoles, Dipole):
+        dipoles = [dipoles]
+    if not isinstance(dipoles, list):
+        raise ValueError('Dipoles must be an instance of Dipole or '
+                         'a list of instances of Dipole. '
+                         'Got %s!' % type(dipoles))
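+    # all dipoles are assumed to share the time axis of the first one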
+    tmin = dipoles[0].times[0]
+    tstep = dipoles[0].times[1] - tmin
+    X = np.zeros((len(dipoles), len(dipoles[0].times)))
+    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
+                               axis=0)
+    n_lh_points = len(src[0]['vertno'])
+    lh_vertno = list()
+    rh_vertno = list()
+    for i in range(len(dipoles)):
+        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
+            raise ValueError('Only dipoles with fixed position over time '
+                             'are supported!')
+        X[i] = dipoles[i].amplitude
+        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
+        idx = np.where(idx)[0][0]
+        if idx < n_lh_points:
+            lh_vertno.append(src[0]['vertno'][idx])
+        else:
+            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
+    vertices = [np.array(lh_vertno).astype(int),
+                np.array(rh_vertno).astype(int)]
+    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
+                         subject=src._subject)
+    logger.info('[done]')
+    return stc
+
+
+@verbose
+def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto',
+               depth=0.8, maxit=3000, tol=1e-4, active_set_size=10,
+               debias=True, time_pca=True, weights=None, weights_min=0.,
+               solver='auto', n_mxne_iter=1, return_residual=False,
+               return_as_dipoles=False, dgap_freq=10, rank=None, pick_ori=None,
+               sure_alpha_grid="auto", random_state=None, verbose=None):
+    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).
+
+    Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2
+    :footcite:`StrohmeierEtAl2016` mixed-norm solution on evoked data.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked or list of instances of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha : float | str
+        Regularization parameter. If a float, it should be in the range
+        [0, 100): 0 means no regularization and 100 would give no active
+        dipoles. If ``'sure'`` (default), the SURE method from
+        :footcite:`DeledalleEtAl2014` will be used.
+
+        .. versionchanged:: 0.24
+           The default was changed to ``'sure'``.
+    %(loose)s
+    %(depth)s
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter.
+    active_set_size : int | None
+        Size of active set increment. If None, no active set strategy is used.
+    debias : bool
+        Remove coefficient amplitude bias due to L1 penalty.
+    time_pca : bool or int
+        If True the rank of the concatenated epochs is reduced to
+        its true dimension. If an int, the rank is limited to this value.
+    weights : None | array | SourceEstimate
+        Weight for penalty in mixed_norm. Can be None, a
+        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
+        with wMNE, dSPM, or fMRI).
+    weights_min : float
+        Do not consider in the estimation sources for which weights
+        is less than weights_min.
+    solver : 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization. 'cd' uses
+        coordinate descent, and 'bcd' applies block coordinate descent.
+        'cd' is only available for fixed orientation.
+    n_mxne_iter : int
+        The number of MxNE iterations. If > 1, iterative reweighting
+        is applied.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    return_as_dipoles : bool
+        If True, the sources are returned as a list of Dipole instances.
+    dgap_freq : int or np.inf
+        The duality gap is evaluated every dgap_freq iterations. Ignored if
+        solver is 'cd'.
+    %(rank_none)s
+
+        ..
versionadded:: 0.18 + %(pick_ori)s + sure_alpha_grid : array | str + If ``'auto'`` (default), the SURE is evaluated along 15 uniformly + distributed alphas between alpha_max and 0.1 * alpha_max. If array, the + grid is directly specified. Ignored if alpha is not "sure". + + .. versionadded:: 0.24 + random_state : int | None + The random state used in a random number generator for delta and + epsilon used for the SURE computation. Defaults to None. + + .. versionadded:: 0.24 + %(verbose)s + + Returns + ------- + stc : SourceEstimate | list of SourceEstimate + Source time courses for each evoked data passed as input. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + See Also + -------- + tf_mixed_norm + + References + ---------- + .. footbibliography:: + """ + from scipy import linalg + _validate_type(alpha, ('numeric', str), 'alpha') + if isinstance(alpha, str): + _check_option('alpha', alpha, ('sure',)) + elif not 0. <= alpha < 100: + raise ValueError('If not equal to "sure" alpha must be in [0, 100). ' + 'Got alpha = %s' % alpha) + if n_mxne_iter < 1: + raise ValueError('MxNE has to be computed at least 1 time. ' + 'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter) + if dgap_freq <= 0.: + raise ValueError('dgap_freq must be a positive integer.' + ' Got dgap_freq = %s' % dgap_freq) + if not(isinstance(sure_alpha_grid, (np.ndarray, list)) or + sure_alpha_grid == "auto"): + raise ValueError('If not equal to "auto" sure_alpha_grid must be an ' + 'array. Got %s' % type(sure_alpha_grid)) + if ((isinstance(sure_alpha_grid, str) and sure_alpha_grid != "auto") + and (isinstance(alpha, str) and alpha != "sure")): + raise Exception('If sure_alpha_grid is manually specified, alpha must ' + 'be "sure". 
Got %s' % alpha) + pca = True + if not isinstance(evoked, list): + evoked = [evoked] + + _check_reference(evoked[0]) + + all_ch_names = evoked[0].ch_names + if not all(all_ch_names == evoked[i].ch_names + for i in range(1, len(evoked))): + raise Exception('All the datasets must have the same good channels.') + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, evoked[0].info, noise_cov, pca, depth, loose, rank, + weights, weights_min) + _check_ori(pick_ori, forward) + + sel = [all_ch_names.index(name) for name in gain_info['ch_names']] + M = np.concatenate([e.data[sel] for e in evoked], axis=1) + + # Whiten data + logger.info('Whitening data matrix.') + M = np.dot(whitener, M) + + if time_pca: + U, s, Vh = linalg.svd(M, full_matrices=False) + if not isinstance(time_pca, bool) and isinstance(time_pca, int): + U = U[:, :time_pca] + s = s[:time_pca] + Vh = Vh[:time_pca] + M = U * s + + # Scaling to make setting of tol and alpha easy + tol *= sum_squared(M) + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False) + alpha_max *= 0.01 + gain /= alpha_max + source_weighting /= alpha_max + + # Alpha selected automatically by SURE minimization + if alpha == "sure": + alpha_grid = sure_alpha_grid + if isinstance(sure_alpha_grid, str) and sure_alpha_grid == "auto": + alpha_grid = np.geomspace(100, 10, num=15) + X, active_set, best_alpha_ = _compute_mxne_sure( + M, gain, alpha_grid, sigma=1, random_state=random_state, + n_mxne_iter=n_mxne_iter, maxit=maxit, tol=tol, + n_orient=n_dip_per_pos, active_set_size=active_set_size, + debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose) + logger.info('Selected alpha: %s' % best_alpha_) + else: + if n_mxne_iter == 1: + X, active_set, E = mixed_norm_solver( + M, gain, alpha, maxit=maxit, tol=tol, + active_set_size=active_set_size, n_orient=n_dip_per_pos, + debias=debias, solver=solver, dgap_freq=dgap_freq, + verbose=verbose) + else: + X, active_set, E = iterative_mixed_norm_solver( + M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol, + n_orient=n_dip_per_pos, active_set_size=active_set_size, + debias=debias, solver=solver, dgap_freq=dgap_freq, + verbose=verbose) + + if time_pca: + X = np.dot(X, Vh) + M = np.dot(M, Vh) + + gain_active = gain[:, active_set] + if mask is not None: + active_set_tmp = np.zeros(len(mask), dtype=bool) + active_set_tmp[mask] = active_set + active_set = active_set_tmp + del active_set_tmp + + if active_set.sum() == 0: + warn("No active dipoles found. 
alpha is too big.") + M_estimate = np.zeros_like(M) + else: + # Reapply weights to have correct unit + X = _reapply_source_weighting(X, source_weighting, active_set) + source_weighting[source_weighting == 0] = 1 # zeros + gain_active /= source_weighting[active_set] + del source_weighting + M_estimate = np.dot(gain_active, X) + + outs = list() + residual = list() + cnt = 0 + for e in evoked: + tmin = e.times[0] + tstep = 1.0 / e.info['sfreq'] + Xe = X[:, cnt:(cnt + len(e.times))] + if return_as_dipoles: + out = _make_dipoles_sparse( + Xe, active_set, forward, tmin, tstep, + M[:, cnt:(cnt + len(e.times))], + gain_active) + else: + out = _make_sparse_stc( + Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori) + outs.append(out) + cnt += len(e.times) + + if return_residual: + residual.append(_compute_residual(forward, e, Xe, active_set, + gain_info)) + + _log_exp_var(M, M_estimate, prefix='') + logger.info('[done]') + + if len(outs) == 1: + out = outs[0] + if return_residual: + residual = residual[0] + else: + out = outs + + if return_residual: + out = out, residual + + return out + + +def _window_evoked(evoked, size): + """Window evoked (size in seconds).""" + if isinstance(size, (float, int)): + lsize = rsize = float(size) + else: + lsize, rsize = size + evoked = evoked.copy() + sfreq = float(evoked.info['sfreq']) + lsize = int(lsize * sfreq) + rsize = int(rsize * sfreq) + lhann = np.hanning(lsize * 2)[:lsize] + rhann = np.hanning(rsize * 2)[-rsize:] + window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann] + evoked.data *= window[None, :] + return evoked + + +@verbose +def tf_mixed_norm(evoked, forward, noise_cov, + loose='auto', depth=0.8, maxit=3000, + tol=1e-4, weights=None, weights_min=0., pca=True, + debias=True, wsize=64, tstep=4, window=0.02, + return_residual=False, return_as_dipoles=False, alpha=None, + l1_ratio=None, dgap_freq=10, rank=None, pick_ori=None, + n_tfmxne_iter=1, verbose=None): + """Time-Frequency Mixed-norm estimate (TF-MxNE). + + Compute L1/L2 + L1 mixed-norm solution on time-frequency + dictionary. Works with evoked data + :footcite:`GramfortEtAl2013b,GramfortEtAl2011`. + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to invert. + forward : dict + Forward operator. + noise_cov : instance of Covariance + Noise covariance to compute whitener. + %(loose)s + %(depth)s + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter. + weights : None | array | SourceEstimate + Weight for penalty in mixed_norm. Can be None or + 1d array of length n_sources or a SourceEstimate e.g. obtained + with wMNE or dSPM or fMRI. + weights_min : float + Do not consider in the estimation sources for which weights + is less than weights_min. + pca : bool + If True the rank of the data is reduced to true dimension. + debias : bool + Remove coefficient amplitude bias due to L1 penalty. + wsize : int or array-like + Length of the STFT window in samples (must be a multiple of 4). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep) and each entry of wsize must be a multiple + of 4. See :footcite:`BekhtiEtAl2016`. + tstep : int or array-like + Step between successive windows in samples (must be a multiple of 2, + a divider of wsize and smaller than wsize/2) (default: wsize/2). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep), and each entry of tstep must be a multiple + of 2 and divide the corresponding entry of wsize. 
See + :footcite:`BekhtiEtAl2016`. + window : float or (float, float) + Length of time window used to take care of edge artifacts in seconds. + It can be one float or float if the values are different for left + and right window length. + return_residual : bool + If True, the residual is returned as an Evoked instance. + return_as_dipoles : bool + If True, the sources are returned as a list of Dipole instances. + alpha : float in [0, 100) or None + Overall regularization parameter. + If alpha and l1_ratio are not None, alpha_space and alpha_time are + overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max + * l1_ratio. 0 means no regularization, 100 would give 0 active dipole. + l1_ratio : float in [0, 1] or None + Proportion of temporal regularization. + If l1_ratio and alpha are not None, alpha_space and alpha_time are + overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max + * l1_ratio. 0 means no time regularization a.k.a. MxNE. + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + %(rank_none)s + + .. versionadded:: 0.18 + %(pick_ori)s + n_tfmxne_iter : int + Number of TF-MxNE iterations. If > 1, iterative reweighting is applied. + %(verbose)s + + Returns + ------- + stc : instance of SourceEstimate + Source time courses. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + See Also + -------- + mixed_norm + + References + ---------- + .. footbibliography:: + """ + _check_reference(evoked) + + all_ch_names = evoked.ch_names + info = evoked.info + + if not (0. <= alpha < 100.): + raise ValueError('alpha must be in [0, 100). ' + 'Got alpha = %s' % alpha) + + if not (0. <= l1_ratio <= 1.): + raise ValueError('l1_ratio must be in range [0, 1].' + ' Got l1_ratio = %s' % l1_ratio) + alpha_space = alpha * (1. - l1_ratio) + alpha_time = alpha * l1_ratio + + if n_tfmxne_iter < 1: + raise ValueError('TF-MxNE has to be computed at least 1 time. ' + 'Requires n_tfmxne_iter >= 1, got %s' % n_tfmxne_iter) + + if dgap_freq <= 0.: + raise ValueError('dgap_freq must be a positive integer.' + ' Got dgap_freq = %s' % dgap_freq) + + tstep = np.atleast_1d(tstep) + wsize = np.atleast_1d(wsize) + if len(tstep) != len(wsize): + raise ValueError('The same number of window sizes and steps must be ' + 'passed. 
Got tstep = %s and wsize = %s' % + (tstep, wsize)) + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, evoked.info, noise_cov, pca, depth, loose, rank, + weights, weights_min) + _check_ori(pick_ori, forward) + + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + + if window is not None: + evoked = _window_evoked(evoked, window) + + sel = [all_ch_names.index(name) for name in gain_info["ch_names"]] + M = evoked.data[sel] + + # Whiten data + logger.info('Whitening data matrix.') + M = np.dot(whitener, M) + + n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) + n_freqs = wsize // 2 + 1 + n_coefs = n_steps * n_freqs + phi = _Phi(wsize, tstep, n_coefs, evoked.data.shape[1]) + + # Scaling to make setting of tol and alpha easy + tol *= sum_squared(M) + alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos) + alpha_max *= 0.01 + gain /= alpha_max + source_weighting /= alpha_max + + if n_tfmxne_iter == 1: + X, active_set, E = tf_mixed_norm_solver( + M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep, + maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos, + dgap_freq=dgap_freq, debias=debias) + else: + X, active_set, E = iterative_tf_mixed_norm_solver( + M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep, + n_tfmxne_iter=n_tfmxne_iter, maxit=maxit, tol=tol, verbose=verbose, + n_orient=n_dip_per_pos, dgap_freq=dgap_freq, debias=debias) + + if active_set.sum() == 0: + raise Exception("No active dipoles found. " + "alpha_space/alpha_time are too big.") + + # Compute estimated whitened sensor data for each dipole (dip, ch, time) + gain_active = gain[:, active_set] + + if mask is not None: + active_set_tmp = np.zeros(len(mask), dtype=bool) + active_set_tmp[mask] = active_set + active_set = active_set_tmp + del active_set_tmp + + X = _reapply_source_weighting(X, source_weighting, active_set) + gain_active /= source_weighting[active_set] + + if return_residual: + residual = _compute_residual( + forward, evoked, X, active_set, gain_info) + + if return_as_dipoles: + out = _make_dipoles_sparse( + X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'], + M, gain_active) + else: + out = _make_sparse_stc( + X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'], + pick_ori=pick_ori) + + logger.info('[done]') + + if return_residual: + out = out, residual + + return out + + +@verbose +def _compute_mxne_sure(M, gain, alpha_grid, sigma, n_mxne_iter, maxit, tol, + n_orient, active_set_size, debias, solver, dgap_freq, + random_state, verbose): + """Stein Unbiased Risk Estimator (SURE). + + Implements the finite-difference Monte-Carlo approximation + of the SURE for Multi-Task LASSO. + + See reference :footcite:`DeledalleEtAl2014`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + gain : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha_grid : array, shape (n_alphas,) + The grid of alphas used to evaluate the SURE. + sigma : float + The true or estimated noise level in the data. Usually 1 if the data + has been previously whitened using MNE whitener. + n_mxne_iter : int + The number of MxNE iterations. If > 1, iterative reweighting is + applied. + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + active_set_size : int + Size of active set increase at each iteration. + debias : bool + Debias source estimates. 
+    solver : 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization.
+    dgap_freq : int or np.inf
+        The duality gap is evaluated every dgap_freq iterations.
+    random_state : int | None
+        The random state used in a random number generator for delta and
+        epsilon used for the SURE computation.
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        Coefficient matrix.
+    active_set : array, shape (n_dipoles,)
+        Array of indices of non-zero coefficients.
+    best_alpha_ : float
+        Alpha that minimizes the SURE.
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    def g(w):
+        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
+
+    def gprime(w):
+        return 2. * np.repeat(g(w), n_orient).ravel()
+
+    def _run_solver(alpha, M, n_mxne_iter, as_init=None, X_init=None,
+                    w_init=None):
+        if n_mxne_iter == 1:
+            X, active_set, _ = mixed_norm_solver(
+                M, gain, alpha, maxit=maxit, tol=tol,
+                active_set_size=active_set_size, n_orient=n_orient,
+                debias=debias, solver=solver, dgap_freq=dgap_freq,
+                active_set_init=as_init, X_init=X_init, verbose=False)
+        else:
+            X, active_set, _ = iterative_mixed_norm_solver(
+                M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
+                n_orient=n_orient, active_set_size=active_set_size,
+                debias=debias, solver=solver, dgap_freq=dgap_freq,
+                weight_init=w_init, verbose=False)
+        return X, active_set
+
+    def _fit_on_grid(gain, M, eps, delta):
+        coefs_grid_1_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1]))
+        coefs_grid_2_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1]))
+        active_sets, active_sets_eps = [], []
+        M_eps = M + eps * delta
+        # warm start - first iteration (leverages convexity)
+        logger.info('Warm starting...')
+        for j, alpha in enumerate(alpha_grid):
+            logger.info('alpha: %s' % alpha)
+            X, a_set = _run_solver(alpha, M, 1)
+            X_eps, a_set_eps = _run_solver(alpha, M_eps, 1)
+            coefs_grid_1_0[j][a_set, :] = X
+            coefs_grid_2_0[j][a_set_eps, :] = X_eps
+            active_sets.append(a_set)
+            active_sets_eps.append(a_set_eps)
+        # next iterations
+        if n_mxne_iter == 1:
+            return coefs_grid_1_0, coefs_grid_2_0, active_sets
+        else:
+            coefs_grid_1 = coefs_grid_1_0.copy()
+            coefs_grid_2 = coefs_grid_2_0.copy()
+            logger.info('Fitting SURE on grid.')
+            for j, alpha in enumerate(alpha_grid):
+                logger.info('alpha: %s' % alpha)
+                if active_sets[j].sum() > 0:
+                    w = gprime(coefs_grid_1[j])
+                    X, a_set = _run_solver(alpha, M, n_mxne_iter - 1,
+                                           w_init=w)
+                    coefs_grid_1[j][a_set, :] = X
+                    active_sets[j] = a_set
+                if active_sets_eps[j].sum() > 0:
+                    w_eps = gprime(coefs_grid_2[j])
+                    X_eps, a_set_eps = _run_solver(alpha, M_eps,
+                                                   n_mxne_iter - 1,
+                                                   w_init=w_eps)
+                    coefs_grid_2[j][a_set_eps, :] = X_eps
+                    active_sets_eps[j] = a_set_eps
+
+            return coefs_grid_1, coefs_grid_2, active_sets
+
+    def _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps):
+        n_sensors, n_times = gain.shape[0], M.shape[1]
+        dof = (gain @ (coef2 - coef1) * delta).sum() / eps
+        df_term = np.linalg.norm(M - gain @ coef1) ** 2
+        sure = df_term - n_sensors * n_times * sigma ** 2
+        sure += 2 * dof * sigma ** 2
+        return sure
+
+    sure_path = np.empty(len(alpha_grid))
+
+    rng = check_random_state(random_state)
+    # See Deledalle et al. 2014 Sec.
5.1 + eps = 2 * sigma / (M.shape[0] ** 0.3) + delta = rng.randn(*M.shape) + + coefs_grid_1, coefs_grid_2, active_sets = _fit_on_grid(gain, M, eps, delta) + + logger.info("Computing SURE values on grid.") + for i, (coef1, coef2) in enumerate(zip(coefs_grid_1, coefs_grid_2)): + sure_path[i] = _compute_sure_val( + coef1, coef2, gain, M, sigma, delta, eps) + if verbose: + logger.info("alpha %s :: sure %s" % (alpha_grid[i], sure_path[i])) + best_alpha_ = alpha_grid[np.argmin(sure_path)] + + X = coefs_grid_1[np.argmin(sure_path)] + active_set = active_sets[np.argmin(sure_path)] + + X = X[active_set, :] + + return X, active_set, best_alpha_ diff --git a/python/libs/mne/inverse_sparse/mxne_optim.py b/python/libs/mne/inverse_sparse/mxne_optim.py new file mode 100644 index 0000000..2d08c22 --- /dev/null +++ b/python/libs/mne/inverse_sparse/mxne_optim.py @@ -0,0 +1,1480 @@ +# Author: Alexandre Gramfort +# Daniel Strohmeier +# Mathurin Massias +# License: Simplified BSD + +import functools +from math import sqrt + +import numpy as np + +from .mxne_debiasing import compute_bias +from ..utils import (logger, verbose, sum_squared, warn, _get_blas_funcs, + _validate_type, _check_option) +from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft + + +@functools.lru_cache(None) +def _get_dgemm(): + return _get_blas_funcs(np.float64, 'gemm') + + +def groups_norm2(A, n_orient): + """Compute squared L2 norms of groups inplace.""" + n_positions = A.shape[0] // n_orient + return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1) + + +def norm_l2inf(A, n_orient, copy=True): + """L2-inf norm.""" + if A.size == 0: + return 0.0 + if copy: + A = A.copy() + return sqrt(np.max(groups_norm2(A, n_orient))) + + +def norm_l21(A, n_orient, copy=True): + """L21 norm.""" + if A.size == 0: + return 0.0 + if copy: + A = A.copy() + return np.sum(np.sqrt(groups_norm2(A, n_orient))) + + +def _primal_l21(M, G, X, active_set, alpha, n_orient): + """Primal objective for the mixed-norm inverse problem. + + See :footcite:`GramfortEtAl2012`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_active) + The gain matrix a.k.a. lead field. + X : array, shape (n_active, n_times) + Sources. + active_set : array of bool, shape (n_sources,) + Mask of active sources. + alpha : float + The regularization parameter. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + + Returns + ------- + p_obj : float + Primal objective. + R : array, shape (n_sensors, n_times) + Current residual (M - G * X). + nR2 : float + Data-fitting term. + GX : array, shape (n_sensors, n_times) + Forward prediction. + """ + GX = np.dot(G[:, active_set], X) + R = M - GX + penalty = norm_l21(X, n_orient, copy=True) + nR2 = sum_squared(R) + p_obj = 0.5 * nR2 + alpha * penalty + return p_obj, R, nR2, GX + + +def dgap_l21(M, G, X, active_set, alpha, n_orient): + """Duality gap for the mixed norm inverse problem. + + See :footcite:`GramfortEtAl2012`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_active) + The gain matrix a.k.a. lead field. + X : array, shape (n_active, n_times) + Sources. + active_set : array of bool, shape (n_sources, ) + Mask of active sources. + alpha : float + The regularization parameter. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + + Returns + ------- + gap : float + Dual gap. + p_obj : float + Primal objective. + d_obj : float + Dual objective. 
gap = p_obj - d_obj.
+    R : array, shape (n_sensors, n_times)
+        Current residual (M - G * X).
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    p_obj, R, nR2, GX = _primal_l21(M, G, X, active_set, alpha, n_orient)
+    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
+    scaling = alpha / dual_norm
+    scaling = min(scaling, 1.0)
+    d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
+
+    gap = p_obj - d_obj
+    return gap, p_obj, d_obj, R
+
+
+@verbose
+def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
+                          tol=1e-8, verbose=None, init=None, n_orient=1,
+                          dgap_freq=10):
+    """Solve L21 inverse problem with coordinate descent."""
+    from sklearn.linear_model import MultiTaskLasso
+
+    assert M.ndim == G.ndim and M.shape[0] == G.shape[0]
+
+    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M),
+                         fit_intercept=False, max_iter=maxit, warm_start=True)
+    if init is not None:
+        clf.coef_ = init.T
+    else:
+        clf.coef_ = np.zeros((G.shape[1], M.shape[1])).T
+    clf.fit(G, M)
+
+    X = clf.coef_.T
+    active_set = np.any(X, axis=1)
+    X = X[active_set]
+    gap, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
+    return X, active_set, p_obj
+
+
+@verbose
+def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
+                           tol=1e-8, verbose=None, init=None, n_orient=1,
+                           dgap_freq=10, use_accel=True, K=5):
+    """Solve L21 inverse problem with block coordinate descent."""
+    _, n_times = M.shape
+    _, n_sources = G.shape
+    n_positions = n_sources // n_orient
+
+    if init is None:
+        X = np.zeros((n_sources, n_times))
+        R = M.copy()
+    else:
+        X = init
+        R = M - np.dot(G, X)
+
+    E = []  # track primal objective function
+    highest_d_obj = - np.inf
+    active_set = np.zeros(n_sources, dtype=bool)  # updated in place by _bcd
+
+    alpha_lc = alpha / lipschitz_constant
+
+    if use_accel:
+        last_K_X = np.empty((K + 1, n_sources, n_times))
+        U = np.zeros((K, n_sources * n_times))
+
+    # First make G fortran for faster access to blocks of columns
+    G = np.asfortranarray(G)
+    # Ensure these are correct for dgemm
+    assert R.dtype == np.float64
+    assert G.dtype == np.float64
+    one_ovr_lc = 1. / lipschitz_constant
+
+    # assert that all the multiplied matrices are fortran contiguous
+    assert X.T.flags.f_contiguous
+    assert R.T.flags.f_contiguous
+    assert G.flags.f_contiguous
+    # storing list of contiguous arrays
+    list_G_j_c = []
+    for j in range(n_positions):
+        idx = slice(j * n_orient, (j + 1) * n_orient)
+        list_G_j_c.append(np.ascontiguousarray(G[:, idx]))
+
+    for i in range(maxit):
+        _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c)
+
+        if (i + 1) % dgap_freq == 0:
+            _, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set,
+                                          alpha, n_orient)
+            highest_d_obj = max(d_obj, highest_d_obj)
+            gap = p_obj - highest_d_obj
+            E.append(p_obj)
+            logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" %
+                         (i + 1, p_obj, gap, np.sum(active_set) / n_orient))
+
+            if gap < tol:
+                logger.debug('Convergence reached ! (gap: %s < %s)'
+                             % (gap, tol))
+                break
+
+        # using Anderson acceleration of the primal variable for faster
+        # convergence
+        if use_accel:
+            last_K_X[i % (K + 1)] = X
+
+            if i % (K + 1) == K:
+                for k in range(K):
+                    U[k] = last_K_X[k + 1].ravel() - last_K_X[k].ravel()
+                C = U @ U.T
+                one_vec = np.ones(K)
+
+                try:
+                    z = np.linalg.solve(C, one_vec)
+                except np.linalg.LinAlgError:
+                    # Matrix C is not always expected to be non-singular. If C
+                    # is singular, acceleration is not used at this iteration
+                    # and the solver proceeds with the non-sped-up code.
+                    logger.debug("Iteration %d: LinAlg Error" % (i + 1))
+                else:
+                    c = z / z.sum()
+                    X_acc = np.sum(
+                        last_K_X[:-1] * c[:, None, None], axis=0
+                    )
+                    _grp_norm2_acc = groups_norm2(X_acc, n_orient)
+                    active_set_acc = _grp_norm2_acc != 0
+                    if n_orient > 1:
+                        active_set_acc = np.kron(
+                            active_set_acc, np.ones(n_orient, dtype=bool)
+                        )
+                    p_obj = _primal_l21(M, G, X[active_set], active_set, alpha,
+                                        n_orient)[0]
+                    p_obj_acc = _primal_l21(M, G, X_acc[active_set_acc],
+                                            active_set_acc, alpha, n_orient)[0]
+                    if p_obj_acc < p_obj:
+                        X = X_acc
+                        active_set = active_set_acc
+                        R = M - G[:, active_set] @ X[active_set]
+
+    X = X[active_set]
+
+    return X, active_set, E
+
+
+def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c):
+    """Implement one full pass of BCD.
+
+    BCD stands for Block Coordinate Descent.
+    This function makes use of scipy.linalg.get_blas_funcs for speed reasons.
+
+    Parameters
+    ----------
+    G : array, shape (n_sensors, n_active)
+        The gain matrix a.k.a. lead field.
+    X : array, shape (n_sources, n_times)
+        Sources, modified in place.
+    R : array, shape (n_sensors, n_times)
+        The residuals: R = M - G @ X, modified in place.
+    active_set : array of bool, shape (n_sources, )
+        Mask of active sources, modified in place.
+    one_ovr_lc : array, shape (n_positions, )
+        One over the Lipschitz constants.
+    n_orient : int
+        Number of dipoles per position (typically 1 or 3).
+    alpha_lc : array, shape (n_positions, )
+        alpha divided by the Lipschitz constants.
+    list_G_j_c : list of array
+        One contiguous copy of the gain columns per source position.
+    """
+    X_j_new = np.zeros_like(X[:n_orient, :], order='C')
+    dgemm = _get_dgemm()
+
+    for j, G_j_c in enumerate(list_G_j_c):
+        idx = slice(j * n_orient, (j + 1) * n_orient)
+        G_j = G[:, idx]
+        X_j = X[idx]
+        dgemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T,
+              overwrite_c=True)
+        # X_j_new = G_j.T @ R
+        # Mathurin's trick to avoid checking all the entries
+        was_non_zero = X_j[0, 0] != 0
+        # was_non_zero = np.any(X_j)
+        if was_non_zero:
+            dgemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T,
+                  overwrite_c=True)
+            # R += np.dot(G_j, X_j)
+            X_j_new += X_j
+        block_norm = sqrt(sum_squared(X_j_new))
+        if block_norm <= alpha_lc[j]:
+            X_j.fill(0.)
+            active_set[idx] = False
+        else:
+            shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0)
+            X_j_new *= shrink
+            dgemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T,
+                  overwrite_c=True)
+            # R -= np.dot(G_j, X_j_new)
+            X_j[:] = X_j_new
+            active_set[idx] = True
+
+
+@verbose
+def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
+                      active_set_size=50, debias=True, n_orient=1,
+                      solver='auto', return_gap=False, dgap_freq=10,
+                      active_set_init=None, X_init=None):
+    """Solve L1/L2 mixed-norm inverse problem with active set strategy.
+
+    See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016,
+    BertrandEtAl2020`.
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha : float
+        The regularization parameter. It should be between 0 and 100.
+        A value of 100 will lead to an empty active set (no active source).
+    maxit : int
+        The number of iterations.
+    tol : float
+        Tolerance on dual gap for convergence checking.
+    %(verbose)s
+    active_set_size : int
+        Size of active set increase at each iteration.
+    debias : bool
+        Debias source estimates.
+ n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. Block Coordinate Descent + (BCD) uses Anderson acceleration for faster convergence. + return_gap : bool + Return final duality gap. + dgap_freq : int + The duality gap is computed every dgap_freq iterations of the solver on + the active set. + active_set_init : array, shape (n_dipoles,) or None + The initial active set (boolean array) used at the first iteration. + If None, the usual active set strategy is applied. + X_init : array, shape (n_dipoles, n_times) or None + The initial weight matrix used for warm starting the solver. If None, + the weights are initialized at zero. + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array, shape (new_active_set_size,) + The mask of active sources. Note that new_active_set_size is the size + of the active set after convergence of the solver. + E : list + The value of the objective function over the iterations. + gap : float + Final duality gap. Returned only if return_gap is True. + + References + ---------- + .. footbibliography:: + """ + n_dipoles = G.shape[1] + n_positions = n_dipoles // n_orient + _, n_times = M.shape + alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False) + logger.info("-- ALPHA MAX : %s" % alpha_max) + alpha = float(alpha) + X = np.zeros((n_dipoles, n_times), dtype=G.dtype) + + has_sklearn = True + try: + from sklearn.linear_model import MultiTaskLasso # noqa: F401 + except ImportError: + has_sklearn = False + + _validate_type(solver, str, 'solver') + _check_option('solver', solver, ('cd', 'bcd', 'auto')) + if solver == 'auto': + if has_sklearn and (n_orient == 1): + solver = 'cd' + else: + solver = 'bcd' + + if solver == 'cd': + if n_orient == 1 and not has_sklearn: + warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate' + ' descent instead of coordinate descent.') + solver = 'bcd' + if n_orient > 1: + warn('Coordinate descent is only available for fixed orientation. 
' + 'Using block coordinate descent instead of coordinate ' + 'descent') + solver = 'bcd' + + if solver == 'cd': + logger.info("Using coordinate descent") + l21_solver = _mixed_norm_solver_cd + lc = None + else: + assert solver == 'bcd' + logger.info("Using block coordinate descent") + l21_solver = _mixed_norm_solver_bcd + G = np.asfortranarray(G) + if n_orient == 1: + lc = np.sum(G * G, axis=0) + else: + lc = np.empty(n_positions) + for j in range(n_positions): + G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + + if active_set_size is not None: + E = list() + highest_d_obj = - np.inf + if X_init is not None and X_init.shape != (n_dipoles, n_times): + raise ValueError('Wrong dim for initialized coefficients.') + active_set = (active_set_init if active_set_init is not None else + np.zeros(n_dipoles, dtype=bool)) + idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient)) + new_active_idx = idx_large_corr[-active_set_size:] + if n_orient > 1: + new_active_idx = (n_orient * new_active_idx[:, None] + + np.arange(n_orient)[None, :]).ravel() + active_set[new_active_idx] = True + as_size = np.sum(active_set) + gap = np.inf + for k in range(maxit): + if solver == 'bcd': + lc_tmp = lc[active_set[::n_orient]] + elif solver == 'cd': + lc_tmp = None + else: + lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2 + X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp, + maxit=maxit, tol=tol, init=X_init, + n_orient=n_orient, dgap_freq=dgap_freq) + active_set[active_set] = as_.copy() + idx_old_active_set = np.where(active_set)[0] + + _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha, + n_orient) + highest_d_obj = max(d_obj, highest_d_obj) + gap = p_obj - highest_d_obj + E.append(p_obj) + logger.info("Iteration %d :: p_obj %f :: dgap %f :: " + "n_active_start %d :: n_active_end %d" % ( + k + 1, p_obj, gap, as_size // n_orient, + np.sum(active_set) // n_orient)) + if gap < tol: + logger.info('Convergence reached ! (gap: %s < %s)' + % (gap, tol)) + break + + # add sources if not last iteration + if k < (maxit - 1): + idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R), + n_orient)) + new_active_idx = idx_large_corr[-active_set_size:] + if n_orient > 1: + new_active_idx = (n_orient * new_active_idx[:, None] + + np.arange(n_orient)[None, :]) + new_active_idx = new_active_idx.ravel() + active_set[new_active_idx] = True + idx_active_set = np.where(active_set)[0] + as_size = np.sum(active_set) + X_init = np.zeros((as_size, n_times), dtype=X.dtype) + idx = np.searchsorted(idx_active_set, idx_old_active_set) + X_init[idx] = X + else: + warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol)) + else: + X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit, + tol=tol, n_orient=n_orient, init=None) + if return_gap: + gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0] + + if np.any(active_set) and debias: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient)) + + if return_gap: + return X, active_set, E, gap + else: + return X, active_set, E + + +@verbose +def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000, + tol=1e-8, verbose=None, active_set_size=50, + debias=True, n_orient=1, dgap_freq=10, + solver='auto', weight_init=None): + """Solve L0.5/L2 mixed-norm inverse problem with active set strategy. + + See reference :footcite:`StrohmeierEtAl2016`. 
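+
+    Each reweighting solves a weighted L21 problem in which the gain matrix
+    is rescaled by weights computed from the previous estimate (``gprime``
+    below), so that weak sources are penalized more heavily at the next
+    iteration. Roughly (variable names are illustrative)::
+
+        for k in range(n_mxne_iter):
+            G_w = G[:, active] * weights        # rescale the gain columns
+            X, active, _ = mixed_norm_solver(M, G_w, alpha, debias=False)
+            X *= weights[active][:, np.newaxis]  # restore correct units
+            weights = gprime(X)                  # update for next iteration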
+ + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha : float + The regularization parameter. It should be between 0 and 100. + A value of 100 will lead to an empty active set (no active source). + n_mxne_iter : int + The number of MxNE iterations. If > 1, iterative reweighting + is applied. + maxit : int + The number of iterations. + tol : float + Tolerance on dual gap for convergence checking. + %(verbose)s + active_set_size : int + Size of active set increase at each iteration. + debias : bool + Debias source estimates. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. + weight_init : array, shape (n_dipoles,) or None + The initial weight used for reweighting the gain matrix. If None, the + weights are initialized with ones. + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array + The mask of active sources. + E : list + The value of the objective function over the iterations. + + References + ---------- + .. footbibliography:: + """ + def g(w): + return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) + + def gprime(w): + return 2. * np.repeat(g(w), n_orient).ravel() + + E = list() + + if weight_init is not None and weight_init.shape != (G.shape[1],): + raise ValueError('Wrong dimension for weight initialization. Got %s. ' + 'Expected %s.' % (weight_init.shape, (G.shape[1],))) + + weights = weight_init if weight_init is not None else np.ones(G.shape[1]) + active_set = (weights != 0) + weights = weights[active_set] + X = np.zeros((G.shape[1], M.shape[1])) + + for k in range(n_mxne_iter): + X0 = X.copy() + active_set_0 = active_set.copy() + G_tmp = G[:, active_set] * weights[np.newaxis, :] + + if active_set_size is not None: + if np.sum(active_set) > (active_set_size * n_orient): + X, _active_set, _ = mixed_norm_solver( + M, G_tmp, alpha, debias=False, n_orient=n_orient, + maxit=maxit, tol=tol, active_set_size=active_set_size, + dgap_freq=dgap_freq, solver=solver, verbose=verbose) + else: + X, _active_set, _ = mixed_norm_solver( + M, G_tmp, alpha, debias=False, n_orient=n_orient, + maxit=maxit, tol=tol, active_set_size=None, + dgap_freq=dgap_freq, solver=solver, verbose=verbose) + else: + X, _active_set, _ = mixed_norm_solver( + M, G_tmp, alpha, debias=False, n_orient=n_orient, + maxit=maxit, tol=tol, active_set_size=None, + dgap_freq=dgap_freq, solver=solver, verbose=verbose) + + logger.info('active set size %d' % (_active_set.sum() / n_orient)) + + if _active_set.sum() > 0: + active_set[active_set] = _active_set + # Reapply weights to have correct unit + X *= weights[_active_set][:, np.newaxis] + weights = gprime(X) + p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), + 'fro') ** 2. + alpha * np.sum(g(X)) + E.append(p_obj) + + # Check convergence + if ((k >= 1) and np.all(active_set == active_set_0) and + np.all(np.abs(X - X0) < tol)): + print('Convergence reached after %d reweightings!' % k) + break + else: + active_set = np.zeros_like(active_set) + p_obj = 0.5 * np.linalg.norm(M) ** 2. 
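+            # the solver returned an empty active set: X is zero, so the
+            # objective reduces to the energy of the unexplained data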
+            E.append(p_obj)
+            break
+
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    return X, active_set, E
+
+
+###############################################################################
+# TF-MxNE
+
+@verbose
+def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
+    """Compute the Lipschitz constant for FISTA.
+
+    It uses a power iteration method.
+    """
+    n_times = M.shape[1]
+    n_points = G.shape[1]
+    iv = np.ones((n_points, n_times), dtype=np.float64)
+    v = phi(iv)
+    L = 1e100
+    for it in range(100):
+        L_old = L
+        logger.info('Lipschitz estimation: iteration = %d' % it)
+        iv = np.real(phiT(v))
+        Gv = np.dot(G, iv)
+        GtGv = np.dot(G.T, Gv)
+        w = phi(GtGv)
+        L = np.max(np.abs(w))  # l_inf norm
+        v = w / L
+        if abs((L - L_old) / L_old) < tol:
+            break
+    return L
+
+
+def safe_max_abs(A, ia):
+    """Compute np.max(np.abs(A[ia])) possible with empty A."""
+    if np.sum(ia):  # ia is not empty
+        return np.max(np.abs(A[ia]))
+    else:
+        return 0.
+
+
+def safe_max_abs_diff(A, ia, B, ib):
+    """Compute np.max(np.abs(A - B)) possible with empty A and B."""
+    A = A[ia] if np.sum(ia) else 0.0
+    B = B[ib] if np.sum(ib) else 0.0
+    return np.max(np.abs(A - B))
+
+
+class _Phi(object):
+    """Have phi stft as callable w/o using a lambda that does not pickle."""
+
+    def __init__(self, wsize, tstep, n_coefs, n_times):  # noqa: D102
+        self.wsize = np.atleast_1d(wsize)
+        self.tstep = np.atleast_1d(tstep)
+        self.n_coefs = np.atleast_1d(n_coefs)
+        self.n_dicts = len(tstep)
+        self.n_freqs = wsize // 2 + 1
+        self.n_steps = self.n_coefs // self.n_freqs
+        self.n_times = n_times
+        # ravel freq+time here
+        self.ops = list()
+        for ws, ts in zip(self.wsize, self.tstep):
+            self.ops.append(
+                stft(np.eye(n_times), ws, ts,
+                     verbose=False).reshape(n_times, -1))
+
+    def __call__(self, x):  # noqa: D105
+        if self.n_dicts == 1:
+            return x @ self.ops[0]
+        else:
+            return np.hstack(
+                [x @ op for op in self.ops]) / np.sqrt(self.n_dicts)
+
+    def norm(self, z, ord=2):
+        """Squared L2 norm if ord == 2 and L1 norm if ord == 1."""
+        if ord not in (1, 2):
+            raise ValueError('Only supported norm orders are 1 and 2. '
+                             'Got ord = %s' % ord)
+        stft_norm = stft_norm1 if ord == 1 else stft_norm2
+        norm = 0.
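+        # split the flat coefficient vector into one block per TF
+        # dictionary before accumulating the norms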
+ if len(self.n_coefs) > 1: + z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1], + axis=1) + else: + z_ = [np.atleast_2d(z)] + for i in range(len(z_)): + norm += stft_norm( + z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i])) + return norm + + +class _PhiT(object): + """Have phi.T istft as callable w/o using a lambda that does not pickle.""" + + def __init__(self, tstep, n_freqs, n_steps, n_times): # noqa: D102 + self.tstep = tstep + self.n_freqs = n_freqs + self.n_steps = n_steps + self.n_times = n_times + self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1 + self.n_coefs = list() + self.op_re = list() + self.op_im = list() + for nf, ns, ts in zip(self.n_freqs, self.n_steps, self.tstep): + nc = nf * ns + self.n_coefs.append(nc) + eye = np.eye(nc).reshape(nf, ns, nf, ns) + self.op_re.append(istft( + eye, ts, n_times).reshape(nc, n_times)) + self.op_im.append(istft( + eye * 1j, ts, n_times).reshape(nc, n_times)) + + def __call__(self, z): # noqa: D105 + if self.n_dicts == 1: + return z.real @ self.op_re[0] + z.imag @ self.op_im[0] + else: + x_out = np.zeros((z.shape[0], self.n_times)) + z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1) + for this_z, op_re, op_im in zip(z_, self.op_re, self.op_im): + x_out += this_z.real @ op_re + this_z.imag @ op_im + return x_out / np.sqrt(self.n_dicts) + + +def norm_l21_tf(Z, phi, n_orient, w_space=None): + """L21 norm for TF.""" + if Z.shape[0]: + l21_norm = np.sqrt( + phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1)) + if w_space is not None: + l21_norm *= w_space + l21_norm = l21_norm.sum() + else: + l21_norm = 0. + return l21_norm + + +def norm_l1_tf(Z, phi, n_orient, w_time): + """L1 norm for TF.""" + if Z.shape[0]: + n_positions = Z.shape[0] // n_orient + Z_ = np.sqrt(np.sum( + (np.abs(Z) ** 2.).reshape((n_orient, -1), order='F'), axis=0)) + Z_ = Z_.reshape((n_positions, -1), order='F') + if w_time is not None: + Z_ *= w_time + l1_norm = phi.norm(Z_, ord=1).sum() + else: + l1_norm = 0. + return l1_norm + + +def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): + """Weighted epsilon norm. + + The weighted epsilon norm is the dual norm of:: + + w_{space} * (1. - l1_ratio) * ||Y||_2 + l1_ratio * ||Y||_{1, w_{time}}. + + where `||Y||_{1, w_{time}} = (np.abs(Y) * w_time).sum()` + + Warning: it takes into account the fact that Y only contains coefficients + corresponding to the positive frequencies (see `stft_norm2()`): some + entries will be counted twice. It is also assumed that all entries of both + Y and w_time are non-negative. See + :footcite:`NdiayeEtAl2016,BurdakovMerkulov2001`. + + Parameters + ---------- + Y : array, shape (n_coefs,) + The input data. + l1_ratio : float between 0 and 1 + Tradeoff between L2 and L1 regularization. When it is 0, no temporal + regularization is applied. + phi : instance of _Phi + The TF operator. + w_space : float + Scalar weight of the L2 norm. By default, it is taken equal to 1. + w_time : array, shape (n_coefs, ) | None + Weights of each TF coefficient in the L1 norm. If None, weights equal + to 1 are used. + + + Returns + ------- + nu : float + The value of the dual norm evaluated at Y. + + References + ---------- + .. 
footbibliography:: + """ + # since the solution is invariant to flipped signs in Y, all entries + # of Y are assumed positive + + # Add negative freqs: count all freqs twice except first and last: + freqs_count = np.full(len(Y), 2) + for i, fc in enumerate(np.array_split(freqs_count, + np.cumsum(phi.n_coefs)[:-1])): + fc[:phi.n_steps[i]] = 1 + fc[-phi.n_steps[i]:] = 1 + + # exclude 0 weights: + if w_time is not None: + nonzero_weights = (w_time != 0.0) + Y = Y[nonzero_weights] + freqs_count = freqs_count[nonzero_weights] + w_time = w_time[nonzero_weights] + + norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y) + if l1_ratio == 1.: + # dual norm of L1 weighted is Linf with inverse weights + return norm_inf_Y + elif l1_ratio == 0.: + # dual norm of L2 is L2 + return np.sqrt(phi.norm(Y[None, :], ord=2).sum()) + + if norm_inf_Y == 0.: + return 0. + + # ignore some values of Y by lower bound on dual norm: + if w_time is None: + idx = Y > l1_ratio * norm_inf_Y + else: + idx = Y > l1_ratio * np.max(Y / (w_space * (1. - l1_ratio) + + l1_ratio * w_time)) + + if idx.sum() == 1: + return norm_inf_Y + + # sort both Y / w_time and freqs_count at the same time + if w_time is not None: + idx_sort = np.argsort(Y[idx] / w_time[idx])[::-1] + w_time = w_time[idx][idx_sort] + else: + idx_sort = np.argsort(Y[idx])[::-1] + + Y = Y[idx][idx_sort] + freqs_count = freqs_count[idx][idx_sort] + + Y = np.repeat(Y, freqs_count) + if w_time is not None: + w_time = np.repeat(w_time, freqs_count) + + K = Y.shape[0] + if w_time is None: + p_sum_Y2 = np.cumsum(Y ** 2) + p_sum_w2 = np.arange(1, K + 1) + p_sum_Yw = np.cumsum(Y) + upper = p_sum_Y2 / Y ** 2 - 2. * p_sum_Yw / Y + p_sum_w2 + else: + p_sum_Y2 = np.cumsum(Y ** 2) + p_sum_w2 = np.cumsum(w_time ** 2) + p_sum_Yw = np.cumsum(Y * w_time) + upper = (p_sum_Y2 / (Y / w_time) ** 2 - + 2. * p_sum_Yw / (Y / w_time) + p_sum_w2) + upper_greater = np.where(upper > w_space ** 2 * (1. - l1_ratio) ** 2 / + l1_ratio ** 2)[0] + + i0 = upper_greater[0] - 1 if upper_greater.size else K - 1 + + p_sum_Y2 = p_sum_Y2[i0] + p_sum_w2 = p_sum_w2[i0] + p_sum_Yw = p_sum_Yw[i0] + + denom = l1_ratio ** 2 * p_sum_w2 - w_space ** 2 * (1. - l1_ratio) ** 2 + if np.abs(denom) < 1e-10: + return p_sum_Y2 / (2. * l1_ratio * p_sum_Yw) + else: + delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom + return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom + + +def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None): + """Weighted epsilon-inf norm of phi(np.dot(G.T, R)). + + Parameters + ---------- + G : array, shape (n_sensors, n_sources) + Gain matrix a.k.a. lead field. + R : array, shape (n_sensors, n_times) + Residual. + phi : instance of _Phi + The TF operator. + l1_ratio : float between 0 and 1 + Parameter controlling the tradeoff between L21 and L1 regularization. + 0 corresponds to an absence of temporal regularization, ie MxNE. + n_orient : int + Number of dipoles per location (typically 1 or 3). + w_space : array, shape (n_positions,) or None. + Weights for the L2 term of the epsilon norm. If None, weights are + all equal to 1. + w_time : array, shape (n_positions, n_coefs) or None + Weights for the L1 term of the epsilon norm. If None, weights are + all equal to 1. + + Returns + ------- + nu : float + The maximum value of the epsilon norms over groups of n_orient dipoles + (consecutive rows of phi(np.dot(G.T, R))). 
+ """ + n_positions = G.shape[1] // n_orient + GTRPhi = np.abs(phi(np.dot(G.T, R))) + # norm over orientations: + GTRPhi = GTRPhi.reshape((n_orient, -1), order='F') + GTRPhi = np.linalg.norm(GTRPhi, axis=0) + GTRPhi = GTRPhi.reshape((n_positions, -1), order='F') + nu = 0. + for idx in range(n_positions): + GTRPhi_ = GTRPhi[idx] + w_t = w_time[idx] if w_time is not None else None + w_s = w_space[idx] if w_space is not None else 1. + norm_eps = norm_epsilon(GTRPhi_, l1_ratio, phi, w_space=w_s, + w_time=w_t) + if norm_eps > nu: + nu = norm_eps + + return nu + + +def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT, + n_orient, highest_d_obj, w_space=None, w_time=None): + """Duality gap for the time-frequency mixed norm inverse problem. + + See :footcite:`GramfortEtAl2012,NdiayeEtAl2016` + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_sources) + Gain matrix a.k.a. lead field. + Z : array, shape (n_active, n_coefs) + Sources in TF domain. + active_set : array of bool, shape (n_sources, ) + Mask of active sources. + alpha_space : float + The spatial regularization parameter. + alpha_time : float + The temporal regularization parameter. The higher it is the smoother + will be the estimated time series. + phi : instance of _Phi + The TF operator. + phiT : instance of _PhiT + The transpose of the TF operator. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + highest_d_obj : float + The highest value of the dual objective so far. + w_space : array, shape (n_positions, ) + Array of spatial weights. + w_time : array, shape (n_positions, n_coefs) + Array of TF weights. + + Returns + ------- + gap : float + Dual gap + p_obj : float + Primal objective + d_obj : float + Dual objective. gap = p_obj - d_obj + R : array, shape (n_sensors, n_times) + Current residual (M - G * X) + + References + ---------- + .. 
footbibliography:: + """ + X = phiT(Z) + GX = np.dot(G[:, active_set], X) + R = M - GX + + # some functions need w_time only on active_set, other need it completely + if w_time is not None: + w_time_as = w_time[active_set[::n_orient]] + else: + w_time_as = None + if w_space is not None: + w_space_as = w_space[active_set[::n_orient]] + else: + w_space_as = None + + penaltyl1 = norm_l1_tf(Z, phi, n_orient, w_time_as) + penaltyl21 = norm_l21_tf(Z, phi, n_orient, w_space_as) + nR2 = sum_squared(R) + p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1 + + l1_ratio = alpha_time / (alpha_space + alpha_time) + dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, + w_space=w_space, w_time=w_time) + scaling = min(1., (alpha_space + alpha_time) / dual_norm) + + d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX) + d_obj = max(d_obj, highest_d_obj) + + gap = p_obj - d_obj + return gap, p_obj, d_obj, R + + +def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, + alpha_time, lipschitz_constant, phi, phiT, + w_space=None, w_time=None, n_orient=1, + maxit=200, tol=1e-8, dgap_freq=10, perc=None, + timeit=True, verbose=None): + n_sources = G.shape[1] + n_positions = n_sources // n_orient + + # First make G fortran for faster access to blocks of columns + Gd = np.asfortranarray(G) + G = np.ascontiguousarray( + Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1)) + + R = M.copy() # residual + active = np.where(active_set[::n_orient])[0] + for idx in active: + R -= np.dot(G[idx], phiT(Z[idx])) + + E = [] # track primal objective function + + if w_time is None: + alpha_time_lc = alpha_time / lipschitz_constant + else: + alpha_time_lc = alpha_time * w_time / lipschitz_constant[:, None] + if w_space is None: + alpha_space_lc = alpha_space / lipschitz_constant + else: + alpha_space_lc = alpha_space * w_space / lipschitz_constant + + converged = False + d_obj = - np.inf + + for i in range(maxit): + for jj in candidates: + ids = jj * n_orient + ide = ids + n_orient + + G_j = G[jj] + Z_j = Z[jj] + active_set_j = active_set[ids:ide] + + was_active = np.any(active_set_j) + + # gradient step + GTR = np.dot(G_j.T, R) / lipschitz_constant[jj] + X_j_new = GTR.copy() + + if was_active: + X_j = phiT(Z_j) + R += np.dot(G_j, X_j) + X_j_new += X_j + + rows_norm = np.linalg.norm(X_j_new, 'fro') + if rows_norm <= alpha_space_lc[jj]: + if was_active: + Z[jj] = 0.0 + active_set_j[:] = False + else: + GTR_phi = phi(GTR) + if was_active: + Z_j_new = Z_j + GTR_phi + else: + Z_j_new = GTR_phi + col_norm = np.linalg.norm(Z_j_new, axis=0) + + if np.all(col_norm <= alpha_time_lc[jj]): + Z[jj] = 0.0 + active_set_j[:] = False + else: + # l1 + shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum( + col_norm, alpha_time_lc[jj]), 0.0) + if w_time is not None: + shrink[w_time[jj] == 0.0] = 0.0 + Z_j_new *= shrink[np.newaxis, :] + + # l21 + shape_init = Z_j_new.shape + row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum()) + if row_norm <= alpha_space_lc[jj]: + Z[jj] = 0.0 + active_set_j[:] = False + else: + shrink = np.maximum( + 1.0 - alpha_space_lc[jj] / + np.maximum(row_norm, alpha_space_lc[jj]), 0.0) + Z_j_new *= shrink + Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy() + active_set_j[:] = True + Z_j_phi_T = phiT(Z[jj]) + R -= np.dot(G_j, Z_j_phi_T) + + if (i + 1) % dgap_freq == 0: + Zd = np.vstack([Z[pos] for pos in range(n_positions) + if np.any(Z[pos])]) + gap, p_obj, d_obj, _ = dgap_l21l1( + M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT, + 
n_orient, d_obj, w_space=w_space, w_time=w_time) + converged = (gap < tol) + E.append(p_obj) + logger.info("\n Iteration %d :: n_active %d" % ( + i + 1, np.sum(active_set) / n_orient)) + logger.info(" dgap %.2e :: p_obj %f :: d_obj %f" % ( + gap, p_obj, d_obj)) + + if converged: + break + + if perc is not None: + if np.sum(active_set) / float(n_orient) <= perc * n_positions: + break + + return Z, active_set, E, converged + + +@verbose +def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, + lipschitz_constant, phi, phiT, + Z_init=None, w_space=None, + w_time=None, n_orient=1, maxit=200, + tol=1e-8, dgap_freq=10, + verbose=None): + + n_sensors, n_times = M.shape + n_sources = G.shape[1] + n_positions = n_sources // n_orient + + Z = dict.fromkeys(np.arange(n_positions), 0.0) + active_set = np.zeros(n_sources, dtype=bool) + active = [] + if Z_init is not None: + if Z_init.shape != (n_sources, phi.n_coefs.sum()): + raise Exception('Z_init must be None or an array with shape ' + '(n_sources, n_coefs).') + for ii in range(n_positions): + if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]): + active_set[ii * n_orient:(ii + 1) * n_orient] = True + active.append(ii) + if len(active): + Z.update(dict(zip(active, + np.vsplit(Z_init[active_set], len(active))))) + + E = [] + candidates = range(n_positions) + d_obj = -np.inf + + while True: + # single BCD pass on all positions: + Z_init = dict.fromkeys(np.arange(n_positions), 0.0) + Z_init.update(dict(zip(active, Z.values()))) + Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_( + M, G, Z_init, active_set, candidates, alpha_space, alpha_time, + lipschitz_constant, phi, phiT, w_space=w_space, w_time=w_time, + n_orient=n_orient, maxit=1, tol=tol, perc=None, verbose=verbose) + + E += E_tmp + + # multiple BCD pass on active positions: + active = np.where(active_set[::n_orient])[0] + Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active])) + candidates_ = range(len(active)) + if w_space is not None: + w_space_as = w_space[active_set[::n_orient]] + else: + w_space_as = None + if w_time is not None: + w_time_as = w_time[active_set[::n_orient]] + else: + w_time_as = None + + Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_( + M, G[:, active_set], Z_init, + np.ones(len(active) * n_orient, dtype=bool), + candidates_, alpha_space, alpha_time, + lipschitz_constant[active_set[::n_orient]], phi, phiT, + w_space=w_space_as, w_time=w_time_as, + n_orient=n_orient, maxit=maxit, tol=tol, + dgap_freq=dgap_freq, perc=0.5, + verbose=verbose) + active = np.where(active_set[::n_orient])[0] + active_set[active_set] = as_.copy() + E += E_tmp + + converged = True + if converged: + Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) + gap, p_obj, d_obj, _ = dgap_l21l1( + M, G, Zd, active_set, alpha_space, alpha_time, + phi, phiT, n_orient, d_obj, w_space, w_time) + logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d" + % (gap, p_obj, d_obj, np.sum(active_set) / n_orient)) + if gap < tol: + logger.info("\nConvergence reached!\n") + break + + if active_set.sum(): + Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) + X = phiT(Z) + else: + Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128) + X = np.zeros((0, n_times)) + + return X, Z, active_set, E, gap + + +@verbose +def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, + n_orient=1, maxit=200, tol=1e-8, + active_set_size=None, debias=True, return_gap=False, + dgap_freq=10, verbose=None): + """Solve TF L21+L1 inverse solver 
with BCD and an active set approach.
+
+    See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`.
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha_space : float
+        The spatial regularization parameter.
+    alpha_time : float
+        The temporal regularization parameter. The higher it is, the
+        smoother the estimated time series will be.
+    wsize : int or array-like
+        Length of the STFT window in samples (must be a multiple of 4).
+        If an array is passed, multiple TF dictionaries are used (each having
+        its own wsize and tstep) and each entry of wsize must be a multiple
+        of 4.
+    tstep : int or array-like
+        Step between successive windows in samples (must be a multiple of 2,
+        a divisor of wsize and smaller than wsize/2) (default: wsize/2).
+        If an array is passed, multiple TF dictionaries are used (each having
+        its own wsize and tstep), and each entry of tstep must be a multiple
+        of 2 and divide the corresponding entry of wsize.
+    n_orient : int
+        The number of orientations (1 : fixed or 3 : free or loose).
+    maxit : int
+        The maximum number of iterations.
+    tol : float
+        Convergence is declared when the absolute difference between
+        estimates at 2 successive iterations falls below tol.
+    debias : bool
+        Debias source estimates.
+    return_gap : bool
+        Return final duality gap.
+    dgap_freq : int or np.inf
+        The duality gap is evaluated every dgap_freq iterations.
+    %(verbose)s
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function at each dgap_freq iteration. If
+        dgap_freq is np.inf, it will be empty.
+    gap : float
+        Final duality gap. Returned only if return_gap is True.
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+    n_positions = n_sources // n_orient
+
+    tstep = np.atleast_1d(tstep)
+    wsize = np.atleast_1d(wsize)
+    if len(tstep) != len(wsize):
+        raise ValueError('The same number of window sizes and steps must be '
+                         'passed. Got tstep = %s and wsize = %s' %
+                         (tstep, wsize))
+
+    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
+    n_freqs = wsize // 2 + 1
+    n_coefs = n_steps * n_freqs
+    phi = _Phi(wsize, tstep, n_coefs, n_times)
+    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
+
+    if n_orient == 1:
+        lc = np.sum(G * G, axis=0)
+    else:
+        lc = np.empty(n_positions)
+        for j in range(n_positions):
+            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+            lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
+
+    logger.info("Using block coordinate descent with active set approach")
+    X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set(
+        M, G, alpha_space, alpha_time, lc, phi, phiT,
+        Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol,
+        dgap_freq=dgap_freq, verbose=None)
+
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    if return_gap:
+        return X, active_set, E, gap
+    else:
+        return X, active_set, E
+
+
+@verbose
+def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time,
+                                   n_tfmxne_iter, wsize=64, tstep=4,
+                                   maxit=3000, tol=1e-8, debias=True,
+                                   n_orient=1, dgap_freq=10, verbose=None):
+    """Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach.
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha_space : float
+        The spatial regularization parameter. The higher it is, the fewer
+        active sources there will be.
+    alpha_time : float
+        The temporal regularization parameter. The higher it is, the
+        smoother the estimated time series will be. 0 means no temporal
+        regularization, a.k.a. irMxNE.
+    n_tfmxne_iter : int
+        Number of TF-MxNE iterations. If > 1, iterative reweighting is
+        applied.
+    wsize : int or array-like
+        Length of the STFT window in samples (must be a multiple of 4).
+        If an array is passed, multiple TF dictionaries are used (each having
+        its own wsize and tstep) and each entry of wsize must be a multiple
+        of 4.
+    tstep : int or array-like
+        Step between successive windows in samples (must be a multiple of 2,
+        a divisor of wsize and smaller than wsize/2) (default: wsize/2).
+        If an array is passed, multiple TF dictionaries are used (each having
+        its own wsize and tstep), and each entry of tstep must be a multiple
+        of 2 and divide the corresponding entry of wsize.
+    maxit : int
+        The maximum number of iterations for each TF-MxNE problem.
+    tol : float
+        Convergence is declared when the absolute difference between
+        estimates at 2 successive iterations falls below tol. Also used as
+        criterion on the duality gap for each TF-MxNE problem.
+    debias : bool
+        Debias source estimates.
+    n_orient : int
+        The number of orientations (1 : fixed or 3 : free or loose).
+    dgap_freq : int or np.inf
+        The duality gap is evaluated every dgap_freq iterations.
+    %(verbose)s
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function over iterations.
+    """
+    n_sensors, n_times = M.shape
+    n_sources = G.shape[1]
+    n_positions = n_sources // n_orient
+
+    tstep = np.atleast_1d(tstep)
+    wsize = np.atleast_1d(wsize)
+    if len(tstep) != len(wsize):
+        raise ValueError('The same number of window sizes and steps must be '
+                         'passed. Got tstep = %s and wsize = %s' %
+                         (tstep, wsize))
+
+    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
+    n_freqs = wsize // 2 + 1
+    n_coefs = n_steps * n_freqs
+    phi = _Phi(wsize, tstep, n_coefs, n_times)
+    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
+
+    if n_orient == 1:
+        lc = np.sum(G * G, axis=0)
+    else:
+        lc = np.empty(n_positions)
+        for j in range(n_positions):
+            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+            lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
+
+    # space and time penalties, and inverse of their derivatives:
+    def g_space(Z):
+        return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(
+            -1, n_orient).sum(axis=1)))
+
+    def g_space_prime_inv(Z):
+        return 2. * g_space(Z)
+
+    def g_time(Z):
+        return np.sqrt(np.sqrt(np.sum((np.abs(Z) ** 2.).reshape(
+            (n_orient, -1), order='F'), axis=0)).reshape(
+            (-1, Z.shape[1]), order='F'))
+
+    def g_time_prime_inv(Z):
+        return 2. * g_time(Z)
+
+    E = list()
+
+    active_set = np.ones(n_sources, dtype=bool)
+    Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128)
+
+    for k in range(n_tfmxne_iter):
+        active_set_0 = active_set.copy()
+        Z0 = Z.copy()
+
+        if k == 0:
+            w_space = None
+            w_time = None
+        else:
+            w_space = 1. / g_space_prime_inv(Z)
+            w_time = g_time_prime_inv(Z)
+            w_time[w_time == 0.0] = -1.
+            w_time = 1. 
/ w_time + w_time[w_time < 0.0] = 0.0 + + X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set( + M, G[:, active_set], alpha_space, alpha_time, + lc[active_set[::n_orient]], phi, phiT, + Z_init=Z, w_space=w_space, w_time=w_time, n_orient=n_orient, + maxit=maxit, tol=tol, dgap_freq=dgap_freq, verbose=None) + + active_set[active_set] = active_set_ + + if active_set.sum() > 0: + l21_penalty = np.sum(g_space(Z.copy())) + l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum() + + p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), + 'fro') ** 2. + alpha_space * l21_penalty + + alpha_time * l1_penalty) + E.append(p_obj) + + logger.info('Iteration %d: active set size=%d, E=%f' % ( + k + 1, active_set.sum() / n_orient, p_obj)) + + # Check convergence + if np.array_equal(active_set, active_set_0): + max_diff = np.amax(np.abs(Z - Z0)) + if (max_diff < tol): + print('Convergence reached after %d reweightings!' % k) + break + else: + p_obj = 0.5 * np.linalg.norm(M) ** 2. + E.append(p_obj) + logger.info('Iteration %d: as_size=%d, E=%f' % ( + k + 1, active_set.sum() / n_orient, p_obj)) + break + + if debias: + if active_set.sum() > 0: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + return X, active_set, E diff --git a/python/libs/mne/inverse_sparse/tests/__init__.py b/python/libs/mne/inverse_sparse/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/inverse_sparse/tests/test_gamma_map.py b/python/libs/mne/inverse_sparse/tests/test_gamma_map.py new file mode 100644 index 0000000..527efcf --- /dev/null +++ b/python/libs/mne/inverse_sparse/tests/test_gamma_map.py @@ -0,0 +1,161 @@ +# Author: Martin Luessi +# +# License: Simplified BSD + +import os.path as op + +import pytest +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_allclose + +import mne +from mne.datasets import testing +from mne import (read_cov, read_forward_solution, read_evokeds, + convert_forward_solution, VectorSourceEstimate) +from mne.cov import regularize +from mne.inverse_sparse import gamma_map +from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles +from mne.minimum_norm.tests.test_inverse import (assert_stc_res, + assert_var_exp_log) +from mne import pick_types_forward +from mne.utils import assert_stcs_equal, catch_logging +from mne.dipole import Dipole + +data_path = testing.data_path(download=False) +fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') +subjects_dir = op.join(data_path, 'subjects') + + +def _check_stc(stc, evoked, idx, hemi, fwd, dist_limit=0., ratio=50., + res=None, atol=1e-20): + """Check correctness.""" + assert_array_almost_equal(stc.times, evoked.times, 5) + stc_orig = stc + if isinstance(stc, VectorSourceEstimate): + assert stc.data.any(1).any(1).all() # all dipoles should have some + stc = stc.magnitude() + amps = np.sum(stc.data ** 2, axis=1) + order = np.argsort(amps)[::-1] + amps = amps[order] + verts = np.concatenate(stc.vertices)[order] + hemi_idx = int(order[0] >= len(stc.vertices[1])) + hemis = ['lh', 'rh'] + assert hemis[hemi_idx] == hemi + dist = np.linalg.norm(np.diff(fwd['src'][hemi_idx]['rr'][[idx, verts[0]]], + axis=0)[0]) * 1000. 
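+    # Localization sanity check: the strongest recovered source should lie
+    # within ``dist_limit`` mm of the simulated vertex ``idx`` and should
+    # dominate the second-strongest source by more than ``ratio``.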
+ assert dist <= dist_limit + assert amps[0] > ratio * amps[1] + if res is not None: + assert_stc_res(evoked, stc_orig, fwd, res, atol=atol) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_gamma_map_standard(): + """Test Gamma MAP inverse.""" + forward = read_forward_solution(fname_fwd) + forward = convert_forward_solution(forward, surf_ori=True) + + forward = pick_types_forward(forward, meg=False, eeg=True) + evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0), + proj=False) + evoked.resample(50, npad=100) + evoked.crop(tmin=0.1, tmax=0.14) # crop to window around peak + + cov = read_cov(fname_cov) + cov = regularize(cov, evoked.info, rank=None) + + alpha = 0.5 + with catch_logging() as log: + stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4, + xyz_same_gamma=True, update_mode=1, verbose=True) + _check_stc(stc, evoked, 68477, 'lh', fwd=forward) + assert_var_exp_log(log.getvalue(), 20, 22) + + with catch_logging() as log: + stc_vec, res = gamma_map( + evoked, forward, cov, alpha, tol=1e-4, xyz_same_gamma=True, + update_mode=1, pick_ori='vector', return_residual=True, + verbose=True) + assert_var_exp_log(log.getvalue(), 20, 22) + assert_stcs_equal(stc_vec.magnitude(), stc) + _check_stc(stc_vec, evoked, 68477, 'lh', fwd=forward, res=res) + + stc, res = gamma_map( + evoked, forward, cov, alpha, tol=1e-4, xyz_same_gamma=False, + update_mode=1, pick_ori='vector', return_residual=True) + _check_stc(stc, evoked, 82010, 'lh', fwd=forward, dist_limit=6., ratio=2., + res=res) + + with catch_logging() as log: + dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4, + xyz_same_gamma=False, update_mode=1, + return_as_dipoles=True, verbose=True) + exp_var = assert_var_exp_log(log.getvalue(), 58, 60) + dip_exp_var = np.mean(sum(dip.gof for dip in dips)) + assert_allclose(exp_var, dip_exp_var, atol=10) # not really equiv, close + assert (isinstance(dips[0], Dipole)) + stc_dip = make_stc_from_dipoles(dips, forward['src']) + assert_stcs_equal(stc.magnitude(), stc_dip) + + # force fixed orientation + stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-4, + xyz_same_gamma=False, update_mode=2, + loose=0, return_residual=True) + _check_stc(stc, evoked, 85739, 'lh', fwd=forward, ratio=20., res=res) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_gamma_map_vol_sphere(): + """Gamma MAP with a sphere forward and volumic source space.""" + evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0), + proj=False) + evoked.resample(50, npad=100) + evoked.crop(tmin=0.1, tmax=0.16) # crop to window around peak + + cov = read_cov(fname_cov) + cov = regularize(cov, evoked.info, rank=None) + + info = evoked.info + sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) + src = mne.setup_volume_source_space(subject=None, pos=30., mri=None, + sphere=(0.0, 0.0, 0.0, 0.08), + bem=None, mindist=5.0, + exclude=2.0, sphere_units='m') + fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere, + eeg=False, meg=True) + + alpha = 0.5 + pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha, + loose=0, return_residual=False) + + pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha, + loose=0.2, return_residual=False) + + stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4, + xyz_same_gamma=False, update_mode=2, + return_residual=False) + + assert_array_almost_equal(stc.times, evoked.times, 5) + + # Compare orientation obtained using fit_dipole and gamma_map + # for a simulated evoked containing a single dipole + stc = 
mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4), + vertices=[stc.vertices[0][:1]], + tmin=stc.tmin, + tstep=stc.tstep) + evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9, + use_cps=True) + + dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True) + + amp_max = [np.max(d.amplitude) for d in dip_gmap] + dip_gmap = dip_gmap[np.argmax(amp_max)] + assert (dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices[0]]) + + dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0] + assert (np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99) diff --git a/python/libs/mne/inverse_sparse/tests/test_mxne_debiasing.py b/python/libs/mne/inverse_sparse/tests/test_mxne_debiasing.py new file mode 100644 index 0000000..a81a65c --- /dev/null +++ b/python/libs/mne/inverse_sparse/tests/test_mxne_debiasing.py @@ -0,0 +1,22 @@ +# Authors: Daniel Strohmeier +# Alexandre Gramfort +# +# License: BSD-3-Clause + +import numpy as np +from numpy.testing import assert_almost_equal + +from mne.inverse_sparse.mxne_debiasing import compute_bias + + +def test_compute_debiasing(): + """Test source amplitude debiasing.""" + rng = np.random.RandomState(42) + G = rng.randn(10, 4) + X = rng.randn(4, 20) + debias_true = np.arange(1, 5, dtype=np.float64) + M = np.dot(G, X * debias_true[:, np.newaxis]) + debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7) + assert_almost_equal(debias, debias_true, decimal=5) + debias = compute_bias(M, G, X, max_iter=10000, n_orient=2, tol=1e-5) + assert_almost_equal(debias, [1.8, 1.8, 3.72, 3.72], decimal=2) diff --git a/python/libs/mne/inverse_sparse/tests/test_mxne_inverse.py b/python/libs/mne/inverse_sparse/tests/test_mxne_inverse.py new file mode 100644 index 0000000..4da13b6 --- /dev/null +++ b/python/libs/mne/inverse_sparse/tests/test_mxne_inverse.py @@ -0,0 +1,435 @@ +# Author: Alexandre Gramfort +# Daniel Strohmeier +# +# License: Simplified BSD + +import os.path as op + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_allclose, + assert_array_less, assert_array_equal) +import pytest + +import mne +from mne.datasets import testing +from mne.label import read_label +from mne import (read_cov, read_forward_solution, read_evokeds, + convert_forward_solution) +from mne.inverse_sparse import mixed_norm, tf_mixed_norm +from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles, _split_gof +from mne.inverse_sparse.mxne_inverse import _compute_mxne_sure +from mne.inverse_sparse.mxne_optim import norm_l2inf +from mne.minimum_norm import apply_inverse, make_inverse_operator +from mne.minimum_norm.tests.test_inverse import \ + assert_var_exp_log, assert_stc_res +from mne.utils import assert_stcs_equal, catch_logging, _record_warnings +from mne.dipole import Dipole +from mne.source_estimate import VolSourceEstimate +from mne.simulation import simulate_sparse_stc, simulate_evoked + + +data_path = testing.data_path(download=False) +# NOTE: These use the ave and cov from sample dataset (no _trunc) +fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') +fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif') +fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') +fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') +label = 'Aud-rh' +fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label) + + +@pytest.fixture(scope='module', params=[testing._pytest_param]) +def forward(): + 
"""Get a forward solution.""" + # module scope it for speed (but don't overwrite in use!) + return read_forward_solution(fname_fwd) + + +@testing.requires_testing_data +@pytest.mark.timeout(150) # ~30 sec on Travis Linux +@pytest.mark.slowtest +def test_mxne_inverse_standard(forward): + """Test (TF-)MxNE inverse computation.""" + # Read noise covariance matrix + cov = read_cov(fname_cov) + + # Handling average file + loose = 0.0 + depth = 0.9 + + evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0)) + evoked.crop(tmin=-0.05, tmax=0.2) + + evoked_l21 = evoked.copy() + evoked_l21.crop(tmin=0.081, tmax=0.1) + label = read_label(fname_label) + assert label.hemi == 'rh' + + forward = convert_forward_solution(forward, surf_ori=True) + + # Reduce source space to make test computation faster + inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov, + loose=loose, depth=depth, + fixed=True, use_cps=True) + stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9., + method='dSPM') + stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0 + stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1. + weights_min = 0.5 + + # MxNE tests + alpha = 70 # spatial regularization parameter + + with _record_warnings(): # CD + stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, + depth=depth, maxit=300, tol=1e-8, + active_set_size=10, weights=stc_dspm, + weights_min=weights_min, solver='cd') + stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, + depth=depth, maxit=300, tol=1e-8, active_set_size=10, + weights=stc_dspm, weights_min=weights_min, + solver='bcd') + assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5) + assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5) + assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0) + assert stc_cd.vertices[1][0] in label.vertices + assert stc_bcd.vertices[1][0] in label.vertices + + # vector + with _record_warnings(): # no convergence + stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2) + with _record_warnings(): # no convergence + stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2, + pick_ori='vector') + assert_stcs_equal(stc_vec.magnitude(), stc) + with _record_warnings(), \ + pytest.raises(ValueError, match='pick_ori='): + mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2, + pick_ori='vector') + + with _record_warnings(), catch_logging() as log: # CD + dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, + depth=depth, maxit=300, tol=1e-8, active_set_size=10, + weights=stc_dspm, weights_min=weights_min, + solver='cd', return_as_dipoles=True, verbose=True) + stc_dip = make_stc_from_dipoles(dips, forward['src']) + assert isinstance(dips[0], Dipole) + assert stc_dip.subject == "sample" + assert_stcs_equal(stc_cd, stc_dip) + assert_var_exp_log(log.getvalue(), 51, 53) # 51.8 + + # Single time point things should match + with _record_warnings(), catch_logging() as log: + dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081), + forward, cov, alpha, loose=loose, + depth=depth, maxit=300, tol=1e-8, active_set_size=10, + weights=stc_dspm, weights_min=weights_min, + solver='cd', return_as_dipoles=True, verbose=True) + assert_var_exp_log(log.getvalue(), 37.8, 38.0) # 37.9 + gof = sum(dip.gof[0] for dip in dips) # these are now partial exp vars + assert_allclose(gof, 37.9, atol=0.1) + + with _record_warnings(), catch_logging() as log: + stc, res = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, + depth=depth, maxit=300, tol=1e-8, + 
weights=stc_dspm, # gh-6382 + active_set_size=10, return_residual=True, + solver='cd', verbose=True) + assert_array_almost_equal(stc.times, evoked_l21.times, 5) + assert stc.vertices[1][0] in label.vertices + assert_var_exp_log(log.getvalue(), 51, 53) # 51.8 + assert stc.data.min() < -1e-9 # signed + assert_stc_res(evoked_l21, stc, forward, res) + + # irMxNE tests + with _record_warnings(), catch_logging() as log: # CD + stc, residual = mixed_norm( + evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001, + depth=depth, maxit=300, tol=1e-8, active_set_size=10, + solver='cd', return_residual=True, pick_ori='vector', verbose=True) + assert_array_almost_equal(stc.times, evoked_l21.times, 5) + assert stc.vertices[1][0] in label.vertices + assert stc.vertices == [[63152], [79017]] + assert_var_exp_log(log.getvalue(), 51, 53) # 51.8 + assert_stc_res(evoked_l21, stc, forward, residual) + + # Do with TF-MxNE for test memory savings + alpha = 60. # overall regularization parameter + l1_ratio = 0.01 # temporal regularization proportion + + stc, _ = tf_mixed_norm(evoked, forward, cov, + loose=loose, depth=depth, maxit=100, tol=1e-4, + tstep=4, wsize=16, window=0.1, weights=stc_dspm, + weights_min=weights_min, return_residual=True, + alpha=alpha, l1_ratio=l1_ratio) + assert_array_almost_equal(stc.times, evoked.times, 5) + assert stc.vertices[1][0] in label.vertices + + # vector + stc_nrm = tf_mixed_norm( + evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4, + tstep=4, wsize=16, window=0.1, weights=stc_dspm, + weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio) + stc_vec, residual = tf_mixed_norm( + evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4, + tstep=4, wsize=16, window=0.1, weights=stc_dspm, + weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio, + pick_ori='vector', return_residual=True) + assert_stcs_equal(stc_vec.magnitude(), stc_nrm) + + pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov, + alpha=101, l1_ratio=0.03) + pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov, + alpha=50., l1_ratio=1.01) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_mxne_vol_sphere(): + """Test (TF-)MxNE with a sphere forward and volumic source space.""" + evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0)) + evoked.crop(tmin=-0.05, tmax=0.2) + cov = read_cov(fname_cov) + + evoked_l21 = evoked.copy() + evoked_l21.crop(tmin=0.081, tmax=0.1) + + info = evoked.info + sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) + src = mne.setup_volume_source_space(subject=None, pos=15., mri=None, + sphere=(0.0, 0.0, 0.0, 0.08), + bem=None, mindist=5.0, + exclude=2.0, sphere_units='m') + fwd = mne.make_forward_solution(info, trans=None, src=src, + bem=sphere, eeg=False, meg=True) + + alpha = 80. 
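+    # A sphere/volume source space carries no cortical surface orientation,
+    # so fixed (loose=0.) and loose (0 < loose < 1) orientation constraints
+    # cannot be applied and are expected to raise a ValueError below.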
+ pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha, + loose=0.0, return_residual=False, + maxit=3, tol=1e-8, active_set_size=10) + + pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha, + loose=0.2, return_residual=False, + maxit=3, tol=1e-8, active_set_size=10) + + # irMxNE tests + with catch_logging() as log: + stc = mixed_norm(evoked_l21, fwd, cov, alpha, + n_mxne_iter=1, maxit=30, tol=1e-8, + active_set_size=10, verbose=True) + assert isinstance(stc, VolSourceEstimate) + assert_array_almost_equal(stc.times, evoked_l21.times, 5) + assert_var_exp_log(log.getvalue(), 9, 11) # 10.2 + + # Compare orientation obtained using fit_dipole and gamma_map + # for a simulated evoked containing a single dipole + stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4), + vertices=[stc.vertices[0][:1]], + tmin=stc.tmin, + tstep=stc.tstep) + evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9, + use_cps=True) + + dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80, + n_mxne_iter=1, maxit=30, tol=1e-8, + active_set_size=10, return_as_dipoles=True) + + amp_max = [np.max(d.amplitude) for d in dip_mxne] + dip_mxne = dip_mxne[np.argmax(amp_max)] + assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]] + + dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0] + assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99 + dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0]) + assert dist < 4. # within 4 mm + + # Do with TF-MxNE for test memory savings + alpha = 60. # overall regularization parameter + l1_ratio = 0.01 # temporal regularization proportion + + stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4, + tstep=16, wsize=32, window=0.1, alpha=alpha, + l1_ratio=l1_ratio, return_residual=True) + assert isinstance(stc, VolSourceEstimate) + assert_array_almost_equal(stc.times, evoked.times, 5) + + +@pytest.mark.parametrize('mod', ( + None, 'mult', 'augment', 'sign', 'zero', 'less')) +def test_split_gof_basic(mod): + """Test splitting the goodness of fit.""" + # first a trivial case + gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T + M = np.ones((3, 1)) + X = np.ones((2, 1)) + M_est = gain @ X + assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate + if mod == 'mult': + gain *= [1., -0.5] + X[1] *= -2 + elif mod == 'augment': + gain = np.concatenate((gain, np.zeros((3, 1))), axis=1) + X = np.concatenate((X, [[1.]])) + elif mod == 'sign': + gain[1] *= -1 + M[1] *= -1 + M_est[1] *= -1 + elif mod in ('zero', 'less'): + gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T + if mod == 'zero': + X[:, 0] = [1., 0.] + else: + X[:, 0] = [1., 0.5] + M_est = gain @ X + else: + assert mod is None + res = M - M_est + gof = 100 * (1. 
- (res * res).sum() / (M * M).sum()) + gof_split = _split_gof(M, X, gain) + assert_allclose(gof_split.sum(), gof) + want = gof_split[[0, 0]] + if mod == 'augment': + want = np.concatenate((want, [[0]])) + if mod in ('mult', 'less'): + assert_array_less(gof_split[1], gof_split[0]) + elif mod == 'zero': + assert_allclose(gof_split[0], gof_split.sum(0)) + assert_allclose(gof_split[1], 0., atol=1e-6) + else: + assert_allclose(gof_split, want, atol=1e-12) + + +@testing.requires_testing_data +@pytest.mark.parametrize('idx, weights', [ + # empirically determined approximately orthogonal columns: 0, 15157, 19448 + ([0], [1]), + ([0, 15157], [1, 1]), + ([0, 15157], [1, 3]), + ([0, 15157], [5, -1]), + ([0, 15157, 19448], [1, 1, 1]), + ([0, 15157, 19448], [1e-2, 1, 5]), +]) +def test_split_gof_meg(forward, idx, weights): + """Test GOF splitting on MEG data.""" + gain = forward['sol']['data'][:, idx] + # close to orthogonal + norms = np.linalg.norm(gain, axis=0) + triu = np.triu_indices(len(idx), 1) + prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu] + assert_array_less(prods, 5e-3) # approximately orthogonal + # first, split across time (one dipole per time point) + M = gain * weights + gof_split = _split_gof(M, np.diag(weights), gain) + assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100 + assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc + # next, summed to a single time point (all dipoles active at one time pt) + weights = np.array(weights)[:, np.newaxis] + x = gain @ weights + assert x.shape == (gain.shape[0], 1) + gof_split = _split_gof(x, weights, gain) + want = (norms * weights.T).T ** 2 + want = 100 * want / want.sum() + assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2) + assert_allclose(gof_split.sum(), 100, rtol=1e-5) + + +@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [ + (10, 15, 7), + (20, 60, 20), +]) +@pytest.mark.parametrize('nnz', [2, 4]) +@pytest.mark.parametrize('corr', [0.75]) +@pytest.mark.parametrize('n_orient', [1, 3]) +def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr, + n_orient, snr=4): + """Tests SURE criterion for automatic alpha selection on synthetic data.""" + rng = np.random.RandomState(0) + sigma = np.sqrt(1 - corr ** 2) + U = rng.randn(n_sensors) + # generate gain matrix + G = np.empty([n_sensors, n_dipoles], order='F') + G[:, :n_orient] = np.expand_dims(U, axis=-1) + n_dip_per_pos = n_dipoles // n_orient + for j in range(1, n_dip_per_pos): + U *= corr + U += sigma * rng.randn(n_sensors) + G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1) + # generate coefficient matrix + support = rng.choice(n_dip_per_pos, nnz, replace=False) + X = np.zeros((n_dipoles, n_times)) + for k in support: + X[k * n_orient:(k + 1) * n_orient, :] = rng.normal( + size=(n_orient, n_times)) + # generate measurement matrix + M = G @ X + noise = rng.randn(n_sensors, n_times) + sigma = 1 / np.linalg.norm(noise) * np.linalg.norm(M) / snr + M += sigma * noise + # inverse modeling with sure + alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False) + alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15) + _, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma, + n_mxne_iter=5, maxit=3000, tol=1e-4, + n_orient=n_orient, + active_set_size=10, debias=True, + solver="auto", dgap_freq=10, + random_state=0, verbose=False) + assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz + + +@pytest.mark.slowtest # slow on Azure +@testing.requires_testing_data +def 
test_mxne_inverse_sure(): + """Tests SURE criterion for automatic alpha selection on MEG data.""" + def data_fun(times): + data = np.zeros(times.shape) + data[times >= 0] = 50e-9 + return data + n_dipoles = 2 + raw = mne.io.read_raw_fif(fname_raw) + info = mne.io.read_info(fname_data) + with info._unlock(): + info['projs'] = [] + noise_cov = mne.make_ad_hoc_cov(info) + label_names = ['Aud-lh', 'Aud-rh'] + labels = [ + mne.read_label(data_path / 'MEG' / 'sample' / 'labels' / f'{ln}.label') + for ln in label_names] + fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') + forward = mne.read_forward_solution(fname_fwd) + forward = mne.pick_types_forward(forward, meg="grad", eeg=False, + exclude=raw.info['bads']) + times = np.arange(100, dtype=np.float64) / raw.info['sfreq'] - 0.1 + stc = simulate_sparse_stc(forward['src'], n_dipoles=n_dipoles, times=times, + random_state=1, labels=labels, data_fun=data_fun) + nave = 30 + evoked = simulate_evoked(forward, stc, info, noise_cov, nave=nave, + use_cps=False, iir_filter=None) + evoked = evoked.crop(tmin=0, tmax=10e-3) + stc_ = mixed_norm(evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5, + depth=0.9) + assert_array_equal(stc_.vertices, stc.vertices) + + +@pytest.mark.slowtest # slow on Azure +@testing.requires_testing_data +def test_mxne_inverse_empty(): + """Tests solver with too high alpha.""" + evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0)) + evoked.pick("grad", exclude="bads") + fname_fwd = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') + forward = mne.read_forward_solution(fname_fwd) + forward = mne.pick_types_forward(forward, meg="grad", eeg=False, + exclude=evoked.info['bads']) + cov = read_cov(fname_cov) + with pytest.warns(RuntimeWarning, match='too big'): + stc, residual = mixed_norm( + evoked, forward, cov, n_mxne_iter=3, alpha=99, + return_residual=True) + assert stc.data.size == 0 + assert stc.vertices[0].size == 0 + assert stc.vertices[1].size == 0 + assert_allclose(evoked.data, residual.data) diff --git a/python/libs/mne/inverse_sparse/tests/test_mxne_optim.py b/python/libs/mne/inverse_sparse/tests/test_mxne_optim.py new file mode 100644 index 0000000..18dbcd5 --- /dev/null +++ b/python/libs/mne/inverse_sparse/tests/test_mxne_optim.py @@ -0,0 +1,329 @@ +# Author: Alexandre Gramfort +# Daniel Strohmeier +# +# License: Simplified BSD + +import pytest +import numpy as np +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_array_less) + +from mne.inverse_sparse.mxne_optim import (mixed_norm_solver, + tf_mixed_norm_solver, + iterative_mixed_norm_solver, + iterative_tf_mixed_norm_solver, + norm_epsilon_inf, norm_epsilon, + _Phi, _PhiT, dgap_l21l1) +from mne.time_frequency._stft import stft_norm2 +from mne.utils import catch_logging, _record_warnings + + +def _generate_tf_data(): + n, p, t = 30, 40, 64 + rng = np.random.RandomState(0) + G = rng.randn(n, p) + G /= np.std(G, axis=0)[None, :] + X = np.zeros((p, t)) + active_set = [0, 4] + times = np.linspace(0, 2 * np.pi, t) + X[0] = np.sin(times) + X[4] = -2 * np.sin(4 * times) + X[4, times <= np.pi / 2] = 0 + X[4, times >= np.pi] = 0 + M = np.dot(G, X) + M += 1 * rng.randn(*M.shape) + return M, G, active_set + + +def test_l21_mxne(): + """Test convergence of MxNE solver.""" + n, p, t, alpha = 30, 40, 20, 1. 
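+    # Synthetic problem: 30 sensors, 40 candidate sources and 20 time
+    # points, with only rows 0 and 4 of X nonzero, so each solver variant
+    # below is expected to recover exactly that active set (expanded to
+    # [0, 1, 4, 5], etc., when n_orient > 1).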
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    args = (M, G, alpha, 1000, 1e-8)
+    with _record_warnings():  # CD
+        X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
+            *args, active_set_size=None,
+            debias=True, solver='cd', return_gap=True)
+    assert_array_less(gap_cd, 1e-8)
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    with _record_warnings():  # CD
+        X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
+            M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
+            debias=True, solver='bcd', return_gap=True)
+    assert_array_less(gap_bcd, 9.6e-9)
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+
+    with _record_warnings():  # CD
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    with _record_warnings():  # CD
+        X_hat_bcd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+
+    with _record_warnings():  # CD
+        X_hat_bcd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+
+    # suppress a coordinate-descent warning here
+    with pytest.warns(RuntimeWarning, match='descent'):
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=2, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+
+    with _record_warnings():  # CD
+        X_hat_bcd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    with pytest.warns(RuntimeWarning, match='descent'):
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=5, solver='cd')
+
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    assert_allclose(X_hat_bcd, X_hat_cd)
+
+
+@pytest.mark.slowtest
+def test_non_convergence():
+    """Test non-convergence of MxNE solver to catch unexpected bugs."""
+    n, p, t, alpha = 30, 40, 20, 1.
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    # Impossible to converge with only 1 iteration and tol 1e-12.
+    # In case of non-convergence, we test that no error is raised.
+    args = (M, G, alpha, 1, 1e-12)
+    with catch_logging() as log:
+        mixed_norm_solver(*args, active_set_size=None, debias=True,
+                          solver='bcd', verbose=True)
+    log = log.getvalue()
+    assert 'Convergence reached' not in log
+
+
+def test_tf_mxne():
+    """Test convergence of TF-MxNE solver."""
+    alpha_space = 10.
+    alpha_time = 5.
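+    # alpha_space sets the spatial (L21) penalty across source positions,
+    # alpha_time the temporal (L1) penalty on the STFT coefficients; by
+    # convex duality, a gap below the tolerance certifies that the returned
+    # solution is optimal up to that tolerance.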
+
+    M, G, active_set = _generate_tf_data()
+
+    with _record_warnings():  # CD
+        X_hat_tf, active_set_hat_tf, E, gap_tfmxne = tf_mixed_norm_solver(
+            M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
+            n_orient=1, tstep=4, wsize=32, return_gap=True)
+    assert_array_less(gap_tfmxne, 1e-8)
+    assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
+
+
+def test_norm_epsilon():
+    """Test computation of epsilon norm on TF coefficients."""
+    tstep = np.array([2])
+    wsize = np.array([4])
+    n_times = 10
+    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
+    n_freqs = wsize // 2 + 1
+    n_coefs = n_steps * n_freqs
+    phi = _Phi(wsize, tstep, n_coefs, n_times)
+
+    Y = np.zeros(n_steps * n_freqs)
+    l1_ratio = 0.03
+    assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.)
+
+    Y[0] = 2.
+    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
+
+    l1_ratio = 1.
+    assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y))
+    # deterministic dummy values (no randomness):
+    Y = np.arange(n_steps * n_freqs).reshape(-1, )
+    l1_ratio = 0.0
+    assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2,
+                    stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])))
+
+    l1_ratio = 0.03
+    # the unweighted epsilon norm should match the weighted one when all
+    # weights are equal to 1
+    w_time = np.ones(n_coefs[0])
+    Y = np.abs(np.random.randn(n_coefs[0]))
+    assert_allclose(norm_epsilon(Y, l1_ratio, phi),
+                    norm_epsilon(Y, l1_ratio, phi, w_time=w_time))
+
+    # scaling w_time and w_space by the same amount should divide
+    # epsilon norm by the same amount
+    Y = np.arange(n_coefs) + 1
+    mult = 2.
+    assert_allclose(
+        norm_epsilon(Y, l1_ratio, phi, w_space=1,
+                     w_time=np.ones(n_coefs)) / mult,
+        norm_epsilon(Y, l1_ratio, phi, w_space=mult,
+                     w_time=mult * np.ones(n_coefs)))
+
+
+@pytest.mark.slowtest  # slow-ish on Travis OSX
+@pytest.mark.timeout(60)  # ~30 sec on Travis OSX and Linux OpenBLAS
+def test_dgapl21l1():
+    """Test duality gap for L21 + L1 regularization."""
+    n_orient = 2
+    M, G, active_set = _generate_tf_data()
+    n_times = M.shape[1]
+    n_sources = G.shape[1]
+    tstep, wsize = np.array([4, 2]), np.array([64, 16])
+    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
+    n_freqs = wsize // 2 + 1
+    n_coefs = n_steps * n_freqs
+    phi = _Phi(wsize, tstep, n_coefs, n_times)
+    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
+
+    for l1_ratio in [0.05, 0.1]:
+        alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
+        alpha_space = (1. 
- l1_ratio) * alpha_max + alpha_time = l1_ratio * alpha_max + + Z = np.zeros([n_sources, phi.n_coefs.sum()]) + # for alpha = alpha_max, Z = 0 is the solution so the dgap is 0 + gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool), + alpha_space, alpha_time, phi, phiT, + n_orient, -np.inf)[0] + + assert_allclose(0., gap) + # check that solution for alpha smaller than alpha_max is non 0: + X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver( + M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8, + verbose=True, debias=False, n_orient=n_orient, tstep=tstep, + wsize=wsize, return_gap=True) + # allow possible small numerical errors (negative gap) + assert_array_less(-1e-10, gap) + assert_array_less(gap, 1e-8) + assert_array_less(1, len(active_set_hat_tf)) + + X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver( + M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8, + verbose=True, debias=False, n_orient=n_orient, tstep=tstep, + wsize=wsize, return_gap=True) + assert_array_less(-1e-10, gap) + assert_array_less(gap, 1e-8) + assert_array_less(1, len(active_set_hat_tf)) + + +def test_tf_mxne_vs_mxne(): + """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE.""" + alpha_space = 60. + alpha_time = 0. + + M, G, active_set = _generate_tf_data() + + X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver( + M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, + verbose=True, debias=False, n_orient=1, tstep=4, wsize=32) + + # Also run L21 and check that we get the same + X_hat_l21, _, _ = mixed_norm_solver( + M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1, + active_set_size=None, debias=False) + + assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1) + + +@pytest.mark.slowtest # slow-ish on Travis OSX +def test_iterative_reweighted_mxne(): + """Test convergence of irMxNE solver.""" + n, p, t, alpha = 30, 40, 20, 1 + rng = np.random.RandomState(0) + G = rng.randn(n, p) + G /= np.std(G, axis=0)[None, :] + X = np.zeros((p, t)) + X[0] = 3 + X[4] = -2 + M = np.dot(G, X) + + with _record_warnings(): # CD + X_hat_l21, _, _ = mixed_norm_solver( + M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1, + active_set_size=None, debias=False, solver='bcd') + with _record_warnings(): # CD + X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None, + debias=False, solver='bcd') + assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3) + + with _record_warnings(): # CD + X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, + debias=True, solver='bcd') + assert_array_equal(np.where(active_set)[0], [0, 4]) + with _record_warnings(): # CD + X_hat_cd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None, + debias=True, solver='cd') + assert_array_equal(np.where(active_set)[0], [0, 4]) + assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5) + + with _record_warnings(): # CD + X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, + debias=True, n_orient=2, solver='bcd') + assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) + # suppress a coordinate-descent warning here + with pytest.warns(RuntimeWarning, match='descent'): + X_hat_cd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, + debias=True, n_orient=2, solver='cd') + assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) + 
assert_allclose(X_hat_bcd, X_hat_cd) + + X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True, + n_orient=5) + assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) + with pytest.warns(RuntimeWarning, match='descent'): + X_hat_cd, active_set, _ = iterative_mixed_norm_solver( + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, + debias=True, n_orient=5, solver='cd') + assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) + assert_allclose(X_hat_bcd, X_hat_cd) + + +@pytest.mark.slowtest +def test_iterative_reweighted_tfmxne(): + """Test convergence of irTF-MxNE solver.""" + M, G, true_active_set = _generate_tf_data() + alpha_space = 38. + alpha_time = 0.5 + tstep, wsize = [4, 2], [64, 16] + + X_hat_tf, _, _ = tf_mixed_norm_solver( + M, G, alpha_space, alpha_time, maxit=1000, tol=1e-4, wsize=wsize, + tstep=tstep, verbose=False, n_orient=1, debias=False) + X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( + M, G, alpha_space, alpha_time, 1, wsize=wsize, tstep=tstep, + maxit=1000, tol=1e-4, debias=False, verbose=False) + assert_allclose(X_hat_tf, X_hat_bcd, rtol=1e-3) + assert_array_equal(np.where(active_set)[0], true_active_set) + + alpha_space = 50. + X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( + M, G, alpha_space, alpha_time, 3, wsize=wsize, tstep=tstep, + n_orient=5, maxit=1000, tol=1e-4, debias=False, verbose=False) + assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) + + alpha_space = 40. + X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( + M, G, alpha_space, alpha_time, 2, wsize=wsize, tstep=tstep, + n_orient=2, maxit=1000, tol=1e-4, debias=False, verbose=False) + assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) diff --git a/python/libs/mne/io/__init__.py b/python/libs/mne/io/__init__.py new file mode 100644 index 0000000..6b33c7c --- /dev/null +++ b/python/libs/mne/io/__init__.py @@ -0,0 +1,66 @@ +"""IO module for reading raw data.""" + +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +from .open import fiff_open, show_fiff, _fiff_get_fid +from .meas_info import (read_fiducials, write_fiducials, read_info, write_info, + _empty_info, _merge_info, _force_update_info, Info, + anonymize_info, _writing_info_hdf5) + +from .proj import make_eeg_average_ref_proj, Projection +from .tag import _loc_to_coil_trans, _coil_trans_to_loc, _loc_to_eeg_loc +from .base import BaseRaw + +from . import array +from . import base +from . import brainvision +from . import bti +from . import cnt +from . import ctf +from . import constants +from . import edf +from . import egi +from . import fiff +from . import kit +from . import nicolet +from . import nirx +from . import boxy +from . import persyst +from . import eeglab +from . import pick +from . 
import nihon + +from .array import RawArray +from .brainvision import read_raw_brainvision +from .bti import read_raw_bti +from .cnt import read_raw_cnt +from .ctf import read_raw_ctf +from .curry import read_raw_curry +from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf +from .egi import read_raw_egi, read_evokeds_mff +from .kit import read_raw_kit, read_epochs_kit +from .fiff import read_raw_fif +from .nedf import read_raw_nedf +from .nicolet import read_raw_nicolet +from .artemis123 import read_raw_artemis123 +from .eeglab import read_raw_eeglab, read_epochs_eeglab +from .eximia import read_raw_eximia +from .hitachi import read_raw_hitachi +from .nirx import read_raw_nirx +from .boxy import read_raw_boxy +from .snirf import read_raw_snirf +from .persyst import read_raw_persyst +from .fieldtrip import (read_raw_fieldtrip, read_epochs_fieldtrip, + read_evoked_fieldtrip) +from .nihon import read_raw_nihon +from ._read_raw import read_raw + +# for backward compatibility +from .fiff import Raw +from .fiff import Raw as RawFIF +from .base import concatenate_raws +from .reference import (set_eeg_reference, set_bipolar_reference, + add_reference_channels) diff --git a/python/libs/mne/io/_digitization.py b/python/libs/mne/io/_digitization.py new file mode 100644 index 0000000..d3f8f10 --- /dev/null +++ b/python/libs/mne/io/_digitization.py @@ -0,0 +1,492 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Teon Brooks +# Stefan Appelhoff +# Joan Massich +# +# License: BSD-3-Clause + +import heapq +from collections import Counter + +import datetime +import os.path as op + +import numpy as np + +from ..utils import logger, warn, Bunch, _validate_type, _check_fname, verbose + +from .constants import FIFF, _coord_frame_named +from .tree import dir_tree_find +from .tag import read_tag +from .write import (start_and_end_file, write_dig_points) + +from ..transforms import (apply_trans, Transform, + get_ras_to_neuromag_trans, combine_transforms, + invert_transform, _to_const, _str_to_frame, + _coord_frame_name) +from .. 
import __version__ + +_dig_kind_dict = { + 'cardinal': FIFF.FIFFV_POINT_CARDINAL, + 'hpi': FIFF.FIFFV_POINT_HPI, + 'eeg': FIFF.FIFFV_POINT_EEG, + 'extra': FIFF.FIFFV_POINT_EXTRA, +} +_dig_kind_ints = tuple(sorted(_dig_kind_dict.values())) +_dig_kind_proper = {'cardinal': 'Cardinal', + 'hpi': 'HPI', + 'eeg': 'EEG', + 'extra': 'Extra', + 'unknown': 'Unknown'} +_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()} +_cardinal_kind_rev = {1: 'LPA', 2: 'Nasion', 3: 'RPA', 4: 'Inion'} + + +def _format_dig_points(dig, enforce_order=False): + """Format the dig points nicely.""" + if enforce_order and dig is not None: + # reorder points based on type: + # Fiducials/HPI, EEG, extra (headshape) + fids_digpoints = [] + hpi_digpoints = [] + eeg_digpoints = [] + extra_digpoints = [] + head_digpoints = [] + + # use a heap to enforce order on FIDS, EEG, Extra + for idx, digpoint in enumerate(dig): + ident = digpoint['ident'] + kind = digpoint['kind'] + + # push onto heap based on 'ident' (for the order) for + # each of the possible DigPoint 'kind's + # keep track of 'idx' in case of any clashes in + # the 'ident' variable, which can occur when + # user passes in DigMontage + DigMontage + if kind == FIFF.FIFFV_POINT_CARDINAL: + heapq.heappush(fids_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HPI: + heapq.heappush(hpi_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EEG: + heapq.heappush(eeg_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EXTRA: + heapq.heappush(extra_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HEAD: + heapq.heappush(head_digpoints, (ident, idx, digpoint)) + + # now recreate dig based on sorted order + fids_digpoints.sort(), hpi_digpoints.sort() + eeg_digpoints.sort() + extra_digpoints.sort(), head_digpoints.sort() + new_dig = [] + for idx, d in enumerate(fids_digpoints + hpi_digpoints + + extra_digpoints + eeg_digpoints + + head_digpoints): + new_dig.append(d[-1]) + dig = new_dig + + return [DigPoint(d) for d in dig] if dig is not None else dig + + +def _get_dig_eeg(dig): + return [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG] + + +def _count_points_by_type(dig): + """Get the number of points of each type.""" + occurrences = Counter([d['kind'] for d in dig]) + return dict( + fid=occurrences[FIFF.FIFFV_POINT_CARDINAL], + hpi=occurrences[FIFF.FIFFV_POINT_HPI], + eeg=occurrences[FIFF.FIFFV_POINT_EEG], + extra=occurrences[FIFF.FIFFV_POINT_EXTRA], + ) + + +_dig_keys = {'kind', 'ident', 'r', 'coord_frame'} + + +class DigPoint(dict): + """Container for a digitization point. + + This is a simple subclass of the standard dict type designed to provide + a readable string representation. + + Parameters + ---------- + kind : int + The kind of channel, + e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. + r : array, shape (3,) + 3D position in m. and coord_frame. + ident : int + Number specifying the identity of the point. + e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, + or 42 if kind is ``FIFFV_POINT_EEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. 
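+
+    Examples
+    --------
+    A minimal, purely illustrative construction (not real digitizer data)::
+
+        DigPoint(kind=FIFF.FIFFV_POINT_EEG, ident=1,
+                 r=np.zeros(3), coord_frame=FIFF.FIFFV_COORD_HEAD)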
+    """
+
+    def __repr__(self):  # noqa: D105
+        if self['kind'] == FIFF.FIFFV_POINT_CARDINAL:
+            id_ = _cardinal_kind_rev.get(self['ident'], 'Unknown cardinal')
+        else:
+            id_ = _dig_kind_proper[
+                _dig_kind_rev.get(self['kind'], 'unknown')]
+            id_ = ('%s #%s' % (id_, self['ident']))
+        id_ = id_.rjust(10)
+        cf = _coord_frame_name(self['coord_frame'])
+        if 'voxel' in cf:
+            pos = ('(%0.1f, %0.1f, %0.1f)' % tuple(self['r'])).ljust(25)
+        else:
+            pos = ('(%0.1f, %0.1f, %0.1f) mm' %
+                   tuple(1000 * self['r'])).ljust(25)
+        return ('<DigPoint | %s : %s : %s frame>' % (id_, pos, cf))
+
+    # speed up info copy by only deep copying the mutable item
+    def __deepcopy__(self, memodict):
+        """Make a deepcopy."""
+        return DigPoint(
+            kind=self['kind'], r=self['r'].copy(),
+            ident=self['ident'], coord_frame=self['coord_frame'])
+
+    def __eq__(self, other):  # noqa: D105
+        """Compare two DigPoints.
+
+        Two digpoints are equal if they are the same kind, share the same
+        coordinate frame and position.
+        """
+        my_keys = ['kind', 'ident', 'coord_frame']
+        if set(self.keys()) != set(other.keys()):
+            return False
+        elif any(self[_] != other[_] for _ in my_keys):
+            return False
+        else:
+            return np.allclose(self['r'], other['r'])
+
+
+def _read_dig_fif(fid, meas_info):
+    """Read digitizer data from a FIFF file."""
+    isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
+    dig = None
+    if len(isotrak) == 0:
+        logger.info('Isotrak not found')
+    elif len(isotrak) > 1:
+        warn('Multiple Isotrak found')
+    else:
+        isotrak = isotrak[0]
+        coord_frame = FIFF.FIFFV_COORD_HEAD
+        dig = []
+        for k in range(isotrak['nent']):
+            kind = isotrak['directory'][k].kind
+            pos = isotrak['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                tag = read_tag(fid, pos)
+                dig.append(tag.data)
+            elif kind == FIFF.FIFF_MNE_COORD_FRAME:
+                tag = read_tag(fid, pos)
+                coord_frame = _coord_frame_named.get(int(tag.data))
+        for d in dig:
+            d['coord_frame'] = coord_frame
+    return _format_dig_points(dig)
+
+
+@verbose
+def write_dig(fname, pts, coord_frame=None, *, overwrite=False, verbose=None):
+    """Write digitization data to a FIF file.
+
+    Parameters
+    ----------
+    fname : path-like
+        Destination file name.
+    pts : iterator of dict
+        Iterator through digitizer points. Each point is a dictionary with
+        the keys 'kind', 'ident' and 'r'.
+    coord_frame : int | str | None
+        If all the points have the same coordinate frame, specify the type
+        here. Can be None (default) if the points could have varying
+        coordinate frames.
+    %(overwrite)s
+
+        .. versionadded:: 1.0
+    %(verbose)s
+
+        .. versionadded:: 1.0
+    """
+    fname = _check_fname(fname, overwrite=overwrite)
+    if coord_frame is not None:
+        coord_frame = _to_const(coord_frame)
+        pts_frames = {pt.get('coord_frame', coord_frame) for pt in pts}
+        bad_frames = pts_frames - {coord_frame}
+        if len(bad_frames) > 0:
+            raise ValueError(
+                'Points have coord_frame entries that are incompatible with '
+                'coord_frame=%i: %s.' % (coord_frame, str(tuple(bad_frames))))
+
+    with start_and_end_file(fname) as fid:
+        write_dig_points(fid, pts, block=True, coord_frame=coord_frame)
+
+
+_cardinal_ident_mapping = {
+    FIFF.FIFFV_POINT_NASION: 'nasion',
+    FIFF.FIFFV_POINT_LPA: 'lpa',
+    FIFF.FIFFV_POINT_RPA: 'rpa',
+}
+
+
+# XXXX:
+# This does something really similar to _read_dig_montage_fif but:
+# - does not check coord_frame
+# - does not do any operation that implies assumptions with the names
+def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True):
+    """Obtain coordinate data from a Dig.
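+
+    Fiducials, HPI coils, headshape points and EEG channel positions are
+    returned as separate arrays; note that each HPI point is stored in
+    both ``hpi`` and ``elp``.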
+ + Parameters + ---------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + + Returns + ------- + ch_pos : dict + The container of all relevant channel positions inside dig. + """ + # Split up the dig points by category + hsp, hpi, elp = list(), list(), list() + fids, dig_ch_pos_location = dict(), list() + + for d in dig: + if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: + fids[_cardinal_ident_mapping[d['ident']]] = d['r'] + elif d['kind'] == FIFF.FIFFV_POINT_HPI: + hpi.append(d['r']) + elp.append(d['r']) + elif d['kind'] == FIFF.FIFFV_POINT_EXTRA: + hsp.append(d['r']) + elif d['kind'] == FIFF.FIFFV_POINT_EEG: + if d['ident'] != 0 or not exclude_ref_channel: + dig_ch_pos_location.append(d['r']) + + dig_coord_frames = set([d['coord_frame'] for d in dig]) + if len(dig_coord_frames) != 1: + raise RuntimeError('Only single coordinate frame in dig is supported, ' + f'got {dig_coord_frames}') + + return Bunch( + nasion=fids.get('nasion', None), + lpa=fids.get('lpa', None), + rpa=fids.get('rpa', None), + hsp=np.array(hsp) if len(hsp) else None, + hpi=np.array(hpi) if len(hpi) else None, + elp=np.array(elp) if len(elp) else None, + dig_ch_pos_location=np.array(dig_ch_pos_location), + coord_frame=dig_coord_frames.pop(), + ) + + +def _get_fid_coords(dig, raise_error=True): + fid_coords = Bunch(nasion=None, lpa=None, rpa=None) + fid_coord_frames = dict() + + for d in dig: + if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: + key = _cardinal_ident_mapping[d['ident']] + fid_coords[key] = d['r'] + fid_coord_frames[key] = d['coord_frame'] + + if len(fid_coord_frames) > 0 and raise_error: + if set(fid_coord_frames.keys()) != set(['nasion', 'lpa', 'rpa']): + raise ValueError("Some fiducial points are missing (got %s)." % + fid_coords.keys()) + + if len(set(fid_coord_frames.values())) > 1: + raise ValueError( + 'All fiducial points must be in the same coordinate system ' + '(got %s)' % len(fid_coord_frames) + ) + + coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None + + return fid_coords, coord_frame + + +def _write_dig_points(fname, dig_points): + """Write points to text file. + + Parameters + ---------- + fname : str + Path to the file to write. The kind of file to write is determined + based on the extension: '.txt' for tab separated text file. + dig_points : numpy.ndarray, shape (n_points, 3) + Points. + """ + _, ext = op.splitext(fname) + dig_points = np.asarray(dig_points) + if (dig_points.ndim != 2) or (dig_points.shape[1] != 3): + err = ("Points must be of shape (n_points, 3), " + "not %s" % (dig_points.shape,)) + raise ValueError(err) + + if ext == '.txt': + with open(fname, 'wb') as fid: + version = __version__ + now = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y") + fid.write(b'%% Ascii 3D points file created by mne-python version' + b' %s at %s\n' % (version.encode(), now.encode())) + fid.write(b'%% %d 3D points, x y z per line\n' % len(dig_points)) + np.savetxt(fid, dig_points, delimiter='\t', newline='\n') + else: + msg = "Unrecognized extension: %r. Need '.txt'." % ext + raise ValueError(msg) + + +def _coord_frame_const(coord_frame): + if not isinstance(coord_frame, str) or coord_frame not in _str_to_frame: + raise ValueError('coord_frame must be one of %s, got %s' + % (sorted(_str_to_frame.keys()), coord_frame)) + return _str_to_frame[coord_frame] + + +def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, + extra_points=None, dig_ch_pos=None, + coord_frame='head'): + """Construct digitizer info for the info. 
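+
+    A minimal sketch (the coordinates are made-up values in meters, in the
+    default 'head' frame)::
+
+        dig = _make_dig_points(nasion=[0., 0.10, 0.],
+                               lpa=[-0.08, 0., 0.],
+                               rpa=[0.08, 0., 0.])
+        len(dig)  # three cardinal DigPoints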
+ + Parameters + ---------- + nasion : array-like | numpy.ndarray, shape (3,) | None + Point designated as the nasion point. + lpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the left auricular point. + rpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the right auricular point. + hpi : array-like | numpy.ndarray, shape (n_points, 3) | None + Points designated as head position indicator points. + extra_points : array-like | numpy.ndarray, shape (n_points, 3) + Points designed as the headshape points. + dig_ch_pos : dict + Dict of EEG channel positions. + coord_frame : str + The coordinate frame of the points. Usually this is "unknown" + for native digitizer space. Defaults to "head". + + Returns + ------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + """ + coord_frame = _coord_frame_const(coord_frame) + + dig = [] + if lpa is not None: + lpa = np.asarray(lpa) + if lpa.shape != (3,): + raise ValueError('LPA should have the shape (3,) instead of %s' + % (lpa.shape,)) + dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA, + 'kind': FIFF.FIFFV_POINT_CARDINAL, + 'coord_frame': coord_frame}) + if nasion is not None: + nasion = np.asarray(nasion) + if nasion.shape != (3,): + raise ValueError('Nasion should have the shape (3,) instead of %s' + % (nasion.shape,)) + dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION, + 'kind': FIFF.FIFFV_POINT_CARDINAL, + 'coord_frame': coord_frame}) + if rpa is not None: + rpa = np.asarray(rpa) + if rpa.shape != (3,): + raise ValueError('RPA should have the shape (3,) instead of %s' + % (rpa.shape,)) + dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA, + 'kind': FIFF.FIFFV_POINT_CARDINAL, + 'coord_frame': coord_frame}) + if hpi is not None: + hpi = np.asarray(hpi) + if hpi.ndim != 2 or hpi.shape[1] != 3: + raise ValueError('HPI should have the shape (n_points, 3) instead ' + 'of %s' % (hpi.shape,)) + for idx, point in enumerate(hpi): + dig.append({'r': point, 'ident': idx + 1, + 'kind': FIFF.FIFFV_POINT_HPI, + 'coord_frame': coord_frame}) + if extra_points is not None: + extra_points = np.asarray(extra_points) + if len(extra_points) and extra_points.shape[1] != 3: + raise ValueError('Points should have the shape (n_points, 3) ' + 'instead of %s' % (extra_points.shape,)) + for idx, point in enumerate(extra_points): + dig.append({'r': point, 'ident': idx + 1, + 'kind': FIFF.FIFFV_POINT_EXTRA, + 'coord_frame': coord_frame}) + if dig_ch_pos is not None: + try: # use the last 3 as int if possible (e.g., EEG001->1) + idents = [] + for key in dig_ch_pos: + _validate_type(key, str, 'dig_ch_pos') + idents.append(int(key[-3:])) + except ValueError: # and if any conversion fails, simply use arange + idents = np.arange(1, len(dig_ch_pos) + 1) + for key, ident in zip(dig_ch_pos, idents): + dig.append({'r': dig_ch_pos[key], 'ident': int(ident), + 'kind': FIFF.FIFFV_POINT_EEG, + 'coord_frame': coord_frame}) + + return _format_dig_points(dig) + + +def _call_make_dig_points(nasion, lpa, rpa, hpi, extra, convert=True): + if convert: + neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa) + nasion = apply_trans(neuromag_trans, nasion) + lpa = apply_trans(neuromag_trans, lpa) + rpa = apply_trans(neuromag_trans, rpa) + + if hpi is not None: + hpi = apply_trans(neuromag_trans, hpi) + + extra = apply_trans(neuromag_trans, extra).astype(np.float32) + else: + neuromag_trans = None + + ctf_head_t = Transform(fro='ctf_head', to='head', trans=neuromag_trans) + + info_dig = 
_make_dig_points(nasion=nasion, + lpa=lpa, + rpa=rpa, + hpi=hpi, + extra_points=extra) + + return info_dig, ctf_head_t + + +############################################################################## +# From artemis123 (we have modified the function a bit) +def _artemis123_read_pos(nas, lpa, rpa, hpi, extra): + # move into MNE head coords + dig_points, _ = _call_make_dig_points(nas, lpa, rpa, hpi, extra) + return dig_points + + +############################################################################## +# From bti +def _make_bti_dig_points(nasion, lpa, rpa, hpi, extra, + convert=False, use_hpi=False, + bti_dev_t=False, dev_ctf_t=False): + + _hpi = hpi if use_hpi else None + info_dig, ctf_head_t = _call_make_dig_points(nasion, lpa, rpa, _hpi, extra, + convert) + + if convert: + t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t, + 'meg', 'ctf_head') + dev_head_t = combine_transforms(t, ctf_head_t, 'meg', 'head') + else: + dev_head_t = Transform('meg', 'head', trans=None) + + return info_dig, dev_head_t, ctf_head_t # ctf_head_t should not be needed diff --git a/python/libs/mne/io/_read_raw.py b/python/libs/mne/io/_read_raw.py new file mode 100644 index 0000000..a1d8c74 --- /dev/null +++ b/python/libs/mne/io/_read_raw.py @@ -0,0 +1,98 @@ +"""Generic wrapper function read_raw for specific read_raw_xxx readers.""" + +# Authors: Clemens Brunner +# +# License: BSD-3-Clause + + +from pathlib import Path +from functools import partial + +from . import (read_raw_edf, read_raw_bdf, read_raw_gdf, read_raw_brainvision, + read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi, + read_raw_eximia, read_raw_nirx, read_raw_fieldtrip, + read_raw_artemis123, read_raw_nicolet, read_raw_kit, + read_raw_ctf, read_raw_boxy, read_raw_snirf) +from ..utils import fill_doc + + +def _read_unsupported(fname, **kwargs): + ext = "".join(Path(fname).suffixes) + msg = f"Unsupported file type ({ext})." + suggest = kwargs.get("suggest") + if suggest is not None: + msg += f" Try reading a {suggest} file instead." + msg += " Consider using a dedicated reader function for more options." + raise ValueError(msg) + + +# supported read file formats +supported = { + ".edf": read_raw_edf, + ".bdf": read_raw_bdf, + ".gdf": read_raw_gdf, + ".vhdr": read_raw_brainvision, + ".fif": read_raw_fif, + ".fif.gz": read_raw_fif, + ".set": read_raw_eeglab, + ".cnt": read_raw_cnt, + ".mff": read_raw_egi, + ".nxe": read_raw_eximia, + ".hdr": read_raw_nirx, + ".snirf": read_raw_snirf, + ".mat": read_raw_fieldtrip, + ".bin": read_raw_artemis123, + ".data": read_raw_nicolet, + ".sqd": read_raw_kit, + ".con": read_raw_kit, + ".ds": read_raw_ctf, + ".txt": read_raw_boxy, +} + +# known but unsupported file formats +suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"), + ".eeg": partial(_read_unsupported, suggest=".vhdr")} + +# all known file formats +readers = {**supported, **suggested} + + +@fill_doc +def read_raw(fname, *, preload=False, verbose=None, **kwargs): + """Read raw file. + + This function is a convenient wrapper for readers defined in `mne.io`. The + correct reader is automatically selected based on the detected file format. + All function arguments are passed to the respective reader. 
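+
+    For example, given a hypothetical BrainVision header file
+    ``sample.vhdr``, the extension alone selects
+    `~mne.io.read_raw_brainvision`::
+
+        raw = mne.io.read_raw('sample.vhdr')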
+ + The following readers are currently supported: + + `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`, + `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`, + `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`, + `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`, + `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`, + `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`, + `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`. + + Parameters + ---------- + fname : path-like + Name of the file to read. + %(preload)s + %(verbose)s + **kwargs + Additional keyword arguments to pass to the underlying reader. For + details, see the arguments of the reader for the respective file + format. + + Returns + ------- + raw : mne.io.Raw + Raw object. + """ + ext = "".join(Path(fname).suffixes) + if ext in readers: + return readers[ext](fname, preload=preload, verbose=verbose, **kwargs) + else: + _read_unsupported(fname) diff --git a/python/libs/mne/io/array/__init__.py b/python/libs/mne/io/array/__init__.py new file mode 100644 index 0000000..35778e4 --- /dev/null +++ b/python/libs/mne/io/array/__init__.py @@ -0,0 +1,5 @@ +"""Module to convert user data to FIF.""" + +# Author: Eric Larson + +from .array import RawArray diff --git a/python/libs/mne/io/array/array.py b/python/libs/mne/io/array/array.py new file mode 100644 index 0000000..58ba004 --- /dev/null +++ b/python/libs/mne/io/array/array.py @@ -0,0 +1,86 @@ +"""Tools for creating Raw objects from numpy arrays.""" + +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np + +from ..base import BaseRaw +from ...utils import verbose, logger, _validate_type, fill_doc, _check_option + + +@fill_doc +class RawArray(BaseRaw): + """Raw object from numpy array. + + Parameters + ---------- + data : array, shape (n_channels, n_times) + The channels' time series. See notes for proper units of measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate + this structure. This may be modified in place by the class. + first_samp : int + First sample offset used during recording (default 0). + + .. versionadded:: 0.12 + copy : {'data', 'info', 'both', 'auto', None} + Determines what gets copied on instantiation. "auto" (default) + will copy info, and copy "data" only if necessary to get to + double floating point precision. + + .. 
versionadded:: 0.18 + %(verbose)s + + See Also + -------- + mne.EpochsArray + mne.EvokedArray + mne.create_info + + Notes + ----- + Proper units of measure: + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + + @verbose + def __init__(self, data, info, first_samp=0, copy='auto', + verbose=None): # noqa: D102 + _validate_type(info, 'info', 'info') + _check_option('copy', copy, ('data', 'info', 'both', 'auto', None)) + dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 + orig_data = data + data = np.asanyarray(orig_data, dtype=dtype) + if data.ndim != 2: + raise ValueError('Data must be a 2D array of shape (n_channels, ' + 'n_samples), got shape %s' % (data.shape,)) + if len(data) != len(info['ch_names']): + raise ValueError('len(data) (%s) does not match ' + 'len(info["ch_names"]) (%s)' + % (len(data), len(info['ch_names']))) + assert len(info['ch_names']) == info['nchan'] + if copy in ('auto', 'info', 'both'): + info = info.copy() + if copy in ('data', 'both'): + if data is orig_data: + data = data.copy() + elif copy != 'auto' and data is not orig_data: + raise ValueError('data copying was not requested by copy=%r but ' + 'it was required to get to double floating point ' + 'precision' % (copy,)) + logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' + % (dtype.__name__, data.shape[0], data.shape[1])) + super(RawArray, self).__init__(info, data, + first_samps=(int(first_samp),), + dtype=dtype, verbose=verbose) + logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( + self.first_samp, self.last_samp, + float(self.first_samp) / info['sfreq'], + float(self.last_samp) / info['sfreq'])) + logger.info('Ready.') diff --git a/python/libs/mne/io/array/tests/__init__.py b/python/libs/mne/io/array/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/array/tests/test_array.py b/python/libs/mne/io/array/tests/test_array.py new file mode 100644 index 0000000..a771e3b --- /dev/null +++ b/python/libs/mne/io/array/tests/test_array.py @@ -0,0 +1,181 @@ +# Author: Eric Larson +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_allclose, + assert_equal) +import pytest +import matplotlib.pyplot as plt + +from mne import find_events, Epochs, pick_types +from mne.io import read_raw_fif +from mne.io.array import RawArray +from mne.io.tests.test_raw import _test_raw_reader +from mne.io.meas_info import create_info +from mne.io.pick import get_channel_type_constants +from mne.channels import make_dig_montage + +base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data') +fif_fname = op.join(base_dir, 'test_raw.fif') + + +def test_long_names(): + """Test long name support.""" + info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error') + data = np.empty((2, 1000)) + raw = RawArray(data, info) + assert raw.ch_names == ['a' * 15 + 'b', 'a' * 16] + # and a way to get the old behavior + raw.rename_channels({k: k[:13] for k in raw.ch_names}, + allow_duplicates=True, verbose='error') + assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1'] + info = create_info(['a' * 16] * 11, 1000., verbose='error') + data = np.empty((11, 1000)) + raw = RawArray(data, info) + assert raw.ch_names == ['a' * 16 + '-%s' % ii for ii in range(11)] + + +def test_array_copy(): + """Test copying during construction.""" + info = create_info(1, 1000.) 
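+    # one channel sampled at 1000 Hz; the float64 (1, 1000) array below
+    # matches it, so copy='auto' may keep the original buffer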
+ data = np.empty((1, 1000)) + # 'auto' (default) + raw = RawArray(data, info) + assert raw._data is data + assert raw.info is not info + raw = RawArray(data.astype(np.float32), info) + assert raw._data is not data + assert raw.info is not info + # 'info' (more restrictive) + raw = RawArray(data, info, copy='info') + assert raw._data is data + assert raw.info is not info + with pytest.raises(ValueError, match="data copying was not .* copy='info"): + RawArray(data.astype(np.float32), info, copy='info') + # 'data' + raw = RawArray(data, info, copy='data') + assert raw._data is not data + assert raw.info is info + # 'both' + raw = RawArray(data, info, copy='both') + assert raw._data is not data + assert raw.info is not info + raw = RawArray(data.astype(np.float32), info, copy='both') + assert raw._data is not data + assert raw.info is not info + # None + raw = RawArray(data, info, copy=None) + assert raw._data is data + assert raw.info is info + with pytest.raises(ValueError, match='data copying was not .* copy=None'): + RawArray(data.astype(np.float32), info, copy=None) + + +@pytest.mark.slowtest +def test_array_raw(): + """Test creating raw from array.""" + # creating + raw = read_raw_fif(fif_fname).crop(2, 5) + data, times = raw[:, :] + sfreq = raw.info['sfreq'] + ch_names = [(ch[4:] if 'STI' not in ch else ch) + for ch in raw.info['ch_names']] # change them, why not + types = list() + for ci in range(101): + types.extend(('grad', 'grad', 'mag')) + types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels + types.extend(['stim'] * 9) + types.extend(['dbs']) # really eeg channel + types.extend(['eeg'] * 60) + picks = np.concatenate([pick_types(raw.info, meg=True)[::20], + pick_types(raw.info, meg=False, stim=True), + pick_types(raw.info, meg=False, eeg=True)[::20]]) + del raw + data = data[picks] + ch_names = np.array(ch_names)[picks].tolist() + types = np.array(types)[picks].tolist() + types.pop(-1) + # wrong length + pytest.raises(ValueError, create_info, ch_names, sfreq, types) + # bad entry + types.append('foo') + pytest.raises(KeyError, create_info, ch_names, sfreq, types) + types[-1] = 'eog' + # default type + info = create_info(ch_names, sfreq) + assert_equal(info['chs'][0]['kind'], + get_channel_type_constants()['misc']['kind']) + # use real types + info = create_info(ch_names, sfreq, types) + raw2 = _test_raw_reader(RawArray, test_preloading=False, + data=data, info=info, first_samp=2 * data.shape[1]) + data2, times2 = raw2[:, :] + assert_allclose(data, data2) + assert_allclose(times, times2) + assert ('RawArray' in repr(raw2)) + pytest.raises(TypeError, RawArray, info, data) + + # filtering + picks = pick_types(raw2.info, meg=True, misc=True, exclude='bads')[:4] + assert_equal(len(picks), 4) + raw_lp = raw2.copy() + kwargs = dict(fir_design='firwin', picks=picks) + raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs) + raw_hp = raw2.copy() + raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs) + raw_bp = raw2.copy() + raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., + **kwargs) + raw_bs = raw2.copy() + raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., + **kwargs) + data, _ = raw2[picks, :] + lp_data, _ = raw_lp[picks, :] + hp_data, _ = raw_hp[picks, :] + bp_data, _ = raw_bp[picks, :] + bs_data, _ = raw_bs[picks, :] + sig_dec = 15 + assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) + assert_array_almost_equal(data, bp_data + bs_data, sig_dec) + + # plotting + raw2.plot() + raw2.plot_psd(tmax=2., average=True, 
n_fft=1024, + spatial_colors=False) + plt.close('all') + + # epoching + events = find_events(raw2, stim_channel='STI 014') + events[:, 2] = 1 + assert len(events) > 2 + epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) + evoked = epochs.average() + assert_equal(evoked.nave, len(events) - 1) + + # complex data + rng = np.random.RandomState(0) + data = rng.randn(1, 100) + 1j * rng.randn(1, 100) + raw = RawArray(data, create_info(1, 1000., 'eeg')) + assert_allclose(raw._data, data) + + # Using digital montage to give MNI electrode coordinates + n_elec = 10 + ts_size = 10000 + Fs = 512. + ch_names = [str(i) for i in range(n_elec)] + ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist() + + data = np.random.rand(n_elec, ts_size) + montage = make_dig_montage( + ch_pos=dict(zip(ch_names, ch_pos_loc)), + coord_frame='head' + ) + info = create_info(ch_names, Fs, 'ecog') + + raw = RawArray(data, info) + raw.set_montage(montage) + raw.plot_psd(average=False) # looking for nonexistent layout + raw.plot_psd_topo() diff --git a/python/libs/mne/io/artemis123/__init__.py b/python/libs/mne/io/artemis123/__init__.py new file mode 100644 index 0000000..d9becf4 --- /dev/null +++ b/python/libs/mne/io/artemis123/__init__.py @@ -0,0 +1,7 @@ +"""artemis123 module for conversion to FIF.""" + +# Author: Luke Bloy +# +# License: BSD-3-Clause + +from .artemis123 import read_raw_artemis123 diff --git a/python/libs/mne/io/artemis123/artemis123.py b/python/libs/mne/io/artemis123/artemis123.py new file mode 100644 index 0000000..7bb5ed1 --- /dev/null +++ b/python/libs/mne/io/artemis123/artemis123.py @@ -0,0 +1,469 @@ +# Author: Luke Bloy +# +# License: BSD-3-Clause + +import numpy as np +import os.path as op +import datetime +import calendar + +from .utils import _load_mne_locs, _read_pos +from ...utils import logger, warn, verbose, _check_fname +from ..utils import _read_segments_file +from ..base import BaseRaw +from ..meas_info import _empty_info +from .._digitization import _make_dig_points, DigPoint +from ..constants import FIFF +from ...transforms import get_ras_to_neuromag_trans, apply_trans, Transform + + +@verbose +def read_raw_artemis123(input_fname, preload=False, verbose=None, + pos_fname=None, add_head_trans=True): + """Read Artemis123 data as raw object. + + Parameters + ---------- + input_fname : str + Path to the data file (extension ``.bin``). The header file with the + same file name stem and an extension ``.txt`` is expected to be found + in the same directory. + %(preload)s + %(verbose)s + pos_fname : str or None (default None) + If not None, load digitized head points from this file. + add_head_trans : bool (default True) + If True attempt to perform initial head localization. Compute initial + device to head coordinate transform using HPI coils. If no + HPI coils are in info['dig'] hpi coils are assumed to be in canonical + order of fiducial points (nas, rpa, lpa). + + Returns + ------- + raw : instance of Raw + A Raw object containing the data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawArtemis123(input_fname, preload=preload, verbose=verbose, + pos_fname=pos_fname, add_head_trans=add_head_trans) + + +def _get_artemis123_info(fname, pos_fname=None): + """Generate info struct from artemis123 header file.""" + fname = op.splitext(fname)[0] + header = fname + '.txt' + + logger.info('Reading header...') + + # key names for artemis channel info... 
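+    # (each channel-list row is later parsed into a dict by zipping these
+    # keys with its seven tab-separated fields)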
+    chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass',
+                 'FLL_AutoReset', 'FLL_ResetLock']
+
+    header_info = dict()
+    header_info['filter_hist'] = []
+    header_info['comments'] = ''
+    header_info['channels'] = []
+
+    with open(header, 'r') as fid:
+        # section flag
+        # 0 - None
+        # 1 - main header
+        # 2 - channel header
+        # 3 - comments
+        # 4 - length
+        # 5 - filtering History
+        sectionFlag = 0
+        for line in fid:
+            # skip emptylines or header line for channel info
+            if ((not line.strip()) or
+                    (sectionFlag == 2 and line.startswith('DAQ Map'))):
+                continue
+
+            # set sectionFlag (the literal tag strings below were lost in
+            # transcription; they are reconstructed from the section map
+            # above and should be verified against a real Artemis123 header)
+            if line.startswith('<start of main header>'):
+                sectionFlag = 1
+            elif line.startswith('<start of channel header>'):
+                sectionFlag = 2
+            elif line.startswith('<start of comments>'):
+                sectionFlag = 3
+            elif line.startswith('<start of length>'):
+                sectionFlag = 4
+            elif line.startswith('<start of filtering history>'):
+                sectionFlag = 5
+            else:
+                # parse header info lines
+                # part of main header - lines are name value pairs
+                if sectionFlag == 1:
+                    values = line.strip().split('\t')
+                    if len(values) == 1:
+                        values.append('')
+                    header_info[values[0]] = values[1]
+                # part of channel header - lines are Channel Info
+                elif sectionFlag == 2:
+                    values = line.strip().split('\t')
+                    if len(values) != 7:
+                        raise IOError('Error parsing line \n\t:%s\n' % line +
+                                      'from file %s' % header)
+                    tmp = dict()
+                    for k, v in zip(chan_keys, values):
+                        tmp[k] = v
+                    header_info['channels'].append(tmp)
+                elif sectionFlag == 3:
+                    header_info['comments'] = '%s%s' \
+                        % (header_info['comments'], line.strip())
+                elif sectionFlag == 4:
+                    header_info['num_samples'] = int(line.strip())
+                elif sectionFlag == 5:
+                    header_info['filter_hist'].append(line.strip())
+
+    for k in ['Temporal Filter Active?', 'Decimation Active?',
+              'Spatial Filter Active?']:
+        if(header_info[k] != 'FALSE'):
+            warn('%s - set to TRUE but is not supported' % k)
+    if(header_info['filter_hist']):
+        warn('Non-Empty Filter history found, BUT is not supported')
+
+    # build mne info struct
+    info = _empty_info(float(header_info['DAQ Sample Rate']))
+
+    # Attempt to get time/date from fname
+    # Artemis123 files saved from the scanner observe the following
+    # naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin'
+    try:
+        date = datetime.datetime.strptime(
+            op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm')
+        meas_date = (calendar.timegm(date.utctimetuple()), 0)
+    except Exception:
+        meas_date = None
+
+    # build subject info (id must be an integer, as per FIFF)
+    try:
+        subject_info = {'id': int(header_info['Subject ID'])}
+    except ValueError:
+        subject_info = {'id': 0}
+
+    # build description
+    desc = ''
+    for k in ['Purpose', 'Notes']:
+        desc += '{} : {}\n'.format(k, header_info[k])
+    desc += 'Comments : {}'.format(header_info['comments'])
+
+    info.update({'meas_date': meas_date,
+                 'description': desc,
+                 'subject_info': subject_info,
+                 'proj_name': header_info['Project Name']})
+
+    # Channel Names by type
+    ref_mag_names = ['REF_001', 'REF_002', 'REF_003',
+                     'REF_004', 'REF_005', 'REF_006']
+
+    ref_grad_names = ['REF_007', 'REF_008', 'REF_009',
+                      'REF_010', 'REF_011', 'REF_012']
+
+    # load mne loc dictionary
+    loc_dict = _load_mne_locs()
+    info['chs'] = []
+    info['bads'] = []
+
+    for i, chan in enumerate(header_info['channels']):
+        # build chs struct
+        t = {'cal': float(chan['scaling']), 'ch_name': chan['name'],
+             'logno': i + 1, 'scanno': i + 1, 'range': 1.0,
+             'unit_mul': FIFF.FIFF_UNITM_NONE,
+             'coord_frame': FIFF.FIFFV_COORD_DEVICE}
+        # REF_018 has a zero cal which can cause problems.
Let's set it to + # a value of another ref channel to make writers/readers happy. + if t['cal'] == 0: + t['cal'] = 4.716e-10 + info['bads'].append(t['ch_name']) + t['loc'] = loc_dict.get(chan['name'], np.zeros(12)) + + if (chan['name'].startswith('MEG')): + t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD + t['kind'] = FIFF.FIFFV_MEG_CH + # While gradiometer units are T/m, the meg sensors referred to as + # gradiometers report the field difference between 2 pick-up coils. + # Therefore the units of the measurements should be T + # *AND* the baseline (difference between pickup coils) + # should not be used in leadfield / forwardfield computations. + t['unit'] = FIFF.FIFF_UNIT_T + t['unit_mul'] = FIFF.FIFF_UNITM_F + + # 3 axis reference magnetometers + elif (chan['name'] in ref_mag_names): + t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG + t['kind'] = FIFF.FIFFV_REF_MEG_CH + t['unit'] = FIFF.FIFF_UNIT_T + t['unit_mul'] = FIFF.FIFF_UNITM_F + + # reference gradiometers + elif (chan['name'] in ref_grad_names): + t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD + t['kind'] = FIFF.FIFFV_REF_MEG_CH + # While gradiometer units are T/m, the meg sensors referred to as + # gradiometers report the field difference between 2 pick-up coils. + # Therefore the units of the measurements should be T + # *AND* the baseline (difference between pickup coils) + # should not be used in leadfield / forwardfield computations. + t['unit'] = FIFF.FIFF_UNIT_T + t['unit_mul'] = FIFF.FIFF_UNITM_F + + # other reference channels are unplugged and should be ignored. + elif (chan['name'].startswith('REF')): + t['coil_type'] = FIFF.FIFFV_COIL_NONE + t['kind'] = FIFF.FIFFV_MISC_CH + t['unit'] = FIFF.FIFF_UNIT_V + info['bads'].append(t['ch_name']) + + elif (chan['name'].startswith(('AUX', 'TRG', 'MIO'))): + t['coil_type'] = FIFF.FIFFV_COIL_NONE + t['unit'] = FIFF.FIFF_UNIT_V + if (chan['name'].startswith('TRG')): + t['kind'] = FIFF.FIFFV_STIM_CH + else: + t['kind'] = FIFF.FIFFV_MISC_CH + else: + raise ValueError('Channel does not match expected' + + ' channel Types:"%s"' % chan['name']) + + # incorporate multiplier (unit_mul) into calibration + t['cal'] *= 10 ** t['unit_mul'] + t['unit_mul'] = FIFF.FIFF_UNITM_NONE + + # append this channel to the info + info['chs'].append(t) + if chan['FLL_ResetLock'] == 'TRUE': + info['bads'].append(t['ch_name']) + + # reduce info['bads'] to unique set + info['bads'] = list(set(info['bads'])) + + # HPI information + # print header_info.keys() + hpi_sub = dict() + # Don't know what event_channel is don't think we have it HPIs are either + # always on or always off. + # hpi_sub['event_channel'] = ??? 
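+    # four HPI coils are assumed below, each driven on a dedicated MIO
+    # channel; the default drive frequencies depend on the sample rate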
+ hpi_sub['hpi_coils'] = [dict(), dict(), dict(), dict()] + hpi_coils = [dict(), dict(), dict(), dict()] + drive_channels = ['MIO_001', 'MIO_003', 'MIO_009', 'MIO_011'] + key_base = 'Head Tracking %s %d' + + # set default HPI frequencies + if info['sfreq'] == 1000: + default_freqs = [140, 150, 160, 40] + else: + default_freqs = [700, 750, 800, 40] + + for i in range(4): + # build coil structure + hpi_coils[i]['number'] = i + 1 + hpi_coils[i]['drive_chan'] = drive_channels[i] + this_freq = header_info.pop(key_base % ('Frequency', i + 1), + default_freqs[i]) + hpi_coils[i]['coil_freq'] = this_freq + + # check if coil is on + if header_info[key_base % ('Channel', i + 1)] == 'OFF': + hpi_sub['hpi_coils'][i]['event_bits'] = [0] + else: + hpi_sub['hpi_coils'][i]['event_bits'] = [256] + + info['hpi_subsystem'] = hpi_sub + info['hpi_meas'] = [{'hpi_coils': hpi_coils}] + # read in digitized points if supplied + if pos_fname is not None: + info['dig'] = _read_pos(pos_fname) + else: + info['dig'] = [] + + info._unlocked = False + info._update_redundant() + return info, header_info + + +class RawArtemis123(BaseRaw): + """Raw object from Artemis123 file. + + Parameters + ---------- + input_fname : str + Path to the Artemis123 data file (ending in ``'.bin'``). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, input_fname, preload=False, verbose=None, + pos_fname=None, add_head_trans=True): # noqa: D102 + from scipy.spatial.distance import cdist + from ...chpi import (compute_chpi_amplitudes, compute_chpi_locs, + _fit_coil_order_dev_head_trans) + input_fname = _check_fname(input_fname, 'read', True, 'input_fname') + fname, ext = op.splitext(input_fname) + if ext == '.txt': + input_fname = fname + '.bin' + elif ext != '.bin': + raise RuntimeError('Valid artemis123 files must end in "txt"' + + ' or ".bin".') + + if not op.exists(input_fname): + raise RuntimeError('%s - Not Found' % input_fname) + + info, header_info = _get_artemis123_info(input_fname, + pos_fname=pos_fname) + + last_samps = [header_info.get('num_samples', 1) - 1] + + super(RawArtemis123, self).__init__( + info, preload, filenames=[input_fname], raw_extras=[header_info], + last_samps=last_samps, orig_format=np.float32, + verbose=verbose) + + if add_head_trans: + n_hpis = 0 + for d in info['hpi_subsystem']['hpi_coils']: + if d['event_bits'] == [256]: + n_hpis += 1 + if n_hpis < 3: + warn('%d HPIs active. At least 3 needed to perform' % n_hpis + + 'head localization\n *NO* head localization performed') + else: + # Localized HPIs using the 1st 250 milliseconds of data. 
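+                # (hedged note: compute_chpi_amplitudes fits per-coil
+                # amplitudes and compute_chpi_locs turns them into
+                # device-frame coil positions; both imported from ...chpi)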
+ with info._unlock(): + info['hpi_results'] = [dict( + dig_points=[dict( + r=np.zeros(3), + coord_frame=FIFF.FIFFV_COORD_DEVICE, + ident=ii + 1) for ii in range(n_hpis)], + coord_trans=Transform('meg', 'head'))] + coil_amplitudes = compute_chpi_amplitudes( + self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25) + assert len(coil_amplitudes['times']) == 1 + coil_locs = compute_chpi_locs(self.info, coil_amplitudes) + with info._unlock(): + info['hpi_results'] = None + hpi_g = coil_locs['gofs'][0] + hpi_dev = coil_locs['rrs'][0] + + # only use HPI coils with localizaton goodness_of_fit > 0.98 + bad_idx = [] + for i, g in enumerate(hpi_g): + msg = 'HPI coil %d - location goodness of fit (%0.3f)' + if g < 0.98: + bad_idx.append(i) + msg += ' *Removed from coregistration*' + logger.info(msg % (i + 1, g)) + hpi_dev = np.delete(hpi_dev, bad_idx, axis=0) + hpi_g = np.delete(hpi_g, bad_idx, axis=0) + + if pos_fname is not None: + # Digitized HPI points are needed. + hpi_head = np.array([d['r'] + for d in self.info.get('dig', []) + if d['kind'] == FIFF.FIFFV_POINT_HPI]) + + if (len(hpi_head) != len(hpi_dev)): + mesg = ("number of digitized (%d) and " + + "active (%d) HPI coils are " + + "not the same.") + raise RuntimeError(mesg % (len(hpi_head), + len(hpi_dev))) + + # compute initial head to dev transform and hpi ordering + head_to_dev_t, order, trans_g = \ + _fit_coil_order_dev_head_trans(hpi_dev, hpi_head) + + # set the device to head transform + self.info['dev_head_t'] = \ + Transform(FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_HEAD, head_to_dev_t) + + # add hpi_meg_dev to dig... + for idx, point in enumerate(hpi_dev): + d = {'r': point, 'ident': idx + 1, + 'kind': FIFF.FIFFV_POINT_HPI, + 'coord_frame': FIFF.FIFFV_COORD_DEVICE} + self.info['dig'].append(DigPoint(d)) + + dig_dists = cdist(hpi_head[order], hpi_head[order]) + dev_dists = cdist(hpi_dev, hpi_dev) + tmp_dists = np.abs(dig_dists - dev_dists) + dist_limit = tmp_dists.max() * 1.1 + + msg = 'HPI-Dig corrregsitration\n' + msg += '\tGOF : %0.3f\n' % trans_g + msg += '\tMax Coil Error : %0.3f cm\n' % (100 * + tmp_dists.max()) + logger.info(msg) + + else: + logger.info('Assuming Cardinal HPIs') + nas = hpi_dev[0] + lpa = hpi_dev[2] + rpa = hpi_dev[1] + t = get_ras_to_neuromag_trans(nas, lpa, rpa) + with self.info._unlock(): + self.info['dev_head_t'] = \ + Transform(FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_HEAD, t) + + # transform fiducial points + nas = apply_trans(t, nas) + lpa = apply_trans(t, lpa) + rpa = apply_trans(t, rpa) + + hpi = apply_trans(self.info['dev_head_t'], hpi_dev) + with self.info._unlock(): + self.info['dig'] = _make_dig_points(nasion=nas, + lpa=lpa, + rpa=rpa, + hpi=hpi) + order = np.array([0, 1, 2]) + dist_limit = 0.005 + + # fill in hpi_results + hpi_result = dict() + + # add HPI points in device coords... + dig = [] + for idx, point in enumerate(hpi_dev): + dig.append({'r': point, 'ident': idx + 1, + 'kind': FIFF.FIFFV_POINT_HPI, + 'coord_frame': FIFF.FIFFV_COORD_DEVICE}) + hpi_result['dig_points'] = dig + + # attach Transform + hpi_result['coord_trans'] = self.info['dev_head_t'] + + # 1 based indexing + hpi_result['order'] = order + 1 + hpi_result['used'] = np.arange(3) + 1 + hpi_result['dist_limit'] = dist_limit + hpi_result['good_limit'] = 0.98 + + # Warn for large discrepancies between digitized and fit + # cHPI locations + if hpi_result['dist_limit'] > 0.005: + warn('Large difference between digitized geometry' + + ' and HPI geometry. Max coil to coil difference' + + ' is %0.2f cm\n' % (100. 
* tmp_dists.max()) + + 'beware of *POOR* head localization') + + # store it + with self.info._unlock(): + self.info['hpi_results'] = [hpi_result] + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype='>f4') diff --git a/python/libs/mne/io/artemis123/resources/Artemis123_ChannelMap.csv b/python/libs/mne/io/artemis123/resources/Artemis123_ChannelMap.csv new file mode 100644 index 0000000..1ee9325 --- /dev/null +++ b/python/libs/mne/io/artemis123/resources/Artemis123_ChannelMap.csv @@ -0,0 +1,146 @@ +name,Channel Type,CAD X+ (INCH),CAD Y+ (INCH),CAD Z+ (INCH),CAD X- (INCH),CAD Y- (INCH),CAD Z- (INCH) +Derived from '90-0395 Channel Map for 6th cooldown 2-01-13.xls',,,,,,, +MEG_059,MEG_GRAD,-1.97677,1.56552,2.91489,-4.18768,2.50074,5.40664 +MEG_045,MEG_GRAD,-1.61144,0.93037,3.41137,-3.33479,1.92534,6.24186 +MEG_029,MEG_GRAD,-0.91075,1.72387,3.473,-1.93587,2.72988,6.62081 +MEG_073,MEG_GRAD,-2.38955,0.86972,2.76491,-4.94504,1.79985,4.90406 +MEG_043,MEG_GRAD,-1.59926,2.33243,2.93122,-3.46787,3.39595,5.64209 +MEG_085,MEG_GRAD,-2.78631,1.40783,1.84839,-5.89386,2.21359,3.13893 +REF_013,UNUSED,,,,,, +MEG_071,MEG_GRAD,-2.43321,2.17533,2.12153,-5.27622,3.05529,3.88634 +MEG_032,MEG_GRAD,0.93037,-1.61144,3.41137,1.92534,-3.33479,6.24186 +MEG_048,MEG_GRAD,1.27145,-2.20222,2.76491,2.6312,-4.55737,4.90406 +MEG_018,MEG_GRAD,0.44157,-2.50427,2.76491,0.91381,-5.18245,4.90406 +MEG_006,MEG_GRAD,0,-3.0105,1.94967,0,-6.23006,3.21696 +MEG_005,MEG_GRAD,0,-1.86073,3.41137,0,-3.85068,6.24186 +MEG_049,MEG_GRAD,-1.27145,-2.20222,2.76491,-2.6312,-4.55737,4.90406 +MEG_019,MEG_GRAD,-0.44157,-2.50427,2.76491,-0.91381,-5.18245,4.90406 +MEG_033,MEG_GRAD,-0.93037,-1.61144,3.41137,-1.92534,-3.33479,6.24186 +MEG_021,MEG_GRAD,-0.56074,-3.168,1.10519,-1.13708,-6.39559,2.21066 +MEG_020,MEG_GRAD,0.56022,-3.16809,1.10519,1.13604,-6.39578,2.21066 +MEG_034,MEG_GRAD,1.02965,-2.82894,1.94967,2.13081,-5.85434,3.21696 +MEG_077,MEG_GRAD,-2.47272,-2.0647,1.06346,-5.01426,-4.15829,2.12604 +MEG_035,MEG_GRAD,-1.02965,-2.82894,1.94967,-2.13081,-5.85434,3.21696 +MEG_007,MEG_GRAD,0,-3.27147,0.25764,0,-6.63351,1.0751 +MEG_023,MEG_GRAD,-0.576,-3.27431,-0.5962,-1.16503,-6.58484,0.21931 +MEG_022,MEG_GRAD,0.56022,-3.27709,-0.59609,1.14872,-6.58771,0.21942 +MEG_047,MEG_GRAD,-1.61144,-0.93037,3.41137,-3.33479,-1.92534,6.24186 +MEG_061,MEG_GRAD,-1.86073,0,3.41137,-3.85068,0,6.24186 +MEG_087,MEG_GRAD,-2.5429,0,2.76491,-5.2624,0,4.90406 +MEG_113,MEG_GRAD,-3.22769,0.0086,0.98505,-6.5452,0.046,1.96703 +MEG_101,MEG_GRAD,-2.96476,-0.52277,1.94967,-6.13541,-1.08184,3.21696 +MEG_099,MEG_GRAD,-2.96476,0.52277,1.94967,-6.13541,1.08184,3.21696 +MEG_063,MEG_GRAD,-1.94798,-1.63455,2.76491,-4.03123,-3.38261,4.90406 +MEG_075,MEG_GRAD,-2.38955,-0.86972,2.76491,-4.94504,-1.79985,4.90406 +MEG_089,MEG_GRAD,-2.60717,-1.50525,1.94967,-5.39539,-3.11503,3.21696 +MEG_123,MEG_GRAD,-3.24454,-0.65992,-1.54654,-6.63007,-1.24165,-1.13258 +MEG_103,MEG_GRAD,-3.03312,-1.09456,1.02677,-6.15066,-2.19102,2.05164 +MEG_119,MEG_GRAD,-3.27163,-0.04807,-0.71822,-6.66217,-0.02172,-0.02891 +MEG_121,MEG_GRAD,-3.24454,0.48346,-1.58979,-6.63007,1.0948,-1.22095 +MEG_105,MEG_GRAD,-3.07707,-1.16672,-0.67591,-6.26323,-2.29919,0.05723 +MEG_091,MEG_GRAD,-2.81455,-1.64764,0.19622,-5.75085,-3.31563,0.94961 +MEG_115,MEG_GRAD,-3.20059,-0.58777,0.15614,-6.53962,-1.15004,0.86771 +MEG_037,MEG_GRAD,-1.11155,-3.07561,0.25023,-2.27119,-6.23333,1.05996 
+MEG_067,MEG_GRAD,-2.08904,-2.51166,0.2289,-4.26844,-5.08104,1.01638 +MEG_079,MEG_GRAD,-2.51137,-2.1514,-0.63867,-5.10885,-4.30296,0.13301 +MEG_093,MEG_GRAD,-2.8532,-1.73435,-1.50591,-5.7542,-3.37531,-0.57691 +MEG_051,MEG_GRAD,-1.61407,-2.7848,1.0907,-3.27306,-5.61852,2.18127 +MEG_065,MEG_GRAD,-1.93511,-2.30617,1.94967,-4.00461,-4.7725,3.21696 +REF_014,UNUSED,,,,,, +MEG_053,MEG_GRAD,-1.64275,-2.88336,-0.61098,-3.33826,-5.79135,0.1893 +MEG_039,MEG_GRAD,-1.37821,4.03301,0.38766,-2.98972,7.09471,0.3625 +MEG_041,MEG_GRAD,-1.59926,3.67934,1.66789,-3.46787,6.31662,2.90266 +MEG_055,MEG_GRAD,-2.06278,3.53364,0.8475,-4.47296,6.00069,1.12372 +MEG_069,MEG_GRAD,-2.43321,2.88136,1.45931,-5.27622,4.58626,2.45038 +MEG_027,MEG_GRAD,-1.02514,3.32279,2.63742,-2.22293,5.54346,5.00502 +MEG_025,MEG_GRAD,-0.92333,4.17235,1.20548,-2.00217,7.38566,1.89996 +MEG_057,MEG_GRAD,-1.84667,3.00588,2.29955,-4.00435,4.85628,4.27238 +REF_015,UNUSED,,,,,, +MEG_083,MEG_GRAD,-2.81067,2.32514,1.52142,-6.13327,3.15736,2.01067 +MEG_095,MEG_GRAD,-2.85632,2.16654,0.82155,-6.24599,2.85761,0.88605 +MEG_117,MEG_GRAD,-3.14455,0.87829,-0.52294,-6.53422,1.56936,-0.45844 +MEG_109,MEG_GRAD,-3.0226,1.3925,0.37679,-6.41227,2.08357,0.44129 +MEG_107,MEG_GRAD,-2.7791,2.44789,0.19401,-6.01824,3.66345,0.23867 +MEG_111,MEG_GRAD,-3.20059,0.54013,0.11348,-6.53962,1.15454,0.78055 +MEG_097,MEG_GRAD,-3.04326,1.22292,1.10768,-6.3884,1.94226,1.62169 +MEG_081,MEG_GRAD,-2.54021,2.92425,0.68688,-5.5195,4.68347,0.71098 +REF_001,REF_MAG,-2.26079604,3.98626183,5.04439808,-2.20703425,3.92437924,4.93090704 +REF_002,REF_MAG,1.93013445,4.03046866,5.17689263,1.8763992,3.96852956,5.06341985 +REF_004,REF_MAG,1.70031266,4.21202221,5.57217923,1.57144014,4.22797498,5.62449924 +REF_012,REF_GRAD,4.64675,-0.89642,-0.43802,6.03162,-1.01804,-0.22614 +REF_006,REF_MAG,2.07781,3.83073028,5.60154279,2.08802749,3.70619491,5.66468189 +REF_008,REF_GRAD,4.50056,0.78066,1.76423,5.88573,0.92199,1.96135 +REF_010,REF_GRAD,4.31926,2.18698,-0.37055,5.69806,2.46181,-0.34022 +MEG_094,REF_GRAD,2.85632,2.16654,0.82155,6.24599,2.85761,0.88605 +REF_016,UNUSED,,,,,, +REF_003,REF_MAG,-2.73073962,4.07852721,5.1569653,-2.8596759,4.06162797,5.1051015 +REF_017,UNUSED,,,,,, +REF_011,REF_GRAD,-4.64675,-0.89642,-0.43802,-6.03162,-1.01804,-0.22614 +REF_009,REF_GRAD,-4.31926,2.18698,-0.37055,-5.69806,2.46181,-0.34022 +REF_007,REF_GRAD,-4.50056,0.78066,1.76423,-5.88573,0.92199,1.96135 +REF_018,UNUSED,,,,,, +REF_005,REF_MAG,-2.4058382,3.78665997,5.47001894,-2.41506358,3.66222139,5.53350068 +MEG_090,MEG_GRAD,2.81455,-1.64764,0.19622,5.75085,-3.31563,0.94961 +MEG_088,MEG_GRAD,2.60717,-1.50525,1.94967,5.39539,-3.11503,3.21696 +MEG_102,MEG_GRAD,3.03294,-1.09506,1.02679,6.1503,-2.19202,2.05167 +MEG_122,MEG_GRAD,3.24454,-0.65992,-1.54654,6.63007,-1.24165,-1.13258 +MEG_114,MEG_GRAD,3.20059,-0.58777,0.15614,6.53962,-1.15004,0.86771 +MEG_104,MEG_GRAD,3.07159,-1.18176,-0.67534,6.25756,-2.31475,0.05782 +MEG_120,MEG_GRAD,3.24454,0.48346,-1.58979,6.63007,1.0948,-1.22094 +MEG_118,MEG_GRAD,3.27163,-0.06408,-0.71761,6.66217,-0.03828,-0.02828 +MEG_106,MEG_GRAD,2.7791,2.44789,0.19401,6.01824,3.66345,0.23867 +MEG_082,MEG_GRAD,2.81067,2.32514,1.52142,6.13327,3.15736,2.01067 +MEG_110,MEG_GRAD,3.20059,0.54013,0.11348,6.53962,1.15454,0.78055 +MEG_116,MEG_GRAD,3.14455,0.87829,-0.52294,6.53422,1.56936,-0.45844 +MEG_096,MEG_GRAD,3.04326,1.22292,1.10768,6.3884,1.94226,1.62169 +MEG_080,MEG_GRAD,2.54021,2.92425,0.68688,5.5195,4.68347,0.71098 +MEG_108,MEG_GRAD,3.0226,1.3925,0.37679,6.41227,2.08357,0.44129 +REF_019,UNUSED,,,,,, 
+MEG_009,MEG_GRAD,-0.48824,4.32904,0.13976,-1.05817,7.74156,0.10133 +MEG_003,MEG_GRAD,0,3.44805,2.77097,0,5.81508,5.29461 +MEG_010,MEG_GRAD,0.51257,3.97032,2.03007,1.11147,6.94759,3.68802 +MEG_012,MEG_GRAD,0.51257,2.67525,3.24478,1.11147,4.13933,6.32201 +MEG_004,MEG_GRAD,0,4.3528,1.03622,0,7.77696,1.53295 +MEG_011,MEG_GRAD,-0.51257,3.97032,2.03007,-1.11147,6.94759,3.68802 +MEG_008,MEG_GRAD,0.48824,4.32904,0.13976,1.05817,7.74156,0.10133 +MEG_013,MEG_GRAD,-0.51257,2.67525,3.24478,-1.11147,4.13933,6.32201 +MEG_024,MEG_GRAD,0.92333,4.17235,1.20548,2.00217,7.38566,1.89996 +REF_020,UNUSED,,,,,, +MEG_068,MEG_GRAD,2.43321,2.88136,1.45931,5.27622,4.58626,2.45038 +MEG_026,MEG_GRAD,1.02514,3.32279,2.63742,2.22293,5.54346,5.00502 +MEG_038,MEG_GRAD,1.37821,4.03301,0.38766,2.98972,7.09471,0.3625 +MEG_040,MEG_GRAD,1.59926,3.67934,1.66789,3.46787,6.31662,2.90266 +MEG_054,MEG_GRAD,2.06278,3.53364,0.8475,4.47296,6.00069,1.12372 +MEG_056,MEG_GRAD,1.84667,3.00588,2.29955,4.00435,4.85628,4.27238 +MEG_058,MEG_GRAD,2.00892,1.56358,2.88668,4.25593,2.49543,5.34722 +MEG_042,MEG_GRAD,1.59926,2.33243,2.93122,3.46787,3.39595,5.64209 +MEG_028,MEG_GRAD,0.90968,1.7238,3.47337,1.93358,2.72985,6.62156 +MEG_070,MEG_GRAD,2.43321,2.17533,2.12153,5.27622,3.05529,3.88634 +REF_021,UNUSED,,,,,, +MEG_072,MEG_GRAD,2.38955,0.86972,2.76491,4.94504,1.79985,4.90406 +MEG_044,MEG_GRAD,1.61144,0.93037,3.41137,3.33479,1.92534,6.24186 +MEG_084,MEG_GRAD,2.78632,1.40783,1.84839,5.89386,2.21359,3.13893 +MEG_046,MEG_GRAD,1.61144,-0.93037,3.41137,3.33479,-1.92534,6.24186 +MEG_098,MEG_GRAD,2.96476,0.52277,1.94967,6.13541,1.08184,3.21696 +MEG_060,MEG_GRAD,1.8607,0,3.41137,3.85068,0,6.24186 +MEG_100,MEG_GRAD,2.96476,-0.52277,1.94967,6.13541,-1.08184,3.21696 +MEG_074,MEG_GRAD,2.38955,-0.86972,2.76491,4.94504,-1.79985,4.90406 +MEG_086,MEG_GRAD,2.5429,0,2.76491,5.2624,0,4.90406 +MEG_062,MEG_GRAD,1.94798,-1.63455,2.76491,4.03123,-3.38261,4.90406 +MEG_112,MEG_GRAD,3.22769,0.00807,0.98507,6.5452,0.04494,1.96707 +MEG_016,MEG_GRAD,0.50538,-0.87535,3.83752,0.89368,-1.5479,7.20924 +MEG_031,MEG_GRAD,-1.01076,0,3.83752,-1.78736,0,7.20924 +MEG_015,MEG_GRAD,-0.50538,0.87535,3.83752,-0.89368,1.5479,7.20924 +MEG_001,MEG_GRAD,0,0,4,0,0,7.46 +MEG_002,MEG_GRAD,0,1.80611,3.59215,0,2.82922,6.89743 +MEG_017,MEG_GRAD,-0.50538,-0.87535,3.83752,-0.89368,-1.5479,7.20924 +MEG_014,MEG_GRAD,0.50538,0.87535,3.83752,0.89368,1.5479,7.20924 +MEG_030,MEG_GRAD,1.01076,0,3.83752,1.78736,0,7.20924 +MEG_050,MEG_GRAD,1.61362,-2.78506,1.09071,3.27214,-5.61905,2.18129 +MEG_064,MEG_GRAD,1.93511,-2.30617,1.94967,4.00461,-4.7725,3.21696 +MEG_076,MEG_GRAD,2.47238,-2.0651,1.06348,5.01358,-4.1591,2.12607 +MEG_078,MEG_GRAD,2.50107,-2.16367,-0.6382,5.0982,-4.31565,0.13349 +MEG_066,MEG_GRAD,2.08904,-2.51166,0.2289,4.26844,-5.08104,1.01638 +MEG_036,MEG_GRAD,1.11155,-3.07561,0.25023,2.27119,-6.23333,1.05996 +MEG_052,MEG_GRAD,1.62888,-2.89137,-0.61068,3.32391,-5.79963,0.18962 +MEG_092,MEG_GRAD,2.8532,-1.73435,-1.50591,5.7542,-3.37531,-0.57691 diff --git a/python/libs/mne/io/artemis123/resources/Artemis123_mneLoc.csv b/python/libs/mne/io/artemis123/resources/Artemis123_mneLoc.csv new file mode 100644 index 0000000..cdad771 --- /dev/null +++ b/python/libs/mne/io/artemis123/resources/Artemis123_mneLoc.csv @@ -0,0 +1,144 @@ +MEG_001,0.0,0.0,0.10160000191,1.0,-0.0,-0.0,-0.0,1.0,-0.0,0.0,0.0,1.0 +MEG_002,0.0,0.0458751948625,0.0912406117153,1.0,-0.0,-0.0,-0.0,0.955282042035,-0.295696161906,0.0,0.295696161906,0.955282042035 
+MEG_003,0.0,0.0875804716465,0.0703826393232,1.0,-0.0,-0.0,-0.0,0.729376031116,-0.684113006186,0.0,0.684113006186,0.729376031116 +MEG_004,0.0,0.110561122079,0.0263199884948,1.0,-0.0,-0.0,-0.0,0.143563509474,-0.989641106032,0.0,0.989641106032,0.143563509474 +MEG_005,0.0,-0.0472625428885,0.086648799629,1.0,0.0,-0.0,0.0,0.818061560022,0.575130666904,0.0,-0.575130666904,0.818061560022 +MEG_006,0.0,-0.0764667014376,0.049521618931,1.0,0.0,-0.0,0.0,0.366268930876,0.930509038255,0.0,-0.930509038255,0.366268930876 +MEG_007,0.0,-0.0830953395622,0.00654405612303,1.0,0.0,-0.0,0.0,0.236260571358,0.971689735678,0.0,-0.971689735678,0.236260571358 +MEG_008,0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,-0.164284112711,-0.164719723212,-0.164284112711,0.0163303909087,-0.986277875978,0.164719723212,0.986277875978,-0.0111069411385 +MEG_009,-0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,0.164284112711,0.164719723212,0.164284112711,0.0163303909087,-0.986277875978,-0.164719723212,0.986277875978,-0.0111069411385 +MEG_010,0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,-0.100693145673,-0.173092369408,-0.100693145673,0.499431154085,-0.860482081594,0.173092369408,0.860482081594,0.479175979061 +MEG_011,-0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,0.100693145673,0.173092369408,0.100693145673,0.499431154085,-0.860482081594,-0.173092369408,0.860482081594,0.479175979061 +MEG_012,0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,-0.038765954324,-0.17309280415,-0.038765954324,0.905232161619,-0.423145287527,0.17309280415,0.423145287527,0.889374469386 +MEG_013,-0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,0.038765954324,0.17309280415,0.038765954324,0.905232161619,-0.423145287527,-0.17309280415,0.423145287527,0.889374469386 +MEG_014,0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,-0.112225451567,-0.0110480572384,0.980864355149,-0.194378643965,0.112225451567,0.194378643965,0.974485705791 +MEG_015,-0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,0.112225451567,0.0110480572384,0.980864355149,-0.194378643965,-0.112225451567,0.194378643965,0.974485705791 +MEG_016,0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,-0.112225451567,0.0110480572384,0.980864355149,0.194378643965,0.112225451567,-0.194378643965,0.974485705791 +MEG_017,-0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,0.112225451567,-0.0110480572384,0.980864355149,0.194378643965,-0.112225451567,-0.194378643965,0.974485705791 +MEG_018,0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,0.0652835409099,-0.136485426846,0.0652835409099,0.629762253104,0.774039768908,0.136485426846,-0.774039768908,0.61825089115 +MEG_019,-0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,-0.0652835409099,0.136485426846,-0.0652835409099,0.629762253104,0.774039768908,-0.136485426846,-0.774039768908,0.61825089115 +MEG_020,0.0142295882675,-0.0804694875128,0.0280718265278,0.979010049739,0.117656650617,-0.166421858768,0.117656650617,0.340489745704,0.93285778425,0.166421858768,-0.93285778425,0.319499795443 +MEG_021,-0.0142427962678,-0.0804672015128,0.0280718265278,0.978972050612,-0.117759654311,0.166572470526,-0.117759654311,0.340528364062,0.932830690472,-0.166572470526,-0.932830690472,0.319500414673 
+MEG_022,0.0142295882675,-0.0832380875649,-0.0151406862846,0.976588506526,0.131701883642,-0.170086750705,0.131701883642,0.259108088321,0.956826845574,0.170086750705,-0.956826845574,0.235696594848 +MEG_023,-0.0146304002751,-0.0831674755635,-0.0151434802847,0.976546368958,-0.131816629327,0.170239729521,-0.131816629327,0.259149948413,0.956799707604,-0.170239729521,-0.956799707604,0.235696317371 +MEG_024,0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,-0.241167202261,-0.311803997292,-0.241167202261,0.281686827798,-0.928703888007,0.311803997292,0.928703888007,0.200717103592 +MEG_025,-0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,0.241167202261,0.311803997292,0.241167202261,0.281686827798,-0.928703888007,-0.311803997292,0.928703888007,0.200717103592 +MEG_026,0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,-0.131916373824,-0.346181995721,-0.131916373824,0.755430639878,-0.641811980763,0.346181995721,0.641811980763,0.68427728823 +MEG_027,-0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,0.131916373824,0.346181995721,0.131916373824,0.755430639878,-0.641811980763,-0.346181995721,0.641811980763,0.68427728823 +MEG_028,0.0231058724344,0.0437845208231,0.0882235996586,0.954148215535,-0.0450524345745,-0.295924755521,-0.0450524345745,0.955732979975,-0.290765797726,0.295924755521,0.290765797726,0.90988119551 +MEG_029,-0.0231330504349,0.0437862988232,0.0882142016584,0.954036318954,0.0451068389737,0.296277024411,0.0451068389737,0.955734030088,-0.290753911081,-0.296277024411,0.290753911081,0.909770349043 +MEG_030,0.0256733044827,0.0,0.0974730098325,0.974485414074,-0.0,-0.224450835944,-0.0,1.0,-0.0,0.224450835944,0.0,0.974485414074 +MEG_031,-0.0256733044827,0.0,0.0974730098325,0.974485414074,0.0,0.224450835944,0.0,1.0,-0.0,-0.224450835944,0.0,0.974485414074 +MEG_032,0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,0.078781387629,-0.287563894118,0.078781387629,0.863545730655,0.498078572146,0.287563894118,-0.498078572146,0.818061576392 +MEG_033,-0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,-0.078781387629,0.287563894118,-0.078781387629,0.863545730655,0.498078572146,-0.287563894118,-0.498078572146,0.818061576392 +MEG_034,0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,0.203678027441,-0.318254036858,0.203678027441,0.440401481873,0.874392243734,0.318254036858,-0.874392243734,0.366268442706 +MEG_035,-0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,-0.203678027441,0.318254036858,-0.203678027441,0.440401481873,0.874392243734,-0.318254036858,-0.874392243734,0.366268442706 +MEG_036,0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,0.247867468623,-0.335155744599,0.247867468623,0.325052548187,0.91263495381,0.335155744599,-0.91263495381,0.23402578479 +MEG_037,-0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,-0.247867468623,0.335155744599,-0.247867468623,0.325052548187,0.91263495381,-0.335155744599,-0.91263495381,0.23402578479 +MEG_038,0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,-0.415157481208,-0.465754249753,-0.415157481208,0.211244323513,-0.884884230609,0.465754249753,0.884884230609,-0.00727167496558 +MEG_039,-0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,0.415157481208,0.465754249753,0.415157481208,0.211244323513,-0.884884230609,-0.465754249753,0.884884230609,-0.00727167496558 
+MEG_040,0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,-0.303378150058,-0.540060556425,-0.303378150058,0.57182444299,-0.762219459517,0.540060556425,0.762219459517,0.356869851524 +MEG_041,-0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,0.303378150058,0.540060556425,0.303378150058,0.57182444299,-0.762219459517,-0.540060556425,0.762219459517,0.356869851524 +MEG_042,0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,-0.0930769183398,-0.540060822675,-0.0930769183398,0.947025241119,-0.307375795983,0.540060822675,0.307375795983,0.7834886265 +MEG_043,-0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,0.0930769183398,0.540060822675,0.0930769183398,0.947025241119,-0.307375795983,-0.540060822675,0.307375795983,0.7834886265 +MEG_044,0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,-0.498078572146,-0.078781387629,0.954515845737,-0.287563894118,0.498078572146,0.287563894118,0.818061576392 +MEG_045,-0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,0.078781387629,0.498078572146,0.078781387629,0.954515845737,-0.287563894118,-0.498078572146,0.287563894118,0.818061576392 +MEG_046,0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,0.078781387629,-0.498078572146,0.078781387629,0.954515845737,0.287563894118,0.498078572146,-0.287563894118,0.818061576392 +MEG_047,-0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,0.498078572146,-0.078781387629,0.954515845737,0.287563894118,-0.498078572146,-0.287563894118,0.818061576392 +MEG_048,0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,0.165302346745,-0.392991094644,0.165302346745,0.713688676641,0.680678784005,0.392991094644,-0.680678784005,0.618251075645 +MEG_049,-0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,-0.165302346745,0.392991094644,-0.165302346745,0.713688676641,0.680678784005,-0.392991094644,-0.680678784005,0.618251075645 +MEG_050,0.0409859487705,-0.0707405253299,0.0277040345208,0.825297111533,0.298522923381,-0.479341988471,0.298522923381,0.489900043633,0.819073874241,0.479341988471,-0.819073874241,0.315197155166 +MEG_051,-0.0409973787708,-0.0707339213298,0.0277037805208,0.825197788666,-0.298579570884,0.479477683976,-0.298579570884,0.489996382374,0.818995595294,-0.479477683976,-0.818995595294,0.31519417104 +MEG_052,0.0413735527778,-0.0734407993807,-0.0155112722916,0.805087785644,0.334422043575,-0.489893411036,0.334422043575,0.426212956438,0.840538168399,0.489893411036,-0.840538168399,0.231300742083 +MEG_053,-0.0417258507844,-0.0732373453769,-0.0155188922918,0.804976833568,-0.334486625117,0.490031626568,-0.334486625117,0.426317886079,0.840459253996,-0.490031626568,-0.840459253996,0.231294719647 +MEG_054,0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,-0.459958724875,-0.696583791076,-0.459958724875,0.529188204946,-0.713020206696,0.696583791076,0.713020206696,0.0798323671971 +MEG_055,-0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,0.459958724875,0.696583791076,0.459958724875,0.529188204946,-0.713020206696,-0.696583791076,0.713020206696,0.0798323671971 +MEG_056,0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,-0.212397698952,-0.623606380317,-0.212397698952,0.817850328992,-0.534797210957,0.623606380317,0.534797210957,0.570181572467 +MEG_057,-0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,0.212397698952,0.623606380317,0.212397698952,0.817850328992,-0.534797210957,-0.623606380317,0.534797210957,0.570181572467 
+MEG_058,0.0510265689593,0.0397149327466,0.0733216733784,0.75352606525,-0.102214380931,-0.649423351381,-0.102214380931,0.95761101603,-0.269320185484,0.649423351381,0.269320185484,0.71113708128 +MEG_059,-0.0502099589439,0.0397642087476,0.0740382073919,0.762632097113,0.100407167247,0.638991928915,0.100407167247,0.957527538003,-0.27029505125,-0.638991928915,0.27029505125,0.720159635116 +MEG_060,0.0472617808885,0.0,0.086648799629,0.818057480605,-0.0,-0.575136469394,-0.0,1.0,-0.0,0.575136469394,0.0,0.818057480605 +MEG_061,-0.0472625428885,0.0,0.086648799629,0.818061560022,0.0,0.575130666904,0.0,1.0,-0.0,-0.575130666904,0.0,0.818061560022 +MEG_062,0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,0.187974664221,-0.602095198473,0.187974664221,0.842270014862,0.505219504448,0.602095198473,-0.505219504448,0.618251263081 +MEG_063,-0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,-0.187974664221,0.602095198473,-0.187974664221,0.842270014862,0.505219504448,-0.602095198473,-0.505219504448,0.618251263081 +MEG_064,0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,0.312052080775,-0.598120441436,0.312052080775,0.628111423833,0.712811011513,0.598120441436,-0.712811011513,0.366268206923 +MEG_065,-0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,-0.312052080775,0.598120441436,-0.312052080775,0.628111423833,0.712811011513,-0.598120441436,-0.712811011513,0.366268206923 +MEG_066,0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,0.381028180993,-0.629883796022,0.381028180993,0.550790957291,0.742594671847,0.629883796022,-0.742594671847,0.227595159994 +MEG_067,-0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,-0.381028180993,0.629883796022,-0.381028180993,0.550790957291,0.742594671847,-0.629883796022,-0.742594671847,0.227595159994 +MEG_068,0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,-0.314728784866,-0.821678860785,-0.314728784866,0.811263025695,-0.492745466865,0.821678860785,0.492745466865,0.286436301159 +MEG_069,-0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,0.314728784866,0.821678860785,0.314728784866,0.811263025695,-0.492745466865,-0.821678860785,0.492745466865,0.286436301159 +MEG_070,0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,-0.138386914129,-0.821679540869,-0.138386914129,0.957166893906,-0.254323807789,0.821679540869,0.254323807789,0.510060910979 +MEG_071,-0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,0.138386914129,0.821679540869,0.138386914129,0.957166893906,-0.254323807789,-0.821679540869,0.254323807789,0.510060910979 +MEG_072,0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,-0.738579885939,-0.12269267874,0.955343146999,-0.268823321284,0.738579885939,0.268823321284,0.61825057543 +MEG_073,-0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,0.738579885939,0.12269267874,0.955343146999,-0.268823321284,-0.738579885939,0.268823321284,0.61825057543 +MEG_074,0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,-0.738579885939,0.12269267874,0.955343146999,0.268823321284,0.738579885939,-0.268823321284,0.61825057543 +MEG_075,-0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,0.738579885939,-0.12269267874,0.955343146999,0.268823321284,-0.738579885939,-0.268823321284,0.61825057543 
+MEG_076,0.0627984531806,-0.0524535409861,0.0270123925078,0.587320034467,0.340056606259,-0.73444991773,0.340056606259,0.719786504995,0.605201529878,0.73444991773,-0.605201529878,0.307106539462 +MEG_077,-0.0628070891808,-0.0524433809859,0.0270118845078,0.587208379993,-0.340036516337,0.734548491268,-0.340036516337,0.719895397972,0.605083286446,-0.734548491268,-0.605083286446,0.307103777966 +MEG_078,0.0635271791943,-0.0549572190332,-0.0162102803048,0.539322307512,0.381717195782,-0.750615368258,0.381717195782,0.683709413476,0.621959339803,0.750615368258,-0.621959339803,0.223031720988 +MEG_079,-0.0637887991992,-0.0546455610273,-0.016222218305,0.539196876386,-0.381695169412,0.750716675014,-0.381695169412,0.683831999207,0.621838077403,-0.750716675014,-0.621838077403,0.223028875593 +MEG_080,0.064521335213,0.0742759513964,0.017446752328,0.263693428198,-0.434776489447,-0.861066304154,-0.434776489447,0.743271888347,-0.508444986421,0.861066304154,0.508444986421,0.00696531654525 +MEG_081,-0.064521335213,0.0742759513964,0.017446752328,0.263693428198,0.434776489447,0.861066304154,0.434776489447,0.743271888347,-0.508444986421,-0.861066304154,0.508444986421,0.00696531654525 +MEG_082,0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,-0.202359959395,-0.960287956478,-0.202359959395,0.949314390716,-0.240525745844,0.960287956478,0.240525745844,0.141401577893 +MEG_083,-0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,0.202359959395,0.960287956478,0.202359959395,0.949314390716,-0.240525745844,-0.960287956478,0.240525745844,0.141401577893 +MEG_084,0.0707725293305,0.0357588826723,0.0469491068826,0.412488973038,-0.15233685973,-0.89813491653,-0.15233685973,0.960500283795,-0.232879123147,0.89813491653,0.232879123147,0.372989256833 +MEG_085,-0.0707722753305,0.0357588826723,0.0469491068826,0.412487827635,0.152336666507,0.898135475354,0.152336666507,0.960500461005,-0.232878518647,-0.898135475354,0.232878518647,0.37298828864 +MEG_086,0.0645896612143,0.0,0.0702287153203,0.618250335472,-0.0,-0.785981248306,-0.0,1.0,-0.0,0.785981248306,0.0,0.618250335472 +MEG_087,-0.0645896612143,0.0,0.0702287153203,0.618250335472,0.0,0.785981248306,0.0,1.0,-0.0,-0.785981248306,0.0,0.618250335472 +MEG_088,0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,0.274413611706,-0.80584438968,0.274413611706,0.841567184852,0.46525460029,0.80584438968,-0.46525460029,0.36626899477 +MEG_089,-0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,-0.274413611706,0.80584438968,-0.274413611706,0.841567184852,0.46525460029,-0.80584438968,-0.46525460029,0.36626899477 +MEG_090,0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,0.335957722235,-0.848640029826,0.335957722235,0.809156380101,0.482077132224,0.848640029826,-0.482077132224,0.217742366948 +MEG_091,-0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,-0.335957722235,0.848640029826,-0.335957722235,0.809156380101,0.482077132224,-0.848640029826,-0.482077132224,0.217742366948 +MEG_092,0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,0.313476011591,-0.83843959625,0.313476011591,0.822681283702,0.474266059932,0.83843959625,-0.474266059932,0.26849720266 +MEG_093,-0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,-0.313476011591,0.83843959625,-0.313476011591,0.822681283702,0.474266059932,-0.83843959625,-0.474266059932,0.26849720266 
+MEG_094,0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_095,-0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_096,0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,-0.175001933135,-0.966802743999,-0.175001933135,0.962367527045,-0.20790157837,0.966802743999,0.20790157837,0.148557692187 +MEG_097,-0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,0.175001933135,0.966802743999,0.175001933135,0.962367527045,-0.20790157837,-0.966802743999,0.20790157837,0.148557692187 +MEG_098,0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,-0.91637265511,-0.108374224505,0.980890739219,-0.1615808936,0.91637265511,0.1615808936,0.366268715277 +MEG_099,-0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,0.108374224505,0.91637265511,0.108374224505,0.980890739219,-0.1615808936,-0.91637265511,0.1615808936,0.366268715277 +MEG_100,0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,0.108374224505,-0.91637265511,0.108374224505,0.980890739219,0.1615808936,0.91637265511,-0.1615808936,0.366268715277 +MEG_101,-0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,0.91637265511,-0.108374224505,0.980890739219,0.1615808936,-0.91637265511,-0.1615808936,0.366268715277 +MEG_102,0.0770366774483,-0.0278145245229,0.0260804664903,0.373752636547,0.220368615692,-0.900969832953,0.220368615692,0.922455039947,0.31704001718,0.900969832953,-0.31704001718,0.296207676495 +MEG_103,-0.0770412494484,-0.0278018245227,0.0260799584903,0.373679152588,-0.220281297547,0.901021665041,-0.220281297547,0.92252557096,0.31689544155,-0.901021665041,-0.31689544155,0.296204723548 +MEG_104,0.0780183874667,-0.0300167045643,-0.0171536363225,0.300373897506,0.24880001314,-0.920800779299,0.24880001314,0.911522102566,0.327453828799,0.920800779299,-0.327453828799,0.211896000073 +MEG_105,-0.0781575794694,-0.0296346885571,-0.0171681143228,0.300287289279,-0.248701776907,0.920855564169,-0.248701776907,0.911602900892,0.327303494098,-0.920855564169,-0.327303494098,0.211890190171 +MEG_106,0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,-0.324701145067,-0.936167295022,-0.324701145067,0.878148606143,-0.351317793345,0.936167295022,0.351317793345,0.0129075098315 +MEG_107,-0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,0.324701145067,0.936167295022,0.324701145067,0.878148606143,-0.351317793345,-0.936167295022,0.351317793345,0.0129075098315 +MEG_108,0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_109,-0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_110,0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,-0.143668166804,-0.965037436268,-0.143668166804,0.973563831901,-0.177575119486,0.965037436268,0.177575119486,0.192794770521 +MEG_111,-0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,0.143668166804,0.965037436268,0.143668166804,0.973563831901,-0.177575119486,-0.965037436268,0.177575119486,0.192794770521 
+MEG_112,0.0819833275413,0.000204978003854,0.0250207784704,0.283903999524,-0.00795851694118,-0.958819681203,-0.00795851694118,0.999911550977,-0.010656088948,0.958819681203,0.010656088948,0.283815550501 +MEG_113,-0.0819833275413,0.000218440004107,0.0250202704704,0.283900779742,0.00807295557139,0.958819677859,0.00807295557139,0.999908989411,-0.0108092683826,-0.958819677859,0.0108092683826,0.283809769153 +MEG_114,0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,0.130073723873,-0.965037541675,0.130073723873,0.978096467321,0.162505775197,0.965037541675,-0.162505775197,0.205656062847 +MEG_115,-0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,-0.130073723873,0.965037541675,-0.130073723873,0.978096467321,0.162505775197,-0.965037541675,-0.162505775197,0.205656062847 +MEG_116,0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_117,-0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_118,0.0830994035623,-0.0016276320306,-0.0182272943427,0.199274663362,-0.00609304526277,-0.979924733508,-0.00609304526277,0.999953635537,-0.00745664647062,0.979924733508,0.00745664647062,0.199228298899 +MEG_119,-0.0830994035623,-0.00122097802295,-0.018242788343,0.199270871862,0.00622296522868,0.979924688091,0.00622296522868,0.999951637458,-0.00761560563545,-0.979924688091,0.00761560563545,0.19922250932 +MEG_120,0.0824113175493,0.0122798842309,-0.0403806667592,0.134815219165,-0.156230210311,-0.978476866394,-0.156230210311,0.971788825746,-0.176687859065,0.978476866394,0.176687859065,0.106604044912 +MEG_121,-0.0824113175493,0.0122798842309,-0.0403806667592,0.134812452063,0.156230709979,0.978477167862,0.156230709979,0.971788735519,-0.176687913503,-0.978477167862,0.176687913503,0.106601187582 +MEG_122,0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,0.146932333372,-0.978477448481,0.146932333372,0.974752861061,0.168130155723,0.978477448481,-0.168130155723,0.119641688177 +MEG_123,-0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,-0.146932333372,0.978477448481,-0.146932333372,0.974752861061,0.168130155723,-0.978477448481,-0.168130155723,0.119641688177 +REF_001,-0.0574242204956,0.101251052386,0.128127713641,0.221198761686,0.896440347728,-0.384012774259,0.896440347728,-0.0318490232173,0.442018486814,0.384012774259,-0.442018486814,-0.810650261531 +REF_002,0.0490254159517,0.102373905889,0.131493075274,0.222503034056,-0.896198721013,0.383823204472,-0.896198721013,-0.0330228704748,0.442422131545,-0.383823204472,-0.442422131545,-0.810519836419 +REF_003,-0.069360787652,0.103594593082,0.130986921083,-0.347310972431,-0.17658747001,0.920973373049,-0.17658747001,0.976855280479,0.120708849866,-0.920973373049,-0.120708849866,-0.370455691952 +REF_004,0.0431879423759,0.106985366145,0.141533355103,0.383166262537,0.0763561288473,0.920517982899,0.0763561288473,0.990548087664,-0.113948355026,-0.920517982899,0.113948355026,0.3737143502 +REF_005,-0.0611082914288,0.0961811650462,0.138938483688,0.997012450802,-0.040298218601,0.0658955728709,-0.040298218601,0.456428559123,0.888847019455,-0.0658955728709,-0.888847019455,0.453441009925 
+REF_006,0.0527763749922,0.0973005509413,0.142279189541,0.99632914816,0.0447419955488,-0.072982068763,0.0447419955488,0.454664406796,0.889538324653,0.072982068763,-0.889538324653,0.450993554956 +REF_007,-0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,0.0868247934117,0.985012933323,0.0868247934117,0.991141197071,-0.100501655296,-0.985012933323,0.100501655296,0.14017467128 +REF_008,0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,-0.0868247934117,-0.985012933323,-0.0868247934117,0.991141197071,-0.100501655296,0.985012933323,0.100501655296,0.14017467128 +REF_009,-0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,0.187574011648,0.98047954998,0.187574011648,0.96261171626,-0.195434576966,-0.98047954998,0.195434576966,0.0215679901007 +REF_010,0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,-0.187574011648,-0.98047954998,-0.187574011648,0.96261171626,-0.195434576966,0.98047954998,0.195434576966,0.0215679901007 +REF_011,-0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,-0.0740177576494,0.984793851615,-0.0740177576494,0.993499722223,0.0864851056297,-0.984793851615,-0.0864851056297,0.150669825529 +REF_012,0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,0.0740177576494,-0.984793851615,0.0740177576494,0.993499722223,0.0864851056297,0.984793851615,-0.0864851056297,0.150669825529 +REF_013,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_014,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_015,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_016,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_017,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_018,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_019,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_020,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_021,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 diff --git a/python/libs/mne/io/artemis123/tests/__init__.py b/python/libs/mne/io/artemis123/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/artemis123/tests/test_artemis123.py b/python/libs/mne/io/artemis123/tests/test_artemis123.py new file mode 100644 index 0000000..1148724 --- /dev/null +++ b/python/libs/mne/io/artemis123/tests/test_artemis123.py @@ -0,0 +1,115 @@ + +# Author: Luke Bloy +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import pytest + +from mne.io import read_raw_artemis123 +from mne.io.tests.test_raw import _test_raw_reader +from mne.datasets import testing +from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs +from mne import pick_types +from mne.transforms import rot_to_quat, _angle_between_quats +from mne.io.constants import FIFF + +artemis123_dir = op.join(testing.data_path(download=False), 'ARTEMIS123') + +short_HPI_dip_fname = op.join(artemis123_dir, + 'Artemis_Data_2017-04-04-15h-44m-' + + '22s_Motion_Translation-z.bin') + +dig_fname = op.join(artemis123_dir, 'Phantom_040417_dig.pos') + +short_hpi_1kz_fname = op.join(artemis123_dir, 'Artemis_Data_2017-04-14-10h' + + '-38m-59s_Phantom_1k_HPI_1s.bin') + + +# XXX this tol is way too high, but it's not clear which is correct +# (old or new) +def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.): + __tracebackhide__ = True + trans_est = actual[0:3, 3] + quat_est = rot_to_quat(actual[0:3, 0:3]) + trans = desired[0:3, 3] + quat = rot_to_quat(desired[0:3, 0:3]) + + angle = 
np.rad2deg(_angle_between_quats(quat_est, quat)) + dist = np.linalg.norm(trans - trans_est) + assert dist <= dist_tol, \ + '%0.3f > %0.3f mm translation' % (1000 * dist, 1000 * dist_tol) + assert angle <= angle_tol, \ + '%0.3f > %0.3f° rotation' % (angle, angle_tol) + + +@pytest.mark.timeout(60) # ~25 sec on Travis Linux OpenBLAS +@testing.requires_testing_data +def test_artemis_reader(): + """Test reading raw Artemis123 files.""" + _test_raw_reader(read_raw_artemis123, input_fname=short_hpi_1kz_fname, + pos_fname=dig_fname, verbose='error') + + +@pytest.mark.timeout(60) +@testing.requires_testing_data +def test_dev_head_t(): + """Test dev_head_t computation for Artemis123.""" + # test a random selected point + raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True, + add_head_trans=False) + meg_picks = pick_types(raw.info, meg=True, eeg=False) + + # checked against matlab reader. + assert_allclose(raw[meg_picks[12]][0][0][123], 1.08239606023e-11) + + dev_head_t_1 = np.array([[9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04], + [-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03], + [1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03], + [0.0, 0.0, 0.0, 1.0]]) + + dev_head_t_2 = np.array([[9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04], + [-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03], + [-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02], + [0.0, 0.0, 0.0, 1.0]]) + + expected_dev_hpi_rr = np.array([[-0.01579644, 0.06527367, 0.00152648], + [0.06666813, 0.0148956, 0.00545488], + [-0.06699212, -0.01732376, 0.0112027]]) + # test with head loc no digitization + raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True) + _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1) + assert_equal(raw.info['sfreq'], 5000.0) + + # test with head loc and digitization + with pytest.warns(RuntimeWarning, match='Large difference'): + raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True, + pos_fname=dig_fname) + _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1) + + # test cHPI localization.. 
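# A minimal standalone sketch of the check implemented by _assert_trans above,
# which reduces a rigid 4x4 transform comparison to two scalars: a translation
# distance taken from the last column, and a rotation angle computed via
# quaternions. It assumes only that mne is importable; the identity matrices
# stand in for real device-to-head transforms (illustrative, not taken from
# the test data), and _angle_between_quats is the same private helper the
# test itself imports:
import numpy as np
from mne.transforms import rot_to_quat, _angle_between_quats

T_est, T_ref = np.eye(4), np.eye(4)  # estimated vs. reference transform
dist_m = np.linalg.norm(T_est[:3, 3] - T_ref[:3, 3])  # translation error (m)
angle_deg = np.rad2deg(_angle_between_quats(
    rot_to_quat(T_est[:3, :3]), rot_to_quat(T_ref[:3, :3])))  # rotation (deg)
assert dist_m <= 0.017 and angle_deg <= 5.0  # the test's dist_tol / angle_tol
# The cHPI localization check announced above resumes in the patch below.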
+ dev_hpi_rr = np.array([p['r'] for p in raw.info['dig'] + if p['coord_frame'] == FIFF.FIFFV_COORD_DEVICE]) + # points should be within 0.1 mm (1e-4m) and within 1% + assert_allclose(dev_hpi_rr, expected_dev_hpi_rr, atol=1e-4, rtol=0.01) + + # test 1kz hpi head loc (different freq) + raw = read_raw_artemis123(short_hpi_1kz_fname, add_head_trans=True) + _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_2) + assert_equal(raw.info['sfreq'], 1000.0) + + +def test_utils(tmp_path): + """Test artemis123 utils.""" + # make a tempfile + tmp_dir = str(tmp_path) + tmp_fname = op.join(tmp_dir, 'test_gen_mne_locs.csv') + _generate_mne_locs_file(tmp_fname) + installed_locs = _load_mne_locs() + generated_locs = _load_mne_locs(tmp_fname) + assert_equal(set(installed_locs.keys()), set(generated_locs.keys())) + for key in installed_locs.keys(): + assert_allclose(installed_locs[key], generated_locs[key], atol=1e-7) diff --git a/python/libs/mne/io/artemis123/utils.py b/python/libs/mne/io/artemis123/utils.py new file mode 100644 index 0000000..de7e98c --- /dev/null +++ b/python/libs/mne/io/artemis123/utils.py @@ -0,0 +1,119 @@ +import numpy as np +import os.path as op +from .._digitization import _artemis123_read_pos +from ...utils import logger +from ...transforms import rotation3d_align_z_axis + + +def _load_mne_locs(fname=None): + """Load MNE locs structure from file (if exists) or recreate it.""" + if (not fname): + # find input file + resource_dir = op.join(op.dirname(op.abspath(__file__)), 'resources') + fname = op.join(resource_dir, 'Artemis123_mneLoc.csv') + + if not op.exists(fname): + raise IOError('MNE locs file "%s" does not exist' % (fname)) + + logger.info('Loading mne loc file {}'.format(fname)) + locs = dict() + with open(fname, 'r') as fid: + for line in fid: + vals = line.strip().split(',') + locs[vals[0]] = np.array(vals[1::], np.float64) + + return locs + + +def _generate_mne_locs_file(output_fname): + """Generate mne coil locs and save to supplied file.""" + logger.info('Converting Tristan coil file to mne loc file...') + resource_dir = op.join(op.dirname(op.abspath(__file__)), 'resources') + chan_fname = op.join(resource_dir, 'Artemis123_ChannelMap.csv') + chans = _load_tristan_coil_locs(chan_fname) + + # compute a dict of loc structs + locs = {n: _compute_mne_loc(cinfo) for n, cinfo in chans.items()} + + # write it out to output_fname + with open(output_fname, 'w') as fid: + for n in sorted(locs.keys()): + fid.write('%s,' % n) + fid.write(','.join(locs[n].astype(str))) + fid.write('\n') + + +def _load_tristan_coil_locs(coil_loc_path): + """Load the Coil locations from Tristan CAD drawings.""" + channel_info = dict() + with open(coil_loc_path, 'r') as fid: + # skip 2 Header lines + fid.readline() + fid.readline() + for line in fid: + line = line.strip() + vals = line.split(',') + channel_info[vals[0]] = dict() + if vals[6]: + channel_info[vals[0]]['inner_coil'] = \ + np.array(vals[2:5], np.float64) + channel_info[vals[0]]['outer_coil'] = \ + np.array(vals[5:8], np.float64) + else: # nothing supplied + channel_info[vals[0]]['inner_coil'] = np.zeros(3) + channel_info[vals[0]]['outer_coil'] = np.zeros(3) + return channel_info + + +def _compute_mne_loc(coil_loc): + """Convert a set of coils to an mne Struct. + + Note input coil locations are in inches. 
+ """ + loc = np.zeros((12)) + if (np.linalg.norm(coil_loc['inner_coil']) == 0) and \ + (np.linalg.norm(coil_loc['outer_coil']) == 0): + return loc + + # channel location is inner coil location converted to meters From inches + loc[0:3] = coil_loc['inner_coil'] / 39.370078 + + # figure out rotation + z_axis = coil_loc['outer_coil'] - coil_loc['inner_coil'] + R = rotation3d_align_z_axis(z_axis) + loc[3:13] = R.T.reshape(9) + return loc + + +def _read_pos(fname): + """Read the .pos file and return positions as dig points.""" + nas, lpa, rpa, hpi, extra = None, None, None, None, None + with open(fname, 'r') as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.split() + # The lines can have 4 or 5 parts. First part is for the id, + # which can be an int or a string. The last three are for xyz + # coordinates. The extra part is for additional info + # (e.g. 'Pz', 'Cz') which is ignored. + if len(parts) not in [4, 5]: + continue + + if parts[0].lower() == 'nasion': + nas = np.array([float(p) for p in parts[-3:]]) / 100. + elif parts[0].lower() == 'left': + lpa = np.array([float(p) for p in parts[-3:]]) / 100. + elif parts[0].lower() == 'right': + rpa = np.array([float(p) for p in parts[-3:]]) / 100. + elif 'hpi' in parts[0].lower(): + if hpi is None: + hpi = list() + hpi.append(np.array([float(p) for p in parts[-3:]]) / 100.) + else: + if extra is None: + extra = list() + extra.append(np.array([float(p) + for p in parts[-3:]]) / 100.) + + return _artemis123_read_pos(nas, lpa, rpa, hpi, extra) diff --git a/python/libs/mne/io/base.py b/python/libs/mne/io/base.py new file mode 100644 index 0000000..af86215 --- /dev/null +++ b/python/libs/mne/io/base.py @@ -0,0 +1,2586 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Martin Luessi +# Denis Engemann +# Teon Brooks +# Marijn van Vliet +# Stefan Appelhoff +# Clemens Brunner +# +# License: BSD-3-Clause + +from contextlib import nullcontext +from copy import deepcopy +from datetime import timedelta +import os +import os.path as op +import shutil +from collections import defaultdict + +import numpy as np + +from .constants import FIFF +from .utils import _construct_bids_filename, _check_orig_units +from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx, + channel_type) +from .meas_info import write_meas_info, _ensure_infos_match, ContainsMixin +from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin +from ..channels.channels import (UpdateChannelsMixin, SetChannelsMixin, + InterpolationMixin, _unit2human) +from .compensator import set_current_comp, make_compensator +from .write import (start_and_end_file, start_block, end_block, + write_dau_pack16, write_float, write_double, + write_complex64, write_complex128, write_int, + write_id, write_string, _get_split_size, _NEXT_FILE_BUFFER) + +from ..annotations import (_annotations_starts_stops, _write_annotations, + _handle_meas_date) +from ..filter import (FilterMixin, notch_filter, resample, _resamp_ratio_len, + _resample_stim_channels, _check_fun) +from ..parallel import parallel_func +from ..utils import (_check_fname, _check_pandas_installed, sizeof_fmt, + _check_pandas_index_arguments, fill_doc, copy_doc, + check_fname, _get_stim_channel, _stamp_to_dt, + logger, verbose, _time_mask, warn, SizeMixin, + copy_function_doc_to_method_doc, _validate_type, + _check_preload, _get_argvalues, _check_option, + _build_data_frame, _convert_times, _scale_dataframe_data, + _check_time_format, _arange_div, _VerboseDep) +from 
..defaults import _handle_default +from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo, _RAW_CLIP_DEF +from ..event import find_events, concatenate_events +from ..annotations import Annotations, _combine_annotations, _sync_onset + + +class TimeMixin(object): + """Class to add sfreq and time_as_index capabilities to certain classes.""" + + # Overridden method signature does not match call... + def time_as_index(self, times, use_rounding=False): # lgtm + """Convert time to indices. + + Parameters + ---------- + times : list-like | float | int + List of numbers or a number representing points in time. + use_rounding : bool + If True, use rounding (instead of truncation) when converting + times to indices. This can help avoid non-unique indices. + + Returns + ------- + index : ndarray + Indices corresponding to the times supplied. + """ + from ..source_estimate import _BaseSourceEstimate + if isinstance(self, _BaseSourceEstimate): + sfreq = 1. / self.tstep + else: + sfreq = self.info['sfreq'] + index = (np.atleast_1d(times) - self.times[0]) * sfreq + if use_rounding: + index = np.round(index) + return index.astype(int) + + def _handle_tmin_tmax(self, tmin, tmax): + """Convert seconds to index into data. + + Parameters + ---------- + tmin : int | float | None + Start time of data to get in seconds. + tmax : int | float | None + End time of data to get in seconds. + + Returns + ------- + start : int + Integer index into data corresponding to tmin. + stop : int + Integer index into data corresponding to tmax. + + """ + _validate_type(tmin, types=('numeric', None), item_name='tmin', + type_name="int, float, None") + _validate_type(tmax, types=('numeric', None), item_name='tmax', + type_name='int, float, None') + + # handle tmin/tmax as start and stop indices into data array + n_times = self.times.size + start = 0 if tmin is None else self.time_as_index(tmin)[0] + stop = n_times if tmax is None else self.time_as_index(tmax)[0] + + # truncate start/stop to the open interval [0, n_times] + start = min(max(0, start), n_times) + stop = min(max(0, stop), n_times) + + return start, stop + + +@fill_doc +class BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, + InterpolationMixin, TimeMixin, SizeMixin, FilterMixin, + _VerboseDep): + """Base class for Raw data. + + Parameters + ---------- + %(info_not_none)s + preload : bool | str | ndarray + Preload data into memory for data manipulation and faster indexing. + If True, the data will be preloaded into memory (fast, requires + large amount of memory). If preload is a string, preload is the + file name of a memory-mapped file which is used to store the data + on the hard drive (slower, requires less memory). If preload is an + ndarray, the data are taken from that array. If False, data are not + read until save. + first_samps : iterable + Iterable of the first sample number from each raw file. For unsplit raw + files this should be a length-one list or tuple. + last_samps : iterable | None + Iterable of the last sample number from each raw file. For unsplit raw + files this should be a length-one list or tuple. If None, then preload + must be an ndarray. + filenames : tuple + Tuple of length one (for unsplit raw files) or length > 1 (for split + raw files). + raw_extras : list of dict + The data necessary for on-demand reads for the given reader format. + Should be the same length as ``filenames``. Will have the entry + ``raw_extras['orig_nchan']`` added to it for convenience. 
+ orig_format : str + The data format of the original raw file (e.g., ``'double'``). + dtype : dtype | None + The dtype of the raw data. If preload is an ndarray, its dtype must + match what is passed here. + buffer_size_sec : float + The buffer size in seconds that should be written by default using + :meth:`mne.io.Raw.save`. + orig_units : dict | None + Dictionary mapping channel names to their units as specified in + the header file. Example: {'FC1': 'nV'}. + + .. versionadded:: 0.17 + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + This class is public to allow for stable type-checking in user + code (i.e., ``isinstance(my_raw_object, BaseRaw)``) but should not be used + as a constructor for `Raw` objects (use instead one of the subclass + constructors, or one of the ``mne.io.read_raw_*`` functions). + + Subclasses must provide the following methods: + + * _read_segment_file(self, data, idx, fi, start, stop, cals, mult) + (only needed for types that support on-demand disk reads) + """ + + @verbose + def __init__(self, info, preload=False, + first_samps=(0,), last_samps=None, + filenames=(None,), raw_extras=(None,), + orig_format='double', dtype=np.float64, + buffer_size_sec=1., orig_units=None, + *, verbose=None): # noqa: D102 + # wait until the end to preload data, but triage here + if isinstance(preload, np.ndarray): + # some functions (e.g., filtering) only work w/64-bit data + if preload.dtype not in (np.float64, np.complex128): + raise RuntimeError('datatype must be float64 or complex128, ' + 'not %s' % preload.dtype) + if preload.dtype != dtype: + raise ValueError('preload and dtype must match') + self._data = preload + self.preload = True + assert len(first_samps) == 1 + last_samps = [first_samps[0] + self._data.shape[1] - 1] + load_from_disk = False + else: + if last_samps is None: + raise ValueError('last_samps must be given unless preload is ' + 'an ndarray') + if not preload: + self.preload = False + load_from_disk = False + else: + load_from_disk = True + self._last_samps = np.array(last_samps) + self._first_samps = np.array(first_samps) + orig_ch_names = info['ch_names'] + with info._unlock(check_after=True): + # be permissive of old code + if isinstance(info['meas_date'], tuple): + info['meas_date'] = _stamp_to_dt(info['meas_date']) + self.info = info + self.buffer_size_sec = float(buffer_size_sec) + cals = np.empty(info['nchan']) + for k in range(info['nchan']): + cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] + bad = np.where(cals == 0)[0] + if len(bad) > 0: + raise ValueError('Bad cals for channels %s' + % {ii: self.ch_names[ii] for ii in bad}) + self._cals = cals + self._raw_extras = list(dict() if r is None else r for r in raw_extras) + for r in self._raw_extras: + r['orig_nchan'] = info['nchan'] + self._read_picks = [np.arange(info['nchan']) + for _ in range(len(raw_extras))] + # deal with compensation (only relevant for CTF data, either CTF + # reader or MNE-C converted CTF->FIF files) + self._read_comp_grade = self.compensation_grade # read property + if self._read_comp_grade is not None and len(info['comps']): + logger.info('Current compensation grade : %d' + % self._read_comp_grade) + self._comp = None + self._filenames = list(filenames) + self.orig_format = orig_format + # Sanity check and set original units, if provided by the reader: + + if orig_units: + if not isinstance(orig_units, dict): + raise ValueError('orig_units must be of type dict, but got ' + ' {}'.format(type(orig_units))) 
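# As the class docstring above notes, BaseRaw is public for stable
# isinstance() checks but should not be constructed directly; a subclass
# constructor or one of the mne.io.read_raw_* functions is the supported
# path. A minimal sketch under that guidance, using the public RawArray
# subclass with synthetic all-zero data (channel names and sampling rate
# are illustrative):
import numpy as np
import mne

info = mne.create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=1000.,
                       ch_types='eeg')
raw = mne.io.RawArray(np.zeros((2, 1000)), info)  # 2 channels, 1 s of data
assert isinstance(raw, mne.io.BaseRaw)  # the stable type check noted above
# The __init__ body of the patch continues below with orig_units re-keying.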
+ + # original units need to be truncated to 15 chars or renamed + # to match MNE conventions (channel name unique and less than + # 15 characters). + orig_units = deepcopy(orig_units) + for old_ch, new_ch in zip(orig_ch_names, info['ch_names']): + if old_ch in orig_units: + this_unit = orig_units[old_ch] + del orig_units[old_ch] + orig_units[new_ch] = this_unit + + # STI 014 channel is native only to fif ... for all other formats + # this was artificially added by the IO procedure, so remove it + ch_names = list(info['ch_names']) + if ('STI 014' in ch_names) and not \ + (self.filenames[0].endswith('.fif')): + ch_names.remove('STI 014') + + # Each channel in the data must have a corresponding channel in + # the original units. + ch_correspond = [ch in orig_units for ch in ch_names] + if not all(ch_correspond): + ch_without_orig_unit = ch_names[ch_correspond.index(False)] + raise ValueError('Channel {} has no associated original ' + 'unit.'.format(ch_without_orig_unit)) + + # Final check of orig_units, editing a unit if it is not a valid + # unit + orig_units = _check_orig_units(orig_units) + self._orig_units = orig_units + self._projectors = list() + self._projector = None + self._dtype_ = dtype + self.set_annotations(None) + self._cropped_samp = first_samps[0] + # If we have True or a string, actually do the preloading + if load_from_disk: + self._preload_data(preload) + self._init_kwargs = _get_argvalues() + + @verbose + def apply_gradient_compensation(self, grade, verbose=None): + """Apply CTF gradient compensation. + + .. warning:: The compensation matrices are stored with single + precision, so repeatedly switching between different + of compensation (e.g., 0->1->3->2) can increase + numerical noise, especially if data are saved to + disk in between changing grades. It is thus best to + only use a single gradient compensation level in + final analyses. + + Parameters + ---------- + grade : int + CTF gradient compensation level. + %(verbose)s + + Returns + ------- + raw : instance of Raw + The modified Raw instance. Works in-place. + """ + grade = int(grade) + current_comp = self.compensation_grade + if current_comp != grade: + if self.proj: + raise RuntimeError('Cannot change compensation on data where ' + 'projectors have been applied') + # Figure out what operator to use (varies depending on preload) + from_comp = current_comp if self.preload else self._read_comp_grade + comp = make_compensator(self.info, from_comp, grade) + logger.info('Compensator constructed to change %d -> %d' + % (current_comp, grade)) + set_current_comp(self.info, grade) + # We might need to apply it to our data now + if self.preload: + logger.info('Applying compensator to loaded data') + lims = np.concatenate([np.arange(0, len(self.times), 10000), + [len(self.times)]]) + for start, stop in zip(lims[:-1], lims[1:]): + self._data[:, start:stop] = np.dot( + comp, self._data[:, start:stop]) + else: + self._comp = comp # store it for later use + return self + + @property + def _dtype(self): + """Datatype for loading data (property so subclasses can override).""" + # most classes only store real data, they won't need anything special + return self._dtype_ + + @verbose + def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None, + projector=None, verbose=None): + """Read a chunk of raw data. + + Parameters + ---------- + start : int, (optional) + first sample to include (first is 0). If omitted, defaults to the + first sample in data. + stop : int, (optional) + First sample to not include. 
+ If omitted, data is included to the end. + sel : array, optional + Indices of channels to select. + data_buffer : array or str, optional + numpy array to fill with data read, must have the correct shape. + If str, a np.memmap with the correct data type will be used + to store the data. + projector : array + SSP operator to apply to the data. + %(verbose)s + + Returns + ------- + data : array, [channels x samples] + the data matrix (channels x samples). + """ + # Initial checks + start = int(start) + stop = self.n_times if stop is None else min([int(stop), self.n_times]) + + if start >= stop: + raise ValueError('No data in this range') + + # Initialize the data and calibration vector + if sel is None: + n_out = self.info['nchan'] + idx = slice(None) + else: + n_out = len(sel) + idx = _convert_slice(sel) + del sel + assert n_out <= self.info['nchan'] + data_shape = (n_out, stop - start) + dtype = self._dtype + if isinstance(data_buffer, np.ndarray): + if data_buffer.shape != data_shape: + raise ValueError('data_buffer has incorrect shape: %s != %s' + % (data_buffer.shape, data_shape)) + data = data_buffer + else: + data = _allocate_data(data_buffer, data_shape, dtype) + + # deal with having multiple files accessed by the raw object + cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, + dtype='int'))) + cumul_lens = np.cumsum(cumul_lens) + files_used = np.logical_and(np.less(start, cumul_lens[1:]), + np.greater_equal(stop - 1, + cumul_lens[:-1])) + + # set up cals and mult (cals, compensation, and projector) + n_out = len(np.arange(len(self.ch_names))[idx]) + cals = self._cals.ravel()[np.newaxis, :] + if projector is not None: + assert projector.shape[0] == projector.shape[1] == cals.shape[1] + if self._comp is not None: + if projector is not None: + mult = self._comp * cals + mult = np.dot(projector[idx], mult) + else: + mult = self._comp[idx] * cals + elif projector is not None: + mult = projector[idx] * cals + else: + mult = None + del projector + + if mult is None: + cals = cals.T[idx] + assert cals.shape == (n_out, 1) + need_idx = idx # sufficient just to read the given channels + else: + cals = None # shouldn't be used + assert mult.shape == (n_out, len(self.ch_names)) + # read all necessary for proj + need_idx = np.where(np.any(mult, axis=0))[0] + mult = mult[:, need_idx] + logger.debug( + f'Reading {len(need_idx)}/{len(self.ch_names)} channels ' + f'due to projection') + assert (mult is None) ^ (cals is None) # xor + + # read from necessary files + offset = 0 + for fi in np.nonzero(files_used)[0]: + start_file = self._first_samps[fi] + # first iteration (only) could start in the middle somewhere + if offset == 0: + start_file += start - cumul_lens[fi] + stop_file = np.min([stop - cumul_lens[fi] + self._first_samps[fi], + self._last_samps[fi] + 1]) + if start_file < self._first_samps[fi] or stop_file < start_file: + raise ValueError('Bad array indexing, could be a bug') + n_read = stop_file - start_file + this_sl = slice(offset, offset + n_read) + # reindex back to original file + orig_idx = _convert_slice(self._read_picks[fi][need_idx]) + _ReadSegmentFileProtector(self)._read_segment_file( + data[:, this_sl], orig_idx, fi, + int(start_file), int(stop_file), cals, mult) + offset += n_read + return data + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + Only needs to be implemented for readers that support + ``preload=False``. 
Any implementation should only make use of: + + - self._raw_extras[fi] + - self._filenames[fi] + + So be sure to store any information necessary for reading raw data + in self._raw_extras[fi]. Things like ``info`` can be decoupled + from the original data (e.g., different subsets of channels) due + to picking before preload, for example. + + Parameters + ---------- + data : ndarray, shape (n_out, stop - start + 1) + The data array. Should be modified inplace. + idx : ndarray | slice + The requested channel indices. + fi : int + The file index that must be read from. + start : int + The start sample in the given file. + stop : int + The stop sample in the given file (inclusive). + cals : ndarray, shape (len(idx), 1) + Channel calibrations (already sub-indexed). + mult : ndarray, shape (n_out, len(idx) | None + The compensation + projection + cals matrix, if applicable. + """ + raise NotImplementedError + + def _check_bad_segment(self, start, stop, picks, + reject_start, reject_stop, + reject_by_annotation=False): + """Check if data segment is bad. + + If the slice is good, returns the data in desired range. + If rejected based on annotation, returns description of the + bad segment as a string. + + Parameters + ---------- + start : int + First sample of the slice. + stop : int + End of the slice. + picks : array of int + Channel picks. + reject_start : int + First sample to check for overlaps with bad annotations. + reject_stop : int + Last sample to check for overlaps with bad annotations. + reject_by_annotation : bool + Whether to perform rejection based on annotations. + False by default. + + Returns + ------- + data : array | str + Data in the desired range (good segment) or description of the bad + segment. + """ + if start < 0: + return None + if reject_by_annotation and len(self.annotations) > 0: + annot = self.annotations + sfreq = self.info['sfreq'] + onset = _sync_onset(self, annot.onset) + overlaps = np.where(onset < reject_stop / sfreq) + overlaps = np.where(onset[overlaps] + annot.duration[overlaps] > + reject_start / sfreq) + for descr in annot.description[overlaps]: + if descr.lower().startswith('bad'): + return descr + return self._getitem((picks, slice(start, stop)), return_times=False) + + @verbose + def load_data(self, verbose=None): + """Load raw data. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw object with data. + + Notes + ----- + This function will load raw data if it was not already preloaded. + If data were already preloaded, it will do nothing. + + .. versionadded:: 0.10.0 + """ + if not self.preload: + self._preload_data(True) + return self + + def _preload_data(self, preload): + """Actually preload the data.""" + data_buffer = preload + if isinstance(preload, (bool, np.bool_)) and not preload: + data_buffer = None + logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' % + (0, len(self.times) - 1, 0., self.times[-1])) + self._data = self._read_segment( + data_buffer=data_buffer, projector=self._projector) + assert len(self._data) == self.info['nchan'] + self.preload = True + self._comp = None # no longer needed + self.close() + + @property + def _first_time(self): + return self.first_samp / float(self.info['sfreq']) + + @property + def first_samp(self): + """The first data sample. + + See :term:`first_samp`. 
+ """ + return self._cropped_samp + + @property + def first_time(self): + """The first time point (including first_samp but not meas_date).""" + return self._first_time + + @property + def last_samp(self): + """The last data sample.""" + return self.first_samp + sum(self._raw_lengths) - 1 + + @property + def _last_time(self): + return self.last_samp / float(self.info['sfreq']) + + # "Overridden method signature does not match call..." in LGTM + def time_as_index(self, times, use_rounding=False, origin=None): # lgtm + """Convert time to indices. + + Parameters + ---------- + times : list-like | float | int + List of numbers or a number representing points in time. + use_rounding : bool + If True, use rounding (instead of truncation) when converting + times to indices. This can help avoid non-unique indices. + origin : datetime | float | int | None + Time reference for times. If None, ``times`` are assumed to be + relative to :term:`first_samp`. + + .. versionadded:: 0.17.0 + + Returns + ------- + index : ndarray + Indices relative to :term:`first_samp` corresponding to the times + supplied. + """ + origin = _handle_meas_date(origin) + if origin is None: + delta = 0 + elif self.info['meas_date'] is None: + raise ValueError('origin must be None when info["meas_date"] ' + 'is None, got %s' % (origin,)) + else: + first_samp_in_abs_time = (self.info['meas_date'] + + timedelta(0, self._first_time)) + delta = (origin - first_samp_in_abs_time).total_seconds() + times = np.atleast_1d(times) + delta + + return super(BaseRaw, self).time_as_index(times, use_rounding) + + @property + def _raw_lengths(self): + return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)] + + @property + def annotations(self): # noqa: D401 + """:class:`~mne.Annotations` for marking segments of data.""" + return self._annotations + + @property + def filenames(self): + """The filenames used.""" + return tuple(self._filenames) + + @verbose + def set_annotations(self, annotations, emit_warning=True, + on_missing='raise', *, verbose=None): + """Setter for annotations. + + This setter checks if they are inside the data range. + + Parameters + ---------- + annotations : instance of mne.Annotations | None + Annotations to set. If None, the annotations is defined + but empty. + emit_warning : bool + Whether to emit warnings when cropping or omitting annotations. + %(on_missing_ch_names)s + %(verbose)s + + Returns + ------- + self : instance of Raw + The raw object with annotations. + """ + meas_date = _handle_meas_date(self.info['meas_date']) + if annotations is None: + self._annotations = Annotations([], [], [], meas_date) + else: + _validate_type(annotations, Annotations, 'annotations') + + if meas_date is None and annotations.orig_time is not None: + raise RuntimeError('Ambiguous operation. Setting an Annotation' + ' object with known ``orig_time`` to a raw' + ' object which has ``meas_date`` set to' + ' None is ambiguous. Please, either set a' + ' meaningful ``meas_date`` to the raw' + ' object; or set ``orig_time`` to None in' + ' which case the annotation onsets would be' + ' taken in reference to the first sample of' + ' the raw object.') + + delta = 1. 
/ self.info['sfreq'] + new_annotations = annotations.copy() + new_annotations._prune_ch_names(self.info, on_missing) + if annotations.orig_time is None: + new_annotations.crop(0, self.times[-1] + delta, + emit_warning=emit_warning) + new_annotations.onset += self._first_time + else: + tmin = meas_date + timedelta(0, self._first_time) + tmax = tmin + timedelta(seconds=self.times[-1] + delta) + new_annotations.crop(tmin=tmin, tmax=tmax, + emit_warning=emit_warning) + new_annotations.onset -= ( + meas_date - new_annotations.orig_time).total_seconds() + new_annotations._orig_time = meas_date + + self._annotations = new_annotations + + return self + + def __del__(self): # noqa: D105 + # remove file for memmap + if hasattr(self, '_data') and \ + getattr(self._data, 'filename', None) is not None: + # First, close the file out; happens automatically on del + filename = self._data.filename + del self._data + # Now file can be removed + try: + os.remove(filename) + except OSError: + pass # ignore file that no longer exists + + def __enter__(self): + """Entering with block.""" + return self + + def __exit__(self, exception_type, exception_val, trace): + """Exit with block.""" + try: + self.close() + except Exception: + return exception_type, exception_val, trace + + def _parse_get_set_params(self, item): + """Parse the __getitem__ / __setitem__ tuples.""" + # make sure item is a tuple + if not isinstance(item, tuple): # only channel selection passed + item = (item, slice(None, None, None)) + + if len(item) != 2: # should be channels and time instants + raise RuntimeError("Unable to access raw data (need both channels " + "and time)") + + sel = _picks_to_idx(self.info, item[0]) + + if isinstance(item[1], slice): + time_slice = item[1] + start, stop, step = (time_slice.start, time_slice.stop, + time_slice.step) + else: + item1 = item[1] + # Let's do automated type conversion to integer here + if np.array(item[1]).dtype.kind == 'i': + item1 = int(item1) + if isinstance(item1, (int, np.integer)): + start, stop, step = item1, item1 + 1, 1 + else: + raise ValueError('Must pass int or slice to __getitem__') + + if start is None: + start = 0 + if step is not None and step != 1: + raise ValueError('step needs to be 1 : %d given' % step) + + if isinstance(sel, (int, np.integer)): + sel = np.array([sel]) + + if sel is not None and len(sel) == 0: + raise ValueError("Empty channel list") + + return sel, start, stop + + def __getitem__(self, item): + """Get raw data and times. + + Parameters + ---------- + item : tuple or array-like + See below for use cases. + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + The raw data. + times : ndarray, shape (n_times,) + The times associated with the data. 
+ + Examples + -------- + Generally raw data is accessed as:: + + >>> data, times = raw[picks, time_slice] # doctest: +SKIP + + To get all data, you can thus do either of:: + + >>> data, times = raw[:] # doctest: +SKIP + + Which will be equivalent to: + + >>> data, times = raw[:, :] # doctest: +SKIP + + To get only the good MEG data from 10-20 seconds, you could do:: + + >>> picks = mne.pick_types(raw.info, meg=True, exclude='bads') # doctest: +SKIP + >>> t_idx = raw.time_as_index([10., 20.]) # doctest: +SKIP + >>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP + + """ # noqa: E501 + return self._getitem(item) + + def _getitem(self, item, return_times=True): + sel, start, stop = self._parse_get_set_params(item) + if self.preload: + data = self._data[sel, start:stop] + else: + data = self._read_segment(start=start, stop=stop, sel=sel, + projector=self._projector) + + if return_times: + # Rather than compute the entire thing just compute the subset + # times = self.times[start:stop] + # stop can be None here so don't use it directly + times = np.arange(start, start + data.shape[1], dtype=float) + times /= self.info['sfreq'] + return data, times + else: + return data + + def __setitem__(self, item, value): + """Set raw data content.""" + _check_preload(self, 'Modifying data of Raw') + sel, start, stop = self._parse_get_set_params(item) + # set the data + self._data[sel, start:stop] = value + + @verbose + def get_data(self, picks=None, start=0, stop=None, + reject_by_annotation=None, return_times=False, units=None, + *, tmin=None, tmax=None, verbose=None): + """Get data in the given range. + + Parameters + ---------- + %(picks_all)s + start : int + The first sample to include. Defaults to 0. + stop : int | None + End sample (first not to include). If None (default), the end of + the data is used. + reject_by_annotation : None | 'omit' | 'NaN' + Whether to reject by annotation. If None (default), no rejection is + done. If 'omit', segments annotated with description starting with + 'bad' are omitted. If 'NaN', the bad samples are filled with NaNs. + return_times : bool + Whether to return times as well. Defaults to False. + %(units)s + tmin : int | float | None + Start time of data to get in seconds. The ``tmin`` parameter is + ignored if the ``start`` parameter is bigger than 0. + + .. versionadded:: 0.24.0 + tmax : int | float | None + End time of data to get in seconds. The ``tmax`` parameter is + ignored if the ``stop`` parameter is defined. + + .. versionadded:: 0.24.0 + %(verbose)s + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + Copy of the data in the given range. + times : ndarray, shape (n_times,) + Times associated with the data samples. Only returned if + return_times=True. + + Notes + ----- + .. 
versionadded:: 0.14.0 + """ + # validate types + _validate_type(start, types=('int-like'), item_name='start', + type_name='int') + _validate_type(stop, types=('int-like', None), item_name='stop', + type_name='int, None') + + picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + + # Get channel factors for conversion into specified unit + # (vector of ones if no conversion needed) + if units is not None: + ch_factors = _get_ch_factors(self, units, picks) + + # convert to ints + picks = np.atleast_1d(np.arange(self.info['nchan'])[picks]) + + # handle start/tmin stop/tmax + tmin_start, tmax_stop = self._handle_tmin_tmax(tmin, tmax) + + # tmin/tmax are ignored if start/stop are defined to + # something other than their defaults + start = tmin_start if start == 0 else start + stop = tmax_stop if stop is None else stop + + # truncate start/stop to the open interval [0, n_times] + start = min(max(0, start), self.n_times) + stop = min(max(0, stop), self.n_times) + + if len(self.annotations) == 0 or reject_by_annotation is None: + getitem = self._getitem( + (picks, slice(start, stop)), return_times=return_times) + if return_times: + data, times = getitem + if units is not None: + data *= ch_factors[:, np.newaxis] + return data, times + if units is not None: + getitem *= ch_factors[:, np.newaxis] + return getitem + _check_option('reject_by_annotation', reject_by_annotation.lower(), + ['omit', 'nan']) + onsets, ends = _annotations_starts_stops(self, ['BAD']) + keep = (onsets < stop) & (ends > start) + onsets = np.maximum(onsets[keep], start) + ends = np.minimum(ends[keep], stop) + if len(onsets) == 0: + data, times = self[picks, start:stop] + if units is not None: + data *= ch_factors[:, np.newaxis] + if return_times: + return data, times + return data + n_samples = stop - start # total number of samples + used = np.ones(n_samples, bool) + for onset, end in zip(onsets, ends): + if onset >= end: + continue + used[onset - start: end - start] = False + used = np.concatenate([[False], used, [False]]) + starts = np.where(~used[:-1] & used[1:])[0] + start + stops = np.where(used[:-1] & ~used[1:])[0] + start + n_kept = (stops - starts).sum() # kept samples + n_rejected = n_samples - n_kept # rejected samples + if n_rejected > 0: + if reject_by_annotation == 'omit': + msg = ("Omitting {} of {} ({:.2%}) samples, retaining {}" + " ({:.2%}) samples.") + logger.info(msg.format(n_rejected, n_samples, + n_rejected / n_samples, + n_kept, n_kept / n_samples)) + data = np.zeros((len(picks), n_kept)) + times = np.zeros(data.shape[1]) + idx = 0 + for start, stop in zip(starts, stops): # get the data + if start == stop: + continue + end = idx + stop - start + data[:, idx:end], times[idx:end] = self[picks, start:stop] + idx = end + else: + msg = ("Setting {} of {} ({:.2%}) samples to NaN, retaining {}" + " ({:.2%}) samples.") + logger.info(msg.format(n_rejected, n_samples, + n_rejected / n_samples, + n_kept, n_kept / n_samples)) + data, times = self[picks, start:stop] + data[:, ~used[1:-1]] = np.nan + else: + data, times = self[picks, start:stop] + + if units is not None: + data *= ch_factors[:, np.newaxis] + if return_times: + return data, times + return data + + @verbose + def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, + channel_wise=True, verbose=None, **kwargs): + """Apply a function to a subset of channels. + + %(applyfun_summary_raw)s + + Parameters + ---------- + %(fun_applyfun)s + %(picks_all_data_noref)s + %(dtype_applyfun)s + %(n_jobs)s + %(channel_wise_applyfun)s + + .. 
versionadded:: 0.18 + %(verbose)s + %(kwargs_fun)s + + Returns + ------- + self : instance of Raw + The raw object with transformed data. + """ + _check_preload(self, 'raw.apply_function') + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError('fun needs to be a function') + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + if channel_wise: + if n_jobs == 1: + # modify data inplace to save memory + for idx in picks: + self._data[idx, :] = _check_fun(fun, data_in[idx, :], + **kwargs) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel( + p_fun(fun, data_in[p], **kwargs) for p in picks) + for pp, p in enumerate(picks): + self._data[p, :] = data_picks_new[pp] + else: + self._data[picks, :] = _check_fun( + fun, data_in[picks, :], **kwargs) + + return self + + # Need a separate method because the default pad is different for raw + @copy_doc(FilterMixin.filter) + def filter(self, l_freq, h_freq, picks=None, filter_length='auto', + l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1, + method='fir', iir_params=None, phase='zero', + fir_window='hamming', fir_design='firwin', + skip_by_annotation=('edge', 'bad_acq_skip'), + pad='reflect_limited', verbose=None): # noqa: D102 + return super().filter( + l_freq, h_freq, picks, filter_length, l_trans_bandwidth, + h_trans_bandwidth, n_jobs, method, iir_params, phase, + fir_window, fir_design, skip_by_annotation, pad, verbose) + + @verbose + def notch_filter(self, freqs, picks=None, filter_length='auto', + notch_widths=None, trans_bandwidth=1.0, n_jobs=1, + method='fir', iir_params=None, mt_bandwidth=None, + p_value=0.05, phase='zero', fir_window='hamming', + fir_design='firwin', pad='reflect_limited', verbose=None): + """Notch filter a subset of channels. + + Parameters + ---------- + freqs : float | array of float | None + Specific frequencies to filter out from data, e.g., + ``np.arange(60, 241, 60)`` in the US or ``np.arange(50, 251, 50)`` + in Europe. ``None`` can only be used with the mode + ``'spectrum_fit'``, where an F test is used to find sinusoidal + components. + %(picks_all_data)s + %(filter_length_notch)s + notch_widths : float | array of float | None + Width of each stop band (centred at each freq in freqs) in Hz. + If None, ``freqs / 200`` is used. + trans_bandwidth : float + Width of the transition band in Hz. + Only used for ``method='fir'``. + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + mt_bandwidth : float | None + The bandwidth of the multitaper windowing function in Hz. + Only used in 'spectrum_fit' mode. + p_value : float + P-value to use in F-test thresholding to determine significant + sinusoidal components to remove when ``method='spectrum_fit'`` and + ``freqs=None``. Note that this will be Bonferroni corrected for the + number of frequencies, so large p-values may be justified. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw instance with filtered data. + + See Also + -------- + mne.filter.notch_filter + mne.io.Raw.filter + + Notes + ----- + Applies a zero-phase notch filter to the channels selected by + "picks". By default the data of the Raw object is modified inplace. + + The Raw object has to have the data loaded e.g. 
with ``preload=True`` + or ``self.load_data()``. + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporaily stored in memory. + + For details, see :func:`mne.filter.notch_filter`. + """ + fs = float(self.info['sfreq']) + picks = _picks_to_idx(self.info, picks, exclude=(), none='data_or_ica') + _check_preload(self, 'raw.notch_filter') + self._data = notch_filter( + self._data, fs, freqs, filter_length=filter_length, + notch_widths=notch_widths, trans_bandwidth=trans_bandwidth, + method=method, iir_params=iir_params, mt_bandwidth=mt_bandwidth, + p_value=p_value, picks=picks, n_jobs=n_jobs, copy=False, + phase=phase, fir_window=fir_window, fir_design=fir_design, + pad=pad) + return self + + @verbose + def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, + n_jobs=1, events=None, pad='reflect_limited', + verbose=None): # lgtm + """Resample all channels. + + If appropriate, an anti-aliasing filter is applied before resampling. + See :ref:`resampling-and-decimating` for more information. + + .. warning:: The intended purpose of this function is primarily to + speed up computations (e.g., projection calculation) when + precise timing of events is not required, as downsampling + raw data effectively jitters trigger timings. It is + generally recommended not to epoch downsampled data, + but instead epoch and then downsample, as epoching + downsampled data jitters triggers. + For more, see + `this illustrative gist + `_. + + If resampling the continuous data is desired, it is + recommended to construct events using the original data. + The event onsets can be jointly resampled with the raw + data using the 'events' parameter (a resampled copy is + returned). + + Parameters + ---------- + sfreq : float + New sample rate to use. + %(npad)s + %(window_resample)s + stim_picks : list of int | None + Stim channels. These channels are simply subsampled or + supersampled (without applying any filtering). This reduces + resampling artifacts in stim channels, but may lead to missing + triggers. If None, stim channels are automatically chosen using + :func:`mne.pick_types`. + %(n_jobs_cuda)s + events : 2D array, shape (n_events, 3) | None + An optional event matrix. When specified, the onsets of the events + are resampled jointly with the data. NB: The input events are not + modified, but a new array is returned with the raw instead. + %(pad)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + %(verbose)s + + Returns + ------- + raw : instance of Raw + The resampled version of the raw object. + events : array, shape (n_events, 3) | None + If events are jointly resampled, these are returned with the raw. + + See Also + -------- + mne.io.Raw.filter + mne.Epochs.resample + + Notes + ----- + For some data, it may be more accurate to use ``npad=0`` to reduce + artifacts. This is dataset dependent -- check your data! + + For optimum performance and to make use of ``n_jobs > 1``, the raw + object has to have the data loaded e.g. with ``preload=True`` or + ``self.load_data()``, but this increases memory requirements. The + resulting raw object will have the data loaded into memory. + """ + # When no event object is supplied, some basic detection of dropped + # events is performed to generate a warning. Finding events can fail + # for a variety of reasons, e.g. if no stim channel is present or it is + # corrupted. This should not stop the resampling from working. 
The + # warning should simply not be generated in this case. + if events is None: + try: + original_events = find_events(self) + except Exception: + pass + + sfreq = float(sfreq) + o_sfreq = float(self.info['sfreq']) + + offsets = np.concatenate(([0], np.cumsum(self._raw_lengths))) + + # set up stim channel processing + if stim_picks is None: + stim_picks = pick_types(self.info, meg=False, ref_meg=False, + stim=True, exclude=[]) + else: + stim_picks = _picks_to_idx(self.info, stim_picks, exclude=(), + with_ref_meg=False) + + kwargs = dict(up=sfreq, down=o_sfreq, npad=npad, window=window, + n_jobs=n_jobs, pad=pad) + ratio, n_news = zip(*(_resamp_ratio_len(sfreq, o_sfreq, old_len) + for old_len in self._raw_lengths)) + ratio, n_news = ratio[0], np.array(n_news, int) + new_offsets = np.cumsum([0] + list(n_news)) + if self.preload: + new_data = np.empty( + (len(self.ch_names), new_offsets[-1]), self._data.dtype) + for ri, (n_orig, n_new) in enumerate(zip(self._raw_lengths, n_news)): + this_sl = slice(new_offsets[ri], new_offsets[ri + 1]) + if self.preload: + data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]] + new_data[:, this_sl] = resample(data_chunk, **kwargs) + # In empirical testing, it was faster to resample all channels + # (above) and then replace the stim channels than it was to + # only resample the proper subset of channels and then use + # np.insert() to restore the stims. + if len(stim_picks) > 0: + new_data[stim_picks, this_sl] = _resample_stim_channels( + data_chunk[stim_picks], n_new, data_chunk.shape[1]) + else: # this will not be I/O efficient, but will be mem efficient + for ci in range(len(self.ch_names)): + data_chunk = self.get_data( + ci, offsets[ri], offsets[ri + 1], verbose='error')[0] + if ci == 0 and ri == 0: + new_data = np.empty( + (len(self.ch_names), new_offsets[-1]), + data_chunk.dtype) + if ci in stim_picks: + resamp = _resample_stim_channels( + data_chunk, n_new, data_chunk.shape[-1])[0] + else: + resamp = resample(data_chunk, **kwargs) + new_data[ci, this_sl] = resamp + + self._cropped_samp = int(np.round(self._cropped_samp * ratio)) + self._first_samps = np.round(self._first_samps * ratio).astype(int) + self._last_samps = (np.array(self._first_samps) + n_news - 1) + self._raw_lengths[ri] = list(n_news) + assert np.array_equal(n_news, self._last_samps - self._first_samps + 1) + self._data = new_data + self.preload = True + lowpass = self.info.get('lowpass') + lowpass = np.inf if lowpass is None else lowpass + with self.info._unlock(): + self.info['lowpass'] = min(lowpass, sfreq / 2.) + self.info['sfreq'] = sfreq + + # See the comment above why we ignore all errors here. + if events is None: + try: + # Did we lose events? + resampled_events = find_events(self) + if len(resampled_events) != len(original_events): + warn('Resampling of the stim channels caused event ' + 'information to become unreliable. Consider finding ' + 'events on the original data and passing the event ' + 'matrix as a parameter.') + except Exception: + pass + + return self + else: + # always make a copy of events + events = events.copy() + + events[:, 0] = np.minimum( + np.round(events[:, 0] * ratio).astype(int), + self._data.shape[1] + self.first_samp - 1 + ) + return self, events + + @verbose + def crop(self, tmin=0.0, tmax=None, include_tmax=True, *, verbose=None): + """Crop raw data file. + + Limit the data from the raw file to go between specific times.
Note + that the new ``tmin`` is assumed to be ``t=0`` for all subsequently + called functions (e.g., :meth:`~mne.io.Raw.time_as_index`, or + :class:`~mne.Epochs`). New :term:`first_samp` and :term:`last_samp` + are set accordingly. + + This function operates in-place on the instance. + Use :meth:`mne.io.Raw.copy` if operation on a copy is desired. + + Parameters + ---------- + %(tmin_raw)s + %(tmax_raw)s + %(include_tmax)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + The cropped raw object, modified in-place. + """ + max_time = (self.n_times - 1) / self.info['sfreq'] + if tmax is None: + tmax = max_time + + if tmin > tmax: + raise ValueError('tmin (%s) must be less than tmax (%s)' + % (tmin, tmax)) + if tmin < 0.0: + raise ValueError('tmin (%s) must be >= 0' % (tmin,)) + elif tmax > max_time: + raise ValueError('tmax (%s) must be less than or equal to the max ' + 'time (%0.4f sec)' % (tmax, max_time)) + + smin, smax = np.where(_time_mask( + self.times, tmin, tmax, sfreq=self.info['sfreq'], + include_tmax=include_tmax))[0][[0, -1]] + cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, + dtype='int'))) + cumul_lens = np.cumsum(cumul_lens) + keepers = np.logical_and(np.less(smin, cumul_lens[1:]), + np.greater_equal(smax, cumul_lens[:-1])) + keepers = np.where(keepers)[0] + # if we drop file(s) from the beginning, we need to keep track of + # how many samples we dropped relative to that one + self._cropped_samp += smin + self._first_samps = np.atleast_1d(self._first_samps[keepers]) + # Adjust first_samp of first used file! + self._first_samps[0] += smin - cumul_lens[keepers[0]] + self._last_samps = np.atleast_1d(self._last_samps[keepers]) + self._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax + self._read_picks = [self._read_picks[ri] for ri in keepers] + assert all(len(r) == len(self._read_picks[0]) + for r in self._read_picks) + self._raw_extras = [self._raw_extras[ri] for ri in keepers] + self._filenames = [self._filenames[ri] for ri in keepers] + if self.preload: + # slice and copy to avoid the reference to large array + self._data = self._data[:, smin:smax + 1].copy() + + annotations = self.annotations + # now call setter to filter out annotations outside of interval + if annotations.orig_time is None: + assert self.info['meas_date'] is None + # When self.info['meas_date'] is None (which is guaranteed if + # self.annotations.orig_time is None), when we do the + # self.set_annotations, it's assumed that the annotations onset + # are relative to first_time, so we have to subtract it, then + # set_annotations will put it back. + annotations.onset -= self.first_time + self.set_annotations(annotations, False) + + return self + + @verbose + def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, + drop_small_buffer=False, proj=False, fmt='single', + overwrite=False, split_size='2GB', split_naming='neuromag', + verbose=None): + """Save raw data to file. + + Parameters + ---------- + fname : str + File name of the new dataset. This has to be a new filename + unless data have been preloaded. Filenames should end with + ``raw.fif`` (common raw data), ``raw_sss.fif`` + (Maxwell-filtered continuous data), + ``raw_tsss.fif`` (temporally signal-space-separated data), + ``_meg.fif`` (common MEG data), ``_eeg.fif`` (common EEG data), + or ``_ieeg.fif`` (common intracranial EEG data). You may also + append an additional ``.gz`` suffix to enable gzip compression.
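As a quick sketch of these naming rules (the file name here is hypothetical; any recognized ending works, optionally with ``.gz``)::

    import mne

    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
    # ends in 'raw.fif' plus '.gz' for compression; overwrite is explicit
    raw.save('sample_filtered_raw.fif.gz', overwrite=True)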
+ %(picks_all)s + %(tmin_raw)s + %(tmax_raw)s + buffer_size_sec : float | None + Size of data chunks in seconds. If None (default), the buffer + size of the original file is used. + drop_small_buffer : bool + Whether to drop the last buffer. It is required by maxfilter (SSS) + that only accepts raw files with buffers of the same size. + proj : bool + If True the data is saved with the projections applied (active). + + .. note:: If ``apply_proj()`` was used to apply the projections, + the projections will be active even if ``proj`` is False. + fmt : 'single' | 'double' | 'int' | 'short' + Format to use to save raw data. Valid options are 'double', + 'single', 'int', and 'short' for 64- or 32-bit float, or 32- or + 16-bit integers, respectively. It is **strongly** recommended to + use 'single', as this is backward-compatible, and is standard for + maintaining precision. Note that using 'short' or 'int' may result + in loss of precision, complex data cannot be saved as 'short', + and neither complex data types nor real data stored as 'double' + can be loaded with the MNE command-line tools. See raw.orig_format + to determine the format the original data were stored in. + %(overwrite)s + To overwrite original file (the same one that was loaded), + data must be preloaded upon reading. + split_size : str | int + Large raw files are automatically split into multiple pieces. This + parameter specifies the maximum size of each piece. If the + parameter is an integer, it specifies the size in Bytes. It is + also possible to pass a human-readable string, e.g., 100MB. + + .. note:: Due to FIFF file limitations, the maximum split + size is 2GB. + %(split_naming)s + + .. versionadded:: 0.17 + %(verbose)s + + Notes + ----- + If Raw is a concatenation of several raw files, **be warned** that + only the measurement information from the first raw file is stored. + This likely means that certain operations with external tools may not + work properly on a saved concatenated file (e.g., probably some + or all forms of SSS). It is recommended not to concatenate and + then save raw files for this reason. + + Samples annotated ``BAD_ACQ_SKIP`` are not stored in order to optimize + memory. Whatever their values, they will be loaded as 0s when reading + the file. + """ + endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', + '_meg.fif', '_eeg.fif', '_ieeg.fif') + endings += tuple([f'{e}.gz' for e in endings]) + endings_err = ('.fif', '.fif.gz') + + # convert to str, check for overwrite a few lines later + fname = _check_fname(fname, overwrite=True, verbose="error") + check_fname(fname, 'raw', endings, endings_err=endings_err) + + split_size = _get_split_size(split_size) + if not self.preload and fname in self._filenames: + raise ValueError('You cannot save data to the same file.' + ' Please use a different filename.') + + if self.preload: + if np.iscomplexobj(self._data): + warn('Saving raw file with complex data.
Loading with ' + 'command-line MNE tools will not work.') + + type_dict = dict(short=FIFF.FIFFT_DAU_PACK16, + int=FIFF.FIFFT_INT, + single=FIFF.FIFFT_FLOAT, + double=FIFF.FIFFT_DOUBLE) + _check_option('fmt', fmt, type_dict.keys()) + reset_dict = dict(short=False, int=False, single=True, double=True) + reset_range = reset_dict[fmt] + data_type = type_dict[fmt] + + data_test = self[0, 0][0] + if fmt == 'short' and np.iscomplexobj(data_test): + raise ValueError('Complex data must be saved as "single" or ' + '"double", not "short"') + + # check for file existence and expand `~` if present + fname = _check_fname(fname=fname, overwrite=overwrite, + verbose="error") + + if proj: + info = deepcopy(self.info) + projector, info = setup_proj(info) + activate_proj(info['projs'], copy=False) + else: + info = self.info + projector = None + + # + # Set up the reading parameters + # + + # Convert to samples + start, stop = self._tmin_tmax_to_start_stop(tmin, tmax) + buffer_size = self._get_buffer_size(buffer_size_sec) + + # write the raw file + _validate_type(split_naming, str, 'split_naming') + _check_option('split_naming', split_naming, ('neuromag', 'bids')) + _write_raw(fname, self, info, picks, fmt, data_type, reset_range, + start, stop, buffer_size, projector, drop_small_buffer, + split_size, split_naming, 0, None, overwrite) + + @verbose + def export(self, fname, fmt='auto', physical_range='auto', + add_ch_type=False, *, overwrite=False, verbose=None): + """Export Raw to external formats. + + Supported formats: EEGLAB (set, uses :mod:`eeglabio`) + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + %(fmt_export_params)s + %(physical_range_export_params)s + %(add_ch_type_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. 
versionadded:: 0.24 + + %(export_warning_note_raw)s + %(export_eeglab_note)s + %(export_edf_note)s + """ + from ..export import export_raw + export_raw(fname, self, fmt, physical_range=physical_range, + add_ch_type=add_ch_type, overwrite=overwrite, + verbose=verbose) + + def _tmin_tmax_to_start_stop(self, tmin, tmax): + start = int(np.floor(tmin * self.info['sfreq'])) + + # "stop" is the first sample *not* to save, so we need +1's here + if tmax is None: + stop = np.inf + else: + stop = self.time_as_index(float(tmax), use_rounding=True)[0] + 1 + stop = min(stop, self.last_samp - self.first_samp + 1) + if stop <= start or stop <= 0: + raise ValueError('tmin (%s) and tmax (%s) yielded no samples' + % (tmin, tmax)) + return start, stop + + @copy_function_doc_to_method_doc(plot_raw) + def plot(self, events=None, duration=10.0, start=0.0, n_channels=20, + bgcolor='w', color=None, bad_color='lightgray', + event_color='cyan', scalings=None, remove_dc=True, order=None, + show_options=False, title=None, show=True, block=False, + highpass=None, lowpass=None, filtorder=4, clipping=_RAW_CLIP_DEF, + show_first_samp=False, proj=True, group_by='type', + butterfly=False, decim='auto', noise_cov=None, event_id=None, + show_scrollbars=True, show_scalebars=True, time_format='float', + precompute=None, use_opengl=None, *, theme=None, verbose=None): + return plot_raw(self, events, duration, start, n_channels, bgcolor, + color, bad_color, event_color, scalings, remove_dc, + order, show_options, title, show, block, highpass, + lowpass, filtorder, clipping, show_first_samp, + proj, group_by, butterfly, decim, noise_cov=noise_cov, + event_id=event_id, show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, time_format=time_format, + precompute=precompute, use_opengl=use_opengl, + theme=theme, verbose=verbose) + + @verbose + @copy_function_doc_to_method_doc(plot_raw_psd) + def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, + n_fft=None, n_overlap=0, reject_by_annotation=True, + picks=None, ax=None, color='black', xscale='linear', + area_mode='std', area_alpha=0.33, dB=True, estimate='auto', + show=True, n_jobs=1, average=False, line_alpha=None, + spatial_colors=True, sphere=None, window='hamming', + exclude='bads', verbose=None): + return plot_raw_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, + proj=proj, n_fft=n_fft, n_overlap=n_overlap, + reject_by_annotation=reject_by_annotation, + picks=picks, ax=ax, color=color, xscale=xscale, + area_mode=area_mode, area_alpha=area_alpha, + dB=dB, estimate=estimate, show=show, n_jobs=n_jobs, + average=average, line_alpha=line_alpha, + spatial_colors=spatial_colors, sphere=sphere, + window=window, exclude=exclude, verbose=verbose) + + @copy_function_doc_to_method_doc(plot_raw_psd_topo) + def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False, + n_fft=2048, n_overlap=0, layout=None, color='w', + fig_facecolor='k', axis_facecolor='k', dB=True, + show=True, block=False, n_jobs=1, axes=None, + verbose=None): + return plot_raw_psd_topo(self, tmin=tmin, tmax=tmax, fmin=fmin, + fmax=fmax, proj=proj, n_fft=n_fft, + n_overlap=n_overlap, layout=layout, + color=color, fig_facecolor=fig_facecolor, + axis_facecolor=axis_facecolor, dB=dB, + show=show, block=block, n_jobs=n_jobs, + axes=axes, verbose=verbose) + + @property + def ch_names(self): + """Channel names.""" + return self.info['ch_names'] + + @property + def times(self): + """Time points.""" + out = _arange_div(self.n_times, float(self.info['sfreq'])) + out.flags['WRITEABLE'] 
= False + return out + + @property + def n_times(self): + """Number of time points.""" + return self.last_samp - self.first_samp + 1 + + def __len__(self): + """Return the number of time points. + + Returns + ------- + len : int + The number of time points. + + Examples + -------- + This can be used as:: + + >>> len(raw) # doctest: +SKIP + 1000 + """ + return self.n_times + + @verbose + def load_bad_channels(self, bad_file=None, force=False, verbose=None): + """Mark channels as bad from a text file. + + This function operates mostly in the style of the C function + ``mne_mark_bad_channels``. Each line in the text file will be + interpreted as a name of a bad channel. + + Parameters + ---------- + bad_file : path-like | None + File name of the text file containing bad channels. + If ``None`` (default), bad channels are cleared, but this + is more easily done directly with ``raw.info['bads'] = []``. + force : bool + Whether or not to force bad channel marking (of those + that exist) if channels are not found, instead of + raising an error. Defaults to ``False``. + %(verbose)s + """ + prev_bads = self.info['bads'] + new_bads = [] + if bad_file is not None: + # Check to make sure bad channels are there + names = frozenset(self.info['ch_names']) + with open(bad_file) as fid: + bad_names = [line for line in fid.read().splitlines() if line] + new_bads = [ci for ci in bad_names if ci in names] + count_diff = len(bad_names) - len(new_bads) + + if count_diff > 0: + msg = (f'{count_diff} bad channel(s) from:' + f'\n{bad_file}\nnot found in:\n{self.filenames[0]}') + if not force: + raise ValueError(msg) + else: + warn(msg) + + if prev_bads != new_bads: + logger.info(f'Updating bad channels: {prev_bads} -> {new_bads}') + self.info['bads'] = new_bads + else: + logger.info(f'No channels updated. Bads are: {prev_bads}') + + @fill_doc + def append(self, raws, preload=None): + """Concatenate raw instances as if they were continuous. + + .. note:: Boundaries of the raw files are annotated bad. If you wish to + use the data as continuous recording, you can remove the + boundary annotations after concatenation (see + :meth:`mne.Annotations.delete`). + + Parameters + ---------- + raws : list, or Raw instance + List of Raw instances to concatenate to the current instance + (in order), or a single raw instance to concatenate. 
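A minimal sketch of this concatenation workflow (file names hypothetical; ``concatenate_raws`` below is the functional equivalent)::

    import mne

    raw1 = mne.io.read_raw_fif('run1_raw.fif', preload=True)
    raw2 = mne.io.read_raw_fif('run2_raw.fif', preload=True)
    raw1.append(raw2)  # modifies raw1 in place

    # optionally drop the boundary annotations added at the seam
    idx = [i for i, desc in enumerate(raw1.annotations.description)
           if desc in ('BAD boundary', 'EDGE boundary')]
    raw1.annotations.delete(idx)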
+ %(preload_concatenate)s + """ + if not isinstance(raws, list): + raws = [raws] + + # make sure the raws are compatible + all_raws = [self] + all_raws += raws + _check_raw_compatibility(all_raws) + + # deal with preloading data first (while files are separate) + all_preloaded = self.preload and all(r.preload for r in raws) + if preload is None: + if all_preloaded: + preload = True + else: + preload = False + + if preload is False: + if self.preload: + self._data = None + self.preload = False + else: + # do the concatenation ourselves since preload might be a string + nchan = self.info['nchan'] + c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)]) + nsamp = c_ns[-1] + + if not self.preload: + this_data = self._read_segment(projector=self._projector) + else: + this_data = self._data + + # allocate the buffer + _data = _allocate_data(preload, (nchan, nsamp), this_data.dtype) + _data[:, 0:c_ns[0]] = this_data + + for ri in range(len(raws)): + if not raws[ri].preload: + # read the data directly into the buffer + data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]] + raws[ri]._read_segment(data_buffer=data_buffer, + projector=self._projector) + else: + _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data + self._data = _data + self.preload = True + + # now combine information from each raw file to construct new self + annotations = self.annotations + assert annotations.orig_time == self.info['meas_date'] + edge_samps = list() + for ri, r in enumerate(raws): + n_samples = self.last_samp - self.first_samp + 1 + annotations = _combine_annotations( + annotations, r.annotations, n_samples, + self.first_samp, r.first_samp, + self.info['sfreq']) + edge_samps.append(sum(self._last_samps) - + sum(self._first_samps) + (ri + 1)) + self._first_samps = np.r_[self._first_samps, r._first_samps] + self._last_samps = np.r_[self._last_samps, r._last_samps] + self._read_picks += r._read_picks + self._raw_extras += r._raw_extras + self._filenames += r._filenames + assert annotations.orig_time == self.info['meas_date'] + # The above _combine_annotations gets everything synchronized to + # first_samp. set_annotations (with no absolute time reference) assumes + # that the annotations being set are relative to first_samp, and will + # add it back on. So here we have to remove it: + if annotations.orig_time is None: + annotations.onset -= self.first_samp / self.info['sfreq'] + self.set_annotations(annotations) + for edge_samp in edge_samps: + onset = _sync_onset(self, (edge_samp) / self.info['sfreq'], True) + self.annotations.append(onset, 0., 'BAD boundary') + self.annotations.append(onset, 0., 'EDGE boundary') + if not (len(self._first_samps) == len(self._last_samps) == + len(self._raw_extras) == len(self._filenames) == + len(self._read_picks)): + raise RuntimeError('Append error') # should never happen + + def close(self): + """Clean up the object. + + Does nothing for objects that close their file descriptors. + Things like RawFIF will override this method. + """ + pass # noqa + + def copy(self): + """Return copy of Raw instance. + + Returns + ------- + inst : instance of Raw + A copy of the instance. 
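Because most ``Raw`` methods operate in place, ``copy`` is the idiomatic way to keep the original intact, e.g. (reusing the hypothetical ``raw`` from the sketches above)::

    cropped = raw.copy().crop(tmin=0., tmax=60.)  # raw itself is unchanged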
+ """ + return deepcopy(self) + + def __repr__(self): # noqa: D105 + name = self.filenames[0] + name = '' if name is None else op.basename(name) + ', ' + size_str = str(sizeof_fmt(self._size)) # str in case it fails -> None + size_str += ', data%s loaded' % ('' if self.preload else ' not') + s = ('%s%s x %s (%0.1f s), ~%s' + % (name, len(self.ch_names), self.n_times, self.times[-1], + size_str)) + return "<%s | %s>" % (self.__class__.__name__, s) + + def _repr_html_(self, caption=None): + from ..html_templates import repr_templates_env + basenames = [ + os.path.basename(f) for f in self._filenames if f is not None + ] + m, s = divmod(self._last_time - self.first_time, 60) + h, m = divmod(m, 60) + duration = f'{int(h):02d}:{int(m):02d}:{int(s):02d}' + raw_template = repr_templates_env.get_template('raw.html.jinja') + return raw_template.render( + info_repr=self.info._repr_html_(caption=caption), + filenames=basenames, duration=duration + ) + + def add_events(self, events, stim_channel=None, replace=False): + """Add events to stim channel. + + Parameters + ---------- + events : ndarray, shape (n_events, 3) + Events to add. The first column specifies the sample number of + each event, the second column is ignored, and the third column + provides the event value. If events already exist in the Raw + instance at the given sample numbers, the event values will be + added together. + stim_channel : str | None + Name of the stim channel to add to. If None, the config variable + 'MNE_STIM_CHANNEL' is used. If this is not found, it will default + to 'STI 014'. + replace : bool + If True the old events on the stim channel are removed before + adding the new ones. + + Notes + ----- + Data must be preloaded in order to add events. + """ + _check_preload(self, 'Adding events') + events = np.asarray(events) + if events.ndim != 2 or events.shape[1] != 3: + raise ValueError('events must be shape (n_events, 3)') + stim_channel = _get_stim_channel(stim_channel, self.info) + pick = pick_channels(self.ch_names, stim_channel) + if len(pick) == 0: + raise ValueError('Channel %s not found' % stim_channel) + pick = pick[0] + idx = events[:, 0].astype(int) + if np.any(idx < self.first_samp) or np.any(idx > self.last_samp): + raise ValueError('event sample numbers must be between %s and %s' + % (self.first_samp, self.last_samp)) + if not all(idx == events[:, 0]): + raise ValueError('event sample numbers must be integers') + if replace: + self._data[pick, :] = 0. + self._data[pick, idx - self.first_samp] += events[:, 2] + + def _get_buffer_size(self, buffer_size_sec=None): + """Get the buffer size.""" + if buffer_size_sec is None: + buffer_size_sec = self.buffer_size_sec + buffer_size_sec = float(buffer_size_sec) + return int(np.ceil(buffer_size_sec * self.info['sfreq'])) + + @verbose + def to_data_frame(self, picks=None, index=None, + scalings=None, copy=True, start=None, stop=None, + long_format=False, time_format='ms', *, + verbose=None): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, an + additional column "time" is added, unless ``index`` is not ``None`` + (in which case time values form the DataFrame's index). + + Parameters + ---------- + %(picks_all)s + %(index_df_raw)s + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + start : int | None + Starting sample index for creating the DataFrame from a temporal + span of the Raw object. ``None`` (the default) uses the first + sample. 
+ stop : int | None + Ending sample index for creating the DataFrame from a temporal span + of the Raw object. ``None`` (the default) uses the last sample. + %(long_format_df_raw)s + %(time_format_df_raw)s + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ['time'] + valid_time_formats = ['ms', 'timedelta', 'datetime'] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats, + self.info['meas_date']) + # get data + picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + data, times = self[picks, start:stop] + data = data.T + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = _convert_times(self, times, time_format) + mindex.append(('time', times)) + # build DataFrame + df = _build_data_frame(self, data, picks, long_format, mindex, index, + default_index=['time']) + return df + + def describe(self, data_frame=False): + """Describe channels (name, type, descriptive statistics). + + Parameters + ---------- + data_frame : bool + If True, return results in a pandas.DataFrame. If False, only print + results. Columns 'ch', 'type', and 'unit' indicate channel index, + channel type, and unit of the remaining five columns. These columns + are 'min' (minimum), 'Q1' (first quartile or 25th percentile), + 'median', 'Q3' (third quartile or 75th percentile), and 'max' + (maximum). + + Returns + ------- + result : None | pandas.DataFrame + If data_frame=False, returns None. If data_frame=True, returns + results in a pandas.DataFrame (requires pandas).
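A short usage sketch (hypothetical ``raw`` as above)::

    raw.describe()                         # prints a formatted table
    stats = raw.describe(data_frame=True)  # requires pandas
    print(stats[['min', 'median', 'max']])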
+ """ + from scipy.stats import scoreatpercentile as q + nchan = self.info["nchan"] + + # describe each channel + cols = defaultdict(list) + cols["name"] = self.ch_names + for i in range(nchan): + ch = self.info["chs"][i] + data = self[i][0] + cols["type"].append(channel_type(self.info, i)) + cols["unit"].append(_unit2human[ch["unit"]]) + cols["min"].append(np.min(data)) + cols["Q1"].append(q(data, 25)) + cols["median"].append(np.median(data)) + cols["Q3"].append(q(data, 75)) + cols["max"].append(np.max(data)) + + if data_frame: # return data frame + import pandas as pd + df = pd.DataFrame(cols) + df.index.name = "ch" + return df + + # convert into commonly used units + scalings = _handle_default("scalings") + units = _handle_default("units") + for i in range(nchan): + unit = units.get(cols['type'][i]) + scaling = scalings.get(cols['type'][i], 1) + if scaling != 1: + cols['unit'][i] = unit + for col in ["min", "Q1", "median", "Q3", "max"]: + cols[col][i] *= scaling + + lens = {"ch": max(2, len(str(nchan))), + "name": max(4, max([len(n) for n in cols["name"]])), + "type": max(4, max([len(t) for t in cols["type"]])), + "unit": max(4, max([len(u) for u in cols["unit"]]))} + + # print description, start with header + print(self) + print(f"{'ch':>{lens['ch']}} " + f"{'name':<{lens['name']}} " + f"{'type':<{lens['type']}} " + f"{'unit':<{lens['unit']}} " + f"{'min':>9} " + f"{'Q1':>9} " + f"{'median':>9} " + f"{'Q3':>9} " + f"{'max':>9}") + # print description for each channel + for i in range(nchan): + msg = (f"{i:>{lens['ch']}} " + f"{cols['name'][i]:<{lens['name']}} " + f"{cols['type'][i].upper():<{lens['type']}} " + f"{cols['unit'][i]:<{lens['unit']}} ") + for col in ["min", "Q1", "median", "Q3"]: + msg += f"{cols[col][i]:>9.2f} " + msg += f"{cols['max'][i]:>9.2f}" + print(msg) + + +def _allocate_data(preload, shape, dtype): + """Allocate data in memory or in memmap for preloading.""" + if preload in (None, True): # None comes from _read_segment + data = np.zeros(shape, dtype) + else: + _validate_type(preload, 'path-like', 'preload') + data = np.memmap(str(preload), mode='w+', dtype=dtype, shape=shape) + return data + + +def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False): + """Convert indices to time. + + Parameters + ---------- + index : list-like | int + List of ints or int representing points in time. + use_first_samp : boolean + If True, the time returned is relative to the session onset, else + relative to the recording onset. + + Returns + ------- + times : ndarray + Times corresponding to the index supplied. + """ + times = np.atleast_1d(index) + (first_samp if use_first_samp else 0) + return times / sfreq + + +def _convert_slice(sel): + if len(sel) and (np.diff(sel) == 1).all(): + return slice(sel[0], sel[-1] + 1) + else: + return sel + + +def _get_ch_factors(inst, units, picks_idxs): + """Get scaling factors for data, given units. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + The instance. + %(units)s + picks_idxs : ndarray + The picks as provided through _picks_to_idx. + + Returns + ------- + ch_factors : ndarray of floats, shape(len(picks),) + The sacling factors for each channel, ordered according + to picks. 
+ + """ + _validate_type(units, types=(None, str, dict), item_name="units") + ch_factors = np.ones(len(picks_idxs)) + si_units = _handle_default('si_units') + ch_types = inst.get_channel_types(picks=picks_idxs) + # Convert to dict if str units + if isinstance(units, str): + # Check that there is only one channel type + unit_ch_type = list(set(ch_types) & set(si_units.keys())) + if len(unit_ch_type) > 1: + raise ValueError('"units" cannot be str if there is more than ' + 'one channel type with a unit ' + f'{unit_ch_type}.') + units = {unit_ch_type[0]: units} # make the str argument a dict + # Loop over the dict to get channel factors + if isinstance(units, dict): + for ch_type, ch_unit in units.items(): + # Get the scaling factors + scaling = _get_scaling(ch_type, ch_unit) + if scaling != 1: + indices = [i_ch for i_ch, ch in enumerate(ch_types) + if ch == ch_type] + ch_factors[indices] *= scaling + + return ch_factors + + +def _get_scaling(ch_type, target_unit): + """Return the scaling factor based on the channel type and a target unit. + + Parameters + ---------- + ch_type : str + The channel type. + target_unit : str + The target unit for the provided channel type. + + Returns + ------- + scaling : float + The scaling factor to convert from the si_unit (used by default for MNE + objects) to the target unit. + """ + scaling = 1. + si_units = _handle_default('si_units') + si_units_splitted = {key: si_units[key].split('/') for key in si_units} + prefixes = _handle_default('prefixes') + prefix_list = list(prefixes.keys()) + + # Check that the provided unit exists for the ch_type + unit_list = target_unit.split('/') + if ch_type not in si_units.keys(): + raise KeyError( + f'{ch_type} is not a channel type that can be scaled ' + 'from units.') + si_unit_list = si_units_splitted[ch_type] + if len(unit_list) != len(si_unit_list): + raise ValueError( + f'{target_unit} is not a valid unit for {ch_type}, use a ' + f'sub-multiple of {si_units[ch_type]} instead.') + for i, unit in enumerate(unit_list): + valid = [prefix + si_unit_list[i] + for prefix in prefix_list] + if unit not in valid: + raise ValueError( + f'{target_unit} is not a valid unit for {ch_type}, use a ' + f'sub-multiple of {si_units[ch_type]} instead.') + + # Get the scaling factors + for i, unit in enumerate(unit_list): + has_square = False + # XXX power normally not used as csd cannot get_data() + if unit[-1] == '²': + has_square = True + if unit == 'm' or unit == 'm²': + factor = 1. + elif unit[0] in prefixes.keys(): + factor = prefixes[unit[0]] + else: + factor = 1. 
+ if factor != 1: + if has_square: + factor *= factor + if i == 0: + scaling = scaling * factor + elif i == 1: + scaling = scaling / factor + return scaling + + +class _ReadSegmentFileProtector(object): + """Ensure only _filenames, _raw_extras, and _read_segment_file are used.""" + + def __init__(self, raw): + self.__raw = raw + assert hasattr(raw, '_projector') + self._filenames = raw._filenames + self._raw_extras = raw._raw_extras + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + return self.__raw.__class__._read_segment_file( + self, data, idx, fi, start, stop, cals, mult) + + +class _RawShell(object): + """Create a temporary raw object.""" + + def __init__(self): # noqa: D102 + self.first_samp = None + self.last_samp = None + self._first_time = None + self._last_time = None + self._cals = None + self._rawdir = None + self._projector = None + + @property + def n_times(self): # noqa: D102 + return self.last_samp - self.first_samp + 1 + + @property + def annotations(self): # noqa: D102 + return self._annotations + + def set_annotations(self, annotations): + if annotations is None: + annotations = Annotations([], [], [], None) + self._annotations = annotations.copy() + + +############################################################################### +# Writing +def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start, + stop, buffer_size, projector, drop_small_buffer, + split_size, split_naming, part_idx, prev_fname, overwrite): + """Write raw file with splitting.""" + # we've done something wrong if we hit this + n_times_max = len(raw.times) + if start >= stop or stop > n_times_max: + raise RuntimeError('Cannot write raw file with no data: %s -> %s ' + '(max: %s) requested' % (start, stop, n_times_max)) + + # Expand `~` if present + fname = _check_fname(fname=fname, overwrite=overwrite) + + base, ext = op.splitext(fname) + if part_idx > 0: + if split_naming == 'neuromag': + # insert index in filename + use_fname = '%s-%d%s' % (base, part_idx, ext) + else: + assert split_naming == 'bids' + use_fname = _construct_bids_filename(base, ext, part_idx + 1) + # check for file existence + _check_fname(use_fname, overwrite) + else: + use_fname = fname + # reserve our BIDS split fname in case we need to split + if split_naming == 'bids' and part_idx == 0: + # reserve our possible split name + reserved_fname = _construct_bids_filename(base, ext, part_idx + 1) + logger.info( + f'Reserving possible split file {op.basename(reserved_fname)}') + _check_fname(reserved_fname, overwrite) + ctx = _ReservedFilename(reserved_fname) + else: + reserved_fname = use_fname + ctx = nullcontext() + logger.info('Writing %s' % use_fname) + + picks = _picks_to_idx(info, picks, 'all', ()) + with start_and_end_file(use_fname) as fid: + cals = _start_writing_raw(fid, info, picks, data_type, + reset_range, raw.annotations) + with ctx: + final_fname = _write_raw_fid( + raw, info, picks, fid, cals, part_idx, start, stop, + buffer_size, prev_fname, split_size, use_fname, + projector, drop_small_buffer, fmt, fname, reserved_fname, + data_type, reset_range, split_naming, + overwrite=True # we've started writing already above + ) + if final_fname != use_fname: + assert split_naming == 'bids' + logger.info(f'Renaming BIDS split file {op.basename(final_fname)}') + ctx.remove = False + shutil.move(use_fname, final_fname) + if part_idx == 0: + logger.info('[done]') + return final_fname, part_idx + + +class _ReservedFilename: + + def __init__(self, fname): + self.fname = fname + assert 
op.isdir(op.dirname(fname)), fname + with open(fname, 'w'): + pass + self.remove = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.remove: + os.remove(self.fname) + + +def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, + buffer_size, prev_fname, split_size, use_fname, + projector, drop_small_buffer, fmt, fname, reserved_fname, + data_type, reset_range, split_naming, overwrite): + first_samp = raw.first_samp + start + if first_samp != 0: + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp) + + # previous file name and id + if part_idx > 0 and prev_fname is not None: + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname) + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id']) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1) + end_block(fid, FIFF.FIFFB_REF) + + pos_prev = fid.tell() + if pos_prev > split_size: + raise ValueError('file is larger than "split_size" after writing ' + 'measurement information, you must use a larger ' + 'value for split size: %s plus enough bytes for ' + 'the chosen buffer_size' % pos_prev) + + # Check to see if this has acquisition skips and, if so, if we can + # write out empty buffers instead of zeroes + firsts = list(range(start, stop, buffer_size)) + lasts = np.array(firsts) + buffer_size + if lasts[-1] > stop: + lasts[-1] = stop + sk_onsets, sk_ends = _annotations_starts_stops(raw, 'bad_acq_skip') + do_skips = False + if len(sk_onsets) > 0: + if np.in1d(sk_onsets, firsts).all() and np.in1d(sk_ends, lasts).all(): + do_skips = True + else: + if part_idx == 0: + warn('Acquisition skips detected but did not fit evenly into ' + 'output buffer_size, will be written as zeroes.') + + n_current_skip = 0 + final_fname = use_fname + for first, last in zip(firsts, lasts): + if do_skips: + if ((first >= sk_onsets) & (last <= sk_ends)).any(): + # Track how many we have + n_current_skip += 1 + continue + elif n_current_skip > 0: + # Write out an empty buffer instead of data + write_int(fid, FIFF.FIFF_DATA_SKIP, n_current_skip) + # These two NOPs appear to be optional (MaxFilter does not do + # it, but some acquisition machines do) so let's not bother. + # write_nop(fid) + # write_nop(fid) + n_current_skip = 0 + data, times = raw[picks, first:last] + assert len(times) == last - first + + if projector is not None: + data = np.dot(projector, data) + + if ((drop_small_buffer and (first > start) and + (len(times) < buffer_size))): + logger.info('Skipping data chunk due to small buffer ... ' + '[done]') + break + logger.debug(f'Writing FIF {first:6d} ... {last:6d} ...') + _write_raw_buffer(fid, data, cals, fmt) + + pos = fid.tell() + this_buff_size_bytes = pos - pos_prev + overage = pos - split_size + _NEXT_FILE_BUFFER + if overage > 0: + # This should occur on the first buffer write of the file, so + # we should mention the space required for the meas info + raise ValueError( + 'buffer size (%s) is too large for the given split size (%s) ' + 'by %s bytes after writing info (%s) and leaving enough space ' + 'for end tags (%s): decrease "buffer_size_sec" or increase ' + '"split_size".' 
% (this_buff_size_bytes, split_size, overage, + pos_prev, _NEXT_FILE_BUFFER)) + + # Split files if necessary, leave some space for next file info + # make sure we check to make sure we actually *need* another buffer + # with the "and" check + if pos >= split_size - this_buff_size_bytes - _NEXT_FILE_BUFFER and \ + first + buffer_size < stop: + final_fname = reserved_fname + next_fname, next_idx = _write_raw( + fname, raw, info, picks, fmt, + data_type, reset_range, first + buffer_size, stop, buffer_size, + projector, drop_small_buffer, split_size, split_naming, + part_idx + 1, final_fname, overwrite) + + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname)) + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id']) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx) + end_block(fid, FIFF.FIFFB_REF) + break + pos_prev = pos + + logger.info('Closing %s' % use_fname) + if info.get('maxshield', False): + end_block(fid, FIFF.FIFFB_IAS_RAW_DATA) + else: + end_block(fid, FIFF.FIFFB_RAW_DATA) + end_block(fid, FIFF.FIFFB_MEAS) + return final_fname + + +@fill_doc +def _start_writing_raw(fid, info, sel, data_type, + reset_range, annotations): + """Start write raw data in file. + + Parameters + ---------- + fid : file + The created file. + %(info_not_none)s + sel : array of int | None + Indices of channels to include. If None, all channels + are included. + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data. + reset_range : bool + If True, the info['chs'][k]['range'] parameter will be set to unity. + annotations : instance of Annotations + The annotations to write. + + Returns + ------- + fid : file + The file descriptor. + cals : list + calibration factors. + """ + # + # Measurement info + # + info = pick_info(info, sel) + + # + # Create the file and save the essentials + # + start_block(fid, FIFF.FIFFB_MEAS) + write_id(fid, FIFF.FIFF_BLOCK_ID) + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + + cals = [] + for k in range(info['nchan']): + # + # Scan numbers may have been messed up + # + info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format + if reset_range is True: + info['chs'][k]['range'] = 1.0 + cals.append(info['chs'][k]['cal'] * info['chs'][k]['range']) + + write_meas_info(fid, info, data_type=data_type, reset_range=reset_range) + + # + # Annotations + # + if len(annotations) > 0: # don't save empty annot + _write_annotations(fid, annotations) + + # + # Start the raw data + # + if info.get('maxshield', False): + start_block(fid, FIFF.FIFFB_IAS_RAW_DATA) + else: + start_block(fid, FIFF.FIFFB_RAW_DATA) + + return cals + + +def _write_raw_buffer(fid, buf, cals, fmt): + """Write raw buffer. + + Parameters + ---------- + fid : file descriptor + an open raw data file. + buf : array + The buffer to write. + cals : array + Calibration factors. + fmt : str + 'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit + float for each item. This will be doubled for complex datatypes. Note + that short and int formats cannot be used for complex data. 
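The writer divides each channel by its calibration factor (``cal * range``) so readers can multiply it back in; a toy round trip of that scaling::

    import numpy as np

    buf = np.array([[1e-6, 2e-6], [3e-3, 4e-3]])  # two channels
    cals = np.array([1e-6, 1e-3])                 # per-channel calibrations
    stored = buf / cals[:, None]                  # values written to disk
    assert np.allclose(stored * cals[:, None], buf)  # recovered on read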
+ """ + if buf.shape[0] != len(cals): + raise ValueError('buffer and calibration sizes do not match') + + _check_option('fmt', fmt, ['short', 'int', 'single', 'double']) + + cast_int = False # allow unsafe cast + if np.isrealobj(buf): + if fmt == 'short': + write_function = write_dau_pack16 + cast_int = True + elif fmt == 'int': + write_function = write_int + cast_int = True + elif fmt == 'single': + write_function = write_float + else: + write_function = write_double + else: + if fmt == 'single': + write_function = write_complex64 + elif fmt == 'double': + write_function = write_complex128 + else: + raise ValueError('only "single" and "double" supported for ' + 'writing complex data') + + buf = buf / np.ravel(cals)[:, None] + if cast_int: + buf = buf.astype(np.int32) + write_function(fid, FIFF.FIFF_DATA_BUFFER, buf) + + +def _check_raw_compatibility(raw): + """Ensure all instances of Raw have compatible parameters.""" + for ri in range(1, len(raw)): + if not isinstance(raw[ri], type(raw[0])): + raise ValueError(f'raw[{ri}] type must match') + for key in ('nchan', 'bads', 'sfreq'): + a, b = raw[ri].info[key], raw[0].info[key] + if a != b: + raise ValueError( + f'raw[{ri}].info[{key}] must match:\n' + f'{repr(a)} != {repr(b)}') + if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']): + raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri) + if not all(raw[ri]._cals == raw[0]._cals): + raise ValueError('raw[%d]._cals must match' % ri) + if len(raw[0].info['projs']) != len(raw[ri].info['projs']): + raise ValueError('SSP projectors in raw files must be the same') + if not all(_proj_equal(p1, p2) for p1, p2 in + zip(raw[0].info['projs'], raw[ri].info['projs'])): + raise ValueError('SSP projectors in raw files must be the same') + if not all(r.orig_format == raw[0].orig_format for r in raw): + warn('raw files do not all have the same data format, could result in ' + 'precision mismatch. Setting raw.orig_format="unknown"') + raw[0].orig_format = 'unknown' + + +@verbose +def concatenate_raws(raws, preload=None, events_list=None, *, + on_mismatch='raise', verbose=None): + """Concatenate `~mne.io.Raw` instances as if they were continuous. + + .. note:: ``raws[0]`` is modified in-place to achieve the concatenation. + Boundaries of the raw files are annotated bad. If you wish to use + the data as continuous recording, you can remove the boundary + annotations after concatenation (see + :meth:`mne.Annotations.delete`). + + Parameters + ---------- + raws : list + List of `~mne.io.Raw` instances to concatenate (in order). + %(preload_concatenate)s + events_list : None | list + The events to concatenate. Defaults to ``None``. + %(on_mismatch_info)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + The result of the concatenation (first Raw instance passed in). + events : ndarray of int, shape (n_events, 3) + The events. Only returned if ``event_list`` is not None. 
+ """ + for idx, raw in enumerate(raws[1:], start=1): + _ensure_infos_match(info1=raws[0].info, info2=raw.info, + name=f'raws[{idx}]', on_mismatch=on_mismatch) + + if events_list is not None: + if len(events_list) != len(raws): + raise ValueError('`raws` and `event_list` are required ' + 'to be of the same length') + first, last = zip(*[(r.first_samp, r.last_samp) for r in raws]) + events = concatenate_events(events_list, first, last) + raws[0].append(raws[1:], preload) + + if events_list is None: + return raws[0] + else: + return raws[0], events + + +def _check_maxshield(allow_maxshield): + """Warn or error about MaxShield.""" + msg = ('This file contains raw Internal Active ' + 'Shielding data. It may be distorted. Elekta ' + 'recommends it be run through MaxFilter to ' + 'produce reliable results. Consider closing ' + 'the file and running MaxFilter on the data.') + if allow_maxshield: + if not (isinstance(allow_maxshield, str) and + allow_maxshield == 'yes'): + warn(msg) + else: + msg += (' Use allow_maxshield=True if you are sure you' + ' want to load the data despite this warning.') + raise ValueError(msg) diff --git a/python/libs/mne/io/boxy/__init__.py b/python/libs/mne/io/boxy/__init__.py new file mode 100644 index 0000000..701f5fd --- /dev/null +++ b/python/libs/mne/io/boxy/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD-3-Clause + +from .boxy import read_raw_boxy diff --git a/python/libs/mne/io/boxy/boxy.py b/python/libs/mne/io/boxy/boxy.py new file mode 100644 index 0000000..bc82087 --- /dev/null +++ b/python/libs/mne/io/boxy/boxy.py @@ -0,0 +1,263 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD-3-Clause + +import re as re + +import numpy as np + +from ..base import BaseRaw +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...utils import logger, verbose, fill_doc, _check_fname +from ...annotations import Annotations + + +@fill_doc +def read_raw_boxy(fname, preload=False, verbose=None): + """Reader for an optical imaging recording. + + This function has been tested using the ISS Imagent I and II systems + and versions 0.40/0.84 of the BOXY recording software. + + Parameters + ---------- + fname : str + Path to the BOXY data file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawBOXY + A Raw object containing BOXY data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawBOXY(fname, preload, verbose) + + +@fill_doc +class RawBOXY(BaseRaw): + """Raw object from a BOXY optical imaging file. + + Parameters + ---------- + fname : str + Path to the BOXY data file. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + logger.info('Loading %s' % fname) + + # Read header file and grab some info. + start_line = np.inf + col_names = mrk_col = filetype = mrk_data = end_line = None + raw_extras = dict() + raw_extras['offsets'] = list() # keep track of our offsets + sfreq = None + fname = _check_fname(fname, 'read', True, 'fname') + with open(fname, 'r') as fid: + line_num = 0 + i_line = fid.readline() + while i_line: + # most of our lines will be data lines, so check that first + if line_num >= start_line: + assert col_names is not None + assert filetype is not None + if '#DATA ENDS' in i_line: + # Data ends just before this. 
+ end_line = line_num + break + if mrk_col is not None: + if filetype == 'non-parsed': + # Non-parsed files have different lines lengths. + crnt_line = i_line.rsplit(' ')[0] + temp_data = re.findall( + r'[-+]?\d*\.?\d+', crnt_line) + if len(temp_data) == len(col_names): + mrk_data.append(float( + re.findall(r'[-+]?\d*\.?\d+', crnt_line) + [mrk_col])) + else: + crnt_line = i_line.rsplit(' ')[0] + mrk_data.append(float(re.findall( + r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) + raw_extras['offsets'].append(fid.tell()) + # now proceed with more standard header parsing + elif 'BOXY.EXE:' in i_line: + boxy_ver = re.findall(r'\d*\.\d+', + i_line.rsplit(' ')[-1])[0] + # Check that the BOXY version is supported + if boxy_ver not in ['0.40', '0.84']: + raise RuntimeError('MNE has not been tested with BOXY ' + 'version (%s)' % boxy_ver) + elif 'Detector Channels' in i_line: + raw_extras['detect_num'] = int(i_line.rsplit(' ')[0]) + elif 'External MUX Channels' in i_line: + raw_extras['source_num'] = int(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line or \ + 'Updata Rate (Hz)' in i_line: + # Version 0.40 of the BOXY recording software + # (and possibly other versions lower than 0.84) contains a + # typo in the raw data file where 'Update Rate' is spelled + # "Updata Rate. This will account for this typo. + sfreq = float(i_line.rsplit(' ')[0]) + elif '#DATA BEGINS' in i_line: + # Data should start a couple lines later. + start_line = line_num + 3 + elif line_num == start_line - 2: + # Grab names for each column of data. + raw_extras['col_names'] = col_names = re.findall( + r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0]) + if 'exmux' in col_names: + # Change filetype based on data organisation. + filetype = 'non-parsed' + else: + filetype = 'parsed' + if 'digaux' in col_names: + mrk_col = col_names.index('digaux') + mrk_data = list() + # raw_extras['offsets'].append(fid.tell()) + elif line_num == start_line - 1: + raw_extras['offsets'].append(fid.tell()) + line_num += 1 + i_line = fid.readline() + assert sfreq is not None + raw_extras.update( + filetype=filetype, start_line=start_line, end_line=end_line) + + # Label each channel in our data, for each data type (DC, AC, Ph). + # Data is organised by channels x timepoint, where the first + # 'source_num' rows correspond to the first detector, the next + # 'source_num' rows correspond to the second detector, and so on. + ch_names = list() + ch_types = list() + cals = list() + for det_num in range(raw_extras['detect_num']): + for src_num in range(raw_extras['source_num']): + for i_type, ch_type in [ + ('DC', 'fnirs_cw_amplitude'), + ('AC', 'fnirs_fd_ac_amplitude'), + ('Ph', 'fnirs_fd_phase')]: + ch_names.append( + f'S{src_num + 1}_D{det_num + 1} {i_type}') + ch_types.append(ch_type) + cals.append(np.pi / 180. if i_type == 'Ph' else 1.) + + # Create info structure. + info = create_info(ch_names, sfreq, ch_types) + for ch, cal in zip(info['chs'], cals): + ch['cal'] = cal + + # Determine how long our data is. + delta = end_line - start_line + assert len(raw_extras['offsets']) == delta + 1 + if filetype == 'non-parsed': + delta //= (raw_extras['source_num']) + super(RawBOXY, self).__init__( + info, preload, filenames=[fname], first_samps=[0], + last_samps=[delta - 1], raw_extras=[raw_extras], verbose=verbose) + + # Now let's grab our markers, if they are present. + if mrk_data is not None: + mrk_data = np.array(mrk_data, float) + # We only want the first instance of each trigger. 
+ prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(mrk_data): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) / sfreq) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.array(mrk_idx) / sfreq + description = mrk_data[mrk_idx] + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + Boxy file organises data in two ways, parsed or un-parsed. + Regardless of type, output has (n_montages x n_sources x n_detectors + + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. + """ + source_num = self._raw_extras[fi]['source_num'] + detect_num = self._raw_extras[fi]['detect_num'] + start_line = self._raw_extras[fi]['start_line'] + end_line = self._raw_extras[fi]['end_line'] + filetype = self._raw_extras[fi]['filetype'] + col_names = self._raw_extras[fi]['col_names'] + offsets = self._raw_extras[fi]['offsets'] + boxy_file = self._filenames[fi] + + # Non-parsed multiplexes sources, so we need source_num times as many + # lines in that case + if filetype == 'parsed': + start_read = start_line + start + stop_read = start_read + (stop - start) + else: + assert filetype == 'non-parsed' + start_read = start_line + start * source_num + stop_read = start_read + (stop - start) * source_num + assert start_read >= start_line + assert stop_read <= end_line + + # Possible detector names. + detectors = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[:detect_num] + + # Loop through our data. + one = np.zeros((len(col_names), stop_read - start_read)) + with open(boxy_file, 'r') as fid: + # Just a more efficient version of this: + # ii = 0 + # for line_num, i_line in enumerate(fid): + # if line_num >= start_read: + # if line_num >= stop_read: + # break + # # Grab actual data. + # i_data = i_line.strip().split() + # one[:len(i_data), ii] = i_data + # ii += 1 + fid.seek(offsets[start_read - start_line], 0) + for oo in one.T: + i_data = fid.readline().strip().split() + oo[:len(i_data)] = i_data + + # in theory we could index in the loop above, but it's painfully slow, + # so let's just take a hopefully minor memory hit + if filetype == 'non-parsed': + ch_idxs = [col_names.index(f'{det}-{i_type}') + for det in detectors + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs].reshape( # each "time point" multiplexes srcs + len(detectors), 3, -1, source_num + ).transpose( # reorganize into (det, source, DC/AC/Ph, t) order + 0, 3, 1, 2 + ).reshape( # reshape the way we store it (det x source x DAP, t) + len(detectors) * source_num * 3, -1) + else: + assert filetype == 'parsed' + ch_idxs = [col_names.index(f'{det}-{i_type}{si + 1}') + for det in detectors + for si in range(source_num) + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs] + + # Place our data into the data object in place. 
+ _mult_cal_one(data, one, idx, cals, mult) diff --git a/python/libs/mne/io/boxy/tests/__init__.py b/python/libs/mne/io/boxy/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/boxy/tests/test_boxy.py b/python/libs/mne/io/boxy/tests/test_boxy.py new file mode 100644 index 0000000..700c22d --- /dev/null +++ b/python/libs/mne/io/boxy/tests/test_boxy.py @@ -0,0 +1,190 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD-3-Clause + +import os.path as op + +import pytest +import numpy as np +from numpy.testing import (assert_allclose, assert_array_equal, + assert_array_less) +import scipy.io as spio + +from mne import pick_types +from mne.datasets import testing +from mne.io import read_raw_boxy +from mne.io.tests.test_raw import _test_raw_reader + +data_path = testing.data_path(download=False) +boxy_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', + 'boxy_0_40_notriggers_unparsed.txt') +p_pod_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_notriggers_unparsed.mat') +boxy_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') +boxy_0_84_parsed = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_parsed.txt') +p_pod_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'p_pod_10_6_3_loaded_data', 'p_pod_10_6_3_triggers_unparsed.mat') + + +def _assert_ppod(raw, p_pod_file): + have_types = raw.get_channel_types(unique=True) + assert 'fnirs_fd_phase' in raw, have_types + assert 'fnirs_cw_amplitude' in raw, have_types + assert 'fnirs_fd_ac_amplitude' in raw, have_types + ppod_data = spio.loadmat(p_pod_file) + + # Compare MNE loaded data to p_pod loaded data. + map_ = dict(dc='fnirs_cw_amplitude', ac='fnirs_fd_ac_amplitude', + ph='fnirs_fd_phase') + for key, value in map_.items(): + ppod = ppod_data[key].T + m = np.median(np.abs(ppod)) + assert 1e-1 < m < 1e5, key # our atol is meaningful + atol = m * 1e-10 + py = raw.get_data(value) + if key == 'ph': # radians + assert_array_less(-np.pi, py) + assert_array_less(py, 3 * np.pi) + py = np.rad2deg(py) + assert_allclose(py, ppod, atol=atol, err_msg=key) + + +@testing.requires_testing_data +def test_boxy_load(): + """Test reading BOXY files.""" + raw = read_raw_boxy(boxy_0_40, verbose=True) + assert raw.info['sfreq'] == 62.5 + _assert_ppod(raw, p_pod_0_40) + + # Grab our different data types. + mne_ph = raw.copy().pick(picks='fnirs_fd_phase') + mne_dc = raw.copy().pick(picks='fnirs_cw_amplitude') + mne_ac = raw.copy().pick(picks='fnirs_fd_ac_amplitude') + + # Check channel names. 
+ first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', + 'S6_D1', 'S7_D1', 'S8_D1', 'S9_D1', 'S10_D1'] + last_chans = ['S1_D8', 'S2_D8', 'S3_D8', 'S4_D8', 'S5_D8', + 'S6_D8', 'S7_D8', 'S8_D8', 'S9_D8', 'S10_D8'] + + assert mne_dc.info['ch_names'][:10] == [i_chan + ' ' + 'DC' + for i_chan in first_chans] + assert mne_ac.info['ch_names'][:10] == [i_chan + ' ' + 'AC' + for i_chan in first_chans] + assert mne_ph.info['ch_names'][:10] == [i_chan + ' ' + 'Ph' + for i_chan in first_chans] + + assert mne_dc.info['ch_names'][70::] == [i_chan + ' ' + 'DC' + for i_chan in last_chans] + assert mne_ac.info['ch_names'][70::] == [i_chan + ' ' + 'AC' + for i_chan in last_chans] + assert mne_ph.info['ch_names'][70::] == [i_chan + ' ' + 'Ph' + for i_chan in last_chans] + + # Since this data set has no 'digaux' for creating trigger annotations, + # let's make sure our Raw object has no annotations. + assert len(raw.annotations) == 0 + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_filetypes(fname): + """Test reading parsed and unparsed BOXY data files.""" + # BOXY data files can be saved in two formats (parsed and unparsed) which + # mostly determines how the data is organised. + # For parsed files, each row is a single timepoint and all + # source/detector combinations are represented as columns. + # For unparsed files, each row is a source and each group of n rows + # represents a timepoint. For example, if there are ten sources in the raw + # data then the first ten rows represent the ten sources at timepoint 1 + # while the next set of ten rows are the ten sources at timepoint 2. + # Detectors are represented as columns. + + # Since p_pod is designed to only load unparsed files, we will first + # compare MNE and p_pod loaded data from an unparsed data file. If those + # files are comparable, then we will compare the MNE loaded data between + # parsed and unparsed files. + raw = read_raw_boxy(fname, verbose=True) + assert raw.info['sfreq'] == 79.4722 + _assert_ppod(raw, p_pod_0_84) + + # Grab our different data types. + unp_dc = raw.copy().pick('fnirs_cw_amplitude') + unp_ac = raw.copy().pick('fnirs_fd_ac_amplitude') + unp_ph = raw.copy().pick('fnirs_fd_phase') + + # Check channel names. + chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', + 'S5_D1', 'S6_D1', 'S7_D1', 'S8_D1'] + + assert unp_dc.info['ch_names'] == [i_chan + ' ' + 'DC' + for i_chan in chans] + assert unp_ac.info['ch_names'] == [i_chan + ' ' + 'AC' + for i_chan in chans] + assert unp_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' + for i_chan in chans] + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_digaux(fname): + """Test reading BOXY files and generating annotations from digaux.""" + srate = 79.4722 + raw = read_raw_boxy(fname, verbose=True) + + # Grab our different data types. + picks_dc = pick_types(raw.info, fnirs='fnirs_cw_amplitude') + picks_ac = pick_types(raw.info, fnirs='fnirs_fd_ac_amplitude') + picks_ph = pick_types(raw.info, fnirs='fnirs_fd_phase') + assert_array_equal(picks_dc, np.arange(0, 8) * 3 + 0) + assert_array_equal(picks_ac, np.arange(0, 8) * 3 + 1) + assert_array_equal(picks_ph, np.arange(0, 8) * 3 + 2) + + # Check that our event order matches what we expect. + event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] + assert_array_equal(raw.annotations.description, event_list) + + # Check that our event timings are what we expect. 
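# Toy run of the digaux-to-annotation loop used just below (the trigger
# trace and sfreq here are made up): each non-zero run yields one onset at
# its first sample, and one duration once the trace returns to zero.
import numpy as np

sfreq = 10.0
digaux = np.array([0, 0, 2, 2, 2, 0, 0, 3, 0, 0])
prev_mrk, mrk_idx, duration, tmp_dur = 0, [], [], 0
for i_num, i_mrk in enumerate(digaux):
    if i_mrk != 0 and i_mrk != prev_mrk:
        mrk_idx.append(i_num)
    if i_mrk != 0 and i_mrk == prev_mrk:
        tmp_dur += 1
    if i_mrk == 0 and i_mrk != prev_mrk:
        duration.append((tmp_dur + 1) / sfreq)
        tmp_dur = 0
    prev_mrk = i_mrk
print(np.array(mrk_idx) / sfreq, duration)  # [0.2 0.7] [0.3, 0.1]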
+ event_onset = [i_time * (1.0 / srate) for i_time in + [105, 185, 265, 344, 424]] + assert_allclose(raw.annotations.onset, event_onset, atol=1e-6) + + # Now let's compare parsed and unparsed events to p_pod loaded digaux. + # Load our p_pod data. + ppod_data = spio.loadmat(p_pod_0_84) + ppod_digaux = np.transpose(ppod_data['digaux'])[0] + + # Now let's get our triggers from the p_pod digaux. + # We only want the first instance of each trigger. + prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(ppod_digaux): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) * (1.0 / srate)) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.asarray([i_mrk * (1.0 / srate) for i_mrk in mrk_idx]) + description = np.asarray([str(float(i_mrk))for i_mrk in + ppod_digaux[mrk_idx]]) + assert_array_equal(raw.annotations.description, description) + assert_allclose(raw.annotations.onset, onset, atol=1e-6) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_40, boxy_0_84, boxy_0_84_parsed)) +def test_raw_properties(fname): + """Test raw reader properties.""" + _test_raw_reader(read_raw_boxy, fname=fname, boundary_decimal=1) diff --git a/python/libs/mne/io/brainvision/__init__.py b/python/libs/mne/io/brainvision/__init__.py new file mode 100644 index 0000000..f51241e --- /dev/null +++ b/python/libs/mne/io/brainvision/__init__.py @@ -0,0 +1,8 @@ +"""BrainVision module for conversion to FIF.""" + +# Author: Teon Brooks +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from .brainvision import read_raw_brainvision diff --git a/python/libs/mne/io/brainvision/brainvision.py b/python/libs/mne/io/brainvision/brainvision.py new file mode 100644 index 0000000..04119c8 --- /dev/null +++ b/python/libs/mne/io/brainvision/brainvision.py @@ -0,0 +1,993 @@ +# -*- coding: utf-8 -*- +"""Conversion tool from BrainVision EEG to FIF.""" +# Authors: Teon Brooks +# Christian Brodbeck +# Eric Larson +# Jona Sassenhagen +# Phillip Alday +# Okba Bekhelifi +# Stefan Appelhoff +# +# License: BSD-3-Clause + +import configparser +import os +import os.path as op +import re +from datetime import datetime, timezone +from io import StringIO + +import numpy as np + +from ...utils import verbose, logger, warn, fill_doc, _DefaultEventParser +from ..constants import FIFF +from ..meas_info import _empty_info +from ..base import BaseRaw +from ..utils import _read_segments_file, _mult_cal_one +from ...annotations import Annotations, read_annotations +from ...channels import make_dig_montage +from ...defaults import HEAD_SIZE_DEFAULT + + +@fill_doc +class RawBrainVision(BaseRaw): + """Raw object from Brain Vision EEG file. + + Parameters + ---------- + vhdr_fname : str + Path to the EEG header file. + eog : list or tuple + Names of channels or list of indices that should be designated + EOG channels. Values should correspond to the vhdr file. + Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. + misc : list or tuple of str | 'auto' + Names of channels or list of indices that should be designated + MISC channels. Values should correspond to the electrodes + in the vhdr file. If 'auto', units in vhdr file are used for inferring + misc channels. Default is ``'auto'``. + scale : float + The scaling factor for EEG data. Unless specified otherwise by + header file, units are in microvolts. Default scale factor is 1. 
+ %(preload)s + %(verbose)s + + Attributes + ---------- + impedances : dict + A dictionary of all electrodes and their impedances. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, vhdr_fname, + eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto', + scale=1., preload=False, verbose=None): # noqa: D107 + # Channel info and events + logger.info('Extracting parameters from %s...' % vhdr_fname) + vhdr_fname = op.abspath(vhdr_fname) + (info, data_fname, fmt, order, n_samples, mrk_fname, montage, + orig_units) = _get_vhdr_info(vhdr_fname, eog, misc, scale) + + with open(data_fname, 'rb') as f: + if isinstance(fmt, dict): # ASCII, this will be slow :( + if order == 'F': # multiplexed, channels in columns + n_skip = 0 + for ii in range(int(fmt['skiplines'])): + n_skip += len(f.readline()) + offsets = np.cumsum([n_skip] + [len(line) for line in f]) + n_samples = len(offsets) - 1 + elif order == 'C': # vectorized, channels, in rows + raise NotImplementedError() + else: + n_data_ch = int(info['nchan']) + f.seek(0, os.SEEK_END) + n_samples = f.tell() + dtype_bytes = _fmt_byte_dict[fmt] + offsets = None + n_samples = n_samples // (dtype_bytes * n_data_ch) + + raw_extras = dict( + offsets=offsets, fmt=fmt, order=order, n_samples=n_samples) + super(RawBrainVision, self).__init__( + info, last_samps=[n_samples - 1], filenames=[data_fname], + orig_format=fmt, preload=preload, verbose=verbose, + raw_extras=[raw_extras], orig_units=orig_units) + + self.set_montage(montage) + + settings, cfg, cinfo, _ = _aux_vhdr_info(vhdr_fname) + split_settings = settings.splitlines() + self.impedances = _parse_impedance(split_settings, + self.info['meas_date']) + + # Get annotations from vmrk file + annots = read_annotations(mrk_fname, info['sfreq']) + self.set_annotations(annots) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + # read data + n_data_ch = self._raw_extras[fi]['orig_nchan'] + fmt = self._raw_extras[fi]['fmt'] + if self._raw_extras[fi]['order'] == 'C': + _read_segments_c(self, data, idx, fi, start, stop, cals, mult) + elif isinstance(fmt, str): + dtype = _fmt_dtype_dict[fmt] + _read_segments_file(self, data, idx, fi, start, stop, cals, mult, + dtype=dtype, n_channels=n_data_ch) + else: + offsets = self._raw_extras[fi]['offsets'] + with open(self._filenames[fi], 'rb') as fid: + fid.seek(offsets[start]) + block = np.empty((n_data_ch, stop - start)) + for ii in range(stop - start): + line = fid.readline().decode('ASCII') + line = line.strip() + + # Not sure why we special-handle the "," character here, + # but let's just keep this for historical and backward- + # compat reasons + if (isinstance(fmt, dict) and + 'decimalsymbol' in fmt and + fmt['decimalsymbol'] != '.'): + line = line.replace(',', '.') + + if ' ' in line: + line_data = line.split() + elif ',' in line: + # likely exported from BrainVision Analyzer? + line_data = line.split(',') + else: + raise RuntimeError( + 'Unknown BrainVision data format encountered. ' + 'Please contact the MNE-Python developers.' 
+ ) + + block[:n_data_ch, ii] = [float(part) for part in line_data] + _mult_cal_one(data, block, idx, cals, mult) + + +def _read_segments_c(raw, data, idx, fi, start, stop, cals, mult): + """Read chunk of vectorized raw data.""" + n_samples = raw._raw_extras[fi]['n_samples'] + fmt = raw._raw_extras[fi]['fmt'] + dtype = _fmt_dtype_dict[fmt] + n_bytes = _fmt_byte_dict[fmt] + n_channels = raw._raw_extras[fi]['orig_nchan'] + block = np.zeros((n_channels, stop - start)) + with open(raw._filenames[fi], 'rb', buffering=0) as fid: + ids = np.arange(idx.start, idx.stop) if isinstance(idx, slice) else idx + for ch_id in ids: + fid.seek(start * n_bytes + ch_id * n_bytes * n_samples) + block[ch_id] = np.fromfile(fid, dtype, stop - start) + _mult_cal_one(data, block, idx, cals, mult) + + +def _read_vmrk(fname): + """Read annotations from a vmrk file. + + Parameters + ---------- + fname : str + vmrk file to be read. + + Returns + ------- + onset : array, shape (n_annots,) + The onsets in samples. + duration : array, shape (n_annots,) + The durations in samples. + description : array, shape (n_annots,) + The description of each annotation. + date_str : str + The recording time as a string. Defaults to empty string if no + recording time is found. + """ + # read vmrk file + with open(fname, 'rb') as fid: + txt = fid.read() + + # we don't actually need to know the coding for the header line. + # the characters in it all belong to ASCII and are thus the + # same in Latin-1 and UTF-8 + header = txt.decode('ascii', 'ignore').split('\n')[0].strip() + _check_bv_version(header, 'marker') + + # although the markers themselves are guaranteed to be ASCII (they + # consist of numbers and a few reserved words), we should still + # decode the file properly here because other (currently unused) + # blocks, such as the one specifying the data filename, are not + # guaranteed to be ASCII. + + try: + # if there is an explicit codepage set, use it + # we pretend like it's ascii when searching for the codepage + cp_setting = re.search('Codepage=(.+)', + txt.decode('ascii', 'ignore'), + re.IGNORECASE | re.MULTILINE) + codepage = 'utf-8' + if cp_setting: + codepage = cp_setting.group(1).strip() + # BrainAmp Recorder also uses the ANSI codepage, which raises a + # LookupError in Python; decode it as cp1252, the codec Python + # uses for ANSI + if codepage == 'ANSI': + codepage = 'cp1252' + txt = txt.decode(codepage) + except UnicodeDecodeError: + # if UTF-8 (new standard) or explicit codepage setting fails, + # fall back to Latin-1, which is the Windows default and implicit + # standard in older recordings + txt = txt.decode('latin-1') + + # extract Marker Infos block + m = re.search(r"\[Marker Infos\]", txt, re.IGNORECASE) + if not m: + return np.array(list()), np.array(list()), np.array(list()), '' + + mk_txt = txt[m.end():] + m = re.search(r"^\[.*\]$", mk_txt) + if m: + mk_txt = mk_txt[:m.start()] + + # extract event information + items = re.findall(r"^Mk\d+=(.*)", mk_txt, re.MULTILINE) + onset, duration, description = list(), list(), list() + date_str = '' + for info in items: + info_data = info.split(',') + mtype, mdesc, this_onset, this_duration = info_data[:4] + # commas in mtype and mdesc are escaped as "\1"; convert them back + mtype = mtype.replace(r'\1', ',') + mdesc = mdesc.replace(r'\1', ',') + if date_str == '' and len(info_data) == 6 and mtype == 'New Segment': + # to handle the origin of time and handle the presence of multiple + # New Segment annotations.
We only keep the first one that is + # different from an empty string for date_str. + date_str = info_data[-1] + + this_duration = (int(this_duration) + if this_duration.isdigit() else 0) + duration.append(this_duration) + onset.append(int(this_onset) - 1) # BV is 1-indexed, not 0-indexed + description.append(mtype + '/' + mdesc) + + return np.array(onset), np.array(duration), np.array(description), date_str + + +def _read_annotations_brainvision(fname, sfreq='auto'): + """Create Annotations from BrainVision vmrk. + + This function reads a .vmrk file and makes an + :class:`mne.Annotations` object. + + Parameters + ---------- + fname : str | object + The path to the .vmrk file. + sfreq : float | 'auto' + The sampling frequency in the file. It's necessary + as Annotations are expressed in seconds and vmrk + files are in samples. If set to 'auto' then + the sfreq is taken from the .vhdr file that + has the same name (without file extension). So + data.vmrk looks for sfreq in data.vhdr. + + Returns + ------- + annotations : instance of Annotations + The annotations present in the file. + """ + onset, duration, description, date_str = _read_vmrk(fname) + orig_time = _str_to_meas_date(date_str) + + if sfreq == 'auto': + vhdr_fname = op.splitext(fname)[0] + '.vhdr' + logger.info("Finding 'sfreq' from header file: %s" % vhdr_fname) + _, _, _, info = _aux_vhdr_info(vhdr_fname) + sfreq = info['sfreq'] + + onset = np.array(onset, dtype=float) / sfreq + duration = np.array(duration, dtype=float) / sfreq + annotations = Annotations(onset=onset, duration=duration, + description=description, + orig_time=orig_time) + return annotations + + +def _check_bv_version(header, kind): + """Check the header version.""" + _data_err = """\ + MNE-Python currently only supports %s versions 1.0 and 2.0, got unparsable\ + %r. Contact MNE-Python developers for support.""" + # optional space, optional Core, Version/Header, optional comma, 1/2 + _data_re = r'Brain ?Vision( Core)? Data Exchange %s File,? Version %s\.0' + + assert kind in ('header', 'marker') + + if header == '': + warn(f'Missing header in {kind} file.') + for version in range(1, 3): + this_re = _data_re % (kind.capitalize(), version) + if re.search(this_re, header) is not None: + return version + else: + warn(_data_err % (kind, header)) + + +_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C') +_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single') +_fmt_byte_dict = dict(short=2, int=4, single=4) +_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4') [...] + if len(to_misc) > 0: + misc += to_misc + warn('No coordinate information found for channels {}. ' + 'Setting channel types to misc. To avoid this warning, set ' + 'channel types explicitly.'.format(to_misc)) + + if np.isnan(cals).any(): + raise RuntimeError('Missing channel units') + + # Attempts to extract filtering info from header. If not found, both are + # set to zero.
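# Toy illustration of the Mk-line parsing in _read_vmrk above (the marker
# text here is synthetic): descriptions become "type/description", onsets
# shift from BrainVision's 1-based samples to 0-based, and escaped commas
# (r'\1') are restored.
import re

mk_txt = ("Mk1=New Segment,,1,1,0,20131113161403794232\n"
          "Mk2=Stimulus,S 11,487,1,0\n")
for info in re.findall(r"^Mk\d+=(.*)", mk_txt, re.MULTILINE):
    mtype, mdesc, this_onset, _ = info.split(',')[:4]
    mtype = mtype.replace(r'\1', ',')
    mdesc = mdesc.replace(r'\1', ',')
    print(mtype + '/' + mdesc, int(this_onset) - 1)
# New Segment/ 0
# Stimulus/S 11 486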
+ settings = settings.splitlines() + idx = None + + if 'Channels' in settings: + idx = settings.index('Channels') + settings = settings[idx + 1:] + hp_col, lp_col = 4, 5 + for idx, setting in enumerate(settings): + if re.match(r'#\s+Name', setting): + break + else: + idx = None + + # If software filters are active, then they override the hardware setup + # But we still want to be able to double check the channel names + # for alignment purposes, we keep track of the hardware setting idx + idx_amp = idx + filter_list_has_ch_name = True + + if 'S o f t w a r e F i l t e r s' in settings: + idx = settings.index('S o f t w a r e F i l t e r s') + for idx, setting in enumerate(settings[idx + 1:], idx + 1): + if re.match(r'#\s+Low Cutoff', setting): + hp_col, lp_col = 1, 2 + filter_list_has_ch_name = False + warn('Online software filter detected. Using software ' + 'filter settings and ignoring hardware values') + break + else: + idx = idx_amp + + if idx: + lowpass = [] + highpass = [] + + # for newer BV files, the unit is specified for every channel + # separated by a single space, while for older files, the unit is + # specified in the column headers + divider = r'\s+' + if 'Resolution / Unit' in settings[idx]: + shift = 1 # shift for unit + else: + shift = 0 + + # Extract filter units and convert from seconds to Hz if necessary. + # this cannot be done as post-processing as the inverse t-f + # relationship means that the min/max comparisons don't make sense + # unless we know the units. + # + # For reasoning about the s to Hz conversion, see this reference: + # `Ebersole, J. S., & Pedley, T. A. (Eds.). (2003). + # Current practice of clinical electroencephalography. + # Lippincott Williams & Wilkins.`, page 40-41 + header = re.split(r'\s\s+', settings[idx]) + hp_s = '[s]' in header[hp_col] + lp_s = '[s]' in header[lp_col] + + for i, ch in enumerate(ch_names, 1): + # double check alignment with channel by using the hw settings + if idx == idx_amp: + line_amp = settings[idx + i] + else: + line_amp = settings[idx_amp + i] + assert line_amp.find(ch) > -1 + + # Correct shift for channel names with spaces + # Header already gives 1 therefore has to be subtracted + if filter_list_has_ch_name: + ch_name_parts = re.split(divider, ch) + real_shift = shift + len(ch_name_parts) - 1 + else: + real_shift = shift + + line = re.split(divider, settings[idx + i]) + highpass.append(line[hp_col + real_shift]) + lowpass.append(line[lp_col + real_shift]) + + if len(highpass) == 0: + pass + elif len(set(highpass)) == 1: + if highpass[0] in ('NaN', 'Off'): + pass # Placeholder for future use. Highpass set in _empty_info + elif highpass[0] == 'DC': + info['highpass'] = 0. + else: + info['highpass'] = float(highpass[0]) + if hp_s: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info['highpass'] = 1. / (2 * np.pi * info['highpass']) + + else: + heterogeneous_hp_filter = True + if hp_s: + # We convert channels with disabled filters to having + # highpass relaxed / no filters + highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') + else np.Inf for filt in highpass] + info['highpass'] = np.max(np.array(highpass, dtype=np.float64)) + # Coveniently enough 1 / np.Inf = 0.0, so this works for + # DC / no highpass filter + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info['highpass'] = 1. / (2 * np.pi * info['highpass']) + + # not exactly the cleanest use of FP, but this makes us + # more conservative in *not* warning. 
+ if info['highpass'] == 0.0 and len(set(highpass)) == 1: + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_hp_filter = False + else: + highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') + else 0.0 for filt in highpass] + info['highpass'] = np.min(np.array(highpass, dtype=np.float64)) + if info['highpass'] == 0.0 and len(set(highpass)) == 1: + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_hp_filter = False + + if heterogeneous_hp_filter: + warn('Channels contain different highpass filters. ' + 'Lowest (weakest) filter setting (%0.2f Hz) ' + 'will be stored.' % info['highpass']) + + if len(lowpass) == 0: + pass + elif len(set(lowpass)) == 1: + if lowpass[0] in ('NaN', 'Off'): + pass # Placeholder for future use. Lowpass set in _empty_info + else: + info['lowpass'] = float(lowpass[0]) + if lp_s: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info['lowpass'] = 1. / (2 * np.pi * info['lowpass']) + + else: + heterogeneous_lp_filter = True + if lp_s: + # We convert channels with disabled filters to having + # infinitely relaxed / no filters + lowpass = [float(filt) if filt not in ('NaN', 'Off') + else 0.0 for filt in lowpass] + info['lowpass'] = np.min(np.array(lowpass, dtype=np.float64)) + try: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info['lowpass'] = 1. / (2 * np.pi * info['lowpass']) + + except ZeroDivisionError: + if len(set(lowpass)) == 1: + # No lowpass actually set for the weakest setting + # so we set lowpass to the Nyquist frequency + info['lowpass'] = info['sfreq'] / 2. + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_lp_filter = False + else: + # no lowpass filter is the weakest filter, + # but it wasn't the only filter + pass + else: + # We convert channels with disabled filters to having + # infinitely relaxed / no filters + lowpass = [float(filt) if filt not in ('NaN', 'Off') + else np.Inf for filt in lowpass] + info['lowpass'] = np.max(np.array(lowpass, dtype=np.float64)) + + if np.isinf(info['lowpass']): + # No lowpass actually set for the weakest setting + # so we set lowpass to the Nyquist frequency + info['lowpass'] = info['sfreq'] / 2. + if len(set(lowpass)) == 1: + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_lp_filter = False + + if heterogeneous_lp_filter: + # this isn't clean FP, but then again, we only want to provide + # the Nyquist hint when the lowpass filter was actually + # calculated from dividing the sampling frequency by 2, so the + # exact/direct comparison (instead of tolerance) makes sense + if info['lowpass'] == info['sfreq'] / 2.0: + nyquist = ', Nyquist limit' + else: + nyquist = "" + warn('Channels contain different lowpass filters. ' + 'Highest (weakest) filter setting (%0.2f Hz%s) ' + 'will be stored.' 
% (info['lowpass'], nyquist)) + + # Creates a list of dicts of eeg channels for raw.info + logger.info('Setting channel info structure...') + info['chs'] = [] + for idx, ch_name in enumerate(ch_names): + if ch_name in eog or idx in eog or idx - nchan in eog: + kind = FIFF.FIFFV_EOG_CH + coil_type = FIFF.FIFFV_COIL_NONE + unit = FIFF.FIFF_UNIT_V + elif ch_name in misc or idx in misc or idx - nchan in misc: + kind = FIFF.FIFFV_MISC_CH + coil_type = FIFF.FIFFV_COIL_NONE + if ch_name in misc_chs: + unit = misc_chs[ch_name] + else: + unit = FIFF.FIFF_UNIT_NONE + elif ch_name == 'STI 014': + kind = FIFF.FIFFV_STIM_CH + coil_type = FIFF.FIFFV_COIL_NONE + unit = FIFF.FIFF_UNIT_NONE + else: + kind = FIFF.FIFFV_EEG_CH + coil_type = FIFF.FIFFV_COIL_EEG + unit = FIFF.FIFF_UNIT_V + info['chs'].append(dict( + ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1, + scanno=idx + 1, cal=cals[idx], range=ranges[idx], + loc=np.full(12, np.nan), + unit=unit, unit_mul=FIFF.FIFF_UNITM_NONE, + coord_frame=FIFF.FIFFV_COORD_HEAD)) + + info._unlocked = False + info._update_redundant() + return (info, data_fname, fmt, order, n_samples, mrk_fname, montage, + orig_units) + + +@fill_doc +def read_raw_brainvision(vhdr_fname, + eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto', + scale=1., preload=False, verbose=None): + """Reader for Brain Vision EEG file. + + Parameters + ---------- + vhdr_fname : str + Path to the EEG header file. + eog : list or tuple of str + Names of channels or list of indices that should be designated + EOG channels. Values should correspond to the vhdr file + Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. + misc : list or tuple of str | 'auto' + Names of channels or list of indices that should be designated + MISC channels. Values should correspond to the electrodes + in the vhdr file. If 'auto', units in vhdr file are used for inferring + misc channels. Default is ``'auto'``. + scale : float + The scaling factor for EEG data. Unless specified otherwise by + header file, units are in microvolts. Default scale factor is 1. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawBrainVision + A Raw object containing BrainVision data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
+ """ + return RawBrainVision(vhdr_fname=vhdr_fname, eog=eog, + misc=misc, scale=scale, preload=preload, + verbose=verbose) + + +_BV_EVENT_IO_OFFSETS = {'Event/': 0, 'Stimulus/S': 0, 'Response/R': 1000, + 'Optic/O': 2000} +_OTHER_ACCEPTED_MARKERS = { + 'New Segment/': 99999, 'SyncStatus/Sync On': 99998 +} +_OTHER_OFFSET = 10001 # where to start "unknown" event_ids + + +class _BVEventParser(_DefaultEventParser): + """Parse standard brainvision events, accounting for non-standard ones.""" + + def __call__(self, description): + """Parse BrainVision event codes (like `Stimulus/S 11`) to ints.""" + offsets = _BV_EVENT_IO_OFFSETS + + maybe_digit = description[-3:].strip() + kind = description[:-3] + if maybe_digit.isdigit() and kind in offsets: + code = int(maybe_digit) + offsets[kind] + elif description in _OTHER_ACCEPTED_MARKERS: + code = _OTHER_ACCEPTED_MARKERS[description] + else: + code = (super(_BVEventParser, self) + .__call__(description, offset=_OTHER_OFFSET)) + return code + + +def _check_bv_annot(descriptions): + markers_basename = set([dd.rstrip('0123456789 ') for dd in descriptions]) + bv_markers = (set(_BV_EVENT_IO_OFFSETS.keys()) + .union(set(_OTHER_ACCEPTED_MARKERS.keys()))) + return len(markers_basename - bv_markers) == 0 + + +def _parse_impedance(settings, recording_date=None): + """Parse impedances from the header file. + + Parameters + ---------- + settings : list + The header settings lines fom the VHDR file. + recording_date : datetime.datetime | None + The date of the recording as extracted from the VMRK file. + + Returns + ------- + impedances : dict + A dictionary of all electrodes and their impedances. + """ + ranges = _parse_impedance_ranges(settings) + impedance_setting_lines = [i for i in settings if + i.startswith('Impedance [') and + i.endswith(' :')] + impedances = dict() + if len(impedance_setting_lines) > 0: + idx = settings.index(impedance_setting_lines[0]) + impedance_setting = impedance_setting_lines[0].split() + impedance_unit = impedance_setting[1].lstrip('[').rstrip(']') + impedance_time = None + + # If we have a recording date, we can update it with the time of + # impedance measurement + if recording_date is not None: + meas_time = [int(i) for i in impedance_setting[3].split(':')] + impedance_time = recording_date.replace(hour=meas_time[0], + minute=meas_time[1], + second=meas_time[2], + microsecond=0) + for setting in settings[idx + 1:]: + # Parse channel impedances until we find a line that doesn't start + # with a channel name and optional +/- polarity for passive elecs + match = re.match(r'[ a-zA-Z0-9_+-]+:', setting) + if match: + channel_name = match.group().rstrip(':') + channel_imp_line = setting.split() + imp_as_number = re.findall(r"[-+]?\d*\.\d+|\d+", + channel_imp_line[-1]) + channel_impedance = dict( + imp=float(imp_as_number[0]) if imp_as_number else np.nan, + imp_unit=impedance_unit, + ) + if impedance_time is not None: + channel_impedance.update({'imp_meas_time': impedance_time}) + + if channel_name == 'Ref' and 'Reference' in ranges: + channel_impedance.update(ranges['Reference']) + elif channel_name == 'Gnd' and 'Ground' in ranges: + channel_impedance.update(ranges['Ground']) + elif 'Data' in ranges: + channel_impedance.update(ranges['Data']) + impedances[channel_name] = channel_impedance + else: + break + return impedances + + +def _parse_impedance_ranges(settings): + """Parse the selected electrode impedance ranges from the header. + + Parameters + ---------- + settings : list + The header settings lines fom the VHDR file. 
+ + Returns + ------- + electrode_imp_ranges : dict + A dictionary of impedance ranges for each type of electrode. + """ + impedance_ranges = [item for item in settings if + "Selected Impedance Measurement Range" in item] + electrode_imp_ranges = dict() + if impedance_ranges: + if len(impedance_ranges) == 1: + img_range = impedance_ranges[0].split() + for electrode_type in ['Data', 'Reference', 'Ground']: + electrode_imp_ranges[electrode_type] = { + "imp_lower_bound": float(img_range[-4]), + "imp_upper_bound": float(img_range[-2]), + "imp_range_unit": img_range[-1] + } + else: + for electrode_range in impedance_ranges: + electrode_range = electrode_range.split() + electrode_imp_ranges[electrode_range[0]] = { + "imp_lower_bound": float(electrode_range[6]), + "imp_upper_bound": float(electrode_range[8]), + "imp_range_unit": electrode_range[9] + } + return electrode_imp_ranges diff --git a/python/libs/mne/io/brainvision/tests/__init__.py b/python/libs/mne/io/brainvision/tests/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/python/libs/mne/io/brainvision/tests/__init__.py @@ -0,0 +1 @@ + diff --git a/python/libs/mne/io/brainvision/tests/test_brainvision.py b/python/libs/mne/io/brainvision/tests/test_brainvision.py new file mode 100644 index 0000000..22a9409 --- /dev/null +++ b/python/libs/mne/io/brainvision/tests/test_brainvision.py @@ -0,0 +1,794 @@ +# -*- coding: utf-8 -*- +"""Test reading of BrainVision format.""" +# Author: Teon Brooks +# Stefan Appelhoff +# +# License: BSD-3-Clause +import os.path as op +import re +import shutil + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, assert_equal) +import pytest + +import datetime +from mne.utils import _stamp_to_dt, object_diff +from mne import pick_types, read_annotations, concatenate_raws +from mne.io.constants import FIFF +from mne.io import read_raw_fif, read_raw_brainvision +from mne.io.tests.test_raw import _test_raw_reader +from mne.datasets import testing +from mne.annotations import events_from_annotations + +data_dir = op.join(op.dirname(__file__), 'data') +vhdr_path = op.join(data_dir, 'test.vhdr') +vmrk_path = op.join(data_dir, 'test.vmrk') +eeg_path = op.join(data_dir, 'test.eeg') + +vhdr_partially_disabled_hw_filter_path = op.join(data_dir, + 'test_partially_disabled' + '_hw_filter.vhdr') + +vhdr_old_path = op.join( + data_dir, 'test_old_layout_latin1_software_filter.vhdr') +vhdr_old_longname_path = op.join( + data_dir, 'test_old_layout_latin1_software_filter_longname.vhdr') + +vhdr_v2_path = op.join(data_dir, 'testv2.vhdr') + +vhdr_highpass_path = op.join(data_dir, 'test_highpass.vhdr') +vhdr_mixed_highpass_path = op.join(data_dir, 'test_mixed_highpass.vhdr') +vhdr_highpass_hz_path = op.join(data_dir, 'test_highpass_hz.vhdr') +vhdr_mixed_highpass_hz_path = op.join(data_dir, 'test_mixed_highpass_hz.vhdr') + +# Not a typo: we can reuse the highpass file for the lowpass (Hz) test +vhdr_lowpass_path = op.join(data_dir, 'test_highpass.vhdr') +vhdr_mixed_lowpass_path = op.join(data_dir, 'test_mixed_lowpass.vhdr') +vhdr_lowpass_s_path = op.join(data_dir, 'test_lowpass_s.vhdr') +vhdr_mixed_lowpass_s_path = op.join(data_dir, 'test_mixed_lowpass_s.vhdr') + +# VHDR exported with neuroone +data_path = testing.data_path(download=False) +neuroone_vhdr = op.join(data_path, 'Brainvision', 'test_NO.vhdr') + +# Test for nanovolts as unit +vhdr_units_path = op.join(data_dir, 'test_units.vhdr') + +# Test bad date +vhdr_bad_date = op.join(data_dir, 
'test_bad_date.vhdr') + +eeg_bin = op.join(data_dir, 'test_bin_raw.fif') +eog = ['HL', 'HR', 'Vb'] + +# XXX: BUG we cannot parse test.hpts FastSCAN file to create a DigMontage +# (plus I've removed montage from all the read_raw_brainvision and nothing +# broke, so we were not testing that set_montage in brainvision was +# working) +# This should be amended in its own PR. +montage = op.join(data_dir, 'test.hpts') + + +def test_orig_units(recwarn): + """Test exposure of original channel units.""" + raw = read_raw_brainvision(vhdr_path) + orig_units = raw._orig_units + assert len(orig_units) == 32 + assert orig_units['FP1'] == 'µV' + + # no unit specified in the vhdr, ensure we default to µV here + assert orig_units['FP2'] == 'µV' + assert orig_units['F3'] == 'µV' + + assert sum([v == 'µV' for v in orig_units.values()]) == 26 + + assert orig_units['CP5'] == 'n/a' # originally BS, not a valid unit + assert orig_units['CP6'] == 'µS' + assert orig_units['HL'] == 'n/a' # originally ARU, not a valid unit + assert orig_units['HR'] == 'n/a' # originally uS ... + assert orig_units['Vb'] == 'S' + assert orig_units['ReRef'] == 'C' + + +DATE_TEST_CASES = np.array([ + ('Mk1=New Segment,,1,1,0,20131113161403794232\n', # content + [1384359243, 794232], # meas_date internal representation + '2013-11-13 16:14:03 UTC'), # meas_date representation + + (('Mk1=New Segment,,1,1,0,20070716122240937454\n' + 'Mk2=New Segment,,2,1,0,20070716122240937455\n'), + [1184588560, 937454], + '2007-07-16 12:22:40 UTC'), + + ('Mk1=New Segment,,1,1,0,\nMk2=New Segment,,2,1,0,20070716122240937454\n', + [1184588560, 937454], + '2007-07-16 12:22:40 UTC'), + + ('Mk1=STATUS,,1,1,0\n', None, 'unspecified'), + ('Mk1=New Segment,,1,1,0,\n', None, 'unspecified'), + ('Mk1=New Segment,,1,1,0\n', None, 'unspecified'), + ('Mk1=New Segment,,1,1,0,00000000000304125000', None, 'unspecified'), + +], dtype=np.dtype({ + 'names': ['content', 'meas_date', 'meas_date_repr'], + 'formats': [object, object, 'U22'] +})) + + +@pytest.fixture(scope='session') +def _mocked_meas_date_data(tmp_path_factory): + """Prepare files for mocked_meas_date_file fixture.""" + # Prepare the files + tmp_path = str(tmp_path_factory.mktemp('brainvision_mocked_meas_date')) + vhdr_fname, vmrk_fname, eeg_fname = [ + op.join(tmp_path, op.basename(ff)) + for ff in [vhdr_path, vmrk_path, eeg_path] + ] + for orig, dest in zip([vhdr_path, eeg_path], [vhdr_fname, eeg_fname]): + shutil.copyfile(orig, dest) + + # Get the marker information + with open(vmrk_path, 'r') as fin: + lines = fin.readlines() + + return vhdr_fname, vmrk_fname, lines + + +@pytest.fixture(scope='session', params=[tt for tt in DATE_TEST_CASES]) +def mocked_meas_date_file(_mocked_meas_date_data, request): + """Prepare a generator for use in test_meas_date.""" + MEAS_DATE_LINE = 11 # see test.vmrk file + vhdr_fname, vmrk_fname, lines = _mocked_meas_date_data + + lines[MEAS_DATE_LINE] = request.param['content'] + with open(vmrk_fname, 'w') as fout: + fout.writelines(lines) + meas_date = request.param['meas_date'] + if meas_date is not None: + meas_date = _stamp_to_dt(meas_date) + + yield vhdr_fname, meas_date, request.param['meas_date_repr'] + + +def test_meas_date(mocked_meas_date_file): + """Test successful extraction of measurement date.""" + vhdr_f, expected_meas, expected_meas_repr = mocked_meas_date_file + raw = read_raw_brainvision(vhdr_f) + assert expected_meas_repr in repr(raw.info) + if expected_meas is None: + assert raw.info['meas_date'] is None + else: + assert raw.info['meas_date'] == expected_meas + + +def
test_vhdr_codepage_ansi(tmp_path): + """Test BV reading with ANSI codepage.""" + raw_init = read_raw_brainvision(vhdr_path) + data_expected, times_expected = raw_init[:] + tempdir = str(tmp_path) + ansi_vhdr_path = op.join(tempdir, op.split(vhdr_path)[-1]) + ansi_vmrk_path = op.join(tempdir, op.split(vmrk_path)[-1]) + ansi_eeg_path = op.join(tempdir, op.split(eeg_path)[-1]) + # copy data file + shutil.copy(eeg_path, ansi_eeg_path) + # modify header file + with open(ansi_vhdr_path, 'wb') as fout: + with open(vhdr_path, 'rb') as fin: + for line in fin: + # Common Infos section + if line.startswith(b'Codepage'): + line = b'Codepage=ANSI\n' + fout.write(line) + # modify marker file + with open(ansi_vmrk_path, 'wb') as fout: + with open(vmrk_path, 'rb') as fin: + for line in fin: + # Common Infos section + if line.startswith(b'Codepage'): + line = b'Codepage=ANSI\n' + fout.write(line) + + raw = read_raw_brainvision(ansi_vhdr_path) + data_new, times_new = raw[:] + + assert_equal(raw_init.ch_names, raw.ch_names) + assert_allclose(data_new, data_expected, atol=1e-15) + assert_allclose(times_new, times_expected, atol=1e-15) + + +@pytest.mark.parametrize('header', [ + b'BrainVision Data Exchange %s File Version 1.0\n', + # 2.0, space, core, comma + b'Brain Vision Core Data Exchange %s File, Version 2.0\n', + # unsupported version + b'Brain Vision Core Data Exchange %s File, Version 3.0\n', + # missing header + b'\n', +]) +def test_vhdr_versions(tmp_path, header): + """Test BV reading with different header variants.""" + raw_init = read_raw_brainvision(vhdr_path) + data_expected, times_expected = raw_init[:] + use_vhdr_path = op.join(tmp_path, op.split(vhdr_path)[-1]) + use_vmrk_path = op.join(tmp_path, op.split(vmrk_path)[-1]) + use_eeg_path = op.join(tmp_path, op.split(eeg_path)[-1]) + shutil.copy(eeg_path, use_eeg_path) + with open(use_vhdr_path, 'wb') as fout: + with open(vhdr_path, 'rb') as fin: + for line in fin: + # Common Infos section + if line.startswith(b'Brain'): + if header != b'\n': + line = header % b'Header' + else: + line = header + fout.write(line) + with open(use_vmrk_path, 'wb') as fout: + with open(vmrk_path, 'rb') as fin: + for line in fin: + # Common Infos section + if line.startswith(b'Brain'): + if header != b'\n': + line = header % b'Marker' + else: + line = header + fout.write(line) + + if (b'3.0' in header): # unsupported version + with pytest.warns(RuntimeWarning, match=r'3\.0.*Contact MNE-Python'): + read_raw_brainvision(use_vhdr_path) + return + elif header == b'\n': # no version header + with pytest.warns(RuntimeWarning, match='Missing header'): + read_raw_brainvision(use_vhdr_path) + return + else: + raw = read_raw_brainvision(use_vhdr_path) + data_new, _ = raw[:] + assert_allclose(data_new, data_expected, atol=1e-15) + + +@pytest.mark.parametrize('data_sep', (b' ', b',', b'+')) +def test_ascii(tmp_path, data_sep): + """Test ASCII BV reading.""" + raw = read_raw_brainvision(vhdr_path) + ascii_vhdr_path = op.join(tmp_path, op.split(vhdr_path)[-1]) + # copy marker file + shutil.copy(vhdr_path.replace('.vhdr', '.vmrk'), + ascii_vhdr_path.replace('.vhdr', '.vmrk')) + # modify header file + skipping = False + with open(ascii_vhdr_path, 'wb') as fout: + with open(vhdr_path, 'rb') as fin: + for line in fin: + # Common Infos section + if line.startswith(b'DataFormat'): + line = b'DataFormat=ASCII\n' + elif line.startswith(b'DataFile='): + line = b'DataFile=test.dat\n' + # Replace the "'Binary Infos'" section + elif line.startswith(b'[Binary Infos]'): + skipping = True + 
fout.write(b'[ASCII Infos]\nDecimalSymbol=.\nSkipLines=1\n' + b'SkipColumns=0\n\n') + elif skipping and line.startswith(b'['): + skipping = False + if not skipping: + fout.write(line) + # create the .dat file + data, times = raw[:] + with open(ascii_vhdr_path.replace('.vhdr', '.dat'), 'wb') as fid: + fid.write(data_sep.join(ch_name.encode('ASCII') + for ch_name in raw.ch_names) + b'\n') + fid.write(b'\n'.join(b' '.join(b'%.3f' % dd for dd in d) + for d in data.T / raw._cals)) + + if data_sep == b';': + with pytest.raises(RuntimeError, match='Unknown.*data format'): + read_raw_brainvision(ascii_vhdr_path) + return + + raw = read_raw_brainvision(ascii_vhdr_path) + data_new, times_new = raw[:] + assert_allclose(data_new, data, atol=1e-15) + assert_allclose(times_new, times) + + +def test_ch_names_comma(tmp_path): + """Test that channel names containing commas are properly read.""" + # commas in BV are encoded as \1 + replace_dict = { + r"^Ch4=F4,": r"Ch4=F4\\1foo,", + r"^4\s\s\s\s\sF4": "4 F4,foo ", + } + + # Copy existing vhdr file to tmp_path and manipulate to contain + # a channel with comma + for src, dest in zip((vhdr_path, vmrk_path, eeg_path), + ('test.vhdr', 'test.vmrk', 'test.eeg')): + shutil.copyfile(src, tmp_path / dest) + + comma_vhdr = tmp_path / 'test.vhdr' + with open(comma_vhdr, 'r') as fin: + lines = fin.readlines() + + new_lines = [] + nperformed_replacements = 0 + for line in lines: + for to_replace, replacement in replace_dict.items(): + match = re.search(to_replace, line) + if match is not None: + new = re.sub(to_replace, replacement, line) + new_lines.append(new) + nperformed_replacements += 1 + break + else: + new_lines.append(line) + assert nperformed_replacements == len(replace_dict) + + with open(comma_vhdr, 'w') as fout: + fout.writelines(new_lines) + + # Read the line containing a "comma channel name" + raw = read_raw_brainvision(comma_vhdr) + assert "F4,foo" in raw.ch_names + + +def test_brainvision_data_highpass_filters(): + """Test reading raw Brain Vision files with amplifier filter settings.""" + # Homogeneous highpass in seconds (default measurement unit) + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_highpass_path, eog=eog + ) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 250.) + + # Heterogeneous highpass in seconds (default measurement unit) + with pytest.warns(RuntimeWarning, match='different .*pass filters') as w: + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_path, + eog=eog) + + lowpass_warning = ['different lowpass filters' in str(ww.message) + for ww in w] + highpass_warning = ['different highpass filters' in str(ww.message) + for ww in w] + + expected_warnings = zip(lowpass_warning, highpass_warning) + + assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 250.) + + # Homogeneous highpass in Hertz + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_highpass_hz_path, + eog=eog) + + assert_equal(raw.info['highpass'], 10.) + assert_equal(raw.info['lowpass'], 250.) 
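# Quick check of the time-constant conversion asserted above: the reader
# maps a highpass given in seconds to Hz via f_c = 1 / (2 * pi * tau), so
# the 10 s setting in the test header corresponds to roughly 0.0159 Hz.
import numpy as np

tau = 10.0  # seconds, as stored in the vhdr
print(1. / (2 * np.pi * tau))  # 0.015915...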
+ + # Heterogeneous highpass in Hertz + with pytest.warns(RuntimeWarning, match='different .*pass filters') as w: + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_hz_path, + eog=eog) + + trigger_warning = ['will be dropped' in str(ww.message) + for ww in w] + lowpass_warning = ['different lowpass filters' in str(ww.message) + for ww in w] + highpass_warning = ['different highpass filters' in str(ww.message) + for ww in w] + + expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning) + + assert (all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings)) + + assert_equal(raw.info['highpass'], 5.) + assert_equal(raw.info['lowpass'], 250.) + + +def test_brainvision_data_lowpass_filters(): + """Test files with amplifier LP filter settings.""" + # Homogeneous lowpass in Hertz (default measurement unit) + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_lowpass_path, eog=eog + ) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 250.) + + # Heterogeneous lowpass in Hertz (default measurement unit) + with pytest.warns(RuntimeWarning) as w: # event parsing + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_path, eog=eog + ) + + lowpass_warning = ['different lowpass filters' in str(ww.message) + for ww in w] + highpass_warning = ['different highpass filters' in str(ww.message) + for ww in w] + + expected_warnings = zip(lowpass_warning, highpass_warning) + + assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 250.) + + # Homogeneous lowpass in seconds + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_lowpass_s_path, eog=eog + ) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 1. / (2 * np.pi * 0.004)) + + # Heterogeneous lowpass in seconds + with pytest.warns(RuntimeWarning) as w: # filter settings + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_s_path, eog=eog + ) + + lowpass_warning = ['different lowpass filters' in str(ww.message) + for ww in w] + highpass_warning = ['different highpass filters' in str(ww.message) + for ww in w] + + expected_warnings = zip(lowpass_warning, highpass_warning) + + assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) + assert_equal(raw.info['lowpass'], 1. / (2 * np.pi * 0.004)) + + +def test_brainvision_data_partially_disabled_hw_filters(): + """Test heterogeneous filter settings including non-numeric values.""" + with pytest.warns(RuntimeWarning) as w: # event parsing + raw = _test_raw_reader( + read_raw_brainvision, + vhdr_fname=vhdr_partially_disabled_hw_filter_path, eog=eog + ) + + trigger_warning = ['will be dropped' in str(ww.message) + for ww in w] + lowpass_warning = ['different lowpass filters' in str(ww.message) + for ww in w] + highpass_warning = ['different highpass filters' in str(ww.message) + for ww in w] + + expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning) + + assert (all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings)) + + assert_equal(raw.info['highpass'], 0.) + assert_equal(raw.info['lowpass'], 500.) 
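# Sketch of the "weakest filter wins" policy exercised above, following the
# reader's conventions (all values here synthetic): a disabled highpass
# counts as 0 Hz (DC), a disabled lowpass as infinite, with a fallback to
# the Nyquist frequency.
import numpy as np

sfreq = 1000.
highpass = [0.1, 0.]       # second channel unfiltered ('DC')
lowpass = [250., np.inf]   # second channel unfiltered ('Off')
stored_hp = min(highpass)  # lowest cutoff = weakest highpass
stored_lp = max(lowpass)   # highest cutoff = weakest lowpass
if np.isinf(stored_lp):
    stored_lp = sfreq / 2.  # no effective lowpass: report Nyquist
print(stored_hp, stored_lp)  # 0.0 500.0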
+ + +def test_brainvision_data_software_filters_latin1_global_units(): + """Test reading raw Brain Vision files.""" + with pytest.warns(RuntimeWarning, match='software filter'): + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_old_path, + eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), misc=("A2",)) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 0.9)) + assert_equal(raw.info['lowpass'], 50.) + + # test sensor name with spaces (#9299) + with pytest.warns(RuntimeWarning, match='software filter'): + raw = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_old_longname_path, + eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), misc=("A2",)) + + assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 0.9)) + assert_equal(raw.info['lowpass'], 50.) + + +def test_brainvision_data(): + """Test reading raw Brain Vision files.""" + pytest.raises(IOError, read_raw_brainvision, vmrk_path) + pytest.raises(ValueError, read_raw_brainvision, vhdr_path, + preload=True, scale="foo") + + raw_py = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_path, eog=eog, misc='auto' + ) + + assert ('RawBrainVision' in repr(raw_py)) + + assert_equal(raw_py.info['highpass'], 0.) + assert_equal(raw_py.info['lowpass'], 250.) + + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + data_py, times_py = raw_py[picks] + + # compare with a file that was generated using MNE-C + raw_bin = read_raw_fif(eeg_bin, preload=True) + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + data_bin, times_bin = raw_bin[picks] + + assert_array_almost_equal(data_py, data_bin) + assert_array_almost_equal(times_py, times_bin) + + # Make sure EOG channels are marked correctly + for ch in raw_py.info['chs']: + if ch['ch_name'] in eog: + assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH) + elif ch['ch_name'] == 'STI 014': + assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH) + elif ch['ch_name'] in ('CP5', 'CP6'): + assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH) + assert_equal(ch['unit'], FIFF.FIFF_UNIT_NONE) + elif ch['ch_name'] == 'ReRef': + assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH) + assert_equal(ch['unit'], FIFF.FIFF_UNIT_CEL) + elif ch['ch_name'] in raw_py.info['ch_names']: + assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH) + assert_equal(ch['unit'], FIFF.FIFF_UNIT_V) + else: + raise RuntimeError("Unknown Channel: %s" % ch['ch_name']) + + # test loading v2 + read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True, + verbose='error') + # test different units with alternative header file + raw_units = _test_raw_reader( + read_raw_brainvision, vhdr_fname=vhdr_units_path, eog=eog, misc='auto' + ) + assert_equal(raw_units.info['chs'][0]['ch_name'], 'FP1') + assert_equal(raw_units.info['chs'][0]['kind'], FIFF.FIFFV_EEG_CH) + data_units, _ = raw_units[0] + assert_array_almost_equal(data_py[0, :], data_units.squeeze()) + + assert_equal(raw_units.info['chs'][1]['ch_name'], 'FP2') + assert_equal(raw_units.info['chs'][1]['kind'], FIFF.FIFFV_EEG_CH) + data_units, _ = raw_units[1] + assert_array_almost_equal(data_py[1, :], data_units.squeeze()) + + assert_equal(raw_units.info['chs'][2]['ch_name'], 'F3') + assert_equal(raw_units.info['chs'][2]['kind'], FIFF.FIFFV_EEG_CH) + data_units, _ = raw_units[2] + assert_array_almost_equal(data_py[2, :], data_units.squeeze()) + + +def test_brainvision_vectorized_data(): + """Test reading BrainVision data files with vectorized data.""" + with pytest.warns(RuntimeWarning, match='software filter'): + raw = read_raw_brainvision(vhdr_old_path, preload=True) + + 
assert_array_equal(raw._data.shape, (29, 251)) + + first_two_samples_all_chs = np.array([[+5.22000008e-06, +5.10000000e-06], + [+2.10000000e-06, +2.27000008e-06], + [+1.15000000e-06, +1.33000002e-06], + [+4.00000000e-07, +4.00000000e-07], + [-3.02999992e-06, -2.82000008e-06], + [+2.71000004e-06, +2.45000000e-06], + [+2.41000004e-06, +2.36000004e-06], + [+1.01999998e-06, +1.18000002e-06], + [-1.33999996e-06, -1.25000000e-06], + [-2.60000000e-06, -2.46000004e-06], + [+6.80000019e-07, +8.00000000e-07], + [+1.48000002e-06, +1.48999996e-06], + [+1.61000004e-06, +1.51000004e-06], + [+7.19999981e-07, +8.60000038e-07], + [-3.00000000e-07, -4.00000006e-08], + [-1.20000005e-07, +6.00000024e-08], + [+8.19999981e-07, +9.89999962e-07], + [+1.13000002e-06, +1.28000002e-06], + [+1.08000002e-06, +1.33999996e-06], + [+2.20000005e-07, +5.69999981e-07], + [-4.09999990e-07, +4.00000006e-08], + [+5.19999981e-07, +9.39999962e-07], + [+1.01000004e-06, +1.51999998e-06], + [+1.01000004e-06, +1.55000000e-06], + [-1.43000002e-06, -1.13999996e-06], + [+3.65000000e-06, +3.65999985e-06], + [+4.15999985e-06, +3.79000015e-06], + [+9.26999969e-06, +8.95999985e-06], + [-7.35999985e-06, -7.18000031e-06], + ]) + + assert_array_almost_equal(raw._data[:, :2], first_two_samples_all_chs) + + +def test_coodinates_extraction(): + """Test reading of [Coordinates] section if present.""" + # vhdr 2 has a Coordinates section + with pytest.warns(RuntimeWarning, match='coordinate information'): + raw = read_raw_brainvision(vhdr_v2_path) + + # Basic check of extracted coordinates + assert raw.info['dig'] is not None + diglist = raw.info['dig'] + coords = np.array([dig['r'] for dig in diglist]) + EXPECTED_SHAPE = ( + len(raw.ch_names) - 4, # HL, HR, Vb, ReRef are not set in dig + 3, + ) + assert coords.shape == EXPECTED_SHAPE + + # Make sure the scaling seems right + # a coordinate more than 20cm away from origin is implausible + assert coords.max() < 0.2 + + # vhdr 1 does not have a Coordinates section + raw2 = read_raw_brainvision(vhdr_path) + assert raw2.info['dig'] is None + + +@testing.requires_testing_data +def test_brainvision_neuroone_export(): + """Test Brainvision file exported with neuroone system.""" + raw = read_raw_brainvision(neuroone_vhdr, verbose='error') + assert raw.info['meas_date'] is None + assert len(raw.info['chs']) == 65 + assert raw.info['sfreq'] == 5000. 
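# Standalone demonstration of the VECTORIZED ('C' order) layout handled by
# _read_segments_c earlier in this patch (all data synthetic): channels are
# stored contiguously, so a chunk is read per channel by seeking to
# start * n_bytes + ch_id * n_bytes * n_samples.
import io

import numpy as np

n_channels, n_samples = 3, 5
dtype = np.dtype('<f4')
data = np.arange(n_channels * n_samples, dtype=dtype)
data = data.reshape(n_channels, n_samples)
fid = io.BytesIO(data.tobytes())  # channel-major bytes, like a 'C' file
start, stop = 1, 4
block = np.empty((n_channels, stop - start), dtype)
for ch_id in range(n_channels):
    fid.seek(start * dtype.itemsize + ch_id * dtype.itemsize * n_samples)
    block[ch_id] = np.frombuffer(
        fid.read((stop - start) * dtype.itemsize), dtype)
assert np.array_equal(block, data[:, start:stop])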
+ + +@testing.requires_testing_data +def test_read_vmrk_annotations(tmp_path): + """Test load brainvision annotations.""" + sfreq = 1000.0 + + # Test vmrk file without annotations + # delete=False is for Windows compatibility + with open(vmrk_path) as myfile: + head = [next(myfile) for x in range(6)] + fname = tmp_path / 'temp.vmrk' + with open(str(fname), 'w') as temp: + for item in head: + temp.write(item) + read_annotations(fname, sfreq=sfreq) + + +@testing.requires_testing_data +def test_read_vhdr_annotations_and_events(tmp_path): + """Test load brainvision annotations and parse them to events.""" + # First we add a custom event that contains a comma in its description + for src, dest in zip((vhdr_path, vmrk_path, eeg_path), + ('test.vhdr', 'test.vmrk', 'test.eeg')): + shutil.copyfile(src, tmp_path / dest) + + # Commas are encoded as "\1" + with open(tmp_path / 'test.vmrk', 'a') as fout: + fout.write(r"Mk15=Comma\1Type,CommaValue\11,7800,1,0\n") + + sfreq = 1000.0 + expected_orig_time = _stamp_to_dt((1384359243, 794232)) + expected_onset_latency = np.array( + [0, 486., 496., 1769., 1779., 3252., 3262., 4935., 4945., 5999., 6619., + 6629., 7629., 7699., 7799.] + ) + expected_annot_description = [ + 'New Segment/', 'Stimulus/S253', 'Stimulus/S255', 'Event/254', + 'Stimulus/S255', 'Event/254', 'Stimulus/S255', 'Stimulus/S253', + 'Stimulus/S255', 'Response/R255', 'Event/254', 'Stimulus/S255', + 'SyncStatus/Sync On', 'Optic/O 1', 'Comma,Type/CommaValue,1' + ] + expected_events = np.stack([ + expected_onset_latency, + np.zeros_like(expected_onset_latency), + [99999, 253, 255, 254, 255, 254, 255, 253, 255, 1255, 254, 255, 99998, + 2001, 10001], + ]).astype('int64').T + expected_event_id = {'New Segment/': 99999, 'Stimulus/S253': 253, + 'Stimulus/S255': 255, 'Event/254': 254, + 'Response/R255': 1255, 'SyncStatus/Sync On': 99998, + 'Optic/O 1': 2001, 'Comma,Type/CommaValue,1': 10001} + + raw = read_raw_brainvision(tmp_path / 'test.vhdr', eog=eog) + + # validate annotations + assert raw.annotations.orig_time == expected_orig_time + assert_allclose(raw.annotations.onset, expected_onset_latency / sfreq) + assert_array_equal(raw.annotations.description, expected_annot_description) + + # validate event extraction + events, event_id = events_from_annotations(raw) + assert_array_equal(events, expected_events) + assert event_id == expected_event_id + + # validate that None gives us a sorted list + expected_none_event_id = {desc: idx + 1 for idx, desc in enumerate(sorted( + event_id.keys()))} + events, event_id = events_from_annotations(raw, event_id=None) + assert event_id == expected_none_event_id + + # Add some custom ones, plus a 2-digit one + s_10 = 'Stimulus/S 10' + raw.annotations.append([1, 2, 3], 10, ['ZZZ', s_10, 'YYY']) + # others starting at 10001 ... 
+ # we already have "Comma,Type/CommaValue,1" as 10001 + expected_event_id.update(YYY=10002, ZZZ=10003) + expected_event_id[s_10] = 10 + _, event_id = events_from_annotations(raw) + assert event_id == expected_event_id + + # Concatenating two shouldn't change the resulting event_id + # (BAD and EDGE should be ignored) + with pytest.warns(RuntimeWarning, match='expanding outside'): + raw_concat = concatenate_raws([raw.copy(), raw.copy()]) + _, event_id = events_from_annotations(raw_concat) + assert event_id == expected_event_id + + +@testing.requires_testing_data +def test_automatic_vmrk_sfreq_recovery(): + """Test proper sfreq inference by checking the onsets.""" + assert_array_equal(read_annotations(vmrk_path, sfreq='auto'), + read_annotations(vmrk_path, sfreq=1000.0)) + + +@testing.requires_testing_data +def test_event_id_stability_when_save_and_fif_reload(tmp_path): + """Test load events from brainvision annotations when read_raw_fif.""" + fname = tmp_path / 'bv-raw.fif' + raw = read_raw_brainvision(vhdr_path, eog=eog) + original_events, original_event_id = events_from_annotations(raw) + + raw.save(fname) + raw = read_raw_fif(fname) + events, event_id = events_from_annotations(raw) + + assert event_id == original_event_id + assert_array_equal(events, original_events) + + +def test_parse_impedance(): + """Test case for parsing the impedances from header.""" + expected_imp_meas_time = datetime.datetime(2013, 11, 13, 16, 12, 27, + tzinfo=datetime.timezone.utc) + expected_imp_unit = 'kOhm' + expected_electrodes = [ + 'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', + 'F8', 'P7', 'P8', 'Fz', 'FCz', 'Cz', 'CPz', 'Pz', 'POz', 'FC1', 'FC2', + 'CP1', 'CP2', 'FC5', 'FC6', 'CP5', 'CP6', 'HL', 'HR', 'Vb', 'ReRef', + 'Ref', 'Gnd' + ] + n_electrodes = len(expected_electrodes) + expected_imps = [np.nan] * (n_electrodes - 2) + [0., 4.] + expected_imp_lower_bound = 0. + expected_imp_upper_bound = [100.] * (n_electrodes - 2) + [10., 10.] 
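# Small sketch of the impedance-line parsing that produces the values
# checked above (the header lines here are made up): channel entries match
# "<name>:", the trailing token is searched for a number, and non-numeric
# entries become NaN.
import re

import numpy as np

for setting in ['Fp1: 4', 'Gnd: 2.5', 'F7: Out of Range!']:
    match = re.match(r'[ a-zA-Z0-9_+-]+:', setting)
    channel_name = match.group().rstrip(':')
    imp = re.findall(r"[-+]?\d*\.\d+|\d+", setting.split()[-1])
    print(channel_name, float(imp[0]) if imp else np.nan)
# Fp1 4.0 / Gnd 2.5 / F7 nan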
+ + expected_impedances = {elec: { + 'imp': expected_imps[i], + 'imp_unit': expected_imp_unit, + 'imp_meas_time': expected_imp_meas_time, + 'imp_lower_bound': expected_imp_lower_bound, + 'imp_upper_bound': expected_imp_upper_bound[i], + 'imp_range_unit': expected_imp_unit, + } for i, elec in enumerate(expected_electrodes)} + + raw = read_raw_brainvision(vhdr_path, eog=eog) + assert object_diff(expected_impedances, raw.impedances) == '' + + # Test "Impedances Imported from actiCAP Control Software" + expected_imp_meas_time = expected_imp_meas_time.replace(hour=10, + minute=17, + second=2) + tmpidx = expected_electrodes.index('CP6') + expected_electrodes = expected_electrodes[:tmpidx] + [ + 'CP 6', 'ECG+', 'ECG-', 'HEOG+', 'HEOG-', 'VEOG+', 'VEOG-', 'ReRef', + 'Ref', 'Gnd' + ] + n_electrodes = len(expected_electrodes) + expected_imps = [np.nan] * (n_electrodes - 9) + [ + 35., 46., 6., 8., 3., 4., 0., 8., 2.5 + ] + expected_impedances = {elec: { + 'imp': expected_imps[i], + 'imp_unit': expected_imp_unit, + 'imp_meas_time': expected_imp_meas_time, + } for i, elec in enumerate(expected_electrodes)} + + with pytest.warns(RuntimeWarning, match='different .*pass filters'): + raw = read_raw_brainvision(vhdr_mixed_lowpass_path, + eog=['HEOG', 'VEOG'], misc=['ECG']) + assert object_diff(expected_impedances, raw.impedances) == '' diff --git a/python/libs/mne/io/bti/__init__.py b/python/libs/mne/io/bti/__init__.py new file mode 100644 index 0000000..aeb4d18 --- /dev/null +++ b/python/libs/mne/io/bti/__init__.py @@ -0,0 +1,5 @@ +"""BTi module for conversion to FIF.""" + +# Author: Denis A. Engemann + +from .bti import read_raw_bti diff --git a/python/libs/mne/io/bti/bti.py b/python/libs/mne/io/bti/bti.py new file mode 100644 index 0000000..2a1f261 --- /dev/null +++ b/python/libs/mne/io/bti/bti.py @@ -0,0 +1,1292 @@ +# Authors: Denis A. Engemann +# Martin Luessi +# Alexandre Gramfort +# Matti Hämäläinen +# Yuval Harpaz +# Joan Massich +# Teon Brooks +# +# simplified BSD-3 license + +import os.path as op +from io import BytesIO +from itertools import count + +import numpy as np + +from ...utils import logger, verbose, _stamp_to_dt +from ...transforms import (combine_transforms, invert_transform, + Transform) +from .._digitization import _make_bti_dig_points +from ..constants import FIFF +from .. 
import BaseRaw, _coil_trans_to_loc, _loc_to_coil_trans, _empty_info +from ..utils import _mult_cal_one, read_str +from .constants import BTI +from .read import (read_int32, read_int16, read_float, read_double, + read_transform, read_char, read_int64, read_uint16, + read_uint32, read_double_matrix, read_float_matrix, + read_int16_matrix, read_dev_header) + +FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame') +FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD) + +BTI_WH2500_REF_MAG = ('MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA') +BTI_WH2500_REF_GRAD = ('GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA') + +dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8')) +DTYPES = {i: np.dtype(t) for i, t in dtypes} + + +def _instantiate_default_info_chs(): + """Populate entries in info['chs'] with default values.""" + return dict(loc=np.array([0, 0, 0, 1] * 3, dtype='f4'), + ch_name=None, + unit_mul=FIFF.FIFF_UNITM_NONE, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + coil_type=FIFF.FIFFV_COIL_NONE, + range=1.0, + unit=FIFF.FIFF_UNIT_V, + cal=1.0, + scanno=None, + kind=FIFF.FIFFV_ECG_CH, + logno=None) + + +class _bytes_io_mock_context(): + """Make a context for BytesIO.""" + + def __init__(self, target): # noqa: D102 + self.target = target + + def __enter__(self): # noqa: D105 + return self.target + + def __exit__(self, type, value, tb): # noqa: D105 + pass + + +def _bti_open(fname, *args, **kwargs): + """Handle BytesIO.""" + if isinstance(fname, str): + return open(fname, *args, **kwargs) + elif isinstance(fname, BytesIO): + return _bytes_io_mock_context(fname) + else: + raise RuntimeError('Cannot mock this.') + + +def _get_bti_dev_t(adjust=0., translation=(0.0, 0.02, 0.11)): + """Get the general Magnes3600WH to Neuromag coordinate transform. + + Parameters + ---------- + adjust : float | None + Degrees to tilt x-axis for sensor frame misalignment. + If None, no adjustment will be applied. + translation : array-like + The translation to place the origin of coordinate system + to the center of the head. + + Returns + ------- + m_nm_t : ndarray + 4 x 4 rotation, translation, scaling matrix. + """ + flip_t = np.array([[0., -1., 0.], + [1., 0., 0.], + [0., 0., 1.]]) + rad = np.deg2rad(adjust) + adjust_t = np.array([[1., 0., 0.], + [0., np.cos(rad), -np.sin(rad)], + [0., np.sin(rad), np.cos(rad)]]) + m_nm_t = np.eye(4) + m_nm_t[:3, :3] = np.dot(flip_t, adjust_t) + m_nm_t[:3, 3] = translation + return m_nm_t + + +def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')): + """Rename appropriately ordered list of channel names. 
+ + Parameters + ---------- + names : list of str + Lists of 4-D channel names in ascending order + + Returns + ------- + new : list + List of names, channel names in Neuromag style + """ + new = list() + ref_mag, ref_grad, eog, eeg, ext = [count(1) for _ in range(5)] + for i, name in enumerate(names, 1): + if name.startswith('A'): + name = 'MEG %3.3d' % i + elif name == 'RESPONSE': + name = 'STI 013' + elif name == 'TRIGGER': + name = 'STI 014' + elif any(name == k for k in eog_ch): + name = 'EOG %3.3d' % next(eog) + elif name == ecg_ch: + name = 'ECG 001' + elif name.startswith('E'): + name = 'EEG %3.3d' % next(eeg) + elif name == 'UACurrent': + name = 'UTL 001' + elif name.startswith('M'): + name = 'RFM %3.3d' % next(ref_mag) + elif name.startswith('G'): + name = 'RFG %3.3d' % next(ref_grad) + elif name.startswith('X'): + name = 'EXT %3.3d' % next(ext) + + new += [name] + + return new + + +# read the points +def _read_head_shape(fname): + """Read the head shape.""" + with _bti_open(fname, 'rb') as fid: + fid.seek(BTI.FILE_HS_N_DIGPOINTS) + _n_dig_points = read_int32(fid) + idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3) + dig_points = read_double_matrix(fid, _n_dig_points, 3) + + # reorder to lpa, rpa, nasion so = is direct. + nasion, lpa, rpa = [idx_points[_, :] for _ in [2, 0, 1]] + hpi = idx_points[3:len(idx_points), :] + + return nasion, lpa, rpa, hpi, dig_points + + +def _check_nan_dev_head_t(dev_ctf_t): + """Make sure we deal with nans.""" + has_nan = np.isnan(dev_ctf_t['trans']) + if np.any(has_nan): + logger.info('Missing values BTI dev->head transform. ' + 'Replacing with identity matrix.') + dev_ctf_t['trans'] = np.identity(4) + + +def _convert_coil_trans(coil_trans, dev_ctf_t, bti_dev_t): + """Convert the coil trans.""" + t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t, + 'ctf_head', 'meg') + t = np.dot(t['trans'], coil_trans) + return t + + +def _correct_offset(fid): + """Align fid pointer.""" + current = fid.tell() + if ((current % BTI.FILE_CURPOS) != 0): + offset = current % BTI.FILE_CURPOS + fid.seek(BTI.FILE_CURPOS - (offset), 1) + + +def _read_config(fname): + """Read BTi system config file. + + Parameters + ---------- + fname : str + The absolute path to the config file + + Returns + ------- + cfg : dict + The config blocks found. + """ + with _bti_open(fname, 'rb') as fid: + cfg = dict() + cfg['hdr'] = {'version': read_int16(fid), + 'site_name': read_str(fid, 32), + 'dap_hostname': read_str(fid, 16), + 'sys_type': read_int16(fid), + 'sys_options': read_int32(fid), + 'supply_freq': read_int16(fid), + 'total_chans': read_int16(fid), + 'system_fixed_gain': read_float(fid), + 'volts_per_bit': read_float(fid), + 'total_sensors': read_int16(fid), + 'total_user_blocks': read_int16(fid), + 'next_der_chan_no': read_int16(fid)} + + fid.seek(2, 1) + + cfg['checksum'] = read_uint32(fid) + cfg['reserved'] = read_char(fid, 32) + cfg['transforms'] = [read_transform(fid) for t in + range(cfg['hdr']['total_sensors'])] + + cfg['user_blocks'] = dict() + for block in range(cfg['hdr']['total_user_blocks']): + ub = dict() + + ub['hdr'] = {'nbytes': read_uint32(fid), + 'kind': read_str(fid, 20), + 'checksum': read_int32(fid), + 'username': read_str(fid, 32), + 'timestamp': read_uint32(fid), + 'user_space_size': read_uint32(fid), + 'reserved': read_char(fid, 32)} + + _correct_offset(fid) + start_bytes = fid.tell() + kind = ub['hdr'].pop('kind') + if not kind: # make sure reading goes right. Should never be empty + raise RuntimeError('Could not read user block. 
Probably you ' + 'acquired data using a BTi version ' + 'currently not supported. Please contact ' + 'the mne-python developers.') + dta, cfg['user_blocks'][kind] = dict(), ub + if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']: + if kind == BTI.UB_B_MAG_INFO: + dta['version'] = read_int32(fid) + fid.seek(20, 1) + dta['headers'] = list() + for hdr in range(6): + d = {'name': read_str(fid, 16), + 'transform': read_transform(fid), + 'units_per_bit': read_float(fid)} + dta['headers'] += [d] + fid.seek(20, 1) + + elif kind == BTI.UB_B_COH_POINTS: + dta['n_points'] = read_int32(fid) + dta['status'] = read_int32(fid) + dta['points'] = [] + for pnt in range(16): + d = {'pos': read_double_matrix(fid, 1, 3), + 'direction': read_double_matrix(fid, 1, 3), + 'error': read_double(fid)} + dta['points'] += [d] + + elif kind == BTI.UB_B_CCP_XFM_BLOCK: + dta['method'] = read_int32(fid) + # handle difference btw/ linux (0) and solaris (4) + size = 0 if ub['hdr']['user_space_size'] == 132 else 4 + fid.seek(size, 1) + dta['transform'] = read_transform(fid) + + elif kind == BTI.UB_B_EEG_LOCS: + dta['electrodes'] = [] + while True: + d = {'label': read_str(fid, 16), + 'location': read_double_matrix(fid, 1, 3)} + if not d['label']: + break + dta['electrodes'] += [d] + + elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER, + BTI.UB_B_WHS_SUBSYS_VER]: + dta['version'] = read_int16(fid) + dta['struct_size'] = read_int16(fid) + dta['entries'] = read_int16(fid) + + fid.seek(8, 1) + + elif kind == BTI.UB_B_WHC_CHAN_MAP: + num_channels = None + for name, data in cfg['user_blocks'].items(): + if name == BTI.UB_B_WHC_CHAN_MAP_VER: + num_channels = data['entries'] + break + + if num_channels is None: + raise ValueError('Cannot find block %s to determine ' + 'number of channels' + % BTI.UB_B_WHC_CHAN_MAP_VER) + + dta['channels'] = list() + for i in range(num_channels): + d = {'subsys_type': read_int16(fid), + 'subsys_num': read_int16(fid), + 'card_num': read_int16(fid), + 'chan_num': read_int16(fid), + 'recdspnum': read_int16(fid)} + dta['channels'] += [d] + fid.seek(8, 1) + + elif kind == BTI.UB_B_WHS_SUBSYS: + num_subsys = None + for name, data in cfg['user_blocks'].items(): + if name == BTI.UB_B_WHS_SUBSYS_VER: + num_subsys = data['entries'] + break + + if num_subsys is None: + raise ValueError('Cannot find block %s to determine' + ' number of subsystems' + % BTI.UB_B_WHS_SUBSYS_VER) + + dta['subsys'] = list() + for sub_key in range(num_subsys): + d = {'subsys_type': read_int16(fid), + 'subsys_num': read_int16(fid), + 'cards_per_sys': read_int16(fid), + 'channels_per_card': read_int16(fid), + 'card_version': read_int16(fid)} + + fid.seek(2, 1) + + d.update({'offsetdacgain': read_float(fid), + 'squid_type': read_int32(fid), + 'timesliceoffset': read_int16(fid), + 'padding': read_int16(fid), + 'volts_per_bit': read_float(fid)}) + + dta['subsys'] += [d] + + elif kind == BTI.UB_B_CH_LABELS: + dta['version'] = read_int32(fid) + dta['entries'] = read_int32(fid) + fid.seek(16, 1) + + dta['labels'] = list() + for label in range(dta['entries']): + dta['labels'] += [read_str(fid, 16)] + + elif kind == BTI.UB_B_CALIBRATION: + dta['sensor_no'] = read_int16(fid) + fid.seek(2, 1) + dta['timestamp'] = read_int32(fid) + dta['logdir'] = read_str(fid, 256) + + elif kind == BTI.UB_B_SYS_CONFIG_TIME: + # handle difference btw/ linux (256) and solaris (512) + size = 256 if ub['hdr']['user_space_size'] == 260 else 512 + dta['sysconfig_name'] = read_str(fid, size) + dta['timestamp'] = read_int32(fid) + + elif kind == BTI.UB_B_DELTA_ENABLED: + 
dta['delta_enabled'] = read_int16(fid) + + elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]: + dta['hdr'] = {'version': read_int32(fid), + 'entry_size': read_int32(fid), + 'n_entries': read_int32(fid), + 'filtername': read_str(fid, 16), + 'n_e_values': read_int32(fid), + 'reserved': read_str(fid, 28)} + + if dta['hdr']['version'] == 2: + size = 16 + dta['ch_names'] = [read_str(fid, size) for ch in + range(dta['hdr']['n_entries'])] + dta['e_ch_names'] = [read_str(fid, size) for ch in + range(dta['hdr']['n_e_values'])] + + rows = dta['hdr']['n_entries'] + cols = dta['hdr']['n_e_values'] + dta['etable'] = read_float_matrix(fid, rows, cols) + else: # handle MAGNES2500 naming scheme + dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values'] + dta['hdr']['n_e_values'] = 6 + dta['e_ch_names'] = BTI_WH2500_REF_MAG + rows = dta['hdr']['n_entries'] + cols = dta['hdr']['n_e_values'] + dta['etable'] = read_float_matrix(fid, rows, cols) + + elif any([kind == BTI.UB_B_WEIGHTS_USED, + kind[:4] == BTI.UB_B_WEIGHT_TABLE]): + dta['hdr'] = dict( + version=read_int32(fid), + n_bytes=read_uint32(fid), + n_entries=read_uint32(fid), + name=read_str(fid, 32)) + if dta['hdr']['version'] == 2: + dta['hdr'].update( + description=read_str(fid, 80), + n_anlg=read_uint32(fid), + n_dsp=read_uint32(fid), + reserved=read_str(fid, 72)) + dta['ch_names'] = [read_str(fid, 16) for ch in + range(dta['hdr']['n_entries'])] + dta['anlg_ch_names'] = [read_str(fid, 16) for ch in + range(dta['hdr']['n_anlg'])] + + dta['dsp_ch_names'] = [read_str(fid, 16) for ch in + range(dta['hdr']['n_dsp'])] + dta['dsp_wts'] = read_float_matrix( + fid, dta['hdr']['n_entries'], dta['hdr']['n_dsp']) + dta['anlg_wts'] = read_int16_matrix( + fid, dta['hdr']['n_entries'], dta['hdr']['n_anlg']) + else: # handle MAGNES2500 naming scheme + fid.seek(start_bytes + ub['hdr']['user_space_size'] - + dta['hdr']['n_bytes'] * + dta['hdr']['n_entries'], 0) + + dta['hdr']['n_dsp'] = dta['hdr']['n_bytes'] // 4 - 2 + assert (dta['hdr']['n_dsp'] == + len(BTI_WH2500_REF_MAG) + + len(BTI_WH2500_REF_GRAD)) + dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries'] + dta['hdr']['n_anlg'] = 3 + # These orders could be wrong, so don't set them + # for now + # dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3] + # dta['dsp_ch_names'] = (BTI_WH2500_REF_GRAD + + # BTI_WH2500_REF_MAG) + dta['anlg_wts'] = np.zeros( + (dta['hdr']['n_entries'], dta['hdr']['n_anlg']), + dtype='i2') + dta['dsp_wts'] = np.zeros( + (dta['hdr']['n_entries'], dta['hdr']['n_dsp']), + dtype='f4') + for n in range(dta['hdr']['n_entries']): + dta['anlg_wts'][n] = read_int16_matrix( + fid, 1, dta['hdr']['n_anlg']) + read_int16(fid) + dta['dsp_wts'][n] = read_float_matrix( + fid, 1, dta['hdr']['n_dsp']) + + elif kind == BTI.UB_B_TRIG_MASK: + dta['version'] = read_int32(fid) + dta['entries'] = read_int32(fid) + fid.seek(16, 1) + + dta['masks'] = [] + for entry in range(dta['entries']): + d = {'name': read_str(fid, 20), + 'nbits': read_uint16(fid), + 'shift': read_uint16(fid), + 'mask': read_uint32(fid)} + dta['masks'] += [d] + fid.seek(8, 1) + + else: + dta['unknown'] = {'hdr': read_char(fid, + ub['hdr']['user_space_size'])} + + n_read = fid.tell() - start_bytes + if n_read != ub['hdr']['user_space_size']: + raise RuntimeError('Internal MNE reading error, read size %d ' + '!= %d expected size for kind %s' + % (n_read, ub['hdr']['user_space_size'], + kind)) + ub.update(dta) # finally update the userblock data + _correct_offset(fid) # after reading. 
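+ # (_correct_offset re-aligns the file pointer to the next 8-byte boundary, i.e. BTI.FILE_CURPOS)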
+ + cfg['chs'] = list() + + # prepare reading channels + for channel in range(cfg['hdr']['total_chans']): + ch = {'name': read_str(fid, 16), + 'chan_no': read_int16(fid), + 'ch_type': read_uint16(fid), + 'sensor_no': read_int16(fid), + 'data': dict()} + + fid.seek(2, 1) + ch.update({'gain': read_float(fid), + 'units_per_bit': read_float(fid), + 'yaxis_label': read_str(fid, 16), + 'aar_val': read_double(fid), + 'checksum': read_int32(fid), + 'reserved': read_str(fid, 32)}) + + cfg['chs'] += [ch] + _correct_offset(fid) # before and after + dta = dict() + if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]: + dev = {'device_info': read_dev_header(fid), + 'inductance': read_float(fid), + 'padding': read_str(fid, 4), + 'transform': _correct_trans(read_transform(fid), False), + 'xform_flag': read_int16(fid), + 'total_loops': read_int16(fid)} + + fid.seek(4, 1) + dev['reserved'] = read_str(fid, 32) + dta.update({'dev': dev, 'loops': []}) + for loop in range(dev['total_loops']): + d = {'position': read_double_matrix(fid, 1, 3), + 'orientation': read_double_matrix(fid, 1, 3), + 'radius': read_double(fid), + 'wire_radius': read_double(fid), + 'turns': read_int16(fid)} + fid.seek(2, 1) + d['checksum'] = read_int32(fid) + d['reserved'] = read_str(fid, 32) + dta['loops'] += [d] + + elif ch['ch_type'] == BTI.CHTYPE_EEG: + dta = {'device_info': read_dev_header(fid), + 'impedance': read_float(fid), + 'padding': read_str(fid, 4), + 'transform': read_transform(fid), + 'reserved': read_char(fid, 32)} + + elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL: + dta = {'device_info': read_dev_header(fid), + 'user_space_size': read_int32(fid), + 'reserved': read_str(fid, 32)} + + elif ch['ch_type'] == BTI.CHTYPE_TRIGGER: + dta = {'device_info': read_dev_header(fid), + 'user_space_size': read_int32(fid)} + fid.seek(2, 1) + dta['reserved'] = read_str(fid, 32) + + elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: + dta = {'device_info': read_dev_header(fid), + 'user_space_size': read_int32(fid), + 'reserved': read_str(fid, 32)} + + elif ch['ch_type'] == BTI.CHTYPE_SHORTED: + dta = {'device_info': read_dev_header(fid), + 'reserved': read_str(fid, 32)} + + ch.update(dta) # add data collected + _correct_offset(fid) # after each reading + + return cfg + + +def _read_epoch(fid): + """Read BTi PDF epoch.""" + out = {'pts_in_epoch': read_int32(fid), + 'epoch_duration': read_float(fid), + 'expected_iti': read_float(fid), + 'actual_iti': read_float(fid), + 'total_var_events': read_int32(fid), + 'checksum': read_int32(fid), + 'epoch_timestamp': read_int32(fid)} + + fid.seek(28, 1) + + return out + + +def _read_channel(fid): + """Read BTi PDF channel.""" + out = {'chan_label': read_str(fid, 16), + 'chan_no': read_int16(fid), + 'attributes': read_int16(fid), + 'scale': read_float(fid), + 'yaxis_label': read_str(fid, 16), + 'valid_min_max': read_int16(fid)} + + fid.seek(6, 1) + out.update({'ymin': read_double(fid), + 'ymax': read_double(fid), + 'index': read_int32(fid), + 'checksum': read_int32(fid), + 'off_flag': read_str(fid, 4), + 'offset': read_float(fid)}) + + fid.seek(24, 1) + + return out + + +def _read_event(fid): + """Read BTi PDF event.""" + out = {'event_name': read_str(fid, 16), + 'start_lat': read_float(fid), + 'end_lat': read_float(fid), + 'step_size': read_float(fid), + 'fixed_event': read_int16(fid), + 'checksum': read_int32(fid)} + + fid.seek(32, 1) + _correct_offset(fid) + + return out + + +def _read_process(fid): + """Read BTi PDF process.""" + out = {'nbytes': read_int32(fid), + 'process_type': 
read_str(fid, 20), + 'checksum': read_int32(fid), + 'user': read_str(fid, 32), + 'timestamp': read_int32(fid), + 'filename': read_str(fid, 256), + 'total_steps': read_int32(fid)} + + fid.seek(32, 1) + _correct_offset(fid) + out['processing_steps'] = list() + for step in range(out['total_steps']): + this_step = {'nbytes': read_int32(fid), + 'process_type': read_str(fid, 20), + 'checksum': read_int32(fid)} + ptype = this_step['process_type'] + if ptype == BTI.PROC_DEFAULTS: + this_step['scale_option'] = read_int32(fid) + + fid.seek(4, 1) + this_step['scale'] = read_double(fid) + this_step['dtype'] = read_int32(fid) + this_step['selected'] = read_int16(fid) + this_step['color_display'] = read_int16(fid) + + fid.seek(32, 1) + elif ptype in BTI.PROC_FILTER: + this_step['freq'] = read_float(fid) + fid.seek(32, 1) + elif ptype in BTI.PROC_BPFILTER: + this_step['high_freq'] = read_float(fid) + this_step['low_freq'] = read_float(fid) + else: + jump = this_step['user_space_size'] = read_int32(fid) + fid.seek(32, 1) + fid.seek(jump, 1) + + out['processing_steps'] += [this_step] + _correct_offset(fid) + + return out + + +def _read_assoc_file(fid): + """Read BTi PDF assocfile.""" + out = {'file_id': read_int16(fid), + 'length': read_int16(fid)} + + fid.seek(32, 1) + out['checksum'] = read_int32(fid) + + return out + + +def _read_pfid_ed(fid): + """Read PDF ed file.""" + out = {'comment_size': read_int32(fid), + 'name': read_str(fid, 17)} + + fid.seek(9, 1) + out.update({'pdf_number': read_int16(fid), + 'total_events': read_int32(fid), + 'timestamp': read_int32(fid), + 'flags': read_int32(fid), + 'de_process': read_int32(fid), + 'checksum': read_int32(fid), + 'ed_id': read_int32(fid), + 'win_width': read_float(fid), + 'win_offset': read_float(fid)}) + + fid.seek(8, 1) + + return out + + +def _read_coil_def(fid): + """Read coil definition.""" + coildef = {'position': read_double_matrix(fid, 1, 3), + 'orientation': read_double_matrix(fid, 1, 3), + 'radius': read_double(fid), + 'wire_radius': read_double(fid), + 'turns': read_int16(fid)} + + fid.seek(2, 1) + coildef['checksum'] = read_int32(fid) + coildef['reserved'] = read_str(fid, 32) + + return coildef + + +def _read_ch_config(fid): + """Read BTi channel config.""" + cfg = {'name': read_str(fid, BTI.FILE_CONF_CH_NAME), + 'chan_no': read_int16(fid), + 'ch_type': read_uint16(fid), + 'sensor_no': read_int16(fid)} + + fid.seek(BTI.FILE_CONF_CH_NEXT, 1) + + cfg.update({'gain': read_float(fid), + 'units_per_bit': read_float(fid), + 'yaxis_label': read_str(fid, BTI.FILE_CONF_CH_YLABEL), + 'aar_val': read_double(fid), + 'checksum': read_int32(fid), + 'reserved': read_str(fid, BTI.FILE_CONF_CH_RESERVED)}) + + _correct_offset(fid) + + # Then the channel info + ch_type, chan = cfg['ch_type'], dict() + chan['dev'] = {'size': read_int32(fid), + 'checksum': read_int32(fid), + 'reserved': read_str(fid, 32)} + if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]: + chan['loops'] = [_read_coil_def(fid) for d in + range(chan['dev']['total_loops'])] + + elif ch_type == BTI.CHTYPE_EEG: + chan['impedance'] = read_float(fid) + chan['padding'] = read_str(fid, BTI.FILE_CONF_CH_PADDING) + chan['transform'] = read_transform(fid) + chan['reserved'] = read_char(fid, BTI.FILE_CONF_CH_RESERVED) + + elif ch_type in [BTI.CHTYPE_TRIGGER, BTI.CHTYPE_EXTERNAL, + BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: + chan['user_space_size'] = read_int32(fid) + if ch_type == BTI.CHTYPE_TRIGGER: + fid.seek(2, 1) + chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) + + elif ch_type == BTI.CHTYPE_SHORTED: +
chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) + + cfg['chan'] = chan + + _correct_offset(fid) + + return cfg + + +def _read_bti_header_pdf(pdf_fname): + """Read header from pdf file.""" + with _bti_open(pdf_fname, 'rb') as fid: + fid.seek(-8, 2) + start = fid.tell() + header_position = read_int64(fid) + check_value = header_position & BTI.FILE_MASK + + if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK): + header_position = check_value + + # Check header position for alignment issues + if ((header_position % 8) != 0): + header_position += (8 - (header_position % 8)) + + fid.seek(header_position, 0) + + # actual header starts here + info = {'version': read_int16(fid), + 'file_type': read_str(fid, 5), + 'hdr_size': start - header_position, # add for convenience + 'start': start} + + fid.seek(1, 1) + + info.update({'data_format': read_int16(fid), + 'acq_mode': read_int16(fid), + 'total_epochs': read_int32(fid), + 'input_epochs': read_int32(fid), + 'total_events': read_int32(fid), + 'total_fixed_events': read_int32(fid), + 'sample_period': read_float(fid), + 'xaxis_label': read_str(fid, 16), + 'total_processes': read_int32(fid), + 'total_chans': read_int16(fid)}) + + fid.seek(2, 1) + info.update({'checksum': read_int32(fid), + 'total_ed_classes': read_int32(fid), + 'total_associated_files': read_int16(fid), + 'last_file_index': read_int16(fid), + 'timestamp': read_int32(fid)}) + + fid.seek(20, 1) + _correct_offset(fid) + + # actual header ends here, so far seems ok. + + info['epochs'] = [_read_epoch(fid) for epoch in + range(info['total_epochs'])] + + info['chs'] = [_read_channel(fid) for ch in + range(info['total_chans'])] + + info['events'] = [_read_event(fid) for event in + range(info['total_events'])] + + info['processes'] = [_read_process(fid) for process in + range(info['total_processes'])] + + info['assocfiles'] = [_read_assoc_file(fid) for af in + range(info['total_associated_files'])] + + info['edclasses'] = [_read_pfid_ed(fid) for ed_class in + range(info['total_ed_classes'])] + + info['extra_data'] = fid.read(start - fid.tell()) + info['pdf_fname'] = pdf_fname + + info['total_slices'] = sum(e['pts_in_epoch'] for e in + info['epochs']) + + info['dtype'] = DTYPES[info['data_format']] + bps = info['dtype'].itemsize * info['total_chans'] + info['bytes_per_slice'] = bps + return info + + +def _read_bti_header(pdf_fname, config_fname, sort_by_ch_name=True): + """Read bti PDF header.""" + info = _read_bti_header_pdf(pdf_fname) if pdf_fname is not None else dict() + cfg = _read_config(config_fname) + info['bti_transform'] = cfg['transforms'] + + # augment channel list with the corresponding info from config. + # get channels from config present in PDF + chans = info.get('chs', None) + if chans is not None: + chans_cfg = [c for c in cfg['chs'] if c['chan_no'] + in [c_['chan_no'] for c_ in chans]] + + # sort chans_cfg and chans + chans = sorted(chans, key=lambda k: k['chan_no']) + chans_cfg = sorted(chans_cfg, key=lambda k: k['chan_no']) + + # check all pdf channels are present in config + match = [c['chan_no'] for c in chans_cfg] == \ + [c['chan_no'] for c in chans] + + if not match: + raise RuntimeError('Could not match raw data channels with' + ' config channels.
Some of the channels' + ' found are not described in config.') + else: + chans_cfg = cfg['chs'] + chans = [dict() for _ in chans_cfg] + + # transfer channel info from config to channel info + for ch, ch_cfg in zip(chans, chans_cfg): + ch['upb'] = ch_cfg['units_per_bit'] + ch['gain'] = ch_cfg['gain'] + ch['name'] = ch_cfg['name'] + if ch_cfg.get('dev', dict()).get('transform', None) is not None: + ch['loc'] = _coil_trans_to_loc(ch_cfg['dev']['transform']) + else: + ch['loc'] = None + if pdf_fname is not None: + if info['data_format'] <= 2: # see DTYPES, implies integer + ch['cal'] = ch['scale'] * ch['upb'] / float(ch['gain']) + else: # float + ch['cal'] = ch['scale'] * ch['gain'] + else: # if we are in this mode we don't read data, only channel info. + ch['cal'] = ch['scale'] = 1.0 # so we put a trivial default value + + if sort_by_ch_name: + by_index = [(i, d['index']) for i, d in enumerate(chans)] + by_index.sort(key=lambda c: c[1]) + by_index = [idx[0] for idx in by_index] + chs = [chans[pos] for pos in by_index] + + sort_by_name_idx = [(i, d['name']) for i, d in enumerate(chs)] + a_chs = [c for c in sort_by_name_idx if c[1].startswith('A')] + other_chs = [c for c in sort_by_name_idx if not c[1].startswith('A')] + sort_by_name_idx = sorted( + a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs) + + sort_by_name_idx = [idx[0] for idx in sort_by_name_idx] + + info['chs'] = [chans[pos] for pos in sort_by_name_idx] + info['order'] = sort_by_name_idx + else: + info['chs'] = chans + info['order'] = np.arange(len(chans)) + + # finally add some important fields from the config + info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED] + info['weights'] = cfg['user_blocks'][BTI.UB_B_WEIGHTS_USED] + + return info + + +def _correct_trans(t, check=True): + """Convert to a transformation matrix.""" + t = np.array(t, np.float64) + t[:3, :3] *= t[3, :3][:, np.newaxis] # apply scalings + t[3, :3] = 0. # remove them + if check: + assert t[3, 3] == 1. + else: + t[3, 3] = 1. + return t + + +class RawBTi(BaseRaw): + """Raw object from 4D Neuroimaging MagnesWH3600 data. + + Parameters + ---------- + pdf_fname : str + Path to the processed data file (PDF). + config_fname : str + Path to system config file. + head_shape_fname : str | None + Path to the head shape file. + rotation_x : float + Degrees to tilt x-axis for sensor frame misalignment. Ignored + if convert is True. + translation : array-like, shape (3,) + The translation to place the origin of coordinate system + to the center of the head. Ignored if convert is True. + convert : bool + Convert to Neuromag coordinates or not. + rename_channels : bool + Whether to keep original 4D channel labels or not. Defaults to True. + sort_by_ch_name : bool + Reorder channels according to channel label. 4D channels don't have + monotonically increasing numbers in their labels. Defaults to True. + ecg_ch : str | None + The 4D name of the ECG channel. If None, the channel will be treated + as regular EEG channel. + eog_ch : tuple of str | None + The 4D names of the EOG channels. If None, the channels will be treated + as regular EEG channels. + %(preload)s + + .. 
versionadded:: 0.11 + + %(verbose)s + """ + + @verbose + def __init__(self, pdf_fname, config_fname='config', + head_shape_fname='hs_file', rotation_x=0., + translation=(0.0, 0.02, 0.11), convert=True, + rename_channels=True, sort_by_ch_name=True, + ecg_ch='E31', eog_ch=('E63', 'E64'), + preload=False, verbose=None): # noqa: D102 + info, bti_info = _get_bti_info( + pdf_fname=pdf_fname, config_fname=config_fname, + head_shape_fname=head_shape_fname, rotation_x=rotation_x, + translation=translation, convert=convert, ecg_ch=ecg_ch, + rename_channels=rename_channels, + sort_by_ch_name=sort_by_ch_name, eog_ch=eog_ch) + self.bti_ch_labels = [c['chan_label'] for c in bti_info['chs']] + # make Raw repr work if we have a BytesIO as input + if isinstance(pdf_fname, BytesIO): + pdf_fname = repr(pdf_fname) + super(RawBTi, self).__init__( + info, preload, filenames=[pdf_fname], raw_extras=[bti_info], + last_samps=[bti_info['total_slices'] - 1], verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + bti_info = self._raw_extras[fi] + fname = bti_info['pdf_fname'] + dtype = bti_info['dtype'] + assert len(bti_info['chs']) == self._raw_extras[fi]['orig_nchan'] + n_channels = len(bti_info['chs']) + n_bytes = np.dtype(dtype).itemsize + data_left = (stop - start) * n_channels + read_cals = np.empty((bti_info['total_chans'],)) + for ch in bti_info['chs']: + read_cals[ch['index']] = ch['cal'] + + block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels + block_size = min(data_left, block_size) + # extract data in chunks + with _bti_open(fname, 'rb') as fid: + fid.seek(bti_info['bytes_per_slice'] * start, 0) + for sample_start in np.arange(0, data_left, + block_size) // n_channels: + count = min(block_size, data_left - sample_start * n_channels) + if isinstance(fid, BytesIO): + block = np.frombuffer(fid.getvalue(), dtype, count) + else: + block = np.fromfile(fid, dtype, count) + sample_stop = sample_start + count // n_channels + shape = (sample_stop - sample_start, bti_info['total_chans']) + block.shape = shape + data_view = data[:, sample_start:sample_stop] + one = np.empty(block.shape[::-1]) + + for ii, b_i_o in enumerate(bti_info['order']): + one[ii] = block[:, b_i_o] * read_cals[b_i_o] + _mult_cal_one(data_view, one, idx, cals, mult) + + +def _make_bti_digitization( + info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t): + with info._unlock(): + if head_shape_fname: + logger.info('... Reading digitization points from %s' % + head_shape_fname) + + nasion, lpa, rpa, hpi, dig_points = _read_head_shape( + head_shape_fname) + info['dig'], dev_head_t, ctf_head_t = _make_bti_dig_points( + nasion, lpa, rpa, hpi, dig_points, + convert, use_hpi, bti_dev_t, dev_ctf_t) + else: + logger.info('... no headshape file supplied, doing nothing.') + info['dig'] = None + dev_head_t = Transform('meg', 'head', trans=None) + ctf_head_t = Transform('ctf_head', 'head', trans=None) + + info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t, + ctf_head_t=ctf_head_t) + + return info + + +def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x, + translation, convert, ecg_ch, eog_ch, rename_channels=True, + sort_by_ch_name=True): + """Read BTI info. + + Note. This helper supports partial construction of infos when `pdf_fname` + is None. Some datasets, such as the HCP, are shipped as a large collection + of zipped files where it can be more efficient to only read the needed + information. 
In such a situation, some information can neither be accessed + directly nor guessed based on the `config`. + + These fields will thus be set to None: + - 'lowpass' + - 'highpass' + - 'sfreq' + - 'meas_date' + + """ + if pdf_fname is None: + logger.info('No pdf_fname passed, trying to construct partial info ' + 'from config') + if pdf_fname is not None and not isinstance(pdf_fname, BytesIO): + if not op.isabs(pdf_fname): + pdf_fname = op.abspath(pdf_fname) + + if not isinstance(config_fname, BytesIO): + if not op.isabs(config_fname): + config_tries = [op.abspath(config_fname), + op.abspath(op.join(op.dirname(pdf_fname), + config_fname))] + for config_try in config_tries: + if op.isfile(config_try): + config_fname = config_try + break + if not op.isfile(config_fname): + raise ValueError('Could not find the config file %s. Please check' + ' whether you are in the right directory ' + 'or pass the full name' % config_fname) + + if head_shape_fname is not None and not isinstance( + head_shape_fname, BytesIO): + orig_name = head_shape_fname + if not op.isfile(head_shape_fname): + head_shape_fname = op.join(op.dirname(pdf_fname), + head_shape_fname) + + if not op.isfile(head_shape_fname): + raise ValueError('Could not find the head_shape file "%s". ' + 'You should check whether you are in the ' + 'right directory, pass the full file name, ' + 'or pass head_shape_fname=None.' + % orig_name) + + logger.info('Reading 4D PDF file %s...' % pdf_fname) + bti_info = _read_bti_header( + pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name) + + dev_ctf_t = Transform('ctf_meg', 'ctf_head', + _correct_trans(bti_info['bti_transform'][0])) + + _check_nan_dev_head_t(dev_ctf_t) + # for old backward compatibility and external processing + rotation_x = 0. if rotation_x is None else rotation_x + bti_dev_t = _get_bti_dev_t(rotation_x, translation) if convert else None + bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t) + + use_hpi = False # hard coded, but marked as a later option. + logger.info('Creating Neuromag info structure ...') + if 'sample_period' in bti_info.keys(): + sfreq = 1. / bti_info['sample_period'] + else: + sfreq = None + + if pdf_fname is not None: + info = _empty_info(sfreq) + date = bti_info['processes'][0]['timestamp'] + info['meas_date'] = _stamp_to_dt((date, 0)) + else: # these cannot be guessed from config, see docstring + info = _empty_info(1.0) + info['sfreq'] = None + info['lowpass'] = None + info['highpass'] = None + info['meas_date'] = None + bti_info['processes'] = list() + + # browse processing info for filter specs. + hp, lp = info['highpass'], info['lowpass'] + for proc in bti_info['processes']: + if 'filt' in proc['process_type']: + for step in proc['processing_steps']: + if 'high_freq' in step: + hp, lp = step['high_freq'], step['low_freq'] + elif 'hp' in step['process_type']: + hp = step['freq'] + elif 'lp' in step['process_type']: + lp = step['freq'] + + info['highpass'] = hp + info['lowpass'] = lp + chs = [] + + # Note that 'name' and 'chan_label' are not the same. + # We want the configured label if our IO parsed it + # except for the MEG channels for which we keep the config name + bti_ch_names = list() + for ch in bti_info['chs']: + # we have always relied on 'A' as indicator of MEG data channels.
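+ # (anything not starting with 'A' falls back to its configured 'chan_label' below)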
+ ch_name = ch['name'] + if not ch_name.startswith('A'): + ch_name = ch.get('chan_label', ch_name) + bti_ch_names.append(ch_name) + + neuromag_ch_names = _rename_channels( + bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) + ch_mapping = zip(bti_ch_names, neuromag_ch_names) + + logger.info('... Setting channel info structure.') + for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): + chan_info = _instantiate_default_info_chs() + chan_info['ch_name'] = chan_neuromag if rename_channels else chan_4d + chan_info['logno'] = idx + BTI.FIFF_LOGNO + chan_info['scanno'] = idx + 1 + chan_info['cal'] = float(bti_info['chs'][idx]['scale']) + + if any(chan_4d.startswith(k) for k in ('A', 'M', 'G')): + loc = bti_info['chs'][idx]['loc'] + if loc is not None: + if convert: + if idx == 0: + logger.info('... putting coil transforms in Neuromag ' + 'coordinates') + t = _loc_to_coil_trans(bti_info['chs'][idx]['loc']) + t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) + loc = _coil_trans_to_loc(t) + chan_info['loc'] = loc + + # BTI sensors are natively stored in 4D head coords we believe + meg_frame = (FIFF.FIFFV_COORD_DEVICE if convert else + FIFF.FIFFV_MNE_COORD_4D_HEAD) + eeg_frame = (FIFF.FIFFV_COORD_HEAD if convert else + FIFF.FIFFV_MNE_COORD_4D_HEAD) + if chan_4d.startswith('A'): + chan_info['kind'] = FIFF.FIFFV_MEG_CH + chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG + chan_info['coord_frame'] = meg_frame + chan_info['unit'] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith('M'): + chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH + chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + chan_info['coord_frame'] = meg_frame + chan_info['unit'] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith('G'): + chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH + chan_info['coord_frame'] = meg_frame + chan_info['unit'] = FIFF.FIFF_UNIT_T_M + if chan_4d in ('GxxA', 'GyyA'): + chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + elif chan_4d in ('GyxA', 'GzxA', 'GzyA'): + chan_info['coil_type'] = \ + FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + + elif chan_4d.startswith('EEG'): + chan_info['kind'] = FIFF.FIFFV_EEG_CH + chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG + chan_info['coord_frame'] = eeg_frame + chan_info['unit'] = FIFF.FIFF_UNIT_V + + elif chan_4d == 'RESPONSE': + chan_info['kind'] = FIFF.FIFFV_STIM_CH + elif chan_4d == 'TRIGGER': + chan_info['kind'] = FIFF.FIFFV_STIM_CH + elif chan_4d.startswith('EOG') or chan_4d in eog_ch: + chan_info['kind'] = FIFF.FIFFV_EOG_CH + elif chan_4d == ecg_ch: + chan_info['kind'] = FIFF.FIFFV_ECG_CH + elif chan_4d.startswith('X'): + chan_info['kind'] = FIFF.FIFFV_MISC_CH + elif chan_4d == 'UACurrent': + chan_info['kind'] = FIFF.FIFFV_MISC_CH + + chs.append(chan_info) + + info['chs'] = chs + + # ### Dig stuff + info = _make_bti_digitization( + info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t) + + logger.info( + 'Currently direct inclusion of 4D weight tables is not supported.' 
+ ' For critical use cases please take into account the MNE command' + ' "mne_create_comp_data" to include weights as printed out by ' + 'the 4D "print_table" routine.') + + # check that the info is complete + info._unlocked = False + info._update_redundant() + info._check_consistency() + return info, bti_info + + +@verbose +def read_raw_bti(pdf_fname, config_fname='config', + head_shape_fname='hs_file', rotation_x=0., + translation=(0.0, 0.02, 0.11), convert=True, + rename_channels=True, sort_by_ch_name=True, + ecg_ch='E31', eog_ch=('E63', 'E64'), preload=False, + verbose=None): + """Raw object from 4D Neuroimaging MagnesWH3600 data. + + .. note:: + 1. Currently direct inclusion of reference channel weights + is not supported. Please use ``mne_create_comp_data`` to include + the weights or use the low level functions from this module to + include them by yourself. + 2. The informed guess for the 4D name is E31 for the ECG channel and + E63, E64 for the EOG channels. Please check and adjust if those + channels are present in your dataset but 'ECG 01' and 'EOG 01', + 'EOG 02' don't appear in the channel names of the raw object. + + Parameters + ---------- + pdf_fname : str + Path to the processed data file (PDF). + config_fname : str + Path to system config file. + head_shape_fname : str | None + Path to the head shape file. + rotation_x : float + Degrees to tilt x-axis for sensor frame misalignment. Ignored + if convert is True. + translation : array-like, shape (3,) + The translation to place the origin of coordinate system + to the center of the head. Ignored if convert is True. + convert : bool + Convert to Neuromag coordinates or not. + rename_channels : bool + Whether to keep original 4D channel labels or not. Defaults to True. + sort_by_ch_name : bool + Reorder channels according to channel label. 4D channels don't have + monotonically increasing numbers in their labels. Defaults to True. + ecg_ch : str | None + The 4D name of the ECG channel. If None, the channel will be treated + as regular EEG channel. + eog_ch : tuple of str | None + The 4D names of the EOG channels. If None, the channels will be treated + as regular EEG channels. + %(preload)s + + .. versionadded:: 0.11 + %(verbose)s + + Returns + ------- + raw : instance of RawBTi + A Raw object containing BTI data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods.
+ """ + return RawBTi(pdf_fname, config_fname=config_fname, + head_shape_fname=head_shape_fname, + rotation_x=rotation_x, translation=translation, + convert=convert, rename_channels=rename_channels, + sort_by_ch_name=sort_by_ch_name, ecg_ch=ecg_ch, + eog_ch=eog_ch, preload=preload, verbose=verbose) diff --git a/python/libs/mne/io/bti/constants.py b/python/libs/mne/io/bti/constants.py new file mode 100644 index 0000000..ca09e44 --- /dev/null +++ b/python/libs/mne/io/bti/constants.py @@ -0,0 +1,99 @@ +# Authors: Denis Engemann +# +# License: BSD-3-Clause + +from ...utils import BunchConst + +BTI = BunchConst() + +BTI.ELEC_STATE_NOT_COLLECTED = 0 +BTI.ELEC_STATE_COLLECTED = 1 +BTI.ELEC_STATE_SKIPPED = 2 +BTI.ELEC_STATE_NOT_APPLICABLE = 3 +# +## Byte offesets and data sizes for different files +# +BTI.FILE_MASK = 2147483647 +BTI.FILE_CURPOS = 8 +BTI.FILE_END = -8 + +BTI.FILE_HS_VERSION = 0 +BTI.FILE_HS_TIMESTAMP = 4 +BTI.FILE_HS_CHECKSUM = 8 +BTI.FILE_HS_N_DIGPOINTS = 12 +BTI.FILE_HS_N_INDEXPOINTS = 16 + +BTI.FILE_PDF_H_ENTER = 1 +BTI.FILE_PDF_H_FTYPE = 5 +BTI.FILE_PDF_H_XLABEL = 16 +BTI.FILE_PDF_H_NEXT = 2 +BTI.FILE_PDF_H_EXIT = 20 + +BTI.FILE_PDF_EPOCH_EXIT = 28 + +BTI.FILE_PDF_CH_NEXT = 6 +BTI.FILE_PDF_CH_LABELSIZE = 16 +BTI.FILE_PDF_CH_YLABEL = 16 +BTI.FILE_PDF_CH_OFF_FLAG = 16 +BTI.FILE_PDF_CH_EXIT = 12 + +BTI.FILE_PDF_EVENT_NAME = 16 +BTI.FILE_PDF_EVENT_EXIT = 32 + +BTI.FILE_PDF_PROCESS_BLOCKTYPE = 20 +BTI.FILE_PDF_PROCESS_USER = 32 +BTI.FILE_PDF_PROCESS_FNAME = 256 +BTI.FILE_PDF_PROCESS_EXIT = 32 + +BTI.FILE_PDF_ASSOC_NEXT = 32 + +BTI.FILE_PDFED_NAME = 17 +BTI.FILE_PDFED_NEXT = 9 +BTI.FILE_PDFED_EXIT = 8 + +# +## General data constants +# +BTI.DATA_N_IDX_POINTS = 5 +BTI.DATA_ROT_N_ROW = 3 +BTI.DATA_ROT_N_COL = 3 +BTI.DATA_XFM_N_COL = 4 +BTI.DATA_XFM_N_ROW = 4 +BTI.FIFF_LOGNO = 111 +# +## Channel Types +# +BTI.CHTYPE_MEG = 1 +BTI.CHTYPE_EEG = 2 +BTI.CHTYPE_REFERENCE = 3 +BTI.CHTYPE_EXTERNAL = 4 +BTI.CHTYPE_TRIGGER = 5 +BTI.CHTYPE_UTILITY = 6 +BTI.CHTYPE_DERIVED = 7 +BTI.CHTYPE_SHORTED = 8 +# +## Processes +# +BTI.PROC_DEFAULTS = 'BTi_defaults' +BTI.PROC_FILTER = 'b_filt_hp,b_filt_lp,b_filt_notch' +BTI.PROC_BPFILTER = 'b_filt_b_pass,b_filt_b_reject' +# +## User blocks +# +BTI.UB_B_MAG_INFO = 'B_Mag_Info' +BTI.UB_B_COH_POINTS = 'B_COH_Points' +BTI.UB_B_CCP_XFM_BLOCK = 'b_ccp_xfm_block' +BTI.UB_B_EEG_LOCS = 'b_eeg_elec_locs' +BTI.UB_B_WHC_CHAN_MAP_VER = 'B_WHChanMapVer' +BTI.UB_B_WHC_CHAN_MAP = 'B_WHChanMap' +BTI.UB_B_WHS_SUBSYS_VER = 'B_WHSubsysVer' # B_WHSubsysVer +BTI.UB_B_WHS_SUBSYS = 'B_WHSubsys' +BTI.UB_B_CH_LABELS = 'B_ch_labels' +BTI.UB_B_CALIBRATION = 'B_Calibration' +BTI.UB_B_SYS_CONFIG_TIME = 'B_SysConfigTime' +BTI.UB_B_DELTA_ENABLED = 'B_DELTA_ENABLED' +BTI.UB_B_E_TABLE_USED = 'B_E_table_used' +BTI.UB_B_E_TABLE = 'B_E_TABLE' +BTI.UB_B_WEIGHTS_USED = 'B_weights_used' +BTI.UB_B_TRIG_MASK = 'B_trig_mask' +BTI.UB_B_WEIGHT_TABLE = 'BWT_' diff --git a/python/libs/mne/io/bti/read.py b/python/libs/mne/io/bti/read.py new file mode 100644 index 0000000..210ff82 --- /dev/null +++ b/python/libs/mne/io/bti/read.py @@ -0,0 +1,116 @@ +# Authors: Denis A. 
Engemann +# simplified BSD-3 license + +import numpy as np + +from ..utils import read_str + + +def _unpack_matrix(fid, rows, cols, dtype, out_dtype): + """Unpack matrix.""" + dtype = np.dtype(dtype) + + string = fid.read(int(dtype.itemsize * rows * cols)) + out = np.frombuffer(string, dtype=dtype).reshape( + rows, cols).astype(out_dtype) + return out + + +def _unpack_simple(fid, dtype, out_dtype): + """Unpack a NumPy type.""" + dtype = np.dtype(dtype) + string = fid.read(dtype.itemsize) + out = np.frombuffer(string, dtype=dtype).astype(out_dtype) + + if len(out) > 0: + out = out[0] + return out + + +def read_char(fid, count=1): + """Read character from bti file.""" + return _unpack_simple(fid, '>S%s' % count, 'S') + + +def read_bool(fid): + """Read bool value from bti file.""" + return _unpack_simple(fid, '>?', bool) + + +def read_uint8(fid): + """Read unsigned 8bit integer from bti file.""" + return _unpack_simple(fid, '>u1', np.uint8) + + +def read_int8(fid): + """Read 8bit integer from bti file.""" + return _unpack_simple(fid, '>i1', np.int8) + + +def read_uint16(fid): + """Read unsigned 16bit integer from bti file.""" + return _unpack_simple(fid, '>u2', np.uint16) + + +def read_int16(fid): + """Read 16bit integer from bti file.""" + return _unpack_simple(fid, '>i2', np.int16) + + +def read_uint32(fid): + """Read unsigned 32bit integer from bti file.""" + return _unpack_simple(fid, '>u4', np.uint32) + + +def read_int32(fid): + """Read 32bit integer from bti file.""" + return _unpack_simple(fid, '>i4', np.int32) + + +def read_uint64(fid): + """Read unsigned 64bit integer from bti file.""" + return _unpack_simple(fid, '>u8', np.uint64) + + +def read_int64(fid): + """Read 64bit integer from bti file.""" + return _unpack_simple(fid, '>u8', np.int64) + + +def read_float(fid): + """Read 32bit float from bti file.""" + return _unpack_simple(fid, '>f4', np.float32) + + +def read_double(fid): + """Read 64bit float from bti file.""" + return _unpack_simple(fid, '>f8', np.float64) + + +def read_int16_matrix(fid, rows, cols): + """Read 16bit integer matrix from bti file.""" + return _unpack_matrix(fid, rows, cols, dtype='>i2', + out_dtype=np.int16) + + +def read_float_matrix(fid, rows, cols): + """Read 32bit float matrix from bti file.""" + return _unpack_matrix(fid, rows, cols, dtype='>f4', + out_dtype=np.float32) + + +def read_double_matrix(fid, rows, cols): + """Read 64bit float matrix from bti file.""" + return _unpack_matrix(fid, rows, cols, dtype='>f8', + out_dtype=np.float64) + + +def read_transform(fid): + """Read 64bit float matrix transform from bti file.""" + return read_double_matrix(fid, rows=4, cols=4) + + +def read_dev_header(x): + """Create a dev header.""" + return dict(size=read_int32(x), checksum=read_int32(x), + reserved=read_str(x, 32)) diff --git a/python/libs/mne/io/bti/tests/__init__.py b/python/libs/mne/io/bti/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/bti/tests/test_bti.py b/python/libs/mne/io/bti/tests/test_bti.py new file mode 100644 index 0000000..77ecf86 --- /dev/null +++ b/python/libs/mne/io/bti/tests/test_bti.py @@ -0,0 +1,366 @@ +# Authors: Denis Engemann +# +# License: BSD-3-Clause + +from io import BytesIO +import os +import os.path as op +from functools import reduce, partial + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, assert_equal) +import pytest + +from mne.datasets import testing +from mne.io import read_raw_fif, read_raw_bti +from 
mne.io._digitization import _make_bti_dig_points +from mne.io.bti.bti import (_read_config, + _read_bti_header, _get_bti_dev_t, + _correct_trans, _get_bti_info, + _loc_to_coil_trans, _convert_coil_trans, + _check_nan_dev_head_t, _rename_channels) +from mne.io.bti.bti import _read_head_shape +from mne.io.tests.test_raw import _test_raw_reader +from mne.io.pick import pick_info +from mne.io.constants import FIFF +from mne import pick_types +from mne.utils import assert_dig_allclose +from mne.transforms import Transform, combine_transforms, invert_transform + +base_dir = op.join(op.abspath(op.dirname(__file__)), 'data') + +archs = 'linux', 'solaris' +pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs] +config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs] +hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs] +exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a) + for a in archs] +tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif') + +fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH', + 'c,rfDC') +fname_sim = op.join(testing.data_path(download=False), 'BTi', '4Dsim', + 'c,rfDC') +fname_sim_filt = op.join(testing.data_path(download=False), 'BTi', '4Dsim', + 'c,rfDC,fn50,o') + +# the 4D exporter doesn't export all channels, so we confine our comparison +NCH = 248 + + +@testing.requires_testing_data +def test_read_2500(): + """Test reading data from 2500 system.""" + _test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None) + + +def test_read_config(): + """Test read bti config file.""" + # for config in config_fname, config_solaris_fname: + for config in config_fnames: + cfg = _read_config(config) + assert all('unknown' not in block.lower() and block != '' + for block in cfg['user_blocks']) + + +def test_crop_append(): + """Test crop and append raw.""" + raw = _test_raw_reader( + read_raw_bti, pdf_fname=pdf_fnames[0], + config_fname=config_fnames[0], head_shape_fname=hs_fnames[0]) + y, t = raw[:] + t0, t1 = 0.25 * t[-1], 0.75 * t[-1] + mask = (t0 <= t) * (t <= t1) + raw_ = raw.copy().crop(t0, t1) + y_, _ = raw_[:] + assert (y_.shape[1] == mask.sum()) + assert (y_.shape[0] == y.shape[0]) + + +def test_transforms(): + """Test transformations.""" + bti_trans = (0.0, 0.02, 0.11) + bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans)) + for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames): + raw = read_raw_bti(pdf, config, hs, preload=False) + dev_ctf_t = raw.info['dev_ctf_t'] + dev_head_t_old = raw.info['dev_head_t'] + ctf_head_t = raw.info['ctf_head_t'] + + # 1) get BTI->Neuromag + bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans)) + + # 2) get Neuromag->BTI head + t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t, + 'meg', 'ctf_head') + # 3) get Neuromag->head + dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head') + + assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans']) + + +@pytest.mark.slowtest +def test_raw(): + """Test bti conversion to Raw object.""" + for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames, + exported_fnames): + # rx = 2 if 'linux' in pdf else 0 + pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False) + pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam', + preload=False) + if op.exists(tmp_raw_fname): + os.remove(tmp_raw_fname) + ex = read_raw_fif(exported, preload=True) + ra = read_raw_bti(pdf, config, hs, preload=False) + assert ('RawBTi' in 
repr(ra)) + assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH]) + assert_array_almost_equal(ex.info['dev_head_t']['trans'], + ra.info['dev_head_t']['trans'], 7) + assert len(ex.info['dig']) in (3563, 5154) + assert_dig_allclose(ex.info, ra.info, limit=100) + coil1, coil2 = [np.concatenate([d['loc'].flatten() + for d in r_.info['chs'][:NCH]]) + for r_ in (ra, ex)] + assert_array_almost_equal(coil1, coil2, 7) + + loc1, loc2 = [np.concatenate([d['loc'].flatten() + for d in r_.info['chs'][:NCH]]) + for r_ in (ra, ex)] + assert_allclose(loc1, loc2) + + assert_allclose(ra[:NCH][0], ex[:NCH][0]) + assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]], + [c['range'] for c in ex.info['chs'][:NCH]]) + assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]], + [c['cal'] for c in ex.info['chs'][:NCH]]) + assert_array_equal(ra._cals[:NCH], ex._cals[:NCH]) + + # check our transforms + for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): + if ex.info[key] is None: + pass + else: + assert (ra.info[key] is not None) + for ent in ('to', 'from', 'trans'): + assert_allclose(ex.info[key][ent], + ra.info[key][ent]) + + ra.save(tmp_raw_fname) + re = read_raw_fif(tmp_raw_fname) + print(re) + for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): + assert (isinstance(re.info[key], dict)) + this_t = re.info[key]['trans'] + assert_equal(this_t.shape, (4, 4)) + # check that the matrix is not the identity + assert (not np.allclose(this_t, np.eye(4))) + os.remove(tmp_raw_fname) + + +def test_info_no_rename_no_reorder_no_pdf(): + """Test private renaming, reordering and partial construction option.""" + for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): + info, bti_info = _get_bti_info( + pdf_fname=pdf, config_fname=config, head_shape_fname=hs, + rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, + ecg_ch='E31', eog_ch=('E63', 'E64'), + rename_channels=False, sort_by_ch_name=False) + info2, bti_info = _get_bti_info( + pdf_fname=None, config_fname=config, head_shape_fname=hs, + rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, + ecg_ch='E31', eog_ch=('E63', 'E64'), + rename_channels=False, sort_by_ch_name=False) + + assert_equal(info['ch_names'], + [ch['ch_name'] for ch in info['chs']]) + assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5], + ['A22', 'A2', 'A104', 'A241', 'A138']) + assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:], + ['A133', 'A158', 'A44', 'A134', 'A216']) + + info = pick_info(info, pick_types(info, meg=True, stim=True, + resp=True)) + info2 = pick_info(info2, pick_types(info2, meg=True, stim=True, + resp=True)) + + assert (info['sfreq'] is not None) + assert (info['lowpass'] is not None) + assert (info['highpass'] is not None) + assert (info['meas_date'] is not None) + + assert_equal(info2['sfreq'], None) + assert_equal(info2['lowpass'], None) + assert_equal(info2['highpass'], None) + assert_equal(info2['meas_date'], None) + + assert_equal(info['ch_names'], info2['ch_names']) + assert_equal(info['ch_names'], info2['ch_names']) + for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']: + assert_array_equal(info[key]['trans'], info2[key]['trans']) + + assert_array_equal( + np.array([ch['loc'] for ch in info['chs']]), + np.array([ch['loc'] for ch in info2['chs']])) + + # just check reading data | corner case + raw1 = read_raw_bti( + pdf_fname=pdf, config_fname=config, head_shape_fname=None, + sort_by_ch_name=False, preload=True) + # just check reading data | corner case + raw2 = read_raw_bti( + pdf_fname=pdf, config_fname=config,
head_shape_fname=None, + rename_channels=False, + sort_by_ch_name=True, preload=True) + + sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels] + raw1._data = raw1._data[sort_idx] + assert_array_equal(raw1._data, raw2._data) + assert_array_equal(raw2.bti_ch_labels, raw2.ch_names) + + +def test_no_conversion(): + """Test bti no-conversion option.""" + get_info = partial( + _get_bti_info, + rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, + ecg_ch='E31', eog_ch=('E63', 'E64'), + rename_channels=False, sort_by_ch_name=False) + + for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): + raw_info, _ = get_info(pdf, config, hs, convert=False) + raw_info_con = read_raw_bti( + pdf_fname=pdf, config_fname=config, head_shape_fname=hs, + convert=True, preload=False).info + + pick_info(raw_info_con, + pick_types(raw_info_con, meg=True, ref_meg=True), + copy=False) + pick_info(raw_info, + pick_types(raw_info, meg=True, ref_meg=True), copy=False) + bti_info = _read_bti_header(pdf, config) + dev_ctf_t = _correct_trans(bti_info['bti_transform'][0]) + assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans']) + assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4)) + assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4)) + + nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) + dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points, + convert=False, use_hpi=False) + + assert_array_equal(t['trans'], np.eye(4)) + + for ii, (old, new, con) in enumerate(zip( + dig, raw_info['dig'], raw_info_con['dig'])): + assert_equal(old['ident'], new['ident']) + assert_array_equal(old['r'], new['r']) + assert (not np.allclose(old['r'], con['r'])) + + if ii > 10: + break + + ch_map = {ch['chan_label']: ch['loc'] for ch in bti_info['chs']} + + for ii, ch_label in enumerate(raw_info['ch_names']): + if not ch_label.startswith('A'): + continue + t1 = ch_map[ch_label] # correction already performed in bti_info + t2 = raw_info['chs'][ii]['loc'] + t3 = raw_info_con['chs'][ii]['loc'] + assert_allclose(t1, t2, atol=1e-15) + assert (not np.allclose(t1, t3)) + idx_a = raw_info_con['ch_names'].index('MEG 001') + idx_b = raw_info['ch_names'].index('A22') + assert_equal( + raw_info_con['chs'][idx_a]['coord_frame'], + FIFF.FIFFV_COORD_DEVICE) + assert_equal( + raw_info['chs'][idx_b]['coord_frame'], + FIFF.FIFFV_MNE_COORD_4D_HEAD) + + +def test_bytes_io(): + """Test bti bytes-io API.""" + for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): + raw = read_raw_bti(pdf, config, hs, convert=True, preload=False) + + with open(pdf, 'rb') as fid: + pdf = BytesIO(fid.read()) + with open(config, 'rb') as fid: + config = BytesIO(fid.read()) + with open(hs, 'rb') as fid: + hs = BytesIO(fid.read()) + + raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False) + repr(raw2) + assert_array_equal(raw[:][0], raw2[:][0]) + + +def test_setup_headshape(): + """Test reading bti headshape.""" + for hs in hs_fnames: + nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) + dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points) + + expected = {'kind', 'ident', 'r'} + found = set(reduce(lambda x, y: list(x) + list(y), + [d.keys() for d in dig])) + assert (not expected - found) + + +def test_nan_trans(): + """Test unlikely case that the device to head transform is empty.""" + for ii, pdf_fname in enumerate(pdf_fnames): + bti_info = _read_bti_header( + pdf_fname, config_fnames[ii], sort_by_ch_name=True) + + dev_ctf_t = Transform('ctf_meg', 'ctf_head', + 
_correct_trans(bti_info['bti_transform'][0])) + + # reading params + convert = True + rotation_x = 0. + translation = (0.0, 0.02, 0.11) + bti_dev_t = _get_bti_dev_t(rotation_x, translation) + bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t) + ecg_ch = 'E31' + eog_ch = ('E63', 'E64') + + # read parts of info to get trans + bti_ch_names = list() + for ch in bti_info['chs']: + ch_name = ch['name'] + if not ch_name.startswith('A'): + ch_name = ch.get('chan_label', ch_name) + bti_ch_names.append(ch_name) + + neuromag_ch_names = _rename_channels( + bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) + ch_mapping = zip(bti_ch_names, neuromag_ch_names) + + # add some nan in some locations! + dev_ctf_t['trans'][:, 3] = np.nan + _check_nan_dev_head_t(dev_ctf_t) + for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): + loc = bti_info['chs'][idx]['loc'] + if loc is not None: + if convert: + t = _loc_to_coil_trans(bti_info['chs'][idx]['loc']) + t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (fname_sim, fname_sim_filt)) +@pytest.mark.parametrize('preload', (True, False)) +def test_bti_ch_data(fname, preload): + """Test for gh-6048.""" + read_raw_bti(fname, preload=preload) # used to fail with ascii decode err + + +@testing.requires_testing_data +def test_bti_set_eog(): + """Check that EOG channels can be set (gh-10092).""" + raw = read_raw_bti(fname_sim, + preload=False, + eog_ch=('X65', 'X67', 'X69', 'X66', 'X68')) + assert_equal(len(pick_types(raw.info, eog=True)), 5) diff --git a/python/libs/mne/io/cnt/__init__.py b/python/libs/mne/io/cnt/__init__.py new file mode 100644 index 0000000..5021fd7 --- /dev/null +++ b/python/libs/mne/io/cnt/__init__.py @@ -0,0 +1,3 @@ +"""CNT data reader.""" + +from .cnt import read_raw_cnt diff --git a/python/libs/mne/io/cnt/_utils.py b/python/libs/mne/io/cnt/_utils.py new file mode 100644 index 0000000..19e459a --- /dev/null +++ b/python/libs/mne/io/cnt/_utils.py @@ -0,0 +1,142 @@ +# Author: Joan Massich +# +# License: BSD-3-Clause + +from struct import Struct +from collections import namedtuple +from math import modf +from datetime import datetime +from os import SEEK_END +import numpy as np + + +from ...utils import warn + + +def _read_teeg(f, teeg_offset): + """ + Read TEEG structure from an open CNT file. 
+ + # from TEEG structure in http://paulbourke.net/dataformats/eeg/ + typedef struct { + char Teeg; /* Either 1 or 2 */ + long Size; /* Total length of all the events */ + long Offset; /* Hopefully always 0 */ + } TEEG; + """ + # we use a more descriptive names based on TEEG doc comments + Teeg = namedtuple('Teeg', 'event_type total_length offset') + teeg_parser = Struct('3 range 0-15 bit coded response pad */ +# /* 4->7 values 0xd=Accept 0xc=Reject */ +# long Offset; /* file offset of event */ +# } EVENT1; + + +CNTEventType2 = namedtuple('CNTEventType2', + ('StimType KeyBoard KeyPad_Accept Offset Type ' + 'Code Latency EpochEvent Accept2 Accuracy')) +# unsigned short StimType; /* range 0-65535 */ +# unsigned char KeyBoard; /* range 0-11 corresponding to fcn keys +1 */ +# char KeyPad_Accept; /* 0->3 range 0-15 bit coded response pad */ +# /* 4->7 values 0xd=Accept 0xc=Reject */ +# long Offset; /* file offset of event */ +# short Type; +# short Code; +# float Latency; +# char EpochEvent; +# char Accept2; +# char Accuracy; + + +# needed for backward compat: EVENT type 3 has the same structure as type 2 +CNTEventType3 = namedtuple('CNTEventType3', + ('StimType KeyBoard KeyPad_Accept Offset Type ' + 'Code Latency EpochEvent Accept2 Accuracy')) + + +def _get_event_parser(event_type): + if event_type == 1: + event_maker = CNTEventType1 + struct_pattern = ' +# Joan Massich +# +# License: BSD-3-Clause +from os import path + +import numpy as np + +from ...utils import warn, fill_doc, _check_option +from ...channels.layout import _topo_to_sphere +from ..constants import FIFF +from ..utils import (_mult_cal_one, _find_channels, _create_chs, read_str) +from ..meas_info import _empty_info +from ..base import BaseRaw +from ...annotations import Annotations + + +from ._utils import (_read_teeg, _get_event_parser, _session_date_2_meas_date, + _compute_robust_event_table_position, CNTEventType3) + + +def _read_annotations_cnt(fname, data_format='int16'): + """CNT Annotation File Reader. + + This method opens the .cnt files, searches all the metadata to construct + the annotations and parses the event table. Notice that CNT files, can + point to a different file containing the events. This case when the + event table is separated from the main .cnt is not supported. + + Parameters + ---------- + fname: str + path to cnt file containing the annotations. + data_format : 'int16' | 'int32' + Defines the data format the data is read in. + + Returns + ------- + annot : instance of Annotations + The annotations. + """ + # Offsets from SETUP structure in http://paulbourke.net/dataformats/eeg/ + SETUP_NCHANNELS_OFFSET = 370 + SETUP_RATE_OFFSET = 376 + + def _translating_function(offset, n_channels, event_type, + data_format=data_format): + n_bytes = 2 if data_format == 'int16' else 4 + if event_type == CNTEventType3: + offset *= n_bytes * n_channels + event_time = offset - 900 - (75 * n_channels) + event_time //= n_channels * n_bytes + return event_time - 1 + + with open(fname, 'rb') as fid: + fid.seek(SETUP_NCHANNELS_OFFSET) + (n_channels,) = np.frombuffer(fid.read(2), dtype='`_ with + :func:`mne.channels.read_dig_dat` + - Other reader functions are listed under *See Also* at + :class:`mne.channels.DigMontage` + + Parameters + ---------- + input_fname : str + Path to the data file. + eog : list | tuple | 'auto' | 'header' + Names of channels or list of indices that should be designated + EOG channels. If 'header', VEOG and HEOG channels assigned in the file + header are used. 
If 'auto', channel names containing 'EOG' are used. + Defaults to empty tuple. + misc : list | tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + ecg : list | tuple | 'auto' + Names of channels or list of indices that should be designated + ECG channels. If 'auto', the channel names containing 'ECG' are used. + Defaults to empty tuple. + emg : list | tuple + Names of channels or list of indices that should be designated + EMG channels. If 'auto', the channel names containing 'EMG' are used. + Defaults to empty tuple. + data_format : 'auto' | 'int16' | 'int32' + Defines the data format the data is read in. If 'auto', it is + determined from the file header using ``numsamples`` field. + Defaults to 'auto'. + date_format : 'mm/dd/yy' | 'dd/mm/yy' + Format of date in the header. Defaults to 'mm/dd/yy'. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawCNT. + The raw data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + .. versionadded:: 0.12 + """ + return RawCNT(input_fname, eog=eog, misc=misc, ecg=ecg, + emg=emg, data_format=data_format, date_format=date_format, + preload=preload, verbose=verbose) + + +def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format): + """Read the cnt header.""" + data_offset = 900 # Size of the 'SETUP' header. + cnt_info = dict() + # Reading only the fields of interest. Structure of the whole header at + # http://paulbourke.net/dataformats/eeg/ + with open(input_fname, 'rb', buffering=0) as fid: + fid.seek(21) + patient_id = read_str(fid, 20) + patient_id = int(patient_id) if patient_id.isdigit() else 0 + fid.seek(121) + patient_name = read_str(fid, 20).split() + last_name = patient_name[0] if len(patient_name) > 0 else '' + first_name = patient_name[-1] if len(patient_name) > 0 else '' + fid.seek(2, 1) + sex = read_str(fid, 1) + if sex == 'M': + sex = FIFF.FIFFV_SUBJ_SEX_MALE + elif sex == 'F': + sex = FIFF.FIFFV_SUBJ_SEX_FEMALE + else: # can be 'U' + sex = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + hand = read_str(fid, 1) + if hand == 'R': + hand = FIFF.FIFFV_SUBJ_HAND_RIGHT + elif hand == 'L': + hand = FIFF.FIFFV_SUBJ_HAND_LEFT + else: # can be 'M' for mixed or 'U' + hand = None + fid.seek(205) + session_label = read_str(fid, 20) + + session_date = ('%s %s' % (read_str(fid, 10), read_str(fid, 12))) + meas_date = _session_date_2_meas_date(session_date, date_format) + + fid.seek(370) + n_channels = np.fromfile(fid, dtype='= 0] + fid.seek(438) + lowpass_toggle = np.fromfile(fid, 'i1', count=1)[0] + highpass_toggle = np.fromfile(fid, 'i1', count=1)[0] + + # Header has a field for number of samples, but it does not seem to be + # too reliable. That's why we have option for setting n_bytes manually. + fid.seek(864) + n_samples = np.fromfile(fid, dtype=' 1: + cnt_info['channel_offset'] //= n_bytes + else: + cnt_info['channel_offset'] = 1 + + ch_names, cals, baselines, chs, pos = ( + list(), list(), list(), list(), list() + ) + + bads = list() + for ch_idx in range(n_channels): # ELECTLOC fields + fid.seek(data_offset + 75 * ch_idx) + ch_name = read_str(fid, 10) + ch_names.append(ch_name) + fid.seek(data_offset + 75 * ch_idx + 4) + if np.fromfile(fid, dtype='u1', count=1)[0]: + bads.append(ch_name) + fid.seek(data_offset + 75 * ch_idx + 19) + xy = np.fromfile(fid, dtype='f4', count=2) + xy[1] *= -1 # invert y-axis + pos.append(xy) + fid.seek(data_offset + 75 * ch_idx + 47) + # Baselines are subtracted before scaling the data. 
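Two pieces of the CNT reader in this hunk benefit from a standalone illustration: the fixed-offset header reads in `_read_annotations_cnt` / `_get_cnt_info`, and the event-table offset arithmetic in `_translating_function`. The sketch below reproduces both in isolation; the little-endian dtype is an assumption where the patch text is truncated, and the values in the example call are hypothetical.

```python
import numpy as np

# Fixed-offset header read, as in _read_annotations_cnt above; the
# offsets come from the Neuroscan SETUP struct (900-byte header) and
# the '<u2' dtype is an assumption where the patch text is truncated.
def read_cnt_nchannels_sfreq(fname):
    with open(fname, 'rb') as fid:
        fid.seek(370)                                  # SETUP_NCHANNELS_OFFSET
        (n_channels,) = np.frombuffer(fid.read(2), dtype='<u2')
        fid.seek(376)                                  # SETUP_RATE_OFFSET
        (sfreq,) = np.frombuffer(fid.read(2), dtype='<u2')
    return int(n_channels), float(sfreq)


# The offset-to-sample arithmetic of _translating_function: type-3
# events store a sample count, which is scaled to bytes, stripped of
# the 900-byte SETUP header plus 75 bytes per ELECTLOC record, then
# divided back down to a 0-based sample index.
def cnt_event_offset_to_sample(offset, n_channels, data_format='int16',
                               type3=True):
    n_bytes = 2 if data_format == 'int16' else 4
    if type3:
        offset *= n_bytes * n_channels
    event_time = offset - 900 - (75 * n_channels)
    event_time //= n_channels * n_bytes
    return event_time - 1


print(cnt_event_offset_to_sample(1103, n_channels=32))  # hypothetical -> 1050
```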
+ baselines.append(np.fromfile(fid, dtype='i2', count=1)[0]) + fid.seek(data_offset + 75 * ch_idx + 59) + sensitivity = np.fromfile(fid, dtype='f4', count=1)[0] + fid.seek(data_offset + 75 * ch_idx + 71) + cal = np.fromfile(fid, dtype='f4', count=1)[0] + cals.append(cal * sensitivity * 1e-6 / 204.8) + + info = _empty_info(sfreq) + if lowpass_toggle == 1: + info['lowpass'] = highcutoff + if highpass_toggle == 1: + info['highpass'] = lowcutoff + subject_info = {'hand': hand, 'id': patient_id, 'sex': sex, + 'first_name': first_name, 'last_name': last_name} + + if eog == 'auto': + eog = _find_channels(ch_names, 'EOG') + if ecg == 'auto': + ecg = _find_channels(ch_names, 'ECG') + if emg == 'auto': + emg = _find_channels(ch_names, 'EMG') + + chs = _create_chs(ch_names, cals, FIFF.FIFFV_COIL_EEG, + FIFF.FIFFV_EEG_CH, eog, ecg, emg, misc) + eegs = [idx for idx, ch in enumerate(chs) if + ch['coil_type'] == FIFF.FIFFV_COIL_EEG] + coords = _topo_to_sphere(pos, eegs) + locs = np.full((len(chs), 12), np.nan) + locs[:, :3] = coords + for ch, loc in zip(chs, locs): + ch.update(loc=loc) + + cnt_info.update(baselines=np.array(baselines), n_samples=n_samples, + n_bytes=n_bytes) + + session_label = None if str(session_label) == '' else str(session_label) + info.update(meas_date=meas_date, + description=session_label, bads=bads, + subject_info=subject_info, chs=chs) + info._unlocked = False + info._update_redundant() + return info, cnt_info + + +@fill_doc +class RawCNT(BaseRaw): + """Raw object from Neuroscan CNT file. + + .. Note:: + The channel positions are read from the file header. Channels that are + not assigned with keywords ``eog``, ``ecg``, ``emg`` and ``misc`` are + assigned as eeg channels. All the eeg channel locations are fit to a + sphere when computing the z-coordinates for the channels. If channels + assigned as eeg channels have locations far away from the head (i.e. + x and y coordinates don't fit to a sphere), all the channel locations + will be distorted. If you are not sure that the channel locations in + the header are correct, it is probably safer to use a (standard) + montage. See :func:`mne.channels.make_standard_montage` + + Parameters + ---------- + input_fname : str + Path to the CNT file. + eog : list | tuple + Names of channels or list of indices that should be designated + EOG channels. If 'auto', the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + misc : list | tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + ecg : list | tuple + Names of channels or list of indices that should be designated + ECG channels. If 'auto', the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list | tuple + Names of channels or list of indices that should be designated + EMG channels. If 'auto', the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + data_format : 'auto' | 'int16' | 'int32' + Defines the data format the data is read in. If 'auto', it is + determined from the file header using ``numsamples`` field. + Defaults to 'auto'. + date_format : 'mm/dd/yy' | 'dd/mm/yy' + Format of date in the header. Defaults to 'mm/dd/yy'. + %(preload)s + stim_channel : bool | None + Add a stim channel from the events. Defaults to None to trigger a + future warning. + + .. 
warning:: This defaults to True in 0.18 but will change to False in + 0.19 (when no stim channel synthesis will be allowed) + and be removed in 0.20; migrate code to use + :func:`mne.events_from_annotations` instead. + + .. versionadded:: 0.18 + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + def __init__(self, input_fname, eog=(), misc=(), + ecg=(), emg=(), data_format='auto', date_format='mm/dd/yy', + preload=False, verbose=None): # noqa: D102 + + _check_option('date_format', date_format, ['mm/dd/yy', 'dd/mm/yy']) + if date_format == 'dd/mm/yy': + _date_format = '%d/%m/%y %H:%M:%S' + else: + _date_format = '%m/%d/%y %H:%M:%S' + + input_fname = path.abspath(input_fname) + info, cnt_info = _get_cnt_info(input_fname, eog, ecg, emg, misc, + data_format, _date_format) + last_samps = [cnt_info['n_samples'] - 1] + super(RawCNT, self).__init__( + info, preload, filenames=[input_fname], raw_extras=[cnt_info], + last_samps=last_samps, orig_format='int', verbose=verbose) + + data_format = 'int32' if cnt_info['n_bytes'] == 4 else 'int16' + self.set_annotations( + _read_annotations_cnt(input_fname, data_format=data_format)) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Take a chunk of raw data, multiply by mult or cals, and store.""" + n_channels = self._raw_extras[fi]['orig_nchan'] + if 'stim_channel' in self._raw_extras[fi]: + f_channels = n_channels - 1 # Stim channel already read. + stim_ch = self._raw_extras[fi]['stim_channel'] + else: + f_channels = n_channels + stim_ch = None + + channel_offset = self._raw_extras[fi]['channel_offset'] + baselines = self._raw_extras[fi]['baselines'] + n_bytes = self._raw_extras[fi]['n_bytes'] + dtype = '= (channel_offset / 2): # Extend at the end. + extra_samps += chunk_size + count = n_samps // channel_offset * chunk_size + extra_samps + n_chunks = count // chunk_size + samps = np.fromfile(fid, dtype=dtype, count=count) + samps = samps.reshape((n_chunks, f_channels, channel_offset), + order='C') + + # Intermediate shaping to chunk sizes. + block = np.zeros((n_channels, channel_offset * n_chunks)) + for set_idx, row in enumerate(samps): # Final shape. 
+ block_slice = slice(set_idx * channel_offset, + (set_idx + 1) * channel_offset) + block[:f_channels, block_slice] = row + if 'stim_channel' in self._raw_extras[fi]: + _data_start = start + sample_start + _data_stop = start + sample_stop + block[-1] = stim_ch[_data_start:_data_stop] + one[idx] = block[idx, s_offset:n_samps + s_offset] + + one[idx] -= baselines[idx][:, None] + _mult_cal_one(data[:, sample_start:sample_stop], one, idx, + cals, mult) diff --git a/python/libs/mne/io/cnt/tests/__init__.py b/python/libs/mne/io/cnt/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/cnt/tests/test_cnt.py b/python/libs/mne/io/cnt/tests/test_cnt.py new file mode 100644 index 0000000..fe2476b --- /dev/null +++ b/python/libs/mne/io/cnt/tests/test_cnt.py @@ -0,0 +1,55 @@ + +# Author: Jaakko Leppakangas +# Joan Massich +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np +from numpy.testing import assert_array_equal +import pytest + +from mne import pick_types +from mne.datasets import testing +from mne.io.tests.test_raw import _test_raw_reader +from mne.io.cnt import read_raw_cnt +from mne.annotations import read_annotations + +data_path = testing.data_path(download=False) +fname = op.join(data_path, 'CNT', 'scan41_short.cnt') + + +@testing.requires_testing_data +def test_data(): + """Test reading raw cnt files.""" + with pytest.warns(RuntimeWarning, match='number of bytes'): + raw = _test_raw_reader(read_raw_cnt, input_fname=fname, + eog='auto', misc=['NA1', 'LEFT_EAR']) + + # make sure we use annotations event if we synthesized stim + assert len(raw.annotations) == 6 + + eog_chs = pick_types(raw.info, eog=True, exclude=[]) + assert len(eog_chs) == 2 # test eog='auto' + assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR'] # test bads + + # the data has "05/10/200 17:35:31" so it is set to None + assert raw.info['meas_date'] is None + + +@testing.requires_testing_data +def test_compare_events_and_annotations(): + """Test comparing annotations and events.""" + with pytest.warns(RuntimeWarning, match='Could not parse meas date'): + raw = read_raw_cnt(fname) + events = np.array([[333, 0, 7], + [1010, 0, 7], + [1664, 0, 109], + [2324, 0, 7], + [2984, 0, 109]]) + + annot = read_annotations(fname) + assert len(annot) == 6 + assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq']) + assert 'STI 014' not in raw.info['ch_names'] diff --git a/python/libs/mne/io/compensator.py b/python/libs/mne/io/compensator.py new file mode 100644 index 0000000..220de1f --- /dev/null +++ b/python/libs/mne/io/compensator.py @@ -0,0 +1,165 @@ +import numpy as np + +from .constants import FIFF +from ..utils import fill_doc + + +def get_current_comp(info): + """Get the current compensation in effect in the data.""" + comp = None + first_comp = -1 + for k, chan in enumerate(info['chs']): + if chan['kind'] == FIFF.FIFFV_MEG_CH: + comp = int(chan['coil_type']) >> 16 + if first_comp < 0: + first_comp = comp + elif comp != first_comp: + raise ValueError('Compensation is not set equally on ' + 'all MEG channels') + return comp + + +def set_current_comp(info, comp): + """Set the current compensation in effect in the data.""" + comp_now = get_current_comp(info) + for k, chan in enumerate(info['chs']): + if chan['kind'] == FIFF.FIFFV_MEG_CH: + rem = chan['coil_type'] - (comp_now << 16) + chan['coil_type'] = int(rem + (comp << 16)) + + +def _make_compensator(info, grade): + """Auxiliary function for make_compensator.""" + for k in range(len(info['comps'])): + if 
info['comps'][k]['kind'] == grade: + this_data = info['comps'][k]['data'] + + # Create the preselector + presel = np.zeros((this_data['ncol'], info['nchan'])) + for col, col_name in enumerate(this_data['col_names']): + ind = [k for k, ch in enumerate(info['ch_names']) + if ch == col_name] + if len(ind) == 0: + raise ValueError('Channel %s is not available in ' + 'data' % col_name) + elif len(ind) > 1: + raise ValueError('Ambiguous channel %s' % col_name) + presel[col, ind[0]] = 1.0 + + # Create the postselector (zero entries for channels not found) + postsel = np.zeros((info['nchan'], this_data['nrow'])) + for c, ch_name in enumerate(info['ch_names']): + ind = [k for k, ch in enumerate(this_data['row_names']) + if ch == ch_name] + if len(ind) > 1: + raise ValueError('Ambiguous channel %s' % ch_name) + elif len(ind) == 1: + postsel[c, ind[0]] = 1.0 + # else, don't use it at all (postsel[c, ?] = 0.0) by allocation + this_comp = np.dot(postsel, np.dot(this_data['data'], presel)) + return this_comp + + raise ValueError('Desired compensation matrix (grade = %d) not' + ' found' % grade) + + +@fill_doc +def make_compensator(info, from_, to, exclude_comp_chs=False): + """Return compensation matrix eg. for CTF system. + + Create a compensation matrix to bring the data from one compensation + state to another. + + Parameters + ---------- + %(info_not_none)s + from_ : int + Compensation in the input data. + to : int + Desired compensation in the output. + exclude_comp_chs : bool + Exclude compensation channels from the output. + + Returns + ------- + comp : array | None. + The compensation matrix. Might be None if no compensation + is needed (from == to). + """ + if from_ == to: + return None + + # s_orig = s_from + C1*s_from = (I + C1)*s_from + # s_to = s_orig - C2*s_orig = (I - C2)*s_orig + # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from + if from_ != 0: + C1 = _make_compensator(info, from_) + comp_from_0 = np.linalg.inv(np.eye(info['nchan']) - C1) + if to != 0: + C2 = _make_compensator(info, to) + comp_0_to = np.eye(info['nchan']) - C2 + if from_ != 0: + if to != 0: + # This is mathematically equivalent, but has higher numerical + # error than using the inverse to always go to zero and back + # comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1) + comp = np.dot(comp_0_to, comp_from_0) + else: + comp = comp_from_0 + else: + # from == 0, to != 0 guaranteed here + comp = comp_0_to + + if exclude_comp_chs: + pick = [k for k, c in enumerate(info['chs']) + if c['kind'] != FIFF.FIFFV_REF_MEG_CH] + + if len(pick) == 0: + raise ValueError('Nothing remains after excluding the ' + 'compensation channels') + + comp = comp[pick, :] + + return comp + + +# @verbose +# def compensate_to(data, to, verbose=None): +# """ +# % +# % [newdata] = mne_compensate_to(data,to) +# % +# % Apply compensation to the data as desired +# % +# """ +# +# newdata = data.copy() +# now = get_current_comp(newdata['info']) +# +# # Are we there already? 
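`get_current_comp` and `set_current_comp` above rely on the compensation grade being packed into the upper 16 bits of each MEG channel's `coil_type`. A short sketch of that bit arithmetic (5001 is `FIFFV_COIL_CTF_GRAD`, defined in constants.py later in this patch):

```python
# The compensation grade lives in the upper 16 bits of coil_type, the
# base coil type in the lower 16 bits.
coil_type = (3 << 16) | 5001      # grade-3 compensated CTF gradiometer

grade = coil_type >> 16           # -> 3, what get_current_comp extracts
base = coil_type & 0xFFFF         # -> 5001

# Re-pack with a new grade, mirroring set_current_comp:
new_grade = 1
coil_type = (coil_type - (grade << 16)) + (new_grade << 16)
assert coil_type >> 16 == 1 and coil_type & 0xFFFF == 5001
```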
+# if now == to: +# logger.info('Data are already compensated as desired') +# +# # Make the compensator and apply it to all data sets +# comp = make_compensator(newdata['info'], now, to) +# for k in range(len(newdata['evoked'])): +# newdata['evoked'][k]['epochs'] = np.dot(comp, +# newdata['evoked'][k]['epochs']) +# +# # Update the compensation info in the channel descriptors +# newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to) +# return newdata + + +# def set_current_comp(chs, value): +# """Set the current compensation value in the channel info structures +# """ +# new_chs = chs +# +# lower_half = int('FFFF', 16) # hex2dec('FFFF') +# for k in range(len(chs)): +# if chs[k]['kind'] == FIFF.FIFFV_MEG_CH: +# coil_type = float(chs[k]['coil_type']) & lower_half +# new_chs[k]['coil_type'] = int(coil_type | (value << 16)) +# +# return new_chs diff --git a/python/libs/mne/io/constants.py b/python/libs/mne/io/constants.py new file mode 100644 index 0000000..5000ff6 --- /dev/null +++ b/python/libs/mne/io/constants.py @@ -0,0 +1,1062 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +from ..utils._bunch import BunchConstNamed + +FIFF = BunchConstNamed() + +# +# FIFF version number in use +# +FIFF.FIFFC_MAJOR_VERSION = 1 +FIFF.FIFFC_MINOR_VERSION = 4 +FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION + +# +# Blocks +# +FIFF.FIFFB_ROOT = 999 +FIFF.FIFFB_MEAS = 100 +FIFF.FIFFB_MEAS_INFO = 101 +FIFF.FIFFB_RAW_DATA = 102 +FIFF.FIFFB_PROCESSED_DATA = 103 +FIFF.FIFFB_EVOKED = 104 +FIFF.FIFFB_ASPECT = 105 +FIFF.FIFFB_SUBJECT = 106 +FIFF.FIFFB_ISOTRAK = 107 +FIFF.FIFFB_HPI_MEAS = 108 # HPI measurement +FIFF.FIFFB_HPI_RESULT = 109 # Result of a HPI fitting procedure +FIFF.FIFFB_HPI_COIL = 110 # Data acquired from one HPI coil +FIFF.FIFFB_PROJECT = 111 +FIFF.FIFFB_CONTINUOUS_DATA = 112 +FIFF.FIFFB_CH_INFO = 113 # Extra channel information +FIFF.FIFFB_VOID = 114 +FIFF.FIFFB_EVENTS = 115 +FIFF.FIFFB_INDEX = 116 +FIFF.FIFFB_DACQ_PARS = 117 +FIFF.FIFFB_REF = 118 +FIFF.FIFFB_IAS_RAW_DATA = 119 +FIFF.FIFFB_IAS_ASPECT = 120 +FIFF.FIFFB_HPI_SUBSYSTEM = 121 +# FIFF.FIFFB_PHANTOM_SUBSYSTEM = 122 +# FIFF.FIFFB_STATUS_SUBSYSTEM = 123 +FIFF.FIFFB_DEVICE = 124 +FIFF.FIFFB_HELIUM = 125 +FIFF.FIFFB_CHANNEL_INFO = 126 + +FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related +FIFF.FIFFB_BEM = 310 # Boundary-element method +FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces +FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition +FIFF.FIFFB_PROJ = 313 +FIFF.FIFFB_PROJ_ITEM = 314 +FIFF.FIFFB_MRI = 200 +FIFF.FIFFB_MRI_SET = 201 +FIFF.FIFFB_MRI_SLICE = 202 +FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices' +FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes... 
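The `FIFFC_VERSION` definition at the top of constants.py packs the major and minor FIFF versions into a single integer; unpacking is the mirror image:

```python
# FIFFC_VERSION packs major/minor into one int, as defined above.
FIFFC_VERSION = 1 << 16 | 4                 # major 1, minor 4
assert FIFFC_VERSION == 0x00010004
major, minor = FIFFC_VERSION >> 16, FIFFC_VERSION & 0xFFFF
assert (major, minor) == (1, 4)
```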
+FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data +FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region +FIFF.FIFFB_PROCESSING_HISTORY = 900 +FIFF.FIFFB_PROCESSING_RECORD = 901 + +FIFF.FIFFB_DATA_CORRECTION = 500 +FIFF.FIFFB_CHANNEL_DECOUPLER = 501 +FIFF.FIFFB_SSS_INFO = 502 +FIFF.FIFFB_SSS_CAL = 503 +FIFF.FIFFB_SSS_ST_INFO = 504 +FIFF.FIFFB_SSS_BASES = 505 +FIFF.FIFFB_IAS = 510 +# +# Of general interest +# +FIFF.FIFF_FILE_ID = 100 +FIFF.FIFF_DIR_POINTER = 101 +FIFF.FIFF_BLOCK_ID = 103 +FIFF.FIFF_BLOCK_START = 104 +FIFF.FIFF_BLOCK_END = 105 +FIFF.FIFF_FREE_LIST = 106 +FIFF.FIFF_FREE_BLOCK = 107 +FIFF.FIFF_NOP = 108 +FIFF.FIFF_PARENT_FILE_ID = 109 +FIFF.FIFF_PARENT_BLOCK_ID = 110 +FIFF.FIFF_BLOCK_NAME = 111 +FIFF.FIFF_BLOCK_VERSION = 112 +FIFF.FIFF_CREATOR = 113 # Program that created the file (string) +FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string) +FIFF.FIFF_REF_ROLE = 115 +FIFF.FIFF_REF_FILE_ID = 116 +FIFF.FIFF_REF_FILE_NUM = 117 +FIFF.FIFF_REF_FILE_NAME = 118 +# +# Megacq saves the parameters in these tags +# +FIFF.FIFF_DACQ_PARS = 150 +FIFF.FIFF_DACQ_STIM = 151 + +FIFF.FIFF_DEVICE_TYPE = 152 +FIFF.FIFF_DEVICE_MODEL = 153 +FIFF.FIFF_DEVICE_SERIAL = 154 +FIFF.FIFF_DEVICE_SITE = 155 + +FIFF.FIFF_HE_LEVEL_RAW = 156 +FIFF.FIFF_HELIUM_LEVEL = 157 +FIFF.FIFF_ORIG_FILE_GUID = 158 +FIFF.FIFF_UTC_OFFSET = 159 + +FIFF.FIFF_NCHAN = 200 +FIFF.FIFF_SFREQ = 201 +FIFF.FIFF_DATA_PACK = 202 +FIFF.FIFF_CH_INFO = 203 +FIFF.FIFF_MEAS_DATE = 204 +FIFF.FIFF_SUBJECT = 205 +FIFF.FIFF_COMMENT = 206 +FIFF.FIFF_NAVE = 207 +FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch +FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch +FIFF.FIFF_ASPECT_KIND = 210 +FIFF.FIFF_REF_EVENT = 211 +FIFF.FIFF_EXPERIMENTER = 212 +FIFF.FIFF_DIG_POINT = 213 +FIFF.FIFF_CH_POS = 214 +FIFF.FIFF_HPI_SLOPES = 215 # HPI data +FIFF.FIFF_HPI_NCOIL = 216 +FIFF.FIFF_REQ_EVENT = 217 +FIFF.FIFF_REQ_LIMIT = 218 +FIFF.FIFF_LOWPASS = 219 +FIFF.FIFF_BAD_CHS = 220 +FIFF.FIFF_ARTEF_REMOVAL = 221 +FIFF.FIFF_COORD_TRANS = 222 +FIFF.FIFF_HIGHPASS = 223 +FIFF.FIFF_CH_CALS = 224 # This will not occur in new files +FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi +FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations +FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging +FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch +FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum + +FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage +FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage +FIFF.FIFF_NAME = 233 # Intended to be a short name. +FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object +FIFF.FIFF_DIG_STRING = 234 # String of digitized points +FIFF.FIFF_LINE_FREQ = 235 # Line frequency +FIFF.FIFF_GANTRY_ANGLE = 282 # Tilt angle of the gantry in degrees. 
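Tag constants like `FIFF_NCHAN`, `FIFF_SFREQ`, and `FIFF_MEAS_DATE` above are the dispatch keys a FIF reader switches on while walking a measurement-info block. The following is a hedged, toy illustration of that dispatch pattern, not MNE's actual reader; the handler table and values are invented for the example.

```python
# Toy sketch: dispatch on FIFF tag kinds while filling an info dict.
FIFF_NCHAN, FIFF_SFREQ, FIFF_MEAS_DATE = 200, 201, 204

def apply_tag(info, kind, data):
    handlers = {
        FIFF_NCHAN: lambda: info.update(nchan=int(data)),
        FIFF_SFREQ: lambda: info.update(sfreq=float(data)),
        FIFF_MEAS_DATE: lambda: info.update(meas_date=data),
    }
    if kind in handlers:            # unknown tag kinds are skipped
        handlers[kind]()

info = {}
for kind, data in [(200, 306), (201, 600.614), (204, (946684800, 0))]:
    apply_tag(info, kind, data)
assert info['nchan'] == 306 and info['sfreq'] == 600.614
```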
+ +# +# HPI fitting program tags +# +FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency +FIFF.FIFF_HPI_COIL_MOMENTS = 240 # Estimated moment vectors for the HPI coil magnetic dipoles +FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit +FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below) +FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit +FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference +FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement +FIFF.FIFF_HPI_COILS_USED = 246 # List of coils finally used when the transformation was computed +FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247 # Which Isotrak digitization point corresponds to each of the coils energized + + +# +# Tags used for storing channel info +# +FIFF.FIFF_CH_SCAN_NO = 250 # Channel scan number. Corresponds to fiffChInfoRec.scanNo field +FIFF.FIFF_CH_LOGICAL_NO = 251 # Channel logical number. Corresponds to fiffChInfoRec.logNo field +FIFF.FIFF_CH_KIND = 252 # Channel type. Corresponds to fiffChInfoRec.kind field" +FIFF.FIFF_CH_RANGE = 253 # Conversion from recorded number to (possibly virtual) voltage at the output" +FIFF.FIFF_CH_CAL = 254 # Calibration coefficient from output voltage to some real units +FIFF.FIFF_CH_LOC = 255 # Channel loc +FIFF.FIFF_CH_UNIT = 256 # Unit of the data +FIFF.FIFF_CH_UNIT_MUL = 257 # Unit multiplier exponent +FIFF.FIFF_CH_DACQ_NAME = 258 # Name of the channel in the data acquisition system. Corresponds to fiffChInfoRec.name. +FIFF.FIFF_CH_COIL_TYPE = 350 # Coil type in coil_def.dat +FIFF.FIFF_CH_COORD_FRAME = 351 # Coordinate frame (integer) + +# +# Pointers +# +FIFF.FIFFV_NEXT_SEQ = 0 +FIFF.FIFFV_NEXT_NONE = -1 +# +# Channel types +# +FIFF.FIFFV_BIO_CH = 102 +FIFF.FIFFV_MEG_CH = 1 +FIFF.FIFFV_REF_MEG_CH = 301 +FIFF.FIFFV_EEG_CH = 2 +FIFF.FIFFV_MCG_CH = 201 +FIFF.FIFFV_STIM_CH = 3 +FIFF.FIFFV_EOG_CH = 202 +FIFF.FIFFV_EMG_CH = 302 +FIFF.FIFFV_ECG_CH = 402 +FIFF.FIFFV_MISC_CH = 502 +FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring +FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG +FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation +FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only) +FIFF.FIFFV_ECOG_CH = 902 +FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only) +FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel +FIFF.FIFFV_DIPOLE_WAVE = 1000 # Dipole time curve (xplotter/xfit) +FIFF.FIFFV_GOODNESS_FIT = 1001 # Goodness of fit (xplotter/xfit) +FIFF.FIFFV_FNIRS_CH = 1100 # Functional near-infrared spectroscopy +_ch_kind_named = {key: key for key in ( + FIFF.FIFFV_BIO_CH, + FIFF.FIFFV_MEG_CH, + FIFF.FIFFV_REF_MEG_CH, + FIFF.FIFFV_EEG_CH, + FIFF.FIFFV_MCG_CH, + FIFF.FIFFV_STIM_CH, + FIFF.FIFFV_EOG_CH, + FIFF.FIFFV_EMG_CH, + FIFF.FIFFV_ECG_CH, + FIFF.FIFFV_MISC_CH, + FIFF.FIFFV_RESP_CH, + FIFF.FIFFV_SEEG_CH, + FIFF.FIFFV_DBS_CH, + FIFF.FIFFV_SYST_CH, + FIFF.FIFFV_ECOG_CH, + FIFF.FIFFV_IAS_CH, + FIFF.FIFFV_EXCI_CH, + FIFF.FIFFV_DIPOLE_WAVE, + FIFF.FIFFV_GOODNESS_FIT, + FIFF.FIFFV_FNIRS_CH, +)} + +# +# Quaternion channels for head position monitoring +# +FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion +FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation +FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation +FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation +FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation +FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation 
+FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation +FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi +FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi +FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi +# +# Coordinate frames +# +FIFF.FIFFV_COORD_UNKNOWN = 0 +FIFF.FIFFV_COORD_DEVICE = 1 +FIFF.FIFFV_COORD_ISOTRAK = 2 +FIFF.FIFFV_COORD_HPI = 3 +FIFF.FIFFV_COORD_HEAD = 4 +FIFF.FIFFV_COORD_MRI = 5 +FIFF.FIFFV_COORD_MRI_SLICE = 6 +FIFF.FIFFV_COORD_MRI_DISPLAY = 7 +FIFF.FIFFV_COORD_DICOM_DEVICE = 8 +FIFF.FIFFV_COORD_IMAGING_DEVICE = 9 +_coord_frame_named = {key: key for key in ( + FIFF.FIFFV_COORD_UNKNOWN, + FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_ISOTRAK, + FIFF.FIFFV_COORD_HPI, + FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_COORD_MRI, + FIFF.FIFFV_COORD_MRI_SLICE, + FIFF.FIFFV_COORD_MRI_DISPLAY, + FIFF.FIFFV_COORD_DICOM_DEVICE, + FIFF.FIFFV_COORD_IMAGING_DEVICE, +)} +# +# Needed for raw and evoked-response data +# +FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data +FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers +FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel +FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples + +# +# Info on subject +# +FIFF.FIFF_SUBJ_ID = 400 # Subject ID +FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject +FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject +FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject +FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject +FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject +FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject +FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject in kg +FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject in m +FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject +FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System + +FIFF.FIFFV_SUBJ_HAND_RIGHT = 1 # Righthanded +FIFF.FIFFV_SUBJ_HAND_LEFT = 2 # Lefthanded +FIFF.FIFFV_SUBJ_HAND_AMBI = 3 # Ambidextrous + +FIFF.FIFFV_SUBJ_SEX_UNKNOWN = 0 # Unknown gender +FIFF.FIFFV_SUBJ_SEX_MALE = 1 # Male +FIFF.FIFFV_SUBJ_SEX_FEMALE = 2 # Female + +FIFF.FIFF_PROJ_ID = 500 +FIFF.FIFF_PROJ_NAME = 501 +FIFF.FIFF_PROJ_AIM = 502 +FIFF.FIFF_PROJ_PERSONS = 503 +FIFF.FIFF_PROJ_COMMENT = 504 + +FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers +FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: +FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel +FIFF.FIFF_EVENT_BITS = 603 # Event bits array + +# +# Tags used in saving SQUID characteristics etc. +# +FIFF.FIFF_SQUID_BIAS = 701 +FIFF.FIFF_SQUID_OFFSET = 702 +FIFF.FIFF_SQUID_GATE = 703 +# +# Aspect values used to save characteristic curves of SQUIDs. (mjk) +# +FIFF.FIFFV_ASPECT_IFII_LOW = 1100 +FIFF.FIFFV_ASPECT_IFII_HIGH = 1101 +FIFF.FIFFV_ASPECT_GATE = 1102 + +# +# Values for file references +# +FIFF.FIFFV_ROLE_PREV_FILE = 1 +FIFF.FIFFV_ROLE_NEXT_FILE = 2 + +# +# References +# +FIFF.FIFF_REF_PATH = 1101 + +# +# Different aspects of data +# +FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs +FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. 
error of mean +FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data +FIFF.FIFFV_ASPECT_SUBAVERAGE = 103 # Partial average (subaverage) +FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage +FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph +FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum +FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve + +# +# BEM surface IDs +# +FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1 +FIFF.FIFFV_BEM_SURF_ID_NOT_KNOWN = 0 +FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1 +FIFF.FIFFV_BEM_SURF_ID_CSF = 2 +FIFF.FIFFV_BEM_SURF_ID_SKULL = 3 +FIFF.FIFFV_BEM_SURF_ID_HEAD = 4 + +FIFF.FIFF_SPHERE_ORIGIN = 3001 +FIFF.FIFF_SPHERE_RADIUS = 3002 + +FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number +FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name +FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface +FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface +FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3) +FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3) +FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors + +FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix +FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below +FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model +FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment +FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach +FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach + +# +# More of those defined in MNE +# +FIFF.FIFFV_MNE_SURF_UNKNOWN = -1 +FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101 +FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102 +FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system +# +# These relate to the Isotrak data (enum(point)) +# +FIFF.FIFFV_POINT_CARDINAL = 1 +FIFF.FIFFV_POINT_HPI = 2 +FIFF.FIFFV_POINT_EEG = 3 +FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG +FIFF.FIFFV_POINT_EXTRA = 4 +FIFF.FIFFV_POINT_HEAD = 5 # Point on the surface of the head +_dig_kind_named = {key: key for key in( + FIFF.FIFFV_POINT_CARDINAL, + FIFF.FIFFV_POINT_HPI, + FIFF.FIFFV_POINT_EEG, + FIFF.FIFFV_POINT_EXTRA, + FIFF.FIFFV_POINT_HEAD, +)} +# +# Cardinal point types (enum(cardinal_point)) +# +FIFF.FIFFV_POINT_LPA = 1 +FIFF.FIFFV_POINT_NASION = 2 +FIFF.FIFFV_POINT_RPA = 3 +FIFF.FIFFV_POINT_INION = 4 +_dig_cardinal_named = {key: key for key in ( + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + FIFF.FIFFV_POINT_INION, +)} +# +# SSP +# +FIFF.FIFF_PROJ_ITEM_KIND = 3411 +FIFF.FIFF_PROJ_ITEM_TIME = 3412 +FIFF.FIFF_PROJ_ITEM_NVEC = 3414 +FIFF.FIFF_PROJ_ITEM_VECTORS = 3415 +FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416 +FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417 +# XPlotter +FIFF.FIFF_XPLOTTER_LAYOUT = 3501 # string - "Xplotter layout tag" +# +# MRIs +# +FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH +FIFF.FIFF_MRI_SOURCE_FORMAT = 2002 +FIFF.FIFF_MRI_PIXEL_ENCODING = 2003 +FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004 +FIFF.FIFF_MRI_PIXEL_SCALE = 2005 +FIFF.FIFF_MRI_PIXEL_DATA = 2006 +FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007 +FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008 +FIFF.FIFF_MRI_BOUNDING_BOX = 2009 +FIFF.FIFF_MRI_WIDTH = 2010 +FIFF.FIFF_MRI_WIDTH_M = 2011 +FIFF.FIFF_MRI_HEIGHT = 2012 +FIFF.FIFF_MRI_HEIGHT_M = 2013 +FIFF.FIFF_MRI_DEPTH = 2014 +FIFF.FIFF_MRI_DEPTH_M = 2015 +FIFF.FIFF_MRI_THICKNESS = 2016 +FIFF.FIFF_MRI_SCENE_AIM = 2017 +FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020 +FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 2021 
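Dictionaries such as `_dig_kind_named` above use the `{key: key for key in (...)}` idiom. In MNE the keys are "named integers" (`BunchConstNamed` attributes), so mapping each value to itself lets reader code promote a plain int read from disk to a constant that still compares equal to the raw value. A self-contained sketch with a stand-in named-int class:

```python
# Stand-in for BunchConstNamed's named integers; the real class lives
# in mne/utils/_bunch.py and differs in detail.
class NamedInt(int):
    def __new__(cls, name, value):
        obj = super().__new__(cls, value)
        obj._name = name
        return obj

    def __repr__(self):
        return f'{int(self)} ({self._name})'


FIFFV_POINT_CARDINAL = NamedInt('FIFFV_POINT_CARDINAL', 1)
_dig_kind_named = {key: key for key in (FIFFV_POINT_CARDINAL,)}

raw_kind = 1                                     # plain int from a FIF tag
kind = _dig_kind_named.get(raw_kind, raw_kind)   # lookup by integer value
assert kind == 1 and repr(kind) == '1 (FIFFV_POINT_CARDINAL)'
```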
+FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022 +FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023 +FIFF.FIFF_MRI_VOXEL_DATA = 2030 +FIFF.FIFF_MRI_VOXEL_ENCODING = 2031 +FIFF.FIFF_MRI_MRILAB_SETUP = 2100 +FIFF.FIFF_MRI_SEG_REGION_ID = 2200 +# +FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0 +FIFF.FIFFV_MRI_PIXEL_BYTE = 1 +FIFF.FIFFV_MRI_PIXEL_WORD = 2 +FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3 +FIFF.FIFFV_MRI_PIXEL_FLOAT = 4 +FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5 +FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6 +FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7 +FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8 +# +# These are the MNE fiff definitions (range 350-390 reserved for MNE) +# +FIFF.FIFFB_MNE = 350 +FIFF.FIFFB_MNE_SOURCE_SPACE = 351 +FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352 +FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353 +FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354 +FIFF.FIFFB_MNE_COV = 355 +FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356 +FIFF.FIFFB_MNE_NAMED_MATRIX = 357 +FIFF.FIFFB_MNE_ENV = 358 +FIFF.FIFFB_MNE_BAD_CHANNELS = 359 +FIFF.FIFFB_MNE_VERTEX_MAP = 360 +FIFF.FIFFB_MNE_EVENTS = 361 +FIFF.FIFFB_MNE_MORPH_MAP = 362 +FIFF.FIFFB_MNE_SURFACE_MAP = 363 +FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364 + +# +# CTF compensation data +# +FIFF.FIFFB_MNE_CTF_COMP = 370 +FIFF.FIFFB_MNE_CTF_COMP_DATA = 371 +FIFF.FIFFB_MNE_DERIVATIONS = 372 + +FIFF.FIFFB_MNE_EPOCHS = 373 +FIFF.FIFFB_MNE_ICA = 374 +# +# Fiff tags associated with MNE computations (3500...) +# +# +# 3500... Bookkeeping +# +FIFF.FIFF_MNE_ROW_NAMES = 3502 +FIFF.FIFF_MNE_COL_NAMES = 3503 +FIFF.FIFF_MNE_NROW = 3504 +FIFF.FIFF_MNE_NCOL = 3505 +FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults: + # FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI + # FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD + # FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD +FIFF.FIFF_MNE_CH_NAME_LIST = 3507 +FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501) +# +# 3510... 3590... 
Source space or surface +# +FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices +FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals +FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices +FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices +FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier +FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume +FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based) + +FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = 3596 # Voxel space dimensions in a volume source space +FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = 3597 # Matrix to interpolate a volume source space into a mri volume +FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation + +FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles +FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use +FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space +FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces) +FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = 3595 # Neighbors for each source space point (used for volume source spaces) + +FIFF.FIFF_MNE_SOURCE_SPACE_DIST = 3599 # Distances between vertices in use (along the surface) +FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = 3600 # If distance is above this limit (in the volume) it has not been calculated + +FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data +FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map + +# +# 3520... Forward solution +# +FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520 +FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free +FIFF.FIFF_MNE_INCLUDED_METHODS = 3522 +FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523 +# +# 3530... Covariance matrix +# +FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix +FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension +FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle) +FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix +FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above +FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535 +FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom +FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used +FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood + +# +# 3540... 
Inverse operator +# +# We store the inverse operator as the eigenleads, eigenfields, +# and weights +# +FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads +FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = 3546 # The eigenleads (already weighted with R^0.5) +FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields +FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values +FIFF.FIFF_MNE_PRIORS_USED = 3543 # Which kind of priors have been used for the source covariance matrix +FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix + # This matrix includes the whitening operator as well + # The regularization is applied +FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545 # Contains the orientation of one source per row + # The source orientations must be expressed in the coordinate system + # given by FIFF_MNE_COORD_FRAME +FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ? +# +# 3550... Saved environment info +# +FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created +FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file +FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = 3552 # Reference to an external binary file (big-endian) */ +FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553 # Reference to an external binary file (little-endian) */ +# +# 3560... Miscellaneous +# +FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active? +FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI101 / STI 014) +FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes +FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data +FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes +FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values +FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string +FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data +FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning +FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end +# +# 3570... Morphing maps +# +FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere +FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from +FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to +# +# 3580... CTF compensation data +# +FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation +FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself +FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated? + +FIFF.FIFF_MNE_DERIVATION_DATA = 3585 # Used to store information about EEG and other derivations +# +# 3601... 
values associated with ICA decomposition +# +FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters +FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names +FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener +FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components +FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance +FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean +FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix +FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources +FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params +# +# Miscellaneous +# +FIFF.FIFF_MNE_KIT_SYSTEM_ID = 3612 # Unique ID assigned to KIT systems +# +# Maxfilter tags +# +FIFF.FIFF_SSS_FRAME = 263 +FIFF.FIFF_SSS_JOB = 264 +FIFF.FIFF_SSS_ORIGIN = 265 +FIFF.FIFF_SSS_ORD_IN = 266 +FIFF.FIFF_SSS_ORD_OUT = 267 +FIFF.FIFF_SSS_NMAG = 268 +FIFF.FIFF_SSS_COMPONENTS = 269 +FIFF.FIFF_SSS_CAL_CHANS = 270 +FIFF.FIFF_SSS_CAL_CORRS = 271 +FIFF.FIFF_SSS_ST_CORR = 272 +FIFF.FIFF_SSS_NFREE = 278 +FIFF.FIFF_SSS_ST_LENGTH = 279 +FIFF.FIFF_DECOUPLER_MATRIX = 800 +# +# Fiff values associated with MNE computations +# +FIFF.FIFFV_MNE_UNKNOWN_ORI = 0 +FIFF.FIFFV_MNE_FIXED_ORI = 1 +FIFF.FIFFV_MNE_FREE_ORI = 2 + +FIFF.FIFFV_MNE_MEG = 1 +FIFF.FIFFV_MNE_EEG = 2 +FIFF.FIFFV_MNE_MEG_EEG = 3 + +FIFF.FIFFV_MNE_PRIORS_NONE = 0 +FIFF.FIFFV_MNE_PRIORS_DEPTH = 1 +FIFF.FIFFV_MNE_PRIORS_LORETA = 2 +FIFF.FIFFV_MNE_PRIORS_SULCI = 3 + +FIFF.FIFFV_MNE_UNKNOWN_COV = 0 +FIFF.FIFFV_MNE_SENSOR_COV = 1 +FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called +FIFF.FIFFV_MNE_SOURCE_COV = 2 +FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3 +FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers +FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior +FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior + +# +# Output map types +# +FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size +FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic +FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic +FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = 9 # Square root of the (approximate) chi^2 statistic +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar) +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector) +# +# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE) +# +FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1 +FIFF.FIFFV_MNE_SPACE_SURFACE = 1 +FIFF.FIFFV_MNE_SPACE_VOLUME = 2 +FIFF.FIFFV_MNE_SPACE_DISCRETE = 3 +# +# Covariance matrix channel classification +# +FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea +FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T] +FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m] +FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V] +# +# Projection item kinds +# +FIFF.FIFFV_PROJ_ITEM_NONE = 0 +FIFF.FIFFV_PROJ_ITEM_FIELD = 1 +FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2 +FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3 +FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4 +FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5 +FIFF.FIFFV_PROJ_ITEM_EEG_AVREF = 10 # Linear projection related to EEG average reference +FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = FIFF.FIFFV_PROJ_ITEM_EEG_AVREF # backward compat 
alias +# +# Custom EEG references +# +FIFF.FIFFV_MNE_CUSTOM_REF_OFF = 0 +FIFF.FIFFV_MNE_CUSTOM_REF_ON = 1 +FIFF.FIFFV_MNE_CUSTOM_REF_CSD = 2 +# +# SSS job options +# +FIFF.FIFFV_SSS_JOB_NOTHING = 0 # No SSS, just copy input to output +FIFF.FIFFV_SSS_JOB_CTC = 1 # No SSS, only cross-talk correction +FIFF.FIFFV_SSS_JOB_FILTER = 2 # Spatial maxwell filtering +FIFF.FIFFV_SSS_JOB_VIRT = 3 # Transform data to another sensor array +FIFF.FIFFV_SSS_JOB_HEAD_POS = 4 # Estimate head positions, no SSS +FIFF.FIFFV_SSS_JOB_MOVEC_FIT = 5 # Estimate and compensate head movement +FIFF.FIFFV_SSS_JOB_MOVEC_QUA = 6 # Compensate head movement from previously estimated head positions +FIFF.FIFFV_SSS_JOB_REC_ALL = 7 # Reconstruct inside and outside signals +FIFF.FIFFV_SSS_JOB_REC_IN = 8 # Reconstruct inside signals +FIFF.FIFFV_SSS_JOB_REC_OUT = 9 # Reconstruct outside signals +FIFF.FIFFV_SSS_JOB_ST = 10 # Spatio-temporal maxwell filtering +FIFF.FIFFV_SSS_JOB_TPROJ = 11 # Temporal projection, no SSS +FIFF.FIFFV_SSS_JOB_XSSS = 12 # Cross-validation SSS +FIFF.FIFFV_SSS_JOB_XSUB = 13 # Cross-validation subtraction, no SSS +FIFF.FIFFV_SSS_JOB_XWAV = 14 # Cross-validation noise waveforms +FIFF.FIFFV_SSS_JOB_NCOV = 15 # Noise covariance estimation +FIFF.FIFFV_SSS_JOB_SCOV = 16 # SSS sample covariance estimation +#} + +# +# Additional coordinate frames +# +FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data +FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates +FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates +FIFF.FIFFV_MNE_COORD_DIGITIZER = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates +FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates +FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates +FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin +FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates +FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0) +FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0) +FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates +# +# 4D and KIT use the same head coordinate system definition as CTF +# +FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD +FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD + +# +# FWD Types +# + +FWD = BunchConstNamed() + +FWD.COIL_UNKNOWN = 0 +FWD.COILC_UNKNOWN = 0 +FWD.COILC_EEG = 1000 +FWD.COILC_MAG = 1 +FWD.COILC_AXIAL_GRAD = 2 +FWD.COILC_PLANAR_GRAD = 3 +FWD.COILC_AXIAL_GRAD2 = 4 + +FWD.COIL_ACCURACY_POINT = 0 +FWD.COIL_ACCURACY_NORMAL = 1 +FWD.COIL_ACCURACY_ACCURATE = 2 + +FWD.BEM_UNKNOWN = -1 +FWD.BEM_CONSTANT_COLL = 1 +FWD.BEM_LINEAR_COLL = 2 + +FWD.BEM_IP_APPROACH_LIMIT = 0.1 + +FWD.BEM_LIN_FIELD_SIMPLE = 1 +FWD.BEM_LIN_FIELD_FERGUSON = 2 +FWD.BEM_LIN_FIELD_URANKAR = 3 + +# +# Data types +# +FIFF.FIFFT_VOID = 0 +FIFF.FIFFT_BYTE = 1 +FIFF.FIFFT_SHORT = 2 +FIFF.FIFFT_INT = 3 +FIFF.FIFFT_FLOAT = 4 +FIFF.FIFFT_DOUBLE = 5 +FIFF.FIFFT_JULIAN = 6 +FIFF.FIFFT_USHORT = 7 +FIFF.FIFFT_UINT = 8 +FIFF.FIFFT_ULONG = 9 +FIFF.FIFFT_STRING = 10 +FIFF.FIFFT_LONG = 11 +FIFF.FIFFT_DAU_PACK13 = 13 +FIFF.FIFFT_DAU_PACK14 = 14 +FIFF.FIFFT_DAU_PACK16 = 16 +FIFF.FIFFT_COMPLEX_FLOAT = 20 +FIFF.FIFFT_COMPLEX_DOUBLE = 21 +FIFF.FIFFT_OLD_PACK = 23 +FIFF.FIFFT_CH_INFO_STRUCT = 30 +FIFF.FIFFT_ID_STRUCT = 31 +FIFF.FIFFT_DIR_ENTRY_STRUCT = 32 +FIFF.FIFFT_DIG_POINT_STRUCT = 33 +FIFF.FIFFT_CH_POS_STRUCT = 34 +FIFF.FIFFT_COORD_TRANS_STRUCT = 35 
+FIFF.FIFFT_DIG_STRING_STRUCT = 36 +FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37 +# +# Units of measurement +# +FIFF.FIFF_UNIT_NONE = -1 +# +# SI base units +# +FIFF.FIFF_UNIT_UNITLESS = 0 +FIFF.FIFF_UNIT_M = 1 # meter +FIFF.FIFF_UNIT_KG = 2 # kilogram +FIFF.FIFF_UNIT_SEC = 3 # second +FIFF.FIFF_UNIT_A = 4 # ampere +FIFF.FIFF_UNIT_K = 5 # Kelvin +FIFF.FIFF_UNIT_MOL = 6 # mole +# +# SI Supplementary units +# +FIFF.FIFF_UNIT_RAD = 7 # radian +FIFF.FIFF_UNIT_SR = 8 # steradian +# +# SI base candela +# +FIFF.FIFF_UNIT_CD = 9 # candela +# +# SI derived units +# +FIFF.FIFF_UNIT_MOL_M3 = 10 # mol/m^3 +FIFF.FIFF_UNIT_HZ = 101 # hertz +FIFF.FIFF_UNIT_N = 102 # Newton +FIFF.FIFF_UNIT_PA = 103 # pascal +FIFF.FIFF_UNIT_J = 104 # joule +FIFF.FIFF_UNIT_W = 105 # watt +FIFF.FIFF_UNIT_C = 106 # coulomb +FIFF.FIFF_UNIT_V = 107 # volt +FIFF.FIFF_UNIT_F = 108 # farad +FIFF.FIFF_UNIT_OHM = 109 # ohm +FIFF.FIFF_UNIT_MHO = 110 # one per ohm +FIFF.FIFF_UNIT_WB = 111 # weber +FIFF.FIFF_UNIT_T = 112 # tesla +FIFF.FIFF_UNIT_H = 113 # Henry +FIFF.FIFF_UNIT_CEL = 114 # celsius +FIFF.FIFF_UNIT_LM = 115 # lumen +FIFF.FIFF_UNIT_LX = 116 # lux +FIFF.FIFF_UNIT_V_M2 = 117 # V/m^2 +# +# Others we need +# +FIFF.FIFF_UNIT_T_M = 201 # T/m +FIFF.FIFF_UNIT_AM = 202 # Am +FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2 +FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3 +_ch_unit_named = {key: key for key in( + FIFF.FIFF_UNIT_NONE, FIFF.FIFF_UNIT_UNITLESS, FIFF.FIFF_UNIT_M, + FIFF.FIFF_UNIT_KG, FIFF.FIFF_UNIT_SEC, FIFF.FIFF_UNIT_A, FIFF.FIFF_UNIT_K, + FIFF.FIFF_UNIT_MOL, FIFF.FIFF_UNIT_RAD, FIFF.FIFF_UNIT_SR, + FIFF.FIFF_UNIT_CD, FIFF.FIFF_UNIT_MOL_M3, FIFF.FIFF_UNIT_HZ, + FIFF.FIFF_UNIT_N, FIFF.FIFF_UNIT_PA, FIFF.FIFF_UNIT_J, FIFF.FIFF_UNIT_W, + FIFF.FIFF_UNIT_C, FIFF.FIFF_UNIT_V, FIFF.FIFF_UNIT_F, FIFF.FIFF_UNIT_OHM, + FIFF.FIFF_UNIT_MHO, FIFF.FIFF_UNIT_WB, FIFF.FIFF_UNIT_T, FIFF.FIFF_UNIT_H, + FIFF.FIFF_UNIT_CEL, FIFF.FIFF_UNIT_LM, FIFF.FIFF_UNIT_LX, + FIFF.FIFF_UNIT_V_M2, FIFF.FIFF_UNIT_T_M, FIFF.FIFF_UNIT_AM, + FIFF.FIFF_UNIT_AM_M2, FIFF.FIFF_UNIT_AM_M3, +)} +# +# Multipliers +# +FIFF.FIFF_UNITM_E = 18 +FIFF.FIFF_UNITM_PET = 15 +FIFF.FIFF_UNITM_T = 12 +FIFF.FIFF_UNITM_GIG = 9 +FIFF.FIFF_UNITM_MEG = 6 +FIFF.FIFF_UNITM_K = 3 +FIFF.FIFF_UNITM_H = 2 +FIFF.FIFF_UNITM_DA = 1 +FIFF.FIFF_UNITM_NONE = 0 +FIFF.FIFF_UNITM_D = -1 +FIFF.FIFF_UNITM_C = -2 +FIFF.FIFF_UNITM_M = -3 +FIFF.FIFF_UNITM_MU = -6 +FIFF.FIFF_UNITM_N = -9 +FIFF.FIFF_UNITM_P = -12 +FIFF.FIFF_UNITM_F = -15 +FIFF.FIFF_UNITM_A = -18 +_ch_unit_mul_named = {key: key for key in ( + FIFF.FIFF_UNITM_E, FIFF.FIFF_UNITM_PET, FIFF.FIFF_UNITM_T, + FIFF.FIFF_UNITM_GIG, FIFF.FIFF_UNITM_MEG, FIFF.FIFF_UNITM_K, + FIFF.FIFF_UNITM_H, FIFF.FIFF_UNITM_DA, FIFF.FIFF_UNITM_NONE, + FIFF.FIFF_UNITM_D, FIFF.FIFF_UNITM_C, FIFF.FIFF_UNITM_M, + FIFF.FIFF_UNITM_MU, FIFF.FIFF_UNITM_N, FIFF.FIFF_UNITM_P, + FIFF.FIFF_UNITM_F, FIFF.FIFF_UNITM_A, +)} + +# +# Coil types +# +FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data +FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0 +FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils +FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT +FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system +FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead +FIFF.FIFFV_COIL_EEG_CSD = 6 # CSD-transformed EEG lead + +FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition +# The coil info contains dipole location (r0) and +# direction (ex) +FIFF.FIFFV_COIL_FNIRS_HBO = 300 # fNIRS oxyhemoglobin +FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin 
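The `FIFF_UNITM_*` multipliers above are base-10 exponents, stored per channel via the `FIFF_CH_UNIT_MUL` tag defined earlier in this file; combined with the channel unit they recover SI base units:

```python
# FIFF_CH_UNIT_MUL is a base-10 exponent on top of the channel unit:
# data tagged unit=V (FIFF_UNIT_V) with multiplier FIFF_UNITM_MU (-6)
# are microvolts.
FIFF_UNITM_MU = -6
stored = 42.0                            # 42 µV as stored
in_volts = stored * 10 ** FIFF_UNITM_MU
assert abs(in_volts - 4.2e-5) < 1e-12
```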
+FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE = 302 # fNIRS continuous wave amplitude +FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density +FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304 # fNIRS frequency domain AC amplitude +FIFF.FIFFV_COIL_FNIRS_FD_PHASE = 305 # fNIRS frequency domain phase +FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE # old alias + +FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software + +FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer +FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer + +FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor +FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T4 = 3015 # Vectorview planar gradiometer (MEG-MRI) +FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer +FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer +FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T4 = 3025 # Vectorview magnetometer (MEG-MRI) + +FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer +FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer +# +# Magnes reference sensors +# +FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003 +FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004 +FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005 +FIFF.FIFFV_COIL_MAGNES_R_MAG = FIFF.FIFFV_COIL_MAGNES_REF_MAG +FIFF.FIFFV_COIL_MAGNES_R_GRAD = FIFF.FIFFV_COIL_MAGNES_REF_GRAD +FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + +# +# CTF coil and channel types +# +FIFF.FIFFV_COIL_CTF_GRAD = 5001 +FIFF.FIFFV_COIL_CTF_REF_MAG = 5002 +FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003 +FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004 +# +# KIT system coil types +# +FIFF.FIFFV_COIL_KIT_GRAD = 6001 +FIFF.FIFFV_COIL_KIT_REF_MAG = 6002 +# +# BabySQUID sensors +# +FIFF.FIFFV_COIL_BABY_GRAD = 7001 +# +# BabyMEG sensors +# +FIFF.FIFFV_COIL_BABY_MAG = 7002 +FIFF.FIFFV_COIL_BABY_REF_MAG = 7003 +FIFF.FIFFV_COIL_BABY_REF_MAG2 = 7004 +# +# Artemis123 sensors +# +FIFF.FIFFV_COIL_ARTEMIS123_GRAD = 7501 +FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG = 7502 +FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD = 7503 +# +# QuSpin sensors +# +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG = 8001 +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2 = 8002 +# +# FieldLine sensors +# +FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1 = 8101 +# +# Kernel sensors +# +FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1 = 8201 +# +# KRISS sensors +# +FIFF.FIFFV_COIL_KRISS_GRAD = 9001 +# +# Compumedics adult/pediatric gradiometer +# +FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD = 9101 +FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD = 9102 +_ch_coil_type_named = {key: key for key in ( + FIFF.FIFFV_COIL_NONE, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_COIL_NM_122, + FIFF.FIFFV_COIL_NM_24, FIFF.FIFFV_COIL_NM_MCG_AXIAL, + FIFF.FIFFV_COIL_EEG_BIPOLAR, FIFF.FIFFV_COIL_EEG_CSD, + FIFF.FIFFV_COIL_DIPOLE, FIFF.FIFFV_COIL_FNIRS_HBO, + FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFFV_COIL_FNIRS_RAW, + FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + FIFF.FIFFV_COIL_FNIRS_FD_PHASE, FIFF.FIFFV_COIL_MCG_42, + FIFF.FIFFV_COIL_POINT_MAGNETOMETER, FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, + FIFF.FIFFV_COIL_VV_PLANAR_W, FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3, + 
FIFF.FIFFV_COIL_VV_PLANAR_T4, FIFF.FIFFV_COIL_VV_MAG_W, + FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFFV_COIL_VV_MAG_T4, + FIFF.FIFFV_COIL_MAGNES_MAG, FIFF.FIFFV_COIL_MAGNES_GRAD, + FIFF.FIFFV_COIL_MAGNES_REF_MAG, FIFF.FIFFV_COIL_MAGNES_REF_GRAD, + FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_CTF_GRAD, + FIFF.FIFFV_COIL_CTF_REF_MAG, FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_KIT_GRAD, + FIFF.FIFFV_COIL_KIT_REF_MAG, FIFF.FIFFV_COIL_BABY_GRAD, + FIFF.FIFFV_COIL_BABY_MAG, FIFF.FIFFV_COIL_BABY_REF_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG2, FIFF.FIFFV_COIL_ARTEMIS123_GRAD, + FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD, + FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG, FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KRISS_GRAD, FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD, + FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD, +)} + +# MNE RealTime +FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command +FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client + +# MNE epochs bookkeeping +FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800 # the epochs selection +FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801 # the drop log +FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT = 3802 # rejection and flat params +FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ = 3803 # original raw sfreq + +# MNE annotations +FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block + +# MNE Metadata Dataframes +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block + +# Table to match unrecognized channel location names to their known aliases +CHANNEL_LOC_ALIASES = { + # this set of aliases are published in doi:10.1097/WNP.0000000000000316 and + # doi:10.1016/S1388-2457(00)00527-7. 
+    'Cb1': 'POO7',
+    'Cb2': 'POO8',
+    'CB1': 'POO7',
+    'CB2': 'POO8',
+    'T1': 'T9',
+    'T2': 'T10',
+    'T3': 'T7',
+    'T4': 'T8',
+    'T5': 'P7',
+    'T6': 'P8',
+    'M1': 'TP9',
+    'M2': 'TP10'
+    # add a comment here (with doi of a published source) above any new
+    # aliases, as they are added
+}
diff --git a/python/libs/mne/io/ctf/__init__.py b/python/libs/mne/io/ctf/__init__.py
new file mode 100644
index 0000000..61481f2
--- /dev/null
+++ b/python/libs/mne/io/ctf/__init__.py
@@ -0,0 +1,7 @@
+"""CTF module for conversion to FIF."""
+
+# Author: Eric Larson
+#
+# License: BSD-3-Clause
+
+from .ctf import read_raw_ctf, RawCTF
diff --git a/python/libs/mne/io/ctf/constants.py b/python/libs/mne/io/ctf/constants.py
new file mode 100644
index 0000000..16b53dc
--- /dev/null
+++ b/python/libs/mne/io/ctf/constants.py
@@ -0,0 +1,39 @@
+"""CTF constants."""
+
+# Authors: Matti Hämäläinen
+#          Eric Larson
+#
+# License: BSD-3-Clause
+
+from ...utils import BunchConst
+
+
+CTF = BunchConst()
+
+# ctf_types.h
+CTF.CTFV_MAX_AVERAGE_BINS = 8
+CTF.CTFV_MAX_COILS = 8
+CTF.CTFV_MAX_BALANCING = 50
+CTF.CTFV_SENSOR_LABEL = 31
+
+CTF.CTFV_COIL_LPA = 1
+CTF.CTFV_COIL_RPA = 2
+CTF.CTFV_COIL_NAS = 3
+CTF.CTFV_COIL_SPARE = 4
+
+CTF.CTFV_REF_MAG_CH = 0
+CTF.CTFV_REF_GRAD_CH = 1
+CTF.CTFV_MEG_CH = 5
+CTF.CTFV_EEG_CH = 9
+CTF.CTFV_STIM_CH = 11
+
+CTF.CTFV_FILTER_LOWPASS = 1
+CTF.CTFV_FILTER_HIGHPASS = 2
+
+# read_res4.c
+CTF.FUNNY_POS = 1844
+
+# read_write_data.c
+CTF.HEADER_SIZE = 8
+CTF.BLOCK_SIZE = 2000
+CTF.SYSTEM_CLOCK_CH = 'SCLK01-177'
diff --git a/python/libs/mne/io/ctf/ctf.py b/python/libs/mne/io/ctf/ctf.py
new file mode 100644
index 0000000..73d21c8
--- /dev/null
+++ b/python/libs/mne/io/ctf/ctf.py
@@ -0,0 +1,254 @@
+"""Conversion tool from CTF to FIF."""
+
+# Authors: Matti Hämäläinen
+#          Eric Larson
+#
+# License: BSD-3-Clause
+
+import os
+
+import numpy as np
+
+from .._digitization import _format_dig_points
+from ...utils import (verbose, logger, _clean_names, fill_doc, _check_option,
+                      _check_fname)
+
+from ..base import BaseRaw
+from ..utils import _mult_cal_one, _blk_read_lims
+
+from .res4 import _read_res4, _make_ctf_name
+from .hc import _read_hc
+from .eeg import _read_eeg, _read_pos
+from .trans import _make_ctf_coord_trans_set
+from .info import _compose_meas_info, _read_bad_chans, _annotate_bad_segments
+from .constants import CTF
+from .markers import _read_annotations_ctf_call
+
+
+@fill_doc
+def read_raw_ctf(directory, system_clock='truncate', preload=False,
+                 clean_names=False, verbose=None):
+    """Raw object from CTF directory.
+
+    Parameters
+    ----------
+    directory : str
+        Path to the CTF data (ending in ``'.ds'``).
+    system_clock : str
+        How to treat the system clock. Use "truncate" (default) to truncate
+        the data file when the system clock drops to zero, and use "ignore"
+        to ignore the system clock (e.g., if head positions are measured
+        multiple times during a recording).
+    %(preload)s
+    clean_names : bool, optional
+        If True, main channel names and compensation channel names will
+        be cleaned from CTF suffixes. The default is False.
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawCTF
+        The raw data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+
+    Notes
+    -----
+    .. versionadded:: 0.11
+
+    To read in the Polhemus digitization data (for example, from
+    a .pos file), include the file in the CTF directory. The
+    points will then automatically be read into the `mne.io.Raw`
+    instance via `mne.io.read_raw_ctf`.
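+
+    A minimal usage sketch (``my_recording.ds`` is a placeholder for a
+    real CTF directory)::
+
+        raw = read_raw_ctf('my_recording.ds', preload=True)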
+ """ + return RawCTF(directory, system_clock, preload=preload, + clean_names=clean_names, verbose=verbose) + + +@fill_doc +class RawCTF(BaseRaw): + """Raw object from CTF directory. + + Parameters + ---------- + directory : str + Path to the CTF data (ending in ``'.ds'``). + system_clock : str + How to treat the system clock. Use "truncate" (default) to truncate + the data file when the system clock drops to zero, and use "ignore" + to ignore the system clock (e.g., if head positions are measured + multiple times during a recording). + %(preload)s + clean_names : bool, optional + If True main channel names and compensation channel names will + be cleaned from CTF suffixes. The default is False. + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, directory, system_clock='truncate', preload=False, + verbose=None, clean_names=False): # noqa: D102 + # adapted from mne_ctf2fiff.c + directory = _check_fname(directory, 'read', True, 'directory', + need_dir=True) + if not directory.endswith('.ds'): + raise TypeError('directory must be a directory ending with ".ds", ' + f'got {directory}') + _check_option('system_clock', system_clock, ['ignore', 'truncate']) + logger.info('ds directory : %s' % directory) + res4 = _read_res4(directory) # Read the magical res4 file + coils = _read_hc(directory) # Read the coil locations + eeg = _read_eeg(directory) # Read the EEG electrode loc info + + # Investigate the coil location data to get the coordinate trans + coord_trans = _make_ctf_coord_trans_set(res4, coils) + + digs = _read_pos(directory, coord_trans) + + # Compose a structure which makes fiff writing a piece of cake + info = _compose_meas_info(res4, coils, coord_trans, eeg) + with info._unlock(): + info['dig'] += digs + info['dig'] = _format_dig_points(info['dig']) + info['bads'] += _read_bad_chans(directory, info) + + # Determine how our data is distributed across files + fnames = list() + last_samps = list() + raw_extras = list() + while(True): + suffix = 'meg4' if len(fnames) == 0 else ('%d_meg4' % len(fnames)) + meg4_name = _make_ctf_name(directory, suffix, raise_error=False) + if meg4_name is None: + break + # check how much data is in the file + sample_info = _get_sample_info(meg4_name, res4, system_clock) + if sample_info['n_samp'] == 0: + break + if len(fnames) == 0: + buffer_size_sec = sample_info['block_size'] / info['sfreq'] + else: + buffer_size_sec = 1. 
+ fnames.append(meg4_name) + last_samps.append(sample_info['n_samp'] - 1) + raw_extras.append(sample_info) + first_samps = [0] * len(last_samps) + super(RawCTF, self).__init__( + info, preload, first_samps=first_samps, + last_samps=last_samps, filenames=fnames, + raw_extras=raw_extras, orig_format='int', + buffer_size_sec=buffer_size_sec, verbose=verbose) + + # Add bad segments as Annotations (correct for start time) + start_time = -res4['pre_trig_pts'] / float(info['sfreq']) + annot = _annotate_bad_segments(directory, start_time, + info['meas_date']) + marker_annot = _read_annotations_ctf_call( + directory=directory, + total_offset=(res4['pre_trig_pts'] / res4['sfreq']), + trial_duration=(res4['nsamp'] / res4['sfreq']), + meas_date=info['meas_date'] + ) + annot = marker_annot if annot is None else annot + marker_annot + self.set_annotations(annot) + + if clean_names: + self._clean_names() + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + si = self._raw_extras[fi] + offset = 0 + trial_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, + int(si['block_size'])) + with open(self._filenames[fi], 'rb') as fid: + for bi in range(len(r_lims)): + samp_offset = (bi + trial_start_idx) * si['res4_nsamp'] + n_read = min(si['n_samp_tot'] - samp_offset, si['block_size']) + # read the chunk of data + pos = CTF.HEADER_SIZE + pos += samp_offset * si['n_chan'] * 4 + fid.seek(pos, 0) + this_data = np.fromfile(fid, '>i4', + count=si['n_chan'] * n_read) + this_data.shape = (si['n_chan'], n_read) + this_data = this_data[:, r_lims[bi, 0]:r_lims[bi, 1]] + data_view = data[:, d_lims[bi, 0]:d_lims[bi, 1]] + _mult_cal_one(data_view, this_data, idx, cals, mult) + offset += n_read + + def _clean_names(self): + """Clean up CTF suffixes from channel names.""" + mapping = dict(zip(self.ch_names, _clean_names(self.ch_names))) + + self.rename_channels(mapping) + + for comp in self.info['comps']: + for key in ('row_names', 'col_names'): + comp['data'][key] = _clean_names(comp['data'][key]) + + +def _get_sample_info(fname, res4, system_clock): + """Determine the number of valid samples.""" + logger.info('Finding samples for %s: ' % (fname,)) + if CTF.SYSTEM_CLOCK_CH in res4['ch_names']: + clock_ch = res4['ch_names'].index(CTF.SYSTEM_CLOCK_CH) + else: + clock_ch = None + for k, ch in enumerate(res4['chs']): + if ch['ch_name'] == CTF.SYSTEM_CLOCK_CH: + clock_ch = k + break + with open(fname, 'rb') as fid: + fid.seek(0, os.SEEK_END) + st_size = fid.tell() + fid.seek(0, 0) + if (st_size - CTF.HEADER_SIZE) % (4 * res4['nsamp'] * + res4['nchan']) != 0: + raise RuntimeError('The number of samples is not an even multiple ' + 'of the trial size') + n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4['nchan']) + n_trial = n_samp_tot // res4['nsamp'] + n_samp = n_samp_tot + if clock_ch is None: + logger.info(' System clock channel is not available, assuming ' + 'all samples to be valid.') + elif system_clock == 'ignore': + logger.info(' System clock channel is available, but ignored.') + else: # use it + logger.info(' System clock channel is available, checking ' + 'which samples are valid.') + for t in range(n_trial): + # Skip to the correct trial + samp_offset = t * res4['nsamp'] + offset = CTF.HEADER_SIZE + (samp_offset * res4['nchan'] + + (clock_ch * res4['nsamp'])) * 4 + fid.seek(offset, 0) + this_data = np.fromfile(fid, '>i4', res4['nsamp']) + if len(this_data) != res4['nsamp']: + raise RuntimeError('Cannot read data for trial %d' + % (t + 1)) + end = 
np.where(this_data == 0)[0] + if len(end) > 0: + n_samp = samp_offset + end[0] + break + if n_samp < res4['nsamp']: + n_trial = 1 + logger.info(' %d x %d = %d samples from %d chs' + % (n_trial, n_samp, n_samp, res4['nchan'])) + else: + n_trial = n_samp // res4['nsamp'] + n_omit = n_samp_tot - n_samp + logger.info(' %d x %d = %d samples from %d chs' + % (n_trial, res4['nsamp'], n_samp, res4['nchan'])) + if n_omit != 0: + logger.info(' %d samples omitted at the end' % n_omit) + + return dict(n_samp=n_samp, n_samp_tot=n_samp_tot, block_size=res4['nsamp'], + res4_nsamp=res4['nsamp'], n_chan=res4['nchan']) diff --git a/python/libs/mne/io/ctf/eeg.py b/python/libs/mne/io/ctf/eeg.py new file mode 100644 index 0000000..915b4a1 --- /dev/null +++ b/python/libs/mne/io/ctf/eeg.py @@ -0,0 +1,95 @@ +"""Read .eeg files.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np +from os.path import join +from os import listdir + +from ...utils import logger, warn +from ..constants import FIFF +from .res4 import _make_ctf_name +from ...transforms import apply_trans + + +_cardinal_dict = dict(nasion=FIFF.FIFFV_POINT_NASION, + lpa=FIFF.FIFFV_POINT_LPA, left=FIFF.FIFFV_POINT_LPA, + rpa=FIFF.FIFFV_POINT_RPA, right=FIFF.FIFFV_POINT_RPA) + + +def _read_eeg(directory): + """Read the .eeg file.""" + # Missing file is ok + fname = _make_ctf_name(directory, 'eeg', raise_error=False) + if fname is None: + logger.info(' Separate EEG position data file not present.') + return + eeg = dict(labels=list(), kinds=list(), ids=list(), rr=list(), np=0, + assign_to_chs=True, coord_frame=FIFF.FIFFV_MNE_COORD_CTF_HEAD) + with open(fname, 'rb') as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.decode('utf-8').split() + if len(parts) != 5: + raise RuntimeError('Illegal data in EEG position file: %s' + % line) + r = np.array([float(p) for p in parts[2:]]) / 100. + if (r * r).sum() > 1e-4: + label = parts[1] + eeg['labels'].append(label) + eeg['rr'].append(r) + id_ = _cardinal_dict.get(label.lower(), int(parts[0])) + if label.lower() in _cardinal_dict: + kind = FIFF.FIFFV_POINT_CARDINAL + else: + kind = FIFF.FIFFV_POINT_EXTRA + eeg['ids'].append(id_) + eeg['kinds'].append(kind) + eeg['np'] += 1 + logger.info(' Separate EEG position data file read.') + return eeg + + +def _read_pos(directory, transformations): + """Read the .pos file and return eeg positions as dig extra points.""" + fname = [join(directory, f) for f in listdir(directory) if + f.endswith('.pos')] + if len(fname) < 1: + return list() + elif len(fname) > 1: + warn(' Found multiple pos files. Extra digitizer points not added.') + return list() + logger.info(' Reading digitizer points from %s...' % fname) + if transformations['t_ctf_head_head'] is None: + warn(' No transformation found. Extra digitizer points not added.') + return list() + fname = fname[0] + digs = list() + i = 2000 + with open(fname, 'r') as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.split() + # The lines can have 4 or 5 parts. First part is for the id, + # which can be an int or a string. The last three are for xyz + # coordinates. The extra part is for additional info + # (e.g. 'Pz', 'Cz') which is ignored. + if len(parts) not in [4, 5]: + continue + try: + ident = int(parts[0]) + 1000 + except ValueError: # if id is not an int + ident = i + i += 1 + dig = dict(kind=FIFF.FIFFV_POINT_EXTRA, ident=ident, r=list(), + coord_frame=FIFF.FIFFV_COORD_HEAD) + r = np.array([float(p) for p in parts[-3:]]) / 100. 
# cm to m + if (r * r).sum() > 1e-4: + r = apply_trans(transformations['t_ctf_head_head'], r) + dig['r'] = r + digs.append(dig) + return digs diff --git a/python/libs/mne/io/ctf/hc.py b/python/libs/mne/io/ctf/hc.py new file mode 100644 index 0000000..ea62301 --- /dev/null +++ b/python/libs/mne/io/ctf/hc.py @@ -0,0 +1,84 @@ +"""Read .hc files.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np + +from ...utils import logger +from .res4 import _make_ctf_name +from .constants import CTF +from ..constants import FIFF + + +_kind_dict = {'nasion': CTF.CTFV_COIL_NAS, 'left ear': CTF.CTFV_COIL_LPA, + 'right ear': CTF.CTFV_COIL_RPA, 'spare': CTF.CTFV_COIL_SPARE} + +_coord_dict = {'relative to dewar': FIFF.FIFFV_MNE_COORD_CTF_DEVICE, + 'relative to head': FIFF.FIFFV_MNE_COORD_CTF_HEAD} + + +def _read_one_coil_point(fid): + """Read coil coordinate information from the hc file.""" + # Descriptor + one = '#' + while len(one) > 0 and one[0] == '#': + one = fid.readline() + if len(one) == 0: + return None + one = one.strip().decode('utf-8') + if 'Unable' in one: + raise RuntimeError("HPI information not available") + + # Hopefully this is an unambiguous interpretation + p = dict() + p['valid'] = ('measured' in one) + for key, val in _coord_dict.items(): + if key in one: + p['coord_frame'] = val + break + else: + p['coord_frame'] = -1 + + for key, val in _kind_dict.items(): + if key in one: + p['kind'] = val + break + else: + p['kind'] = -1 + + # Three coordinates + p['r'] = np.empty(3) + for ii, coord in enumerate('xyz'): + sp = fid.readline().decode('utf-8').strip() + if len(sp) == 0: # blank line + continue + sp = sp.split(' ') + if len(sp) != 3 or sp[0] != coord or sp[1] != '=': + raise RuntimeError('Bad line: %s' % one) + # We do not deal with centimeters + p['r'][ii] = float(sp[2]) / 100.0 + return p + + +def _read_hc(directory): + """Read the hc file to get the HPI info and to prepare for coord trans.""" + fname = _make_ctf_name(directory, 'hc', raise_error=False) + if fname is None: + logger.info(' hc data not present') + return None + s = list() + with open(fname, 'rb') as fid: + while(True): + p = _read_one_coil_point(fid) + if p is None: + # First point bad indicates that the file is empty + if len(s) == 0: + logger.info('hc file empty, no data present') + return None + # Returns None if at EOF + logger.info(' hc data read.') + return s + if p['valid']: + s.append(p) diff --git a/python/libs/mne/io/ctf/info.py b/python/libs/mne/io/ctf/info.py new file mode 100644 index 0000000..995d349 --- /dev/null +++ b/python/libs/mne/io/ctf/info.py @@ -0,0 +1,481 @@ +"""Populate measurement info.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +from time import strptime +from calendar import timegm +import os.path as op + +import numpy as np + +from ...utils import logger, warn, _clean_names +from ...transforms import (apply_trans, _coord_frame_name, invert_transform, + combine_transforms) +from ...annotations import Annotations + +from ..meas_info import _empty_info +from ..write import get_new_file_id +from ..ctf_comp import _add_kind, _calibrate_comp +from ..constants import FIFF + +from .constants import CTF + + +_ctf_to_fiff = {CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA, + CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA, + CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION} + + +def _pick_isotrak_and_hpi_coils(res4, coils, t): + """Pick the HPI coil locations given in device coordinates.""" + if coils is None: + return list(), list() + dig = list() + hpi_result = 
dict(dig_points=list()) + n_coil_dev = 0 + n_coil_head = 0 + for p in coils: + if p['valid']: + if p['kind'] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, + CTF.CTFV_COIL_NAS]: + kind = FIFF.FIFFV_POINT_CARDINAL + ident = _ctf_to_fiff[p['kind']] + else: # CTF.CTFV_COIL_SPARE + kind = FIFF.FIFFV_POINT_HPI + ident = p['kind'] + if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + if t is None or t['t_ctf_dev_dev'] is None: + raise RuntimeError('No coordinate transformation ' + 'available for HPI coil locations') + d = dict(kind=kind, ident=ident, + r=apply_trans(t['t_ctf_dev_dev'], p['r']), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + hpi_result['dig_points'].append(d) + n_coil_dev += 1 + elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + if t is None or t['t_ctf_head_head'] is None: + raise RuntimeError('No coordinate transformation ' + 'available for (virtual) Polhemus data') + d = dict(kind=kind, ident=ident, + r=apply_trans(t['t_ctf_head_head'], p['r']), + coord_frame=FIFF.FIFFV_COORD_HEAD) + dig.append(d) + n_coil_head += 1 + if n_coil_head > 0: + logger.info(' Polhemus data for %d HPI coils added' % n_coil_head) + if n_coil_dev > 0: + logger.info(' Device coordinate locations for %d HPI coils added' + % n_coil_dev) + return dig, [hpi_result] + + +def _convert_time(date_str, time_str): + """Convert date and time strings to float time.""" + for fmt in ("%d/%m/%Y", "%d-%b-%Y", "%a, %b %d, %Y"): + try: + date = strptime(date_str.strip(), fmt) + except ValueError: + pass + else: + break + else: + raise RuntimeError( + 'Illegal date: %s.\nIf the language of the date does not ' + 'correspond to your local machine\'s language try to set the ' + 'locale to the language of the date string:\n' + 'locale.setlocale(locale.LC_ALL, "en_US")' % date_str) + + for fmt in ('%H:%M:%S', '%H:%M'): + try: + time = strptime(time_str, fmt) + except ValueError: + pass + else: + break + else: + raise RuntimeError('Illegal time: %s' % time_str) + # MNE-C uses mktime which uses local time, but here we instead decouple + # conversion location from the process, and instead assume that the + # acquisition was in GMT. This will be wrong for most sites, but at least + # the value we obtain here won't depend on the geographical location + # that the file was converted. + res = timegm((date.tm_year, date.tm_mon, date.tm_mday, + time.tm_hour, time.tm_min, time.tm_sec, + date.tm_wday, date.tm_yday, date.tm_isdst)) + return res + + +def _get_plane_vectors(ez): + """Get two orthogonal vectors orthogonal to ez (ez will be modified).""" + assert ez.shape == (3,) + ez_len = np.sqrt(np.sum(ez * ez)) + if ez_len == 0: + raise RuntimeError('Zero length normal. Cannot proceed.') + if np.abs(ez_len - np.abs(ez[2])) < 1e-5: # ez already in z-direction + ex = np.array([1., 0., 0.]) + else: + ex = np.zeros(3) + if ez[1] < ez[2]: + ex[0 if ez[0] < ez[1] else 1] = 1. + else: + ex[0 if ez[0] < ez[2] else 2] = 1. 
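+    # Gram-Schmidt step: normalize ez, project its component out of the
+    # seed vector ex, renormalize, and complete the right-handed
+    # orthonormal triad with ey = ez x ex (np.cross below).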
+ ez /= ez_len + ex -= np.dot(ez, ex) * ez + ex /= np.sqrt(np.sum(ex * ex)) + ey = np.cross(ez, ex) + return ex, ey + + +def _at_origin(x): + """Determine if a vector is at the origin.""" + return (np.sum(x * x) < 1e-8) + + +def _check_comp_ch(cch, kind, desired=None): + if desired is None: + desired = cch['grad_order_no'] + if cch['grad_order_no'] != desired: + raise RuntimeError('%s channel with inconsistent compensation ' + 'grade %s, should be %s' + % (kind, cch['grad_order_no'], desired)) + return desired + + +def _convert_channel_info(res4, t, use_eeg_pos): + """Convert CTF channel information to fif format.""" + nmeg = neeg = nstim = nmisc = nref = 0 + chs = list() + this_comp = None + for k, cch in enumerate(res4['chs']): + cal = float(1. / (cch['proper_gain'] * cch['qgain'])) + ch = dict(scanno=k + 1, range=1., cal=cal, loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, ch_name=cch['ch_name'][:15], + coil_type=FIFF.FIFFV_COIL_NONE) + del k + chs.append(ch) + # Create the channel position information + if cch['sensor_type_index'] in (CTF.CTFV_REF_MAG_CH, + CTF.CTFV_REF_GRAD_CH, + CTF.CTFV_MEG_CH): + # Extra check for a valid MEG channel + if np.sum(cch['coil']['pos'][0] ** 2) < 1e-6 or \ + np.sum(cch['coil']['norm'][0] ** 2) < 1e-6: + nmisc += 1 + ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V) + text = 'MEG' + if cch['sensor_type_index'] != CTF.CTFV_MEG_CH: + text += ' ref' + warn('%s channel %s did not have position assigned, so ' + 'it was changed to a MISC channel' + % (text, ch['ch_name'])) + continue + ch['unit'] = FIFF.FIFF_UNIT_T + # Set up the local coordinate frame + r0 = cch['coil']['pos'][0].copy() + ez = cch['coil']['norm'][0].copy() + # It turns out that positive proper_gain requires swapping + # of the normal direction + if cch['proper_gain'] > 0.0: + ez *= -1 + # Check how the other vectors should be defined + off_diag = False + # Default: ex and ey are arbitrary in the plane normal to ez + if cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH: + # The off-diagonal gradiometers are an exception: + # + # We use the same convention for ex as for Neuromag planar + # gradiometers: ex pointing in the positive gradient direction + diff = cch['coil']['pos'][0] - cch['coil']['pos'][1] + size = np.sqrt(np.sum(diff * diff)) + if size > 0.: + diff /= size + # Is ez normal to the line joining the coils? 
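+                # If it is, the two coils sit side by side in the plane
+                # normal to ez -- the off-diagonal reference gradiometer
+                # case -- so ex is taken along the baseline joining the
+                # coils and r0 is shifted to the baseline midpoint.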
+ if np.abs(np.dot(diff, ez)) < 1e-3: + off_diag = True + # Handle the off-diagonal gradiometer coordinate system + r0 -= size * diff / 2.0 + ex = diff + ey = np.cross(ez, ex) + else: + ex, ey = _get_plane_vectors(ez) + else: + ex, ey = _get_plane_vectors(ez) + # Transform into a Neuromag-like device coordinate system + ch['loc'] = np.concatenate([ + apply_trans(t['t_ctf_dev_dev'], r0), + apply_trans(t['t_ctf_dev_dev'], ex, move=False), + apply_trans(t['t_ctf_dev_dev'], ey, move=False), + apply_trans(t['t_ctf_dev_dev'], ez, move=False)]) + del r0, ex, ey, ez + # Set the coil type + if cch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: + ch['kind'] = FIFF.FIFFV_REF_MEG_CH + ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_MAG + nref += 1 + ch['logno'] = nref + elif cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH: + ch['kind'] = FIFF.FIFFV_REF_MEG_CH + if off_diag: + ch['coil_type'] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD + else: + ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_GRAD + nref += 1 + ch['logno'] = nref + else: + this_comp = _check_comp_ch(cch, 'Gradiometer', this_comp) + ch['kind'] = FIFF.FIFFV_MEG_CH + ch['coil_type'] = FIFF.FIFFV_COIL_CTF_GRAD + nmeg += 1 + ch['logno'] = nmeg + # Encode the software gradiometer order + ch['coil_type'] = int( + ch['coil_type'] | (cch['grad_order_no'] << 16)) + ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE + elif cch['sensor_type_index'] == CTF.CTFV_EEG_CH: + coord_frame = FIFF.FIFFV_COORD_HEAD + if use_eeg_pos: + # EEG electrode coordinates may be present but in the + # CTF head frame + ch['loc'][:3] = cch['coil']['pos'][0] + if not _at_origin(ch['loc'][:3]): + if t['t_ctf_head_head'] is None: + warn('EEG electrode (%s) location omitted because of ' + 'missing HPI information' % ch['ch_name']) + ch['loc'].fill(np.nan) + coord_frame = FIFF.FIFFV_MNE_COORD_CTF_HEAD + else: + ch['loc'][:3] = apply_trans( + t['t_ctf_head_head'], ch['loc'][:3]) + neeg += 1 + ch.update(logno=neeg, kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, coord_frame=coord_frame, + coil_type=FIFF.FIFFV_COIL_EEG) + elif cch['sensor_type_index'] == CTF.CTFV_STIM_CH: + nstim += 1 + ch.update(logno=nstim, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_STIM_CH, unit=FIFF.FIFF_UNIT_V) + else: + nmisc += 1 + ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V) + return chs + + +def _comp_sort_keys(c): + """Sort the compensation data.""" + return (int(c['coeff_type']), int(c['scanno'])) + + +def _check_comp(comp): + """Check that conversion to named matrices is possible.""" + ref_sens = None + kind = -1 + for k, c_k in enumerate(comp): + if c_k['coeff_type'] != kind: + c_ref = c_k + ref_sens = c_ref['sensors'] + kind = c_k['coeff_type'] + elif not c_k['sensors'] == ref_sens: + raise RuntimeError('Cannot use an uneven compensation matrix') + + +def _conv_comp(comp, first, last, chs): + """Add a new converted compensation data item.""" + ch_names = [c['ch_name'] for c in chs] + n_col = comp[first]['ncoeff'] + col_names = comp[first]['sensors'][:n_col] + row_names = [comp[p]['sensor_name'] for p in range(first, last + 1)] + mask = np.in1d(col_names, ch_names) # missing channels excluded + col_names = np.array(col_names)[mask].tolist() + n_col = len(col_names) + n_row = len(row_names) + ccomp = dict(ctfkind=np.array([comp[first]['coeff_type']]), + save_calibrated=False) + _add_kind(ccomp) + + data = np.empty((n_row, n_col)) + for ii, coeffs in enumerate(comp[first:last + 1]): + # Pick the elements to the matrix + data[ii, :] = 
coeffs['coeffs'][mask] + ccomp['data'] = dict(row_names=row_names, col_names=col_names, + data=data, nrow=len(row_names), ncol=len(col_names)) + mk = ('proper_gain', 'qgain') + _calibrate_comp(ccomp, chs, row_names, col_names, mult_keys=mk, flip=True) + return ccomp + + +def _convert_comp_data(res4): + """Convert the compensation data into named matrices.""" + if res4['ncomp'] == 0: + return + # Sort the coefficients in our favorite order + res4['comp'] = sorted(res4['comp'], key=_comp_sort_keys) + # Check that all items for a given compensation type have the correct + # number of channels + _check_comp(res4['comp']) + # Create named matrices + first = 0 + kind = -1 + comps = list() + for k in range(len(res4['comp'])): + if res4['comp'][k]['coeff_type'] != kind: + if k > 0: + comps.append(_conv_comp(res4['comp'], first, k - 1, + res4['chs'])) + kind = res4['comp'][k]['coeff_type'] + first = k + comps.append(_conv_comp(res4['comp'], first, k, res4['chs'])) + return comps + + +def _pick_eeg_pos(c): + """Pick EEG positions.""" + eeg = dict(coord_frame=FIFF.FIFFV_COORD_HEAD, assign_to_chs=False, + labels=list(), ids=list(), rr=list(), kinds=list(), np=0) + for ch in c['chs']: + if ch['kind'] == FIFF.FIFFV_EEG_CH and not _at_origin(ch['loc'][:3]): + eeg['labels'].append(ch['ch_name']) + eeg['ids'].append(ch['logno']) + eeg['rr'].append(ch['loc'][:3]) + eeg['kinds'].append(FIFF.FIFFV_POINT_EEG) + eeg['np'] += 1 + if eeg['np'] == 0: + return None + logger.info('Picked positions of %d EEG channels from channel info' + % eeg['np']) + return eeg + + +def _add_eeg_pos(eeg, t, c): + """Pick the (virtual) EEG position data.""" + if eeg is None: + return + if t is None or t['t_ctf_head_head'] is None: + raise RuntimeError('No coordinate transformation available for EEG ' + 'position data') + eeg_assigned = 0 + if eeg['assign_to_chs']: + for k in range(eeg['np']): + # Look for a channel name match + for ch in c['chs']: + if ch['ch_name'].lower() == eeg['labels'][k].lower(): + r0 = ch['loc'][:3] + r0[:] = eeg['rr'][k] + if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + r0[:] = apply_trans(t['t_ctf_head_head'], r0) + elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError( + 'Illegal coordinate frame for EEG electrode ' + 'positions : %s' + % _coord_frame_name(eeg['coord_frame'])) + # Use the logical channel number as an identifier + eeg['ids'][k] = ch['logno'] + eeg['kinds'][k] = FIFF.FIFFV_POINT_EEG + eeg_assigned += 1 + break + + # Add these to the Polhemus data + fid_count = eeg_count = extra_count = 0 + for k in range(eeg['np']): + d = dict(r=eeg['rr'][k].copy(), kind=eeg['kinds'][k], + ident=eeg['ids'][k], coord_frame=FIFF.FIFFV_COORD_HEAD) + c['dig'].append(d) + if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + d['r'] = apply_trans(t['t_ctf_head_head'], d['r']) + elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError('Illegal coordinate frame for EEG electrode ' + 'positions: %s' + % _coord_frame_name(eeg['coord_frame'])) + if eeg['kinds'][k] == FIFF.FIFFV_POINT_CARDINAL: + fid_count += 1 + elif eeg['kinds'][k] == FIFF.FIFFV_POINT_EEG: + eeg_count += 1 + else: + extra_count += 1 + if eeg_assigned > 0: + logger.info(' %d EEG electrode locations assigned to channel info.' + % eeg_assigned) + for count, kind in zip((fid_count, eeg_count, extra_count), + ('fiducials', 'EEG locations', 'extra points')): + if count > 0: + logger.info(' %d %s added to Polhemus data.' 
% (count, kind)) + + +_filt_map = {CTF.CTFV_FILTER_LOWPASS: 'lowpass', + CTF.CTFV_FILTER_HIGHPASS: 'highpass'} + + +def _compose_meas_info(res4, coils, trans, eeg): + """Create meas info from CTF data.""" + info = _empty_info(res4['sfreq']) + + # Collect all the necessary data from the structures read + info['meas_id'] = get_new_file_id() + info['meas_id']['usecs'] = 0 + info['meas_id']['secs'] = _convert_time(res4['data_date'], + res4['data_time']) + info['meas_date'] = (info['meas_id']['secs'], info['meas_id']['usecs']) + info['experimenter'] = res4['nf_operator'] + info['subject_info'] = dict(his_id=res4['nf_subject_id']) + for filt in res4['filters']: + if filt['type'] in _filt_map: + info[_filt_map[filt['type']]] = filt['freq'] + info['dig'], info['hpi_results'] = _pick_isotrak_and_hpi_coils( + res4, coils, trans) + if trans is not None: + if len(info['hpi_results']) > 0: + info['hpi_results'][0]['coord_trans'] = trans['t_ctf_head_head'] + if trans['t_dev_head'] is not None: + info['dev_head_t'] = trans['t_dev_head'] + info['dev_ctf_t'] = combine_transforms( + trans['t_dev_head'], + invert_transform(trans['t_ctf_head_head']), + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_MNE_COORD_CTF_HEAD) + if trans['t_ctf_head_head'] is not None: + info['ctf_head_t'] = trans['t_ctf_head_head'] + info['chs'] = _convert_channel_info(res4, trans, eeg is None) + info['comps'] = _convert_comp_data(res4) + if eeg is None: + # Pick EEG locations from chan info if not read from a separate file + eeg = _pick_eeg_pos(info) + _add_eeg_pos(eeg, trans, info) + logger.info(' Measurement info composed.') + info._unlocked = False + info._update_redundant() + return info + + +def _read_bad_chans(directory, info): + """Read Bad channel list and match to internal names.""" + fname = op.join(directory, 'BadChannels') + if not op.exists(fname): + return [] + mapping = dict(zip(_clean_names(info['ch_names']), info['ch_names'])) + with open(fname, 'r') as fid: + bad_chans = [mapping[f.strip()] for f in fid.readlines()] + return bad_chans + + +def _annotate_bad_segments(directory, start_time, meas_date): + fname = op.join(directory, 'bad.segments') + if not op.exists(fname): + return None + + # read in bad segment file + onsets = [] + durations = [] + desc = [] + with open(fname, 'r') as fid: + for f in fid.readlines(): + tmp = f.strip().split() + desc.append('bad_%s' % tmp[0]) + onsets.append(np.float64(tmp[1]) - start_time) + durations.append(np.float64(tmp[2]) - np.float64(tmp[1])) + # return None if there are no bad segments + if len(onsets) == 0: + return None + + return Annotations(onsets, durations, desc, meas_date) diff --git a/python/libs/mne/io/ctf/markers.py b/python/libs/mne/io/ctf/markers.py new file mode 100644 index 0000000..2f4c7d0 --- /dev/null +++ b/python/libs/mne/io/ctf/markers.py @@ -0,0 +1,82 @@ +# Author: Joan Massich +# +# License: BSD-3-Clause + +import numpy as np +import os.path as op +from io import BytesIO + +from ...annotations import Annotations +from .res4 import _read_res4 +from .info import _convert_time + + +def _get_markers(fname): + def consume(fid, predicate): # just a consumer to move around conveniently + while(predicate(fid.readline())): + pass + + def parse_marker(string): # XXX: there should be a nicer way to do that + data = np.genfromtxt( + BytesIO(string.encode()), dtype=[('trial', int), ('sync', float)]) + return int(data['trial']), float(data['sync']) + + markers = dict() + with open(fname) as fid: + consume(fid, lambda l: not l.startswith('NUMBER OF MARKERS:')) + num_of_markers = 
int(fid.readline()) + + for _ in range(num_of_markers): + consume(fid, lambda l: not l.startswith('NAME:')) + label = fid.readline().strip('\n') + + consume(fid, lambda l: not l.startswith('NUMBER OF SAMPLES:')) + n_markers = int(fid.readline()) + + consume(fid, lambda l: not l.startswith('LIST OF SAMPLES:')) + next(fid) # skip the samples header + markers[label] = [ + parse_marker(next(fid)) for _ in range(n_markers) + ] + + return markers + + +def _get_res4_info_needed_by_markers(directory): + """Get required information from CTF res4 information file.""" + # we only need a few values from res4. Maybe we can read them directly + # instead of parsing the entire res4 file. + res4 = _read_res4(directory) + + total_offset_duration = res4['pre_trig_pts'] / res4['sfreq'] + trial_duration = res4['nsamp'] / res4['sfreq'] + + meas_date = (_convert_time(res4['data_date'], + res4['data_time']), 0) + return total_offset_duration, trial_duration, meas_date + + +def _read_annotations_ctf(directory): + total_offset, trial_duration, meas_date \ + = _get_res4_info_needed_by_markers(directory) + return _read_annotations_ctf_call(directory, total_offset, trial_duration, + meas_date) + + +def _read_annotations_ctf_call(directory, total_offset, trial_duration, + meas_date): + fname = op.join(directory, 'MarkerFile.mrk') + if not op.exists(fname): + return Annotations(list(), list(), list(), orig_time=meas_date) + else: + markers = _get_markers(fname) + + onset = [synctime + (trialnum * trial_duration) + total_offset + for _, m in markers.items() for (trialnum, synctime) in m] + + description = np.concatenate([ + np.repeat(label, len(m)) for label, m in markers.items() + ]) + + return Annotations(onset=onset, duration=np.zeros_like(onset), + description=description, orig_time=meas_date) diff --git a/python/libs/mne/io/ctf/res4.py b/python/libs/mne/io/ctf/res4.py new file mode 100644 index 0000000..8279d89 --- /dev/null +++ b/python/libs/mne/io/ctf/res4.py @@ -0,0 +1,201 @@ +"""Read .res4 files.""" + +# Authors: Matti Hämäläinen +# Eric Larson +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np + +from ...utils import logger +from .constants import CTF + + +def _make_ctf_name(directory, extra, raise_error=True): + """Make a CTF name.""" + fname = op.join(directory, op.basename(directory)[:-3] + '.' 
+ extra) + if not op.isfile(fname): + if raise_error: + raise IOError('Standard file %s not found' % fname) + else: + return None + return fname + + +def _read_double(fid, n=1): + """Read a double.""" + return np.fromfile(fid, '>f8', n) + + +def _read_string(fid, n_bytes, decode=True): + """Read string.""" + s0 = fid.read(n_bytes) + s = s0.split(b'\x00')[0] + return s.decode('utf-8') if decode else s + + +def _read_ustring(fid, n_bytes): + """Read unsigned character string.""" + return np.fromfile(fid, '>B', n_bytes) + + +def _read_int2(fid): + """Read int from short.""" + return np.fromfile(fid, '>i2', 1)[0] + + +def _read_int(fid): + """Read a 32-bit integer.""" + return np.fromfile(fid, '>i4', 1)[0] + + +def _move_to_next(fid, byte=8): + """Move to next byte boundary.""" + now = fid.tell() + if now % byte != 0: + now = now - (now % byte) + byte + fid.seek(now, 0) + + +def _read_filter(fid): + """Read filter information.""" + f = dict() + f['freq'] = _read_double(fid)[0] + f['class'] = _read_int(fid) + f['type'] = _read_int(fid) + f['npar'] = _read_int2(fid) + f['pars'] = _read_double(fid, f['npar']) + return f + + +def _read_comp_coeff(fid, d): + """Read compensation coefficients.""" + # Read the coefficients and initialize + d['ncomp'] = _read_int2(fid) + d['comp'] = list() + # Read each record + dt = np.dtype([ + ('sensor_name', 'S32'), + ('coeff_type', '>i4'), ('d0', '>i4'), + ('ncoeff', '>i2'), + ('sensors', 'S%s' % CTF.CTFV_SENSOR_LABEL, CTF.CTFV_MAX_BALANCING), + ('coeffs', '>f8', CTF.CTFV_MAX_BALANCING)]) + comps = np.fromfile(fid, dt, d['ncomp']) + for k in range(d['ncomp']): + comp = dict() + d['comp'].append(comp) + comp['sensor_name'] = \ + comps['sensor_name'][k].split(b'\x00')[0].decode('utf-8') + comp['coeff_type'] = comps['coeff_type'][k] + comp['ncoeff'] = comps['ncoeff'][k] + comp['sensors'] = [s.split(b'\x00')[0].decode('utf-8') + for s in comps['sensors'][k][:comp['ncoeff']]] + comp['coeffs'] = comps['coeffs'][k][:comp['ncoeff']] + comp['scanno'] = d['ch_names'].index(comp['sensor_name']) + + +def _read_res4(dsdir): + """Read the magical res4 file.""" + # adapted from read_res4.c + name = _make_ctf_name(dsdir, 'res4') + res = dict() + with open(name, 'rb') as fid: + # Read the fields + res['head'] = _read_string(fid, 8) + res['appname'] = _read_string(fid, 256) + res['origin'] = _read_string(fid, 256) + res['desc'] = _read_string(fid, 256) + res['nave'] = _read_int2(fid) + res['data_time'] = _read_string(fid, 255) + res['data_date'] = _read_string(fid, 255) + # Seems that date and time can be swapped + # (are they entered manually?!) 
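+        # Heuristic check: a '/' in the time field together with a ':'
+        # in the date field means the two fields were stored swapped, so
+        # exchange them before any parsing.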
+        if '/' in res['data_time'] and ':' in res['data_date']:
+            data_date = res['data_date']
+            res['data_date'] = res['data_time']
+            res['data_time'] = data_date
+        res['nsamp'] = _read_int(fid)
+        res['nchan'] = _read_int2(fid)
+        _move_to_next(fid, 8)
+        res['sfreq'] = _read_double(fid)[0]
+        res['epoch_time'] = _read_double(fid)[0]
+        res['no_trials'] = _read_int2(fid)
+        _move_to_next(fid, 4)
+        res['pre_trig_pts'] = _read_int(fid)
+        res['no_trials_done'] = _read_int2(fid)
+        res['no_trials_display'] = _read_int2(fid)
+        _move_to_next(fid, 4)
+        res['save_trials'] = _read_int(fid)
+        res['primary_trigger'] = fid.read(1)
+        res['secondary_trigger'] = [fid.read(1)
+                                    for k in range(CTF.CTFV_MAX_AVERAGE_BINS)]
+        res['trigger_polarity_mask'] = fid.read(1)
+        res['trigger_mode'] = _read_int2(fid)
+        _move_to_next(fid, 4)
+        res['accept_reject'] = _read_int(fid)
+        res['run_time_display'] = _read_int2(fid)
+        _move_to_next(fid, 4)
+        res['zero_head'] = _read_int(fid)
+        _move_to_next(fid, 4)
+        res['artifact_mode'] = _read_int(fid)
+        _read_int(fid)  # padding
+        res['nf_run_name'] = _read_string(fid, 32)
+        res['nf_run_title'] = _read_string(fid, 256)
+        res['nf_instruments'] = _read_string(fid, 32)
+        res['nf_collect_descriptor'] = _read_string(fid, 32)
+        res['nf_subject_id'] = _read_string(fid, 32)
+        res['nf_operator'] = _read_string(fid, 32)
+        if len(res['nf_operator']) == 0:
+            res['nf_operator'] = None
+        res['nf_sensor_file_name'] = _read_ustring(fid, 60)
+        _move_to_next(fid, 4)
+        res['rdlen'] = _read_int(fid)
+        fid.seek(CTF.FUNNY_POS, 0)
+
+        if res['rdlen'] > 0:
+            res['run_desc'] = _read_string(fid, res['rdlen'])
+
+        # Filters
+        res['nfilt'] = _read_int2(fid)
+        res['filters'] = list()
+        for k in range(res['nfilt']):
+            res['filters'].append(_read_filter(fid))
+
+        # Channel information (names, then data)
+        res['ch_names'] = list()
+        for k in range(res['nchan']):
+            ch_name = _read_string(fid, 32)
+            res['ch_names'].append(ch_name)
+        _coil_dt = np.dtype([
+            ('pos', '>f8', 3), ('d0', '>f8'),
+            ('norm', '>f8', 3), ('d1', '>f8'),
+            ('turns', '>i2'), ('d2', '>i4'), ('d3', '>i2'),
+            ('area', '>f8')])
+        _ch_dt = np.dtype([
+            ('sensor_type_index', '>i2'),
+            ('original_run_no', '>i2'),
+            ('coil_type', '>i4'),
+            ('proper_gain', '>f8'),
+            ('qgain', '>f8'),
+            ('io_gain', '>f8'),
+            ('io_offset', '>f8'),
+            ('num_coils', '>i2'),
+            ('grad_order_no', '>i2'), ('d0', '>i4'),
+            ('coil', _coil_dt, CTF.CTFV_MAX_COILS),
+            ('head_coil', _coil_dt, CTF.CTFV_MAX_COILS)])
+        chs = np.fromfile(fid, _ch_dt, res['nchan'])
+        for coil in (chs['coil'], chs['head_coil']):
+            coil['pos'] /= 100.
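+            # Convert to SI units: positions cm -> m (above), coil areas
+            # cm^2 -> m^2 (the 1e-4 factor below).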
+ coil['area'] *= 1e-4 + # convert to dict + chs = [dict(zip(chs.dtype.names, x)) for x in chs] + res['chs'] = chs + for k in range(res['nchan']): + res['chs'][k]['ch_name'] = res['ch_names'][k] + + # The compensation coefficients + _read_comp_coeff(fid, res) + logger.info(' res4 data read.') + return res diff --git a/python/libs/mne/io/ctf/tests/__init__.py b/python/libs/mne/io/ctf/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/ctf/tests/test_ctf.py b/python/libs/mne/io/ctf/tests/test_ctf.py new file mode 100644 index 0000000..4995cef --- /dev/null +++ b/python/libs/mne/io/ctf/tests/test_ctf.py @@ -0,0 +1,429 @@ +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import copy +import os +from os import path as op +import shutil + +import numpy as np +from numpy import array_equal +from numpy.testing import assert_allclose, assert_array_equal +import pytest + +import mne +from mne import (pick_types, read_annotations, create_info, + events_from_annotations, make_forward_solution) +from mne.transforms import apply_trans +from mne.io import read_raw_fif, read_raw_ctf, RawArray +from mne.io.compensator import get_current_comp +from mne.io.ctf.constants import CTF +from mne.io.tests.test_raw import _test_raw_reader +from mne.tests.test_annotations import _assert_annotations_equal +from mne.utils import (_clean_names, catch_logging, _stamp_to_dt, + _record_warnings) +from mne.datasets import testing, spm_face, brainstorm +from mne.io.constants import FIFF + +ctf_dir = op.join(testing.data_path(download=False), 'CTF') +ctf_fname_continuous = 'testdata_ctf.ds' +ctf_fname_1_trial = 'testdata_ctf_short.ds' +ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' +ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' +ctf_fname_somato = 'somMDYO-18av.ds' +ctf_fname_catch = 'catch-alp-good-f.ds' +somato_fname = op.join( + brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw', + 'subj001_somatosensory_20111109_01_AUX-f.ds' +) +spm_path = spm_face.data_path(download=False) + +block_sizes = { + ctf_fname_continuous: 12000, + ctf_fname_1_trial: 4801, + ctf_fname_2_trials: 12000, + ctf_fname_discont: 1201, + ctf_fname_somato: 313, + ctf_fname_catch: 2500, +} +single_trials = ( + ctf_fname_continuous, + ctf_fname_1_trial, +) + +ctf_fnames = tuple(sorted(block_sizes.keys())) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_read_ctf(tmp_path): + """Test CTF reader.""" + temp_dir = str(tmp_path) + out_fname = op.join(temp_dir, 'test_py_raw.fif') + + # Create a dummy .eeg file so we can test our reading/application of it + os.mkdir(op.join(temp_dir, 'randpos')) + ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch) + shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname) + with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): + raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname) + picks = pick_types(raw.info, meg=False, eeg=True) + pos = np.random.RandomState(42).randn(len(picks), 3) + fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg') + # Create a bad file + with open(fake_eeg_fname, 'wb') as fid: + fid.write('foo\n'.encode('ascii')) + pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname) + # Create a good file + with open(fake_eeg_fname, 'wb') as fid: + for ii, ch_num in enumerate(picks): + args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple( + '%0.5f' % x for x in 100 * pos[ii]) # convert to cm + fid.write(('\t'.join(args) + '\n').encode('ascii')) + 
pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
+    with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
+        raw = read_raw_ctf(ctf_eeg_fname)  # read modified data
+    pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
+    assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read,
+                    rtol=1e-5, atol=1e-5)
+    assert (pos_read == pos_read_old).mean() < 0.1
+    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'),
+                op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif'))
+
+    # Create a version with no hc, starting out *with* EEG pos (error)
+    os.mkdir(op.join(temp_dir, 'no_hc'))
+    ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch)
+    shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname)
+    remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3]))
+    os.remove(remove_base + '.hc')
+    with pytest.warns(RuntimeWarning, match='MISC channel'):
+        pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname)
+    os.remove(remove_base + '.eeg')
+    shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'),
+                op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif'))
+
+    # All our files
+    use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames]
+    for fname in use_fnames:
+        raw_c = read_raw_fif(fname + '_raw.fif', preload=True)
+        # sometimes matches "MISC channel"
+        with _record_warnings():
+            raw = read_raw_ctf(fname)
+
+        # check info match
+        assert_array_equal(raw.ch_names, raw_c.ch_names)
+        assert_allclose(raw.times, raw_c.times)
+        assert_allclose(raw._cals, raw_c._cals)
+        assert (raw.info['meas_id']['version'] ==
+                raw_c.info['meas_id']['version'] + 1)
+        for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
+            assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
+                            rtol=1e-4, atol=1e-7)
+        # XXX 2019/11/29 : MNE-C FIF conversion files don't have meas_date set.
+        # Consider adding meas_date to below checks once this is addressed in
+        # MNE-C
+        for key in ('acq_pars', 'acq_stim', 'bads',
+                    'ch_names', 'custom_ref_applied', 'description',
+                    'events', 'experimenter', 'highpass', 'line_freq',
+                    'lowpass', 'nchan', 'proj_id', 'proj_name',
+                    'projs', 'sfreq', 'subject_info'):
+            assert raw.info[key] == raw_c.info[key], key
+        if op.basename(fname) not in single_trials:
+            # We don't force buffer size to be smaller like MNE-C
+            assert raw.buffer_size_sec == raw_c.buffer_size_sec
+        assert len(raw.info['comps']) == len(raw_c.info['comps'])
+        for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']):
+            for key in ('colcals', 'rowcals'):
+                assert_allclose(c1[key], c2[key])
+            assert c1['save_calibrated'] == c2['save_calibrated']
+            for key in ('row_names', 'col_names', 'nrow', 'ncol'):
+                assert_array_equal(c1['data'][key], c2['data'][key])
+            assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7,
+                            rtol=1e-5)
+        assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'],
+                        raw_c.info['hpi_results'][0]['coord_trans']['trans'],
+                        rtol=1e-5, atol=1e-7)
+        assert len(raw.info['chs']) == len(raw_c.info['chs'])
+        for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])):
+            for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul',
+                        'range', 'coord_frame', 'coil_type', 'logno'):
+                if c1['ch_name'] == 'RMSP' and \
+                        'catch-alp-good-f' in fname and \
+                        key in ('kind', 'unit', 'coord_frame', 'coil_type',
+                                'logno'):
+                    continue  # XXX see below...
+ if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG: + # XXX MNE-C bug that this is not set + assert c2[key] == FIFF.FIFFV_COIL_NONE + continue + assert c1[key] == c2[key], key + for key in ('cal',): + assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4, + err_msg='raw.info["chs"][%d][%s]' % (ii, key)) + # XXX 2016/02/24: fixed bug with normal computation that used + # to exist, once mne-C tools are updated we should update our FIF + # conversion files, then the slices can go away (and the check + # can be combined with that for "cal") + for key in ('loc',): + if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname: + continue + if (c2[key][:3] == 0.).all(): + check = [np.nan] * 3 + else: + check = c2[key][:3] + assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4, + err_msg='raw.info["chs"][%d][%s]' % (ii, key)) + if (c2[key][3:] == 0.).all(): + check = [np.nan] * 3 + else: + check = c2[key][9:12] + assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4, + err_msg='raw.info["chs"][%d][%s]' % (ii, key)) + + # Make sure all digitization points are in the MNE head coord frame + for p in raw.info['dig']: + assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \ + 'dig points must be in FIFF.FIFFV_COORD_HEAD' + + if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file + with raw.info._unlock(): + raw.info['dig'] = raw.info['dig'][:-10] + + # XXX: Next test would fail because c-tools assign the fiducials from + # CTF data as HPI. Should eventually clarify/unify with Matti. + # assert_dig_allclose(raw.info, raw_c.info) + + # check data match + raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.) + raw_read = read_raw_fif(out_fname) + + # so let's check tricky cases based on sample boundaries + rng = np.random.RandomState(0) + pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10] + bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec)) + assert bnd == raw._raw_extras[0]['block_size'] + assert bnd == block_sizes[op.basename(fname)] + slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), + slice(3, 300), slice(None)) + if len(raw.times) >= 2 * bnd: # at least two complete blocks + slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1), + slice(0, bnd + 100)) + for sl_time in slices: + assert_allclose(raw[pick_ch, sl_time][0], + raw_c[pick_ch, sl_time][0]) + assert_allclose(raw_read[pick_ch, sl_time][0], + raw_c[pick_ch, sl_time][0]) + # all data / preload + raw.load_data() + assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15) + # test bad segment annotations + if 'testdata_ctf_short.ds' in fname: + assert 'bad' in raw.annotations.description[0] + assert_allclose(raw.annotations.onset, [2.15]) + assert_allclose(raw.annotations.duration, [0.0225]) + + with pytest.raises(TypeError, match='path-like'): + read_raw_ctf(1) + with pytest.raises(FileNotFoundError, match='does not exist'): + read_raw_ctf(ctf_fname_continuous + 'foo.ds') + # test ignoring of system clock + read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore') + with pytest.raises(ValueError, match='system_clock'): + read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'foo') + + +@testing.requires_testing_data +def test_rawctf_clean_names(): + """Test RawCTF _clean_names method.""" + # read test data + with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'): + raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch)) + raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch), + clean_names=True) + test_channel_names = _clean_names(raw.ch_names) + test_info_comps = 
copy.deepcopy(raw.info['comps']) + + # channel names should not be cleaned by default + assert raw.ch_names != test_channel_names + + chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']] + + assert chs_ch_names != test_channel_names + + for test_comp, comp in zip(test_info_comps, raw.info['comps']): + for key in ('row_names', 'col_names'): + assert not array_equal(_clean_names(test_comp['data'][key]), + comp['data'][key]) + + # channel names should be cleaned if clean_names=True + assert raw_cleaned.ch_names == test_channel_names + + for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names): + assert ch['ch_name'] == test_ch_name + + for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']): + for key in ('row_names', 'col_names'): + assert _clean_names(test_comp['data'][key]) == comp['data'][key] + + +@spm_face.requires_spm_data +def test_read_spm_ctf(): + """Test CTF reader with omitted samples.""" + raw_fname = op.join(spm_path, 'MEG', 'spm', + 'SPM_CTF_MEG_example_faces1_3D.ds') + raw = read_raw_ctf(raw_fname) + extras = raw._raw_extras[0] + assert extras['n_samp'] == raw.n_times + assert extras['n_samp'] != extras['n_samp_tot'] + + # Test that LPA, nasion and RPA are correct. + coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']]) + assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD) + cardinals = {d['ident']: d['r'] for d in raw.info['dig']} + assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord + assert cardinals[1][1] < cardinals[2][1] # y coord + assert cardinals[3][1] < cardinals[2][1] # y coord + for key in cardinals.keys(): + assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord + + +@testing.requires_testing_data +@pytest.mark.parametrize('comp_grade', [0, 1]) +def test_saving_picked(tmp_path, comp_grade): + """Test saving picked CTF instances.""" + temp_dir = str(tmp_path) + out_fname = op.join(temp_dir, 'test_py_raw.fif') + raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial)) + assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0)) + raw.crop(0, 1).load_data() + assert raw.compensation_grade == get_current_comp(raw.info) == 0 + assert len(raw.info['comps']) == 5 + pick_kwargs = dict(meg=True, ref_meg=False, verbose=True) + + raw.apply_gradient_compensation(comp_grade) + with catch_logging() as log: + raw_pick = raw.copy().pick_types(**pick_kwargs) + assert len(raw.info['comps']) == 5 + assert len(raw_pick.info['comps']) == 0 + log = log.getvalue() + assert 'Removing 5 compensators' in log + raw_pick.save(out_fname, overwrite=True) # should work + raw2 = read_raw_fif(out_fname) + assert (raw_pick.ch_names == raw2.ch_names) + assert_array_equal(raw_pick.times, raw2.times) + assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, + atol=1e-20) # atol is very small but > 0 + + raw2 = read_raw_fif(out_fname, preload=True) + assert (raw_pick.ch_names == raw2.ch_names) + assert_array_equal(raw_pick.times, raw2.times) + assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, + atol=1e-20) # atol is very small but > 0 + + +@brainstorm.bst_raw.requires_bstraw_data +def test_read_ctf_annotations(): + """Test reading CTF marker file.""" + EXPECTED_LATENCIES = np.array([ + 5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa + 22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa + 38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa + 56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa + 73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa + 90746, 
92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa + 105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa + 121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa + 139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa + 156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa + 174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa + 192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa + 209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa + 226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa + 243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa + 260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa + 278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa + 295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa + 312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa + 329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa + 344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa + 361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa + 378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa + 396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa + 413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa + 429278, 431668 # noqa + ]) - 1 # Fieldtrip has 1 sample difference with MNE + + raw = RawArray( + data=np.empty((1, 432000), dtype=np.float64), + info=create_info(ch_names=1, sfreq=1200.0)) + raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date']) + raw.set_annotations(read_annotations(somato_fname)) + + events, _ = events_from_annotations(raw) + latencies = np.sort(events[:, 0]) + assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6) + + +@testing.requires_testing_data +def test_read_ctf_annotations_smoke_test(): + """Test reading CTF marker file. + + `testdata_ctf_mc.ds` has no trials or offsets therefore its a plain reading + of whatever is in the MarkerFile.mrk. 
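+    (With no trial structure, the expected onsets below are simply the
+    markers' sync times, unshifted.)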
+ """ + EXPECTED_ONSET = [ + 0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667, + 0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667, + 1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333, + 2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57, + 3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667, + 4.57, 4.7125, 4.85583333, 4.99833333 + ] + fname = op.join(ctf_dir, 'testdata_ctf_mc.ds') + annot = read_annotations(fname) + assert_allclose(annot.onset, EXPECTED_ONSET) + + raw = read_raw_ctf(fname) + _assert_annotations_equal(raw.annotations, annot, 1e-6) + + +def _read_res4_mag_comp(dsdir): + res = mne.io.ctf.res4._read_res4(dsdir) + for ch in res['chs']: + if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: + ch['grad_order_no'] = 1 + return res + + +def _bad_res4_grad_comp(dsdir): + res = mne.io.ctf.res4._read_res4(dsdir) + for ch in res['chs']: + if ch['sensor_type_index'] == CTF.CTFV_MEG_CH: + ch['grad_order_no'] = 1 + break + return res + + +@testing.requires_testing_data +def test_read_ctf_mag_bad_comp(tmp_path, monkeypatch): + """Test CTF reader with mag comps and bad comps.""" + path = op.join(ctf_dir, ctf_fname_continuous) + raw_orig = read_raw_ctf(path) + assert raw_orig.compensation_grade == 0 + monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp) + raw_mag_comp = read_raw_ctf(path) + assert raw_mag_comp.compensation_grade == 0 + sphere = mne.make_sphere_model() + src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere) + assert src[0]['nuse'] == 26 + for grade in (0, 1): + raw_orig.apply_gradient_compensation(grade) + raw_mag_comp.apply_gradient_compensation(grade) + args = (None, src, sphere, True, False) + fwd_orig = make_forward_solution(raw_orig.info, *args) + fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args) + assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data']) + monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp) + with pytest.raises(RuntimeError, match='inconsistent compensation grade'): + read_raw_ctf(path) diff --git a/python/libs/mne/io/ctf/trans.py b/python/libs/mne/io/ctf/trans.py new file mode 100644 index 0000000..0497518 --- /dev/null +++ b/python/libs/mne/io/ctf/trans.py @@ -0,0 +1,114 @@ +"""Create coordinate transforms.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np + +from ...transforms import (combine_transforms, invert_transform, Transform, + _quat_to_affine, _fit_matched_points, apply_trans, + get_ras_to_neuromag_trans) +from ...utils import logger +from ..constants import FIFF +from .constants import CTF + + +def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa): + """Make a transform from cardinal landmarks.""" + return invert_transform(Transform( + to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa))) + + +def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4): + """Perform an alignment using the unit quaternions (modifies points).""" + assert from_pts.shape[1] == to_pts.shape[1] == 3 + trans = _quat_to_affine(_fit_matched_points(from_pts, to_pts)[0]) + + # Test the transformation and print the results + logger.info(' Quaternion matching (desired vs. 
transformed):') + for fro, to in zip(from_pts, to_pts): + rr = apply_trans(trans, fro) + diff = np.linalg.norm(to - rr) + logger.info(' %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm ' + '(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm' + % (tuple(1000 * to) + tuple(1000 * rr) + + tuple(1000 * fro) + (1000 * diff,))) + if diff > diff_tol: + raise RuntimeError('Something is wrong: quaternion matching did ' + 'not work (see above)') + return Transform(from_frame, to_frame, trans) + + +def _make_ctf_coord_trans_set(res4, coils): + """Figure out the necessary coordinate transforms.""" + # CTF head > Neuromag head + lpa = rpa = nas = T1 = T2 = T3 = T5 = None + if coils is not None: + for p in coils: + if p['valid'] and (p['coord_frame'] == + FIFF.FIFFV_MNE_COORD_CTF_HEAD): + if lpa is None and p['kind'] == CTF.CTFV_COIL_LPA: + lpa = p + elif rpa is None and p['kind'] == CTF.CTFV_COIL_RPA: + rpa = p + elif nas is None and p['kind'] == CTF.CTFV_COIL_NAS: + nas = p + if lpa is None or rpa is None or nas is None: + raise RuntimeError('Some of the mandatory HPI device-coordinate ' + 'info was not there.') + t = _make_transform_card('head', 'ctf_head', + lpa['r'], nas['r'], rpa['r']) + T3 = invert_transform(t) + + # CTF device -> Neuromag device + # + # Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm + # in z direction to get a coordinate system comparable to the Neuromag one + # + R = np.eye(4) + R[:3, 3] = [0., 0., 0.19] + val = 0.5 * np.sqrt(2.) + R[0, 0] = val + R[0, 1] = -val + R[1, 0] = val + R[1, 1] = val + T4 = Transform('ctf_meg', 'meg', R) + + # CTF device -> CTF head + # We need to make the implicit transform explicit! + h_pts = dict() + d_pts = dict() + kinds = (CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS, + CTF.CTFV_COIL_SPARE) + if coils is not None: + for p in coils: + if p['valid']: + if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + for kind in kinds: + if kind not in h_pts and p['kind'] == kind: + h_pts[kind] = p['r'] + elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + for kind in kinds: + if kind not in d_pts and p['kind'] == kind: + d_pts[kind] = p['r'] + if any(kind not in h_pts for kind in kinds[:-1]): + raise RuntimeError('Some of the mandatory HPI device-coordinate ' + 'info was not there.') + if any(kind not in d_pts for kind in kinds[:-1]): + raise RuntimeError('Some of the mandatory HPI head-coordinate ' + 'info was not there.') + use_kinds = [kind for kind in kinds + if (kind in h_pts and kind in d_pts)] + r_head = np.array([h_pts[kind] for kind in use_kinds]) + r_dev = np.array([d_pts[kind] for kind in use_kinds]) + T2 = _quaternion_align('ctf_meg', 'ctf_head', r_dev, r_head) + + # The final missing transform + if T3 is not None and T2 is not None: + T5 = combine_transforms(T2, T3, 'ctf_meg', 'head') + T1 = combine_transforms(invert_transform(T4), T5, 'meg', 'head') + s = dict(t_dev_head=T1, t_ctf_dev_ctf_head=T2, t_ctf_head_head=T3, + t_ctf_dev_dev=T4, t_ctf_dev_head=T5) + logger.info(' Coordinate transformations established.') + return s diff --git a/python/libs/mne/io/ctf_comp.py b/python/libs/mne/io/ctf_comp.py new file mode 100644 index 0000000..61fca91 --- /dev/null +++ b/python/libs/mne/io/ctf_comp.py @@ -0,0 +1,186 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Denis Engemann +# +# License: BSD-3-Clause + +from copy import deepcopy + +import numpy as np + +from .constants import FIFF +from .tag import read_tag +from .tree import dir_tree_find +from .write import start_block, end_block, write_int +from .matrix 
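The T4 transform built in `_make_ctf_coord_trans_set` above is just a 45 degree rotation about z plus a 190 mm shift along z. A minimal standalone numpy sketch of the same affine (illustrative, not part of the vendored code):

import numpy as np

# Same construction as T4: rotate 45 degrees about z, shift 0.19 m up.
R = np.eye(4)
R[:3, 3] = [0., 0., 0.19]
val = 0.5 * np.sqrt(2.)  # cos(45 deg) == sin(45 deg)
R[:2, :2] = [[val, -val], [val, val]]

# The rotation block must be orthonormal with determinant +1.
assert np.allclose(R[:3, :3] @ R[:3, :3].T, np.eye(3))
assert np.isclose(np.linalg.det(R[:3, :3]), 1.)

# A point on the CTF x-axis lands between +x and +y, 19 cm higher.
print(R @ [1., 0., 0., 1.])  # [0.7071..., 0.7071..., 0.19, 1.]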
import write_named_matrix, _read_named_matrix + +from ..utils import logger, verbose, _pl + + +def _add_kind(one): + """Convert CTF kind to MNE kind.""" + if one['ctfkind'] == int('47314252', 16): + one['kind'] = 1 + elif one['ctfkind'] == int('47324252', 16): + one['kind'] = 2 + elif one['ctfkind'] == int('47334252', 16): + one['kind'] = 3 + else: + one['kind'] = int(one['ctfkind']) + + +def _calibrate_comp(comp, chs, row_names, col_names, + mult_keys=('range', 'cal'), flip=False): + """Get row and column cals.""" + ch_names = [c['ch_name'] for c in chs] + row_cals = np.zeros(len(row_names)) + col_cals = np.zeros(len(col_names)) + for names, cals, inv in zip((row_names, col_names), (row_cals, col_cals), + (False, True)): + for ii in range(len(cals)): + p = ch_names.count(names[ii]) + if p != 1: + raise RuntimeError('Channel %s does not appear exactly once ' + 'in data, found %d instance%s' + % (names[ii], p, _pl(p))) + idx = ch_names.index(names[ii]) + val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]] + val = float(1. / val) if inv else float(val) + val = 1. / val if flip else val + cals[ii] = val + comp['rowcals'] = row_cals + comp['colcals'] = col_cals + comp['data']['data'] = (row_cals[:, None] * + comp['data']['data'] * col_cals[None, :]) + + +@verbose +def read_ctf_comp(fid, node, chs, verbose=None): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + return _read_ctf_comp(fid, node, chs, None) + + +def _read_ctf_comp(fid, node, chs, ch_names_mapping): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + ch_names_mapping : dict | None + The channel renaming to use. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + from .meas_info import _rename_comps + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + compdata = [] + comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + for node in comps: + # Read the data we need + mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA) + for p in range(node['nent']): + kind = node['directory'][p].kind + pos = node['directory'][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_KIND: + tag = read_tag(fid, pos) + break + else: + raise Exception('Compensation type not found') + + # Get the compensation kind and map it to a simple number + one = dict(ctfkind=tag.data) + del tag + _add_kind(one) + for p in range(node['nent']): + kind = node['directory'][p].kind + pos = node['directory'][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED: + tag = read_tag(fid, pos) + calibrated = tag.data + break + else: + calibrated = False + + one['save_calibrated'] = bool(calibrated) + one['data'] = mat + _rename_comps([one], ch_names_mapping) + if not calibrated: + # Calibrate... 
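`_calibrate_comp` above folds per-channel calibrations into the compensation matrix by scaling its rows and columns; `write_ctf_comp` below undoes the same scaling before writing when the matrix was stored uncalibrated. A toy round trip with invented numbers:

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((2, 3))      # compensation matrix, 2 x 3 channels
row_cals = np.array([2., 4.])           # per-row calibration factors
col_cals = np.array([0.5, 1., 2.])      # per-column calibration factors

calibrated = row_cals[:, None] * data * col_cals[None, :]
restored = (1. / row_cals[:, None]) * calibrated * (1. / col_cals[None, :])
assert np.allclose(restored, data)      # write_ctf_comp's inverse recovers it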
+ _calibrate_comp(one, chs, mat['row_names'], mat['col_names']) + else: + one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float64) + one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float64) + + compdata.append(one) + + if len(compdata) > 0: + logger.info(' Read %d compensation matrices' % len(compdata)) + + return compdata + + +############################################################################### +# Writing + +def write_ctf_comp(fid, comps): + """Write the CTF compensation data into a fif file. + + Parameters + ---------- + fid : file + The open FIF file descriptor + + comps : list + The compensation data to write + """ + if len(comps) <= 0: + return + + # This is very simple in fact + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP) + for comp in comps: + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + # Write the compensation kind + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind']) + if comp.get('save_calibrated', False): + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, + comp['save_calibrated']) + + if not comp.get('save_calibrated', True): + # Undo calibration + comp = deepcopy(comp) + data = ((1. / comp['rowcals'][:, None]) * comp['data']['data'] * + (1. / comp['colcals'][None, :])) + comp['data']['data'] = data + write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data']) + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP) diff --git a/python/libs/mne/io/curry/__init__.py b/python/libs/mne/io/curry/__init__.py new file mode 100644 index 0000000..ae51436 --- /dev/null +++ b/python/libs/mne/io/curry/__init__.py @@ -0,0 +1,7 @@ +"""Reader for CURRY data.""" + +# Author: Dirk Gütlin +# +# License: BSD-3-Clause + +from .curry import read_raw_curry diff --git a/python/libs/mne/io/curry/curry.py b/python/libs/mne/io/curry/curry.py new file mode 100644 index 0000000..f3cb506 --- /dev/null +++ b/python/libs/mne/io/curry/curry.py @@ -0,0 +1,541 @@ +# -*- coding: UTF-8 -*- +# +# Authors: Dirk Gütlin +# +# +# License: BSD-3-Clause + +import os.path as op +from collections import namedtuple +import re +import numpy as np +from datetime import datetime, timezone + +from ..base import BaseRaw +from ..meas_info import create_info +from ..tag import _coil_trans_to_loc +from ..utils import _read_segments_file, _mult_cal_one +from ..constants import FIFF +from ..ctf.trans import _quaternion_align +from ...surface import _normal_orth +from ...transforms import (apply_trans, Transform, get_ras_to_neuromag_trans, + combine_transforms, invert_transform, + _angle_between_quats, rot_to_quat) +from ...utils import (check_fname, check_version, logger, verbose, warn, + _check_fname) +from ...annotations import Annotations + +FILE_EXTENSIONS = { + "Curry 7": { + "info": ".dap", + "data": ".dat", + "labels": ".rs3", + "events_cef": ".cef", + "events_ceo": ".ceo", + "hpi": ".hpi", + }, + "Curry 8": { + "info": ".cdt.dpa", + "data": ".cdt", + "labels": ".cdt.dpa", + "events_cef": ".cdt.cef", + "events_ceo": ".cdt.ceo", + "hpi": ".cdt.hpi", + } +} +CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"} +FIFFV_CHANTYPES = {"meg": FIFF.FIFFV_MEG_CH, "eeg": FIFF.FIFFV_EEG_CH, + "misc": FIFF.FIFFV_MISC_CH} +FIFFV_COILTYPES = {"meg": FIFF.FIFFV_COIL_CTF_GRAD, "eeg": FIFF.FIFFV_COIL_EEG, + "misc": FIFF.FIFFV_COIL_NONE} +SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T) +SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15) + +CurryParameters = namedtuple('CurryParameters', + 'n_samples, sfreq, is_ascii, 
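The SI_UNITS and SI_UNIT_SCALE tables above are later indexed with the two halves of a Curry unit string such as 'uV': the prefix gives the channel's 'cal' factor and the base letter its FIFF unit. A sketch of that lookup (`parse_curry_unit` and the string values are invented here; the real table maps to opaque FIFF constants):

UNIT_BASE = dict(V='FIFF_UNIT_V', T='FIFF_UNIT_T')  # stand-in for SI_UNITS
UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, n=1e-9, p=1e-12, f=1e-15)

def parse_curry_unit(unit):
    # Mirrors _read_curry_info: unit[0] is the SI prefix, unit[1] the base.
    return UNIT_SCALE[unit[0]], UNIT_BASE[unit[1]]

print(parse_curry_unit('uV'))  # (1e-06, 'FIFF_UNIT_V')
print(parse_curry_unit('fT'))  # (1e-15, 'FIFF_UNIT_T')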
unit_dict, ' + 'n_chans, dt_start, chanidx_in_file') + + +def _get_curry_version(file_extension): + """Check out the curry file version.""" + return "Curry 8" if "cdt" in file_extension else "Curry 7" + + +def _get_curry_file_structure(fname, required=()): + """Store paths to a dict and check for required files.""" + _msg = "The following required files cannot be found: {0}.\nPlease make " \ + "sure all required files are located in the same directory as {1}." + fname = _check_fname(fname, 'read', True, 'fname') + + # we don't use os.path.splitext to also handle extensions like .cdt.dpa + fname_base, ext = fname.split(".", maxsplit=1) + version = _get_curry_version(ext) + my_curry = dict() + for key in ('info', 'data', 'labels', 'events_cef', 'events_ceo', 'hpi'): + fname = fname_base + FILE_EXTENSIONS[version][key] + if op.isfile(fname): + _key = 'events' if key.startswith('events') else key + my_curry[_key] = fname + + missing = [field for field in required if field not in my_curry] + if missing: + raise FileNotFoundError(_msg.format(np.unique(missing), fname)) + + return my_curry + + +def _read_curry_lines(fname, regex_list): + """Read through the lines of a curry parameter files and save data. + + Parameters + ---------- + fname : str + Path to a curry file. + regex_list : list of str + A list of strings or regular expressions to search within the file. + Each element `regex` in `regex_list` must be formulated so that + `regex + " START_LIST"` initiates the start and `regex + " END_LIST"` + initiates the end of the elements that should be saved. + + Returns + ------- + data_dict : dict + A dictionary containing the extracted data. For each element `regex` + in `regex_list` a dictionary key `data_dict[regex]` is created, which + contains a list of the according data. + + """ + save_lines = {} + data_dict = {} + + for regex in regex_list: + save_lines[regex] = False + data_dict[regex] = [] + + with open(fname) as fid: + for line in fid: + for regex in regex_list: + if re.match(regex + " END_LIST", line): + save_lines[regex] = False + + if save_lines[regex] and line != "\n": + result = line.replace("\n", "") + if "\t" in result: + result = result.split("\t") + data_dict[regex].append(result) + + if re.match(regex + " START_LIST", line): + save_lines[regex] = True + + return data_dict + + +def _read_curry_parameters(fname): + """Extract Curry params from a Curry info file.""" + _msg_match = "The sampling frequency and the time steps extracted from " \ + "the parameter file do not match." + _msg_invalid = "sfreq must be greater than 0. 
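`_read_curry_lines` above is a small state machine keyed on 'X START_LIST' / 'X END_LIST' sentinel lines. A trimmed, self-contained rendition (invented name, plain startswith instead of re.match, no tab splitting) showing the shape of the output:

import io

def read_lists(lines, keys):
    save = {k: False for k in keys}
    out = {k: [] for k in keys}
    for line in lines:
        for k in keys:
            if line.startswith(k + " END_LIST"):
                save[k] = False
            if save[k] and line.strip():
                out[k].append(line.strip())
            if line.startswith(k + " START_LIST"):
                save[k] = True
    return out

text = "LABELS START_LIST\nFp1\nFp2\nLABELS END_LIST\n"
print(read_lists(io.StringIO(text), ["LABELS"]))  # {'LABELS': ['Fp1', 'Fp2']}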
Got sfreq = {0}" + + var_names = ['NumSamples', 'SampleFreqHz', + 'DataFormat', 'SampleTimeUsec', + 'NumChannels', + 'StartYear', 'StartMonth', 'StartDay', 'StartHour', + 'StartMin', 'StartSec', 'StartMillisec', + 'NUM_SAMPLES', 'SAMPLE_FREQ_HZ', + 'DATA_FORMAT', 'SAMPLE_TIME_USEC', + 'NUM_CHANNELS', + 'START_YEAR', 'START_MONTH', 'START_DAY', 'START_HOUR', + 'START_MIN', 'START_SEC', 'START_MILLISEC'] + + param_dict = dict() + unit_dict = dict() + + with open(fname) as fid: + for line in iter(fid): + if any(var_name in line for var_name in var_names): + key, val = line.replace(" ", "").replace("\n", "").split("=") + param_dict[key.lower().replace("_", "")] = val + for type in CHANTYPES: + if "DEVICE_PARAMETERS" + CHANTYPES[type] + " START" in line: + data_unit = next(fid) + unit_dict[type] = data_unit.replace(" ", "") \ + .replace("\n", "").split("=")[-1] + + # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391 + types = ["meg", "eeg", "misc"] + chanidx_in_file = _read_curry_lines(fname, + ["CHAN_IN_FILE" + + CHANTYPES[key] for key in types]) + + n_samples = int(param_dict["numsamples"]) + sfreq = float(param_dict["samplefreqhz"]) + time_step = float(param_dict["sampletimeusec"]) * 1e-6 + is_ascii = param_dict["dataformat"] == "ASCII" + n_channels = int(param_dict["numchannels"]) + try: + dt_start = datetime(int(param_dict["startyear"]), + int(param_dict["startmonth"]), + int(param_dict["startday"]), + int(param_dict["starthour"]), + int(param_dict["startmin"]), + int(param_dict["startsec"]), + int(param_dict["startmillisec"]) * 1000, + timezone.utc) + # Note that the time zone information is not stored in the Curry info + # file, and it seems the start time info is in the local timezone + # of the acquisition system (which is unknown); therefore, just set + # the timezone to be UTC. If the user knows otherwise, they can + # change it later. (Some Curry files might include StartOffsetUTCMin, + # but its presence is unpredictable, so we won't rely on it.) 
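One detail worth calling out in the dt_start construction above: Curry stores StartMillisec, while datetime wants microseconds, hence the * 1000. Worked with the start date that test_meas_date near the end of this patch expects:

from datetime import datetime, timezone

dt_start = datetime(2018, 11, 21, 12, 53, 48,
                    525 * 1000,  # 525 ms -> 525000 us
                    timezone.utc)
print(dt_start)  # 2018-11-21 12:53:48.525000+00:00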
+ except (ValueError, KeyError): + dt_start = None # if missing keywords or illegal values, don't set + + if time_step == 0: + true_sfreq = sfreq + elif sfreq == 0: + true_sfreq = 1 / time_step + elif not np.isclose(sfreq, 1 / time_step): + raise ValueError(_msg_match) + else: # they're equal and != 0 + true_sfreq = sfreq + if true_sfreq <= 0: + raise ValueError(_msg_invalid.format(true_sfreq)) + + return CurryParameters(n_samples, true_sfreq, is_ascii, unit_dict, + n_channels, dt_start, chanidx_in_file) + + +def _read_curry_info(curry_paths): + """Extract info from curry parameter files.""" + curry_params = _read_curry_parameters(curry_paths['info']) + R = np.eye(4) + R[[0, 1], [0, 1]] = -1 # rotate 180 deg + # shift down and back + # (chosen by eyeballing to make the CTF helmet look roughly correct) + R[:3, 3] = [0., -0.015, -0.12] + curry_dev_dev_t = Transform('ctf_meg', 'meg', R) + + # read labels from label files + label_fname = curry_paths['labels'] + types = ["meg", "eeg", "misc"] + labels = _read_curry_lines(label_fname, + ["LABELS" + CHANTYPES[key] for key in types]) + sensors = _read_curry_lines(label_fname, + ["SENSORS" + CHANTYPES[key] for key in types]) + normals = _read_curry_lines(label_fname, + ['NORMALS' + CHANTYPES[key] for key in types]) + assert len(labels) == len(sensors) == len(normals) + + all_chans = list() + for key in ["meg", "eeg", "misc"]: + chanidx_is_explicit = (len(curry_params.chanidx_in_file["CHAN_IN_FILE" + + CHANTYPES[key]]) > 0) # channel index + # position in the datafile may or may not be explicitly declared, + # based on the CHAN_IN_FILE section in info file + for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]): + chanidx = len(all_chans) + 1 # by default, just assume the + # channel index in the datafile is in order of the channel + # names as we found them in the labels file + if chanidx_is_explicit: # but, if explicitly declared, use + # that index number + chanidx = int(curry_params.chanidx_in_file["CHAN_IN_FILE" + + CHANTYPES[key]][ind]) + if chanidx <= 0: # if chanidx was explicitly declared to be ' 0', + # it means the channel is not actually saved in the data file + # (e.g. the "Ref" channel), so don't add it to our list. + # Git issue #8391 + continue + ch = {"ch_name": chan, + "unit": curry_params.unit_dict[key], + "kind": FIFFV_CHANTYPES[key], + "coil_type": FIFFV_COILTYPES[key], + "ch_idx": chanidx + } + if key == "eeg": + loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) + # XXX just the sensor, where is ref (next 3)? + assert loc.shape == (3,) + loc /= 1000. # to meters + loc = np.concatenate([loc, np.zeros(9)]) + ch['loc'] = loc + # XXX need to check/ensure this + ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + elif key == 'meg': + pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) + pos /= 1000. # to meters + pos = pos[:3] # just the inner coil + pos = apply_trans(curry_dev_dev_t, pos) + nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float) + assert np.isclose(np.linalg.norm(nn), 1., atol=1e-4) + nn /= np.linalg.norm(nn) + nn = apply_trans(curry_dev_dev_t, nn, move=False) + trans = np.eye(4) + trans[:3, 3] = pos + trans[:3, :3] = _normal_orth(nn).T + ch['loc'] = _coil_trans_to_loc(trans) + ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE + all_chans.append(ch) + + ch_count = len(all_chans) + assert (ch_count == curry_params.n_chans) # ensure that we have assembled + # the same number of channels as declared in the info (.DAP) file in the + # DATA_PARAMETERS section. 
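The sfreq / SampleTimeUsec reconciliation above is a small decision table; restated standalone (`resolve_sfreq` is a name invented here) and exercised with the 500 Hz / 2000 us combinations the sfreq fixtures below feed in:

import numpy as np

def resolve_sfreq(sfreq, time_step):
    if time_step == 0:
        true_sfreq = sfreq                 # only SampleFreqHz given
    elif sfreq == 0:
        true_sfreq = 1 / time_step         # only SampleTimeUsec given
    elif not np.isclose(sfreq, 1 / time_step):
        raise ValueError('sfreq and time step do not match')
    else:
        true_sfreq = sfreq                 # both given and consistent
    if true_sfreq <= 0:
        raise ValueError(f'sfreq must be greater than 0. Got {true_sfreq}')
    return true_sfreq

print(resolve_sfreq(500., 0))        # 500.0
print(resolve_sfreq(0., 2000e-6))    # 500.0
print(resolve_sfreq(500., 2000e-6))  # 500.0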
Git issue #8391 + + # sort the channels to assure they are in the order that matches how + # recorded in the datafile. In general they most likely are already in + # the correct order, but if the channel index in the data file was + # explicitly declared we might as well use it. + all_chans = sorted(all_chans, key=lambda ch: ch['ch_idx']) + + ch_names = [chan["ch_name"] for chan in all_chans] + info = create_info(ch_names, curry_params.sfreq) + with info._unlock(): + info['meas_date'] = curry_params.dt_start # for Git issue #8398 + _make_trans_dig(curry_paths, info, curry_dev_dev_t) + + for ind, ch_dict in enumerate(info["chs"]): + all_chans[ind].pop('ch_idx') + ch_dict.update(all_chans[ind]) + assert ch_dict['loc'].shape == (12,) + ch_dict['unit'] = SI_UNITS[all_chans[ind]['unit'][1]] + ch_dict['cal'] = SI_UNIT_SCALE[all_chans[ind]['unit'][0]] + + return info, curry_params.n_samples, curry_params.is_ascii + + +_card_dict = {'Left ear': FIFF.FIFFV_POINT_LPA, + 'Nasion': FIFF.FIFFV_POINT_NASION, + 'Right ear': FIFF.FIFFV_POINT_RPA} + + +def _make_trans_dig(curry_paths, info, curry_dev_dev_t): + # Coordinate frame transformations and definitions + no_msg = 'Leaving device<->head transform as None' + info['dev_head_t'] = None + label_fname = curry_paths['labels'] + key = 'LANDMARKS' + CHANTYPES['meg'] + lm = _read_curry_lines(label_fname, [key])[key] + lm = np.array(lm, float) + lm.shape = (-1, 3) + if len(lm) == 0: + # no dig + logger.info(no_msg + ' (no landmarks found)') + return + lm /= 1000. + key = 'LM_REMARKS' + CHANTYPES['meg'] + remarks = _read_curry_lines(label_fname, [key])[key] + assert len(remarks) == len(lm) + with info._unlock(): + info['dig'] = list() + cards = dict() + for remark, r in zip(remarks, lm): + kind = ident = None + if remark in _card_dict: + kind = FIFF.FIFFV_POINT_CARDINAL + ident = _card_dict[remark] + cards[ident] = r + elif remark.startswith('HPI'): + kind = FIFF.FIFFV_POINT_HPI + ident = int(remark[3:]) - 1 + if kind is not None: + info['dig'].append(dict( + kind=kind, ident=ident, r=r, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) + with info._unlock(): + info['dig'].sort(key=lambda x: (x['kind'], x['ident'])) + has_cards = len(cards) == 3 + has_hpi = 'hpi' in curry_paths + if has_cards and has_hpi: # have all three + logger.info('Composing device<->head transformation from dig points') + hpi_u = np.array([d['r'] for d in info['dig'] + if d['kind'] == FIFF.FIFFV_POINT_HPI], float) + hpi_c = np.ascontiguousarray( + _first_hpi(curry_paths['hpi'])[:len(hpi_u), 1:4]) + unknown_curry_t = _quaternion_align( + 'unknown', 'ctf_meg', hpi_u, hpi_c, 1e-2) + angle = np.rad2deg(_angle_between_quats( + np.zeros(3), rot_to_quat(unknown_curry_t['trans'][:3, :3]))) + dist = 1000 * np.linalg.norm(unknown_curry_t['trans'][:3, 3]) + logger.info(' Fit a %0.1f° rotation, %0.1f mm translation' + % (angle, dist)) + unknown_dev_t = combine_transforms( + unknown_curry_t, curry_dev_dev_t, 'unknown', 'meg') + unknown_head_t = Transform( + 'unknown', 'head', + get_ras_to_neuromag_trans( + *(cards[key] for key in (FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_RPA)))) + with info._unlock(): + info['dev_head_t'] = combine_transforms( + invert_transform(unknown_dev_t), unknown_head_t, 'meg', 'head') + for d in info['dig']: + d.update(coord_frame=FIFF.FIFFV_COORD_HEAD, + r=apply_trans(unknown_head_t, d['r'])) + else: + if has_cards: + no_msg += ' (no .hpi file found)' + elif has_hpi: + no_msg += ' (not all cardinal points found)' + else: + no_msg += ' (neither cardinal 
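`get_ras_to_neuromag_trans`, used above for the cardinal-point transforms, defines the head frame from nasion/LPA/RPA: +x runs from LPA to RPA, +y points toward the nasion, origin between the ears. A from-scratch sketch of that construction (not MNE's implementation):

import numpy as np

def head_frame(nasion, lpa, rpa):
    nasion, lpa, rpa = (np.asarray(p, float) for p in (nasion, lpa, rpa))
    x = rpa - lpa
    x /= np.linalg.norm(x)
    origin = lpa + np.dot(nasion - lpa, x) * x  # foot of the perpendicular
    y = nasion - origin
    y /= np.linalg.norm(y)
    z = np.cross(x, y)
    trans = np.eye(4)
    trans[:3, :3] = np.vstack((x, y, z))  # rows map RAS -> head axes
    trans[:3, 3] = -trans[:3, :3] @ origin
    return trans

t = head_frame([0., .1, 0.], [-.07, 0., 0.], [.07, 0., 0.])
print(t @ [0., .1, 0., 1.])  # nasion ends up on the +y axis: [0, 0.1, 0, 1]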
points nor .hpi file found)' + logger.info(no_msg) + + +def _first_hpi(fname): + # Get the first HPI result + with open(fname, 'r') as fid: + for line in fid: + line = line.strip() + if any(x in line for x in ('FileVersion', 'NumCoils')) or not line: + continue + hpi = np.array(line.split(), float) + break + else: + raise RuntimeError('Could not find valid HPI in %s' % (fname,)) + # t is the first enttry + assert hpi.ndim == 1 + hpi = hpi[1:] + hpi.shape = (-1, 5) + hpi /= 1000. + return hpi + + +def _read_events_curry(fname): + """Read events from Curry event files. + + Parameters + ---------- + fname : str + Path to a curry event file with extensions .cef, .ceo, + .cdt.cef, or .cdt.ceo + + Returns + ------- + events : ndarray, shape (n_events, 3) + The array of events. + """ + check_fname(fname, 'curry event', ('.cef', '.ceo', '.cdt.cef', '.cdt.ceo'), + endings_err=('.cef', '.ceo', '.cdt.cef', '.cdt.ceo')) + + events_dict = _read_curry_lines(fname, ["NUMBER_LIST"]) + # The first 3 column seem to contain the event information + curry_events = np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3] + + return curry_events + + +def _read_annotations_curry(fname, sfreq='auto'): + r"""Read events from Curry event files. + + Parameters + ---------- + fname : str + The filename. + sfreq : float | 'auto' + The sampling frequency in the file. If set to 'auto' then the + ``sfreq`` is taken from the respective info file of the same name with + according file extension (\*.dap for Curry 7; \*.cdt.dpa for Curry8). + So data.cef looks in data.dap and data.cdt.cef looks in data.cdt.dpa. + + Returns + ------- + annot : instance of Annotations | None + The annotations. + """ + required = ["events", "info"] if sfreq == 'auto' else ["events"] + curry_paths = _get_curry_file_structure(fname, required) + events = _read_events_curry(curry_paths['events']) + + if sfreq == 'auto': + sfreq = _read_curry_parameters(curry_paths['info']).sfreq + + onset = events[:, 0] / sfreq + duration = np.zeros(events.shape[0]) + description = events[:, 2] + + return Annotations(onset, duration, description) + + +@verbose +def read_raw_curry(fname, preload=False, verbose=None): + """Read raw data from Curry files. + + Parameters + ---------- + fname : str + Path to a curry file with extensions .dat, .dap, .rs3, .cdt, cdt.dpa, + .cdt.cef or .cef. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawCurry + A Raw object containing Curry data. + """ + return RawCurry(fname, preload, verbose) + + +class RawCurry(BaseRaw): + """Raw object from Curry file. + + Parameters + ---------- + fname : str + Path to a curry file with extensions .dat, .dap, .rs3, .cdt, cdt.dpa, + .cdt.cef or .cef. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + + curry_paths = _get_curry_file_structure( + fname, required=["info", "data", "labels"]) + + data_fname = op.abspath(curry_paths['data']) + + info, n_samples, is_ascii = _read_curry_info(curry_paths) + + last_samps = [n_samples - 1] + raw_extras = dict(is_ascii=is_ascii) + + super(RawCurry, self).__init__( + info, preload, filenames=[data_fname], last_samps=last_samps, + orig_format='int', raw_extras=[raw_extras], verbose=verbose) + + if 'events' in curry_paths: + logger.info('Event file found. Extracting Annotations from' + ' %s...' 
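`_first_hpi` above reduces to: drop the leading time stamp, reshape into rows of (flag, x, y, z, fit error), and convert mm to m; only columns 1:4 (the coil positions) are used afterwards. A standalone sketch on a shortened row in the style of the HPI_CONTENT fixture below:

import numpy as np

line = ('0 1 -50.67 50.98 133.15 0.006406 '
        '1 46.45 51.51 143.15 0.006789 '
        '0 0.00 0.00 0.00 0.000000')
vals = np.array(line.split(), float)[1:]  # first entry is the time stamp
coils = vals.reshape(-1, 5) / 1000.       # mm -> m, as in _first_hpi
print(coils[:, 1:4])                      # per-coil positions in meters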
% curry_paths['events']) + annots = _read_annotations_curry(curry_paths['events'], + sfreq=self.info["sfreq"]) + self.set_annotations(annots) + else: + logger.info('Event file not found. No Annotations set.') + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + if self._raw_extras[fi]['is_ascii']: + if isinstance(idx, slice): + idx = np.arange(idx.start, idx.stop) + kwargs = dict(skiprows=start, usecols=idx) + if check_version("numpy", "1.16.0"): + kwargs['max_rows'] = stop - start + else: + warn("Data reading might take longer for ASCII files. Update " + "numpy to version 1.16.0 or greater for more efficient " + "data reading.") + block = np.loadtxt(self._filenames[0], **kwargs)[:stop - start].T + data_view = data[:, :block.shape[1]] + _mult_cal_one(data_view, block, idx, cals, mult) + + else: + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=" +# +# +# License: BSD-3-Clause + +from datetime import datetime, timezone +import os.path as op +from shutil import copyfile + +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal + +from mne.annotations import events_from_annotations +from mne.bem import _fit_sphere +from mne.datasets import testing +from mne.event import find_events +from mne.io import _loc_to_coil_trans +from mne.io.constants import FIFF +from mne.io.edf import read_raw_bdf +from mne.io.bti import read_raw_bti +from mne.io.curry import read_raw_curry +from mne.utils import check_version, catch_logging, _record_warnings +from mne.annotations import read_annotations +from mne.io.curry.curry import (_get_curry_version, _get_curry_file_structure, + _read_events_curry, FILE_EXTENSIONS) + + +data_dir = testing.data_path(download=False) +curry_dir = op.join(data_dir, "curry") + +bdf_file = op.join(data_dir, 'BDF', 'test_bdf_stim_channel.bdf') + +bti_rfDC_file = op.join(data_dir, 'BTi', 'erm_HFH', 'c,rfDC') + +curry7_rfDC_file = op.join(curry_dir, "c,rfDC Curry 7.dat") +curry8_rfDC_file = op.join(curry_dir, "c,rfDC Curry 8.cdt") + +curry7_bdf_file = op.join(curry_dir, "test_bdf_stim_channel Curry 7.dat") +curry7_bdf_ascii_file = op.join(curry_dir, + "test_bdf_stim_channel Curry 7 ASCII.dat") + +curry8_bdf_file = op.join(curry_dir, "test_bdf_stim_channel Curry 8.cdt") +curry8_bdf_ascii_file = op.join(curry_dir, + "test_bdf_stim_channel Curry 8 ASCII.cdt") + +missing_event_file = op.join(curry_dir, "test_sfreq_0.dat") + +Ref_chan_omitted_file = op.join(curry_dir, 'Ref_channel_omitted Curry7.dat') +Ref_chan_omitted_reordered_file = op.join(curry_dir, 'Ref_channel_omitted ' + 'reordered Curry7.dat') + + +@pytest.fixture(scope='session') +def bdf_curry_ref(): + """Return a view of the reference bdf used to create the curry files.""" + raw = read_raw_bdf(bdf_file, preload=True).drop_channels(['Status']) + return raw + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname,tol', [ + pytest.param(curry7_bdf_file, 1e-7, id='curry 7'), + pytest.param(curry8_bdf_file, 1e-7, id='curry 8'), + pytest.param(curry7_bdf_ascii_file, 1e-4, id='curry 7 ascii'), + pytest.param(curry8_bdf_ascii_file, 1e-4, id='curry 8 ascii'), +]) +@pytest.mark.parametrize('preload', [True, False]) +def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): + """Test reading CURRY files.""" + with _record_warnings() as wrn: + raw = read_raw_curry(fname, preload=preload) + + if not check_version('numpy', '1.16') and preload and fname.endswith( + 'ASCII.dat'): + assert len(wrn) > 0 + 
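The ASCII branch of `_read_segment_file` above leans on `np.loadtxt`'s skiprows/usecols/max_rows (the last one is why numpy >= 1.16 is checked) to read only the requested sample window. The same pattern on an in-memory file:

import io
import numpy as np

text = '0 10\n1 11\n2 12\n3 13\n4 14\n'   # 5 samples x 2 channels
block = np.loadtxt(io.StringIO(text), skiprows=1, max_rows=3,
                   usecols=[0, 1]).T      # transpose: channels x samples
print(block)  # [[ 1.  2.  3.]
              #  [11. 12. 13.]]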
else: + assert len(wrn) == 0 + + assert hasattr(raw, '_data') == preload + assert raw.n_times == bdf_curry_ref.n_times + assert raw.info['sfreq'] == bdf_curry_ref.info['sfreq'] + + for field in ['kind', 'ch_name']: + assert_array_equal([ch[field] for ch in raw.info['chs']], + [ch[field] for ch in bdf_curry_ref.info['chs']]) + + assert_allclose(raw.get_data(verbose='error'), + bdf_curry_ref.get_data(), atol=tol) + + picks, start, stop = ["C3", "C4"], 200, 800 + assert_allclose( + raw.get_data(picks=picks, start=start, stop=stop, verbose='error'), + bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), + rtol=tol) + assert raw.info['dev_head_t'] is None + + +# These values taken from a different recording but allow us to test +# using our existing filres + +HPI_CONTENT = """\ +FileVersion: 804 +NumCoils: 10 + +0 1 -50.67 50.98 133.15 0.006406 1 46.45 51.51 143.15 0.006789 1 39.38 -26.67 155.51 0.008034 1 -36.72 -39.95 142.83 0.007700 1 1.61 16.95 172.76 0.001788 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 +""" # noqa: E501 + + +LM_CONTENT = """ + +LANDMARKS_MAG1 START + ListDescription = functional landmark positions + ListUnits = mm + ListNrColumns = 3 + ListNrRows = 8 + ListNrTimepts = 1 + ListNrBlocks = 1 + ListBinary = 0 + ListType = 1 + ListTrafoType = 1 + ListGridType = 2 + ListFirstColumn = 1 + ListIndexMin = -1 + ListIndexMax = -1 + ListIndexAbsMax = -1 +LANDMARKS_MAG1 END + +LANDMARKS_MAG1 START_LIST # Do not edit! + 75.4535 5.32907e-15 2.91434e-16 + 1.42109e-14 -75.3212 9.71445e-16 + -74.4568 -1.42109e-14 2.51188e-15 + -59.7558 35.5804 66.822 + 43.15 43.4107 78.0027 + 38.8415 -41.1884 81.9941 + -36.683 -59.5119 66.4338 + -1.07259 -1.88025 103.747 +LANDMARKS_MAG1 END_LIST + +LM_INDICES_MAG1 START + ListDescription = functional landmark PAN info + ListUnits = + ListNrColumns = 1 + ListNrRows = 3 + ListNrTimepts = 1 + ListNrBlocks = 1 + ListBinary = 0 + ListType = 0 + ListTrafoType = 0 + ListGridType = 2 + ListFirstColumn = 1 + ListIndexMin = -1 + ListIndexMax = -1 + ListIndexAbsMax = -1 +LM_INDICES_MAG1 END + +LM_INDICES_MAG1 START_LIST # Do not edit! + 2 + 1 + 3 +LM_INDICES_MAG1 END_LIST + +LM_REMARKS_MAG1 START + ListDescription = functional landmark labels + ListUnits = + ListNrColumns = 40 + ListNrRows = 8 + ListNrTimepts = 1 + ListNrBlocks = 1 + ListBinary = 0 + ListType = 5 + ListTrafoType = 0 + ListGridType = 2 + ListFirstColumn = 1 + ListIndexMin = -1 + ListIndexMax = -1 + ListIndexAbsMax = -1 +LM_REMARKS_MAG1 END + +LM_REMARKS_MAG1 START_LIST # Do not edit! 
+Left ear +Nasion +Right ear +HPI1 +HPI2 +HPI3 +HPI4 +HPI5 +LM_REMARKS_MAG1 END_LIST + +""" + +WANT_TRANS = np.array( + [[0.99729224, -0.07353067, -0.00119791, 0.00126953], + [0.07319243, 0.99085848, 0.11332405, 0.02670814], + [-0.00714583, -0.11310488, 0.99355736, 0.04721836], + [0., 0., 0., 1.]]) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname,tol', [ + pytest.param(curry7_rfDC_file, 1e-6, id='curry 7'), + pytest.param(curry8_rfDC_file, 1e-3, id='curry 8'), +]) +@pytest.mark.parametrize('mock_dev_head_t', [True, False]) +def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): + """Test reading CURRY files.""" + if mock_dev_head_t: + if 'Curry 7' in fname: # not supported yet + return + # copy files to tmp_path + base = op.splitext(fname)[0] + for ext in ('.cdt', '.cdt.dpa'): + src = base + ext + dst = op.join(tmp_path, op.basename(base) + ext) + copyfile(src, dst) + if ext == '.cdt.dpa': + with open(dst, 'a') as fid: + fid.write(LM_CONTENT) + fname = op.join(tmp_path, op.basename(fname)) + with open(fname + '.hpi', 'w') as fid: + fid.write(HPI_CONTENT) + + # check data + bti_rfDC = read_raw_bti(pdf_fname=bti_rfDC_file, head_shape_fname=None) + with catch_logging() as log: + raw = read_raw_curry(fname, verbose=True) + log = log.getvalue() + if mock_dev_head_t: + assert 'Composing device' in log + else: + assert 'Leaving device' in log + assert 'no landmark' in log + + # test on the eeg chans, since these were not renamed by curry + eeg_names = [ch["ch_name"] for ch in raw.info["chs"] + if ch["kind"] == FIFF.FIFFV_EEG_CH] + + assert_allclose(raw.get_data(eeg_names), + bti_rfDC.get_data(eeg_names), rtol=tol) + assert bti_rfDC.info['dev_head_t'] is not None # XXX probably a BTI bug + if mock_dev_head_t: + assert raw.info['dev_head_t'] is not None + assert_allclose(raw.info['dev_head_t']['trans'], WANT_TRANS, atol=1e-5) + else: + assert raw.info['dev_head_t'] is None + + # check that most MEG sensors are approximately oriented outward from + # the device origin + n_meg = n_eeg = n_other = 0 + pos = list() + nn = list() + for ch in raw.info['chs']: + if ch['kind'] == FIFF.FIFFV_MEG_CH: + assert ch['coil_type'] == FIFF.FIFFV_COIL_CTF_GRAD + t = _loc_to_coil_trans(ch['loc']) + pos.append(t[:3, 3]) + nn.append(t[:3, 2]) + assert_allclose(np.linalg.norm(nn[-1]), 1.) 
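WANT_TRANS above is the device-to-head transform the mocked landmarks should produce via `_quaternion_align`. For intuition, here is an equivalent matched-point fit done with the SVD (Kabsch) method instead of MNE's private quaternion routines (a sketch, not the tested code path):

import numpy as np

def fit_rigid(src, dst):
    """Least-squares rotation + translation mapping src points onto dst."""
    s0, d0 = src.mean(0), dst.mean(0)
    U, _, Vt = np.linalg.svd((src - s0).T @ (dst - d0))
    R = (U @ Vt).T
    if np.linalg.det(R) < 0:  # guard against a reflection
        Vt[-1] *= -1
        R = (U @ Vt).T
    trans = np.eye(4)
    trans[:3, :3], trans[:3, 3] = R, d0 - R @ s0
    return trans

rng = np.random.default_rng(8)
src = rng.standard_normal((5, 3))
true = np.eye(4)
true[:3, :3] = [[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]  # 90 deg about z
true[:3, 3] = [.01, .02, .03]
dst = src @ true[:3, :3].T + true[:3, 3]
assert np.allclose(fit_rigid(src, dst), true)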
+ n_meg += 1 + elif ch['kind'] == FIFF.FIFFV_EEG_CH: + assert ch['coil_type'] == FIFF.FIFFV_COIL_EEG + n_eeg += 1 + else: + assert ch['coil_type'] == FIFF.FIFFV_COIL_NONE + n_other += 1 + assert n_meg == 148 + assert n_eeg == 31 + assert n_other == 15 + pos = np.array(pos) + nn = np.array(nn) + rad, origin = _fit_sphere(pos, disp=False) + assert 0.11 < rad < 0.13 + pos -= origin + pos /= np.linalg.norm(pos, axis=1, keepdims=True) + angles = np.abs(np.rad2deg(np.arccos((pos * nn).sum(-1)))) + assert (angles < 20).sum() > 100 + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', [ + pytest.param(curry7_bdf_file, id='curry 7'), + pytest.param(curry8_bdf_file, id='curry 8'), +]) +def test_read_events_curry_are_same_as_bdf(fname): + """Test events from curry annotations recovers the right events.""" + EVENT_ID = {str(ii): ii for ii in range(5)} + REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) + + raw = read_raw_curry(fname) + events, _ = events_from_annotations(raw, event_id=EVENT_ID) + assert_allclose(events, REF_EVENTS) + assert raw.info['dev_head_t'] is None + + +@testing.requires_testing_data +def test_check_missing_files(): + """Test checking for missing curry files (smoke test).""" + invalid_fname = "/invalid/path/name.xy" + + with pytest.raises(IOError, match="file type .*? must end with"): + _read_events_curry(invalid_fname) + + with pytest.raises(FileNotFoundError, match='does not exist'): + _get_curry_file_structure(invalid_fname) + + with pytest.raises(FileNotFoundError, match="files cannot be found"): + _get_curry_file_structure(missing_event_file, + required=["info", "events"]) + + +def _mock_info_file(src, dst, sfreq, time_step): + with open(src, 'r') as in_file, open(dst, 'w') as out_file: + for line in in_file: + if 'SampleFreqHz' in line: + out_file.write(line.replace('500', str(sfreq))) + elif 'SampleTimeUsec' in line: + out_file.write(line.replace('2000', str(time_step))) + else: + out_file.write(line) + + +@pytest.fixture(params=[ + pytest.param(dict(sfreq=500, time_step=0), id='correct sfreq'), + pytest.param(dict(sfreq=0, time_step=2000), id='correct time_step'), + pytest.param(dict(sfreq=500, time_step=2000), id='both correct'), + pytest.param(dict(sfreq=0, time_step=0), id='both 0', + marks=pytest.mark.xfail(raises=ValueError)), + pytest.param(dict(sfreq=500, time_step=42), id='mismatch', + marks=pytest.mark.xfail(raises=ValueError)), +]) +def sfreq_testing_data(tmp_path, request): + """Generate different sfreq, time_step scenarios to be tested.""" + sfreq, time_step = request.param['sfreq'], request.param['time_step'] + + in_base_name = curry7_bdf_file.strip('dat') + out_base_name = str(tmp_path / 'curry.') + + # create dummy empty files for 'dat' and 'rs3' + for fname in [out_base_name + ext for ext in ['dat', 'rs3']]: + open(fname, 'a').close() + + _mock_info_file(src=in_base_name + 'dap', dst=out_base_name + 'dap', + sfreq=sfreq, time_step=time_step) + _mock_info_file(src=in_base_name + 'rs3', dst=out_base_name + 'rs3', + sfreq=sfreq, time_step=time_step) + return out_base_name + 'dat' + + +@testing.requires_testing_data +def test_sfreq(sfreq_testing_data): + """Test sfreq and time_step.""" + raw = read_raw_curry(sfreq_testing_data, preload=False) + assert raw.info['sfreq'] == 500 + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', [ + pytest.param(curry_dir + '/test_bdf_stim_channel Curry 7.cef', id='7'), + pytest.param(curry_dir + '/test_bdf_stim_channel Curry 8.cdt.cef', id='8'), + pytest.param(curry_dir + 
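The closing assertions above check that most coil normals point away from the fitted sphere center. The geometry in isolation, with synthetic perfectly radial sensors (invented data):

import numpy as np

rng = np.random.default_rng(0)
nn = rng.standard_normal((148, 3))
nn /= np.linalg.norm(nn, axis=1, keepdims=True)  # unit coil normals
pos = 0.12 * nn                                  # sensors placed radially

u = pos / np.linalg.norm(pos, axis=1, keepdims=True)
angles = np.rad2deg(np.arccos(np.clip((u * nn).sum(-1), -1., 1.)))
assert (angles < 20).all()  # every normal points outward in this toy case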
'/test_bdf_stim_channel Curry 7 ASCII.cef', + id='7 ascii'), + pytest.param(curry_dir + '/test_bdf_stim_channel Curry 8 ASCII.cdt.cef', + id='8 ascii'), +]) +def test_read_curry_annotations(fname): + """Test reading for Curry events file.""" + EXPECTED_ONSET = [0.484, 0.486, 0.62, 0.622, 1.904, 1.906, 3.212, 3.214, + 4.498, 4.5, 5.8, 5.802, 7.074, 7.076, 8.324, 8.326, 9.58, + 9.582] + EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) + EXPECTED_DESCRIPTION = ['4', '50000', '2', '50000', '1', '50000', '1', + '50000', '1', '50000', '1', '50000', '1', '50000', + '1', '50000', '1', '50000'] + + annot = read_annotations(fname, sfreq='auto') + + assert annot.orig_time is None + assert_array_equal(annot.onset, EXPECTED_ONSET) + assert_array_equal(annot.duration, EXPECTED_DURATION) + assert_array_equal(annot.description, EXPECTED_DESCRIPTION) + + +def _get_read_annotations_mock_info(name_part, mock_dir): + original, modified = dict(), dict() + + original['event'] = curry_dir + '/test_bdf_stim_channel ' + name_part + original['base'], ext = original['event'].split(".", maxsplit=1) + version = _get_curry_version(ext) + original['info'] = original['base'] + FILE_EXTENSIONS[version]["info"] + + modified['base'] = str(mock_dir / 'curry') + modified['event'] = (modified['base'] + + FILE_EXTENSIONS[version]["events_cef"]) + modified['info'] = modified['base'] + FILE_EXTENSIONS[version]["info"] + + return original, modified + + +@testing.requires_testing_data +@pytest.mark.parametrize('name_part', [ + pytest.param('7.cef', id='7'), + pytest.param('8.cdt.cef', id='8'), + pytest.param('7 ASCII.cef', id='7 (ascii)'), + pytest.param('8 ASCII.cdt.cef', id='8 (ascii)'), +]) +def test_read_curry_annotations_using_mocked_info(tmp_path, name_part): + """Test reading for Curry events file.""" + EXPECTED_ONSET = [0.484, 0.486, 0.62, 0.622, 1.904, 1.906, 3.212, 3.214, + 4.498, 4.5, 5.8, 5.802, 7.074, 7.076, 8.324, 8.326, 9.58, + 9.582] + EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) + EXPECTED_DESCRIPTION = ['4', '50000', '2', '50000', '1', '50000', '1', + '50000', '1', '50000', '1', '50000', '1', '50000', + '1', '50000', '1', '50000'] + + original, fname = _get_read_annotations_mock_info("Curry " + name_part, + tmp_path) + copyfile(src=original['event'], dst=fname['event']) + + _msg = 'required files cannot be found' + with pytest.raises(FileNotFoundError, match=_msg): + read_annotations(fname['event'], sfreq='auto') + + _mock_info_file(src=original['info'], dst=fname['info'], + sfreq=0, time_step=2000) + + annot = read_annotations(fname['event'], sfreq='auto') + + assert annot.orig_time is None + assert_array_equal(annot.onset, EXPECTED_ONSET) + assert_array_equal(annot.duration, EXPECTED_DURATION) + assert_array_equal(annot.description, EXPECTED_DESCRIPTION) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname,expected_channel_list', [ + pytest.param(Ref_chan_omitted_file, + ['FP1', 'FPZ', 'FP2', 'VEO', 'EKG', 'Trigger'], + id='Ref omitted, normal order'), + pytest.param(Ref_chan_omitted_reordered_file, + ['FP2', 'FPZ', 'FP1', 'VEO', 'EKG', 'Trigger'], + id='Ref omitted, reordered') +]) +def test_read_files_missing_channel(fname, expected_channel_list): + """Test reading data files that has an omitted channel.""" + # This for Git issue #8391. In some cases, the 'labels' (.rs3 file will + # list channels that are not actually saved in the datafile (such as the + # 'Ref' channel). These channels are denoted in the 'info' (.dap) file + # in the CHAN_IN_FILE section with a '0' as their index. 
+ # If the CHAN_IN_FILE section is present, the code also assures that the + # channels are sorted in the prescribed order. + # This test makes sure the data load correctly, and that we end up with + # the proper channel list. + raw = read_raw_curry(fname, preload=True) + assert raw.ch_names == expected_channel_list + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname,expected_meas_date', [ + pytest.param(Ref_chan_omitted_file, + datetime(2018, 11, 21, 12, 53, 48, + 525000, tzinfo=timezone.utc), + id='valid start date'), + pytest.param(curry7_rfDC_file, + None, + id='start date year is 0'), + pytest.param(curry7_bdf_file, + None, + id='start date seconds invalid') +]) +def test_meas_date(fname, expected_meas_date): + """Test reading acquisition start datetime info info['meas_date'].""" + # This for Git issue #8398. The 'info' (.dap) file includes acquisition + # start date & time. Test that this goes into raw.info['meas_date']. + # If the information is not valid, raw.info['meas_date'] should be None + raw = read_raw_curry(fname, preload=False) + assert raw.info['meas_date'] == expected_meas_date diff --git a/python/libs/mne/io/diff.py b/python/libs/mne/io/diff.py new file mode 100644 index 0000000..46ded94 --- /dev/null +++ b/python/libs/mne/io/diff.py @@ -0,0 +1,40 @@ +# Author: Alexandre Gramfort +# +# License: BSD Style. + +import numpy as np + +from ..utils import logger, verbose + + +@verbose +def is_equal(first, second, verbose=None): + """Check if 2 python structures are the same. + + Designed to handle dict, list, np.ndarray etc. + """ + all_equal = True + # Check all keys in first dict + if type(first) != type(second): + all_equal = False + if isinstance(first, dict): + for key in first.keys(): + if (key not in second): + logger.info("Missing key %s in %s" % (key, second)) + all_equal = False + else: + if not is_equal(first[key], second[key]): + all_equal = False + elif isinstance(first, np.ndarray): + if not np.allclose(first, second): + all_equal = False + elif isinstance(first, list): + for a, b in zip(first, second): + if not is_equal(a, b): + logger.info('%s and\n%s are different' % (a, b)) + all_equal = False + else: + if first != second: + logger.info('%s and\n%s are different' % (first, second)) + all_equal = False + return all_equal diff --git a/python/libs/mne/io/edf/__init__.py b/python/libs/mne/io/edf/__init__.py new file mode 100644 index 0000000..221f6c7 --- /dev/null +++ b/python/libs/mne/io/edf/__init__.py @@ -0,0 +1,7 @@ +"""EDF+,BDF module for conversion to FIF.""" + +# Author: Teon Brooks +# +# License: BSD-3-Clause + +from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf diff --git a/python/libs/mne/io/edf/edf.py b/python/libs/mne/io/edf/edf.py new file mode 100644 index 0000000..2487582 --- /dev/null +++ b/python/libs/mne/io/edf/edf.py @@ -0,0 +1,1570 @@ +# -*- coding: utf-8 -*- +"""Reading tools from EDF, EDF+, BDF, and GDF.""" + +# Authors: Teon Brooks +# Martin Billinger +# Nicolas Barascud +# Stefan Appelhoff +# Joan Massich +# Clemens Brunner +# Jeroen Van Der Donckt (IDlab - imec) +# +# License: BSD-3-Clause + +from datetime import datetime, timezone, timedelta +import os +import re + +import numpy as np + +from ...utils import verbose, logger, warn +from ..utils import _blk_read_lims, _mult_cal_one +from ..base import BaseRaw +from ..meas_info import _empty_info, _unique_channel_names +from ..constants import FIFF +from ...filter import resample +from ...utils import fill_doc +from ...annotations import Annotations + + +# common 
channel type names mapped to internal ch types +CH_TYPE_MAPPING = { + 'EEG': FIFF.FIFFV_EEG_CH, + 'SEEG': FIFF.FIFFV_SEEG_CH, + 'ECOG': FIFF.FIFFV_ECOG_CH, + 'DBS': FIFF.FIFFV_DBS_CH, + 'EOG': FIFF.FIFFV_EOG_CH, + 'ECG': FIFF.FIFFV_ECG_CH, + 'EMG': FIFF.FIFFV_EMG_CH, + 'BIO': FIFF.FIFFV_BIO_CH, + 'RESP': FIFF.FIFFV_RESP_CH, + 'MISC': FIFF.FIFFV_MISC_CH, + 'SAO2': FIFF.FIFFV_BIO_CH, +} + + +@fill_doc +class RawEDF(BaseRaw): + """Raw object from EDF, EDF+ or BDF file. + + Parameters + ---------- + input_fname : str + Path to the EDF, EDF+ or BDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : 'auto' | str | list of str | int | list of int + Defaults to 'auto', which means that channels named 'status' or + 'trigger' (case insensitive) are set to STIM. If str (or list of str), + all channels matching the name(s) are set to STIM. If int (or list of + ints), the channels corresponding to the indices are set to STIM. + exclude : list of str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. + For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + mne.io.read_raw_edf : Recommended way to read EDF/EDF+ files. + mne.io.read_raw_bdf : Recommended way to read BDF files. + + Notes + ----- + Biosemi devices trigger codes are encoded in 16-bit format, whereas system + codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of + the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). + To retrieve correct event values (bits 1-16), one could do: + + >>> events = mne.find_events(...) # doctest:+SKIP + >>> events[:, 2] &= (2**16 - 1) # doctest:+SKIP + + The above operation can be carried out directly in :func:`mne.find_events` + using the ``mask`` and ``mask_type`` parameters (see + :func:`mne.find_events` for more details). + + It is also possible to retrieve system codes, but no particular effort has + been made to decode these in MNE. In case it is necessary, for instance to + check the CMS bit, the following operation can be carried out: + + >>> cms_bit = 20 # doctest:+SKIP + >>> cms_high = (events[:, 2] & (1 << cms_bit)) != 0 # doctest:+SKIP + + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'EDF Annotations' or 'BDF Annotations' are parsed and + extracted annotations are stored in raw.annotations. 
Use + :func:`mne.events_from_annotations` to obtain events from these + annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + + @verbose + def __init__(self, input_fname, eog=None, misc=None, stim_channel='auto', + exclude=(), infer_types=False, preload=False, verbose=None): + logger.info('Extracting EDF parameters from {}...'.format(input_fname)) + input_fname = os.path.abspath(input_fname) + info, edf_info, orig_units = _get_info(input_fname, stim_channel, eog, + misc, exclude, infer_types, + preload) + logger.info('Creating raw.info structure...') + + # Raw attributes + last_samps = [edf_info['nsamples'] - 1] + super().__init__(info, preload, filenames=[input_fname], + raw_extras=[edf_info], last_samps=last_samps, + orig_format='int', orig_units=orig_units, + verbose=verbose) + + # Read annotations from file and set it + onset, duration, desc = list(), list(), list() + if len(edf_info['tal_idx']) > 0: + # Read TAL data exploiting the header info (no regexp) + idx = np.empty(0, int) + tal_data = self._read_segment_file( + np.empty((0, self.n_times)), idx, 0, 0, int(self.n_times), + np.ones((len(idx), 1)), None) + onset, duration, desc = _read_annotations_edf(tal_data[0]) + + self.set_annotations(Annotations(onset=onset, duration=duration, + description=desc, orig_time=None)) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + return _read_segment_file(data, idx, fi, start, stop, + self._raw_extras[fi], self._filenames[fi], + cals, mult) + + +@fill_doc +class RawGDF(BaseRaw): + """Raw object from GDF file. + + Parameters + ---------- + input_fname : str + Path to the GDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : 'auto' | str | list of str | int | list of int + Defaults to 'auto', which means that channels named 'status' or + 'trigger' (case insensitive) are set to STIM. If str (or list of str), + all channels matching the name(s) are set to STIM. If int (or list of + ints), channels corresponding to the indices are set to STIM. + exclude : list of str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + mne.io.read_raw_gdf : Recommended way to read GDF files. + + Notes + ----- + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. 
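A concrete companion to the bit-masking notes in the RawEDF docstring above (values invented): a Biosemi status sample with the CMS bit set still yields its 16-bit trigger code after masking.

import numpy as np

cms_bit = 20
status = np.array([(1 << cms_bit) | 0x0F03])  # CMS high, trigger 0x0F03

trigger = status & (2 ** 16 - 1)              # keep bits 1-16
cms_high = (status & (1 << cms_bit)) != 0
print(trigger, cms_high)                      # [3843] [ True]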
+ """ + + @verbose + def __init__(self, input_fname, eog=None, misc=None, + stim_channel='auto', exclude=(), preload=False, verbose=None): + logger.info('Extracting EDF parameters from {}...'.format(input_fname)) + input_fname = os.path.abspath(input_fname) + info, edf_info, orig_units = _get_info(input_fname, stim_channel, eog, + misc, exclude, True, preload) + logger.info('Creating raw.info structure...') + + # Raw attributes + last_samps = [edf_info['nsamples'] - 1] + super().__init__(info, preload, filenames=[input_fname], + raw_extras=[edf_info], last_samps=last_samps, + orig_format='int', orig_units=orig_units, + verbose=verbose) + + # Read annotations from file and set it + onset, duration, desc = _get_annotations_gdf(edf_info, + self.info['sfreq']) + + self.set_annotations(Annotations(onset=onset, duration=duration, + description=desc, orig_time=None)) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + return _read_segment_file(data, idx, fi, start, stop, + self._raw_extras[fi], self._filenames[fi], + cals, mult) + + +def _read_ch(fid, subtype, samp, dtype_byte, dtype=None): + """Read a number of samples for a single channel.""" + # BDF + if subtype == 'bdf': + ch_data = np.fromfile(fid, dtype=dtype, count=samp * dtype_byte) + ch_data = ch_data.reshape(-1, 3).astype(INT32) + ch_data = ((ch_data[:, 0]) + + (ch_data[:, 1] << 8) + + (ch_data[:, 2] << 16)) + # 24th bit determines the sign + ch_data[ch_data >= (1 << 23)] -= (1 << 24) + + # GDF data and EDF data + else: + ch_data = np.fromfile(fid, dtype=dtype, count=samp) + + return ch_data + + +def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, + cals, mult): + """Read a chunk of raw data.""" + from scipy.interpolate import interp1d + + n_samps = raw_extras['n_samps'] + buf_len = int(raw_extras['max_samp']) + dtype = raw_extras['dtype_np'] + dtype_byte = raw_extras['dtype_byte'] + data_offset = raw_extras['data_offset'] + stim_channel_idxs = raw_extras['stim_channel_idxs'] + orig_sel = raw_extras['sel'] + tal_idx = raw_extras.get('tal_idx', np.empty(0, int)) + subtype = raw_extras['subtype'] + cal = raw_extras['cal'] + offsets = raw_extras['offsets'] + gains = raw_extras['units'] + + read_sel = np.concatenate([orig_sel[idx], tal_idx]) + tal_data = [] + + # only try to read the stim channel if it's not None and it's + # actually one of the requested channels + idx_arr = np.arange(idx.start, idx.stop) if isinstance(idx, slice) else idx + + # We could read this one EDF block at a time, which would be this: + ch_offsets = np.cumsum(np.concatenate([[0], n_samps]), dtype=np.int64) + block_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, buf_len) + # But to speed it up, we really need to read multiple blocks at once, + # Otherwise we can end up with e.g. 18,181 chunks for a 20 MB file! + # Let's do ~10 MB chunks: + n_per = max(10 * 1024 * 1024 // (ch_offsets[-1] * dtype_byte), 1) + with open(filenames, 'rb', buffering=0) as fid: + + # Extract data + start_offset = (data_offset + + block_start_idx * ch_offsets[-1] * dtype_byte) + for ai in range(0, len(r_lims), n_per): + block_offset = ai * ch_offsets[-1] * dtype_byte + n_read = min(len(r_lims) - ai, n_per) + fid.seek(start_offset + block_offset, 0) + # Read and reshape to (n_chunks_read, ch0_ch1_ch2_ch3...) 
+ many_chunk = _read_ch(fid, subtype, ch_offsets[-1] * n_read, + dtype_byte, dtype).reshape(n_read, -1) + r_sidx = r_lims[ai][0] + r_eidx = (buf_len * (n_read - 1) + r_lims[ai + n_read - 1][1]) + d_sidx = d_lims[ai][0] + d_eidx = d_lims[ai + n_read - 1][1] + one = np.zeros((len(orig_sel), d_eidx - d_sidx), dtype=data.dtype) + for ii, ci in enumerate(read_sel): + # This now has size (n_chunks_read, n_samp[ci]) + ch_data = many_chunk[:, ch_offsets[ci]:ch_offsets[ci + 1]] + + if ci in tal_idx: + tal_data.append(ch_data) + continue + + orig_idx = idx_arr[ii] + ch_data = ch_data * cal[orig_idx] + ch_data += offsets[orig_idx] + ch_data *= gains[orig_idx] + + assert ci == orig_sel[orig_idx] + + if n_samps[ci] != buf_len: + if orig_idx in stim_channel_idxs: + # Stim channel will be interpolated + old = np.linspace(0, 1, n_samps[ci] + 1, True) + new = np.linspace(0, 1, buf_len, False) + ch_data = np.append( + ch_data, np.zeros((len(ch_data), 1)), -1) + ch_data = interp1d(old, ch_data, + kind='zero', axis=-1)(new) + else: + # XXX resampling each chunk isn't great, + # it forces edge artifacts to appear at + # each buffer boundary :( + # it can also be very slow... + ch_data = resample( + ch_data.astype(np.float64), buf_len, n_samps[ci], + npad=0, axis=-1) + elif orig_idx in stim_channel_idxs: + ch_data = np.bitwise_and(ch_data.astype(int), 2**17 - 1) + one[orig_idx] = ch_data.ravel()[r_sidx:r_eidx] + _mult_cal_one(data[:, d_sidx:d_eidx], one, idx, cals, mult) + + if len(tal_data) > 1: + tal_data = np.concatenate([tal.ravel() for tal in tal_data]) + tal_data = tal_data[np.newaxis, :] + return tal_data + + +def _read_header(fname, exclude, infer_types): + """Unify EDF, BDF and GDF _read_header call. + + Parameters + ---------- + fname : str + Path to the EDF+, BDF, or GDF file. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + + Returns + ------- + (edf_info, orig_units) : tuple + """ + ext = os.path.splitext(fname)[1][1:].lower() + logger.info('%s file detected' % ext.upper()) + if ext in ('bdf', 'edf'): + return _read_edf_header(fname, exclude, infer_types) + elif ext == 'gdf': + return _read_gdf_header(fname, exclude), None + else: + raise NotImplementedError( + f'Only GDF, EDF, and BDF files are supported, got {ext}.') + + +def _get_info(fname, stim_channel, eog, misc, exclude, infer_types, preload): + """Extract information from EDF+, BDF or GDF file.""" + eog = eog if eog is not None else [] + misc = misc if misc is not None else [] + + edf_info, orig_units = _read_header(fname, exclude, infer_types) + + # XXX: `tal_ch_names` to pass to `_check_stim_channel` should be computed + # from `edf_info['ch_names']` and `edf_info['tal_idx']` but 'tal_idx' + # contains stim channels that are not TAL. 
+ stim_channel_idxs, _ = _check_stim_channel( + stim_channel, edf_info['ch_names']) + + sel = edf_info['sel'] # selection of channels not excluded + ch_names = edf_info['ch_names'] # of length len(sel) + if 'ch_types' in edf_info: + ch_types = edf_info['ch_types'] # of length len(sel) + else: + ch_types = [None] * len(sel) + if len(sel) == 0: # only want stim channels + n_samps = edf_info['n_samps'][[0]] + else: + n_samps = edf_info['n_samps'][sel] + nchan = edf_info['nchan'] + physical_ranges = edf_info['physical_max'] - edf_info['physical_min'] + cals = edf_info['digital_max'] - edf_info['digital_min'] + bad_idx = np.where((~np.isfinite(cals)) | (cals == 0))[0] + if len(bad_idx) > 0: + warn('Scaling factor is not defined in following channels:\n' + + ', '.join(ch_names[i] for i in bad_idx)) + cals[bad_idx] = 1 + bad_idx = np.where(physical_ranges == 0)[0] + if len(bad_idx) > 0: + warn('Physical range is not defined in following channels:\n' + + ', '.join(ch_names[i] for i in bad_idx)) + physical_ranges[bad_idx] = 1 + + # Creates a list of dicts of eeg channels for raw.info + logger.info('Setting channel info structure...') + chs = list() + pick_mask = np.ones(len(ch_names)) + + chs_without_types = list() + + for idx, ch_name in enumerate(ch_names): + chan_info = {} + chan_info['cal'] = 1. + chan_info['logno'] = idx + 1 + chan_info['scanno'] = idx + 1 + chan_info['range'] = 1. + chan_info['unit_mul'] = FIFF.FIFF_UNITM_NONE + chan_info['ch_name'] = ch_name + chan_info['unit'] = FIFF.FIFF_UNIT_V + chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD + chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG + chan_info['kind'] = FIFF.FIFFV_EEG_CH + # montage can't be stored in EDF so channel locs are unknown: + chan_info['loc'] = np.full(12, np.nan) + + # if the edf info contained channel type information + # set it now + ch_type = ch_types[idx] + if ch_type is not None and ch_type in CH_TYPE_MAPPING: + chan_info['kind'] = CH_TYPE_MAPPING.get(ch_type) + if ch_type not in ['EEG', 'ECOG', 'SEEG', 'DBS']: + chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE + pick_mask[idx] = False + # if user passes in explicit mapping for eog, misc and stim + # channels set them here + if ch_name in eog or idx in eog or idx - nchan in eog: + chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE + chan_info['kind'] = FIFF.FIFFV_EOG_CH + pick_mask[idx] = False + elif ch_name in misc or idx in misc or idx - nchan in misc: + chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE + chan_info['kind'] = FIFF.FIFFV_MISC_CH + pick_mask[idx] = False + elif idx in stim_channel_idxs: + chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE + chan_info['unit'] = FIFF.FIFF_UNIT_NONE + chan_info['kind'] = FIFF.FIFFV_STIM_CH + pick_mask[idx] = False + chan_info['ch_name'] = ch_name + ch_names[idx] = chan_info['ch_name'] + edf_info['units'][idx] = 1 + elif ch_type not in CH_TYPE_MAPPING: + chs_without_types.append(ch_name) + chs.append(chan_info) + + # warn if channel type was not inferable + if len(chs_without_types): + msg = ('Could not determine channel type of the following channels, ' + f'they will be set as EEG:\n{", ".join(chs_without_types)}') + logger.info(msg) + + edf_info['stim_channel_idxs'] = stim_channel_idxs + if any(pick_mask): + picks = [item for item, mask in zip(range(nchan), pick_mask) if mask] + edf_info['max_samp'] = max_samp = n_samps[picks].max() + else: + edf_info['max_samp'] = max_samp = n_samps.max() + + # Info structure + # ------------------------------------------------------------------------- + + not_stim_ch = [x for x in 
range(n_samps.shape[0]) + if x not in stim_channel_idxs] + if len(not_stim_ch) == 0: # only loading stim channels + not_stim_ch = list(range(len(n_samps))) + sfreq = np.take(n_samps, not_stim_ch).max() * \ + edf_info['record_length'][1] / edf_info['record_length'][0] + del n_samps + info = _empty_info(sfreq) + info['meas_date'] = edf_info['meas_date'] + info['chs'] = chs + info['ch_names'] = ch_names + + # Filter settings + highpass = edf_info['highpass'] + lowpass = edf_info['lowpass'] + if highpass.size == 0: + pass + elif all(highpass): + if highpass[0] == 'NaN': + # Placeholder for future use. Highpass set in _empty_info. + pass + elif highpass[0] == 'DC': + info['highpass'] = 0. + else: + hp = highpass[0] + try: + hp = float(hp) + except Exception: + hp = 0. + info['highpass'] = hp + else: + info['highpass'] = float(np.max(highpass)) + warn('Channels contain different highpass filters. Highest filter ' + 'setting will be stored.') + if np.isnan(info['highpass']): + info['highpass'] = 0. + if lowpass.size == 0: + # Placeholder for future use. Lowpass set in _empty_info. + pass + elif all(lowpass): + if lowpass[0] in ('NaN', '0', '0.0'): + # Placeholder for future use. Lowpass set in _empty_info. + pass + else: + info['lowpass'] = float(lowpass[0]) + else: + info['lowpass'] = float(np.min(lowpass)) + warn('Channels contain different lowpass filters. Lowest filter ' + 'setting will be stored.') + if np.isnan(info['lowpass']): + info['lowpass'] = info['sfreq'] / 2. + + if info['highpass'] > info['lowpass']: + warn(f'Highpass cutoff frequency {info["highpass"]} is greater ' + f'than lowpass cutoff frequency {info["lowpass"]}, ' + 'setting values to 0 and Nyquist.') + info['highpass'] = 0. + info['lowpass'] = info['sfreq'] / 2. + + # Some keys to be consistent with FIF measurement info + info['description'] = None + edf_info['nsamples'] = int(edf_info['n_records'] * max_samp) + + info._unlocked = False + info._update_redundant() + + # Later used for reading + edf_info['cal'] = physical_ranges / cals + + # physical dimension in µV + edf_info['offsets'] = ( + edf_info['physical_min'] - edf_info['digital_min'] * edf_info['cal']) + del edf_info['physical_min'] + del edf_info['digital_min'] + + if edf_info['subtype'] == 'bdf': + edf_info['cal'][stim_channel_idxs] = 1 + edf_info['offsets'][stim_channel_idxs] = 0 + edf_info['units'][stim_channel_idxs] = 1 + + return info, edf_info, orig_units + + +def _parse_prefilter_string(prefiltering): + """Parse prefilter string from EDF+ and BDF headers.""" + highpass = np.array( + [v for hp in [re.findall(r'HP:\s*([0-9]+[.]*[0-9]*)', filt) + for filt in prefiltering] for v in hp] + ) + lowpass = np.array( + [v for hp in [re.findall(r'LP:\s*([0-9]+[.]*[0-9]*)', filt) + for filt in prefiltering] for v in hp] + ) + return highpass, lowpass + + +def _edf_str(x): + return x.decode('latin-1').split('\x00')[0] + + +def _read_edf_header(fname, exclude, infer_types): + """Read header information from EDF+ or BDF file.""" + edf_info = {'events': []} + + with open(fname, 'rb') as fid: + + fid.read(8) # version (unused here) + + # patient ID + patient = {} + id_info = fid.read(80).decode('latin-1').rstrip() + id_info = id_info.split(' ') + if len(id_info): + patient['id'] = id_info[0] + if len(id_info) == 4: + try: + birthdate = datetime.strptime(id_info[2], "%d-%b-%Y") + except ValueError: + birthdate = "X" + patient['sex'] = id_info[1] + patient['birthday'] = birthdate + patient['name'] = id_info[3] + + # Recording ID + meas_id = {} + rec_info = 
fid.read(80).decode('latin-1').rstrip().split(' ') + valid_startdate = False + if len(rec_info) == 5: + try: + startdate = datetime.strptime(rec_info[1], "%d-%b-%Y") + except ValueError: + startdate = "X" + else: + valid_startdate = True + meas_id['startdate'] = startdate + meas_id['study_id'] = rec_info[2] + meas_id['technician'] = rec_info[3] + meas_id['equipment'] = rec_info[4] + + # If startdate available in recording info, use it instead of the + # file's meas_date since it contains all 4 digits of the year + if valid_startdate: + day = meas_id['startdate'].day + month = meas_id['startdate'].month + year = meas_id['startdate'].year + fid.read(8) # skip file's meas_date + else: + meas_date = fid.read(8).decode('latin-1') + day, month, year = [int(x) for x in meas_date.split('.')] + year = year + 2000 if year < 85 else year + 1900 + + meas_time = fid.read(8).decode('latin-1') + hour, minute, sec = [int(x) for x in meas_time.split('.')] + try: + meas_date = datetime(year, month, day, hour, minute, sec, + tzinfo=timezone.utc) + except ValueError: + warn(f'Invalid date encountered ({year:04d}-{month:02d}-' + f'{day:02d} {hour:02d}:{minute:02d}:{sec:02d}).') + meas_date = None + + header_nbytes = int(_edf_str(fid.read(8))) + + # The following 44 bytes sometimes identify the file type, but this is + # not guaranteed. Therefore, we skip this field and use the file + # extension to determine the subtype (EDF or BDF, which differ in the + # number of bytes they use for the data records; EDF uses 2 bytes + # whereas BDF uses 3 bytes). + fid.read(44) + subtype = os.path.splitext(fname)[1][1:].lower() + + n_records = int(_edf_str(fid.read(8))) + record_length = float(_edf_str(fid.read(8))) + record_length = np.array([record_length, 1.]) # in seconds + if record_length[0] == 0: + record_length[0] = 1. + warn('Header information is incorrect for record length. Default ' + 'record length set to 1.\nIt is possible that this file only' + ' contains annotations and no signals. 
In that case, please ' + 'use mne.read_annotations() to load these annotations.') + + nchan = int(_edf_str(fid.read(4))) + channels = list(range(nchan)) + + # read in 16 byte labels and strip any extra spaces at the end + ch_labels = [fid.read(16).strip().decode('latin-1') for _ in channels] + + # get channel names and optionally channel type + # EDF specification contains 16 bytes that encode channel names, + # optionally prefixed by a string representing channel type separated + # by a space + if infer_types: + ch_types, ch_names = [], [] + for ch_label in ch_labels: + ch_type, ch_name = 'EEG', ch_label # default to EEG + parts = ch_label.split(' ') + if len(parts) > 1: + if parts[0].upper() in CH_TYPE_MAPPING: + ch_type = parts[0].upper() + ch_name = ' '.join(parts[1:]) + logger.info(f"Channel '{ch_label}' recognized as type " + f"{ch_type} (renamed to '{ch_name}').") + ch_types.append(ch_type) + ch_names.append(ch_name) + else: + ch_types, ch_names = ['EEG'] * nchan, ch_labels + + exclude = _find_exclude_idx(ch_names, exclude) + tal_idx = _find_tal_idx(ch_names) + exclude = np.concatenate([exclude, tal_idx]) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + for ch in channels: + fid.read(80) # transducer + units = [fid.read(8).strip().decode('latin-1') for ch in channels] + edf_info['units'] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + # allow μ (greek mu), µ (micro symbol) and μ (sjis mu) codepoints + if unit in ('\u03BCV', '\u00B5V', '\x83\xCAV', 'uV'): + edf_info['units'].append(1e-6) + elif unit == 'mV': + edf_info['units'].append(1e-3) + else: + edf_info['units'].append(1) + edf_info['units'] = np.array(edf_info['units'], float) + + ch_names = [ch_names[idx] for idx in sel] + units = [units[idx] for idx in sel] + + # make sure channel names are unique + ch_names = _unique_channel_names(ch_names) + orig_units = dict(zip(ch_names, units)) + + physical_min = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + physical_max = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + digital_min = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + digital_max = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + prefiltering = [_edf_str(fid.read(80)).strip() for ch in channels][:-1] + highpass, lowpass = _parse_prefilter_string(prefiltering) + + # number of samples per record + n_samps = np.array([int(_edf_str(fid.read(8))) for ch in channels]) + + # Populate edf_info + edf_info.update( + ch_names=ch_names, ch_types=ch_types, data_offset=header_nbytes, + digital_max=digital_max, digital_min=digital_min, + highpass=highpass, sel=sel, lowpass=lowpass, meas_date=meas_date, + n_records=n_records, n_samps=n_samps, nchan=nchan, + subject_info=patient, physical_max=physical_max, + physical_min=physical_min, record_length=record_length, + subtype=subtype, tal_idx=tal_idx) + + fid.read(32 * nchan).decode() # reserved + assert fid.tell() == header_nbytes + + fid.seek(0, 2) + n_bytes = fid.tell() + n_data_bytes = n_bytes - header_nbytes + total_samps = (n_data_bytes // 3 if subtype == 'bdf' + else n_data_bytes // 2) + read_records = total_samps // np.sum(n_samps) + if n_records != read_records: + warn('Number of records from the header does not match the file ' + 'size (perhaps the recording was not stopped before exiting).' 
+                 ' Inferring from the file size.')
+            edf_info['n_records'] = read_records
+        del n_records
+
+        if subtype == 'bdf':
+            edf_info['dtype_byte'] = 3  # 24-bit (3 byte) integers
+            edf_info['dtype_np'] = UINT8
+        else:
+            edf_info['dtype_byte'] = 2  # 16-bit (2 byte) integers
+            edf_info['dtype_np'] = INT16
+
+    return edf_info, orig_units
+
+
+INT8 = '<i1'
+UINT8 = '<u1'
+INT16 = '<i2'
+UINT16 = '<u2'
+INT32 = '<i4'
+UINT32 = '<u4'
+INT64 = '<i8'
+UINT64 = '<u8'
+FLOAT32 = '<f4'
+FLOAT64 = '<f8'
+GDFTYPE_NP = (None, INT8, UINT8, INT16, UINT16, INT32, UINT32,
+              INT64, UINT64, None, None, None, None,
+              None, None, None, FLOAT32, FLOAT64)
+GDFTYPE_BYTE = tuple(np.dtype(x).itemsize if x is not None else 0
+                     for x in GDFTYPE_NP)
+
+
+def _check_dtype_byte(types):
+    assert sum(GDFTYPE_BYTE) == 42
+    dtype_byte = [GDFTYPE_BYTE[t] for t in types]
+    dtype_np = [GDFTYPE_NP[t] for t in types]
+    if len(np.unique(dtype_byte)) > 1:
+        # We will not read it properly, so this should be an error
+        raise RuntimeError("Reading multiple data types not supported")
+    return dtype_np[0], dtype_byte[0]
+
+
+def _read_gdf_header(fname, exclude):
+    """Read GDF 1.x and GDF 2.x header info."""
+    edf_info = dict()
+    events = None
+    with open(fname, 'rb') as fid:
+
+        version = fid.read(8).decode()
+        edf_info['type'] = edf_info['subtype'] = version[:3]
+        edf_info['number'] = float(version[4:])
+        meas_date = None
+
+        # GDF 1.x
+        # ---------------------------------------------------------------------
+        if edf_info['number'] < 1.9:
+
+            # patient ID
+            pid = fid.read(80).decode('latin-1')
+            pid = pid.split(' ', 2)
+            patient = {}
+            if len(pid) >= 2:
+                patient['id'] = pid[0]
+                patient['name'] = pid[1]
+
+            # Recording ID
+            meas_id = {}
+            meas_id['recording_id'] = _edf_str(fid.read(80)).strip()
+
+            # date
+            tm = _edf_str(fid.read(16)).strip()
+            try:
+                if tm[14:16] == '  ':
+                    tm = tm[:14] + '00' + tm[16:]
+                meas_date = datetime(
+                    int(tm[0:4]), int(tm[4:6]),
+                    int(tm[6:8]), int(tm[8:10]),
+                    int(tm[10:12]), int(tm[12:14]),
+                    int(tm[14:16]) * pow(10, 4),
+                    tzinfo=timezone.utc)
+            except Exception:
+                pass
+
+            header_nbytes = np.fromfile(fid, INT64, 1)[0]
+            meas_id['equipment'] = np.fromfile(fid, UINT8, 8)[0]
+            meas_id['hospital'] = np.fromfile(fid, UINT8, 8)[0]
+            meas_id['technician'] = np.fromfile(fid, UINT8, 8)[0]
+            fid.seek(20, 1)  # 20bytes reserved
+
+            n_records = np.fromfile(fid, INT64, 1)[0]
+            # record length in seconds
+            record_length = np.fromfile(fid, UINT32, 2)
+            if record_length[0] == 0:
+                record_length[0] = 1.
+                warn('Header information is incorrect for record length. 
' + 'Default record length set to 1.') + nchan = np.fromfile(fid, UINT32, 1)[0] + channels = list(range(nchan)) + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] + exclude = _find_exclude_idx(ch_names, exclude) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + fid.seek(80 * len(channels), 1) # transducer + units = [_edf_str(fid.read(8)).strip() for ch in channels] + edf_info['units'] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + if unit[:2] == 'uV': + edf_info['units'].append(1e-6) + else: + edf_info['units'].append(1) + edf_info['units'] = np.array(edf_info['units'], float) + + ch_names = [ch_names[idx] for idx in sel] + physical_min = np.fromfile(fid, FLOAT64, len(channels)) + physical_max = np.fromfile(fid, FLOAT64, len(channels)) + digital_min = np.fromfile(fid, INT64, len(channels)) + digital_max = np.fromfile(fid, INT64, len(channels)) + prefiltering = [_edf_str(fid.read(80)) for ch in channels][:-1] + highpass, lowpass = _parse_prefilter_string(prefiltering) + + # n samples per record + n_samps = np.fromfile(fid, INT32, len(channels)) + + # channel data type + dtype = np.fromfile(fid, INT32, len(channels)) + + # total number of bytes for data + bytes_tot = np.sum([GDFTYPE_BYTE[t] * n_samps[i] + for i, t in enumerate(dtype)]) + + # Populate edf_info + dtype_np, dtype_byte = _check_dtype_byte(dtype) + edf_info.update( + bytes_tot=bytes_tot, ch_names=ch_names, + data_offset=header_nbytes, digital_min=digital_min, + digital_max=digital_max, + dtype_byte=dtype_byte, dtype_np=dtype_np, exclude=exclude, + highpass=highpass, sel=sel, lowpass=lowpass, + meas_date=meas_date, + meas_id=meas_id, n_records=n_records, n_samps=n_samps, + nchan=nchan, subject_info=patient, physical_max=physical_max, + physical_min=physical_min, record_length=record_length) + + fid.seek(32 * edf_info['nchan'], 1) # reserved + assert fid.tell() == header_nbytes + + # Event table + # ----------------------------------------------------------------- + etp = header_nbytes + n_records * edf_info['bytes_tot'] + # skip data to go to event table + fid.seek(etp) + etmode = np.fromfile(fid, UINT8, 1)[0] + if etmode in (1, 3): + sr = np.fromfile(fid, UINT8, 3) + event_sr = sr[0] + for i in range(1, len(sr)): + event_sr = event_sr + sr[i] * 2 ** (i * 8) + n_events = np.fromfile(fid, UINT32, 1)[0] + pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds + typ = np.fromfile(fid, UINT16, n_events) + + if etmode == 3: + chn = np.fromfile(fid, UINT16, n_events) + dur = np.fromfile(fid, UINT32, n_events) + else: + chn = np.zeros(n_events, dtype=np.int32) + dur = np.ones(n_events, dtype=UINT32) + np.maximum(dur, 1, out=dur) + events = [n_events, pos, typ, chn, dur] + + # GDF 2.x + # --------------------------------------------------------------------- + else: + # FIXED HEADER + handedness = ('Unknown', 'Right', 'Left', 'Equal') + gender = ('Unknown', 'Male', 'Female') + scale = ('Unknown', 'No', 'Yes', 'Corrected') + + # date + pid = fid.read(66).decode() + pid = pid.split(' ', 2) + patient = {} + if len(pid) >= 2: + patient['id'] = pid[0] + patient['name'] = pid[1] + fid.seek(10, 1) # 10bytes reserved + + # Smoking / Alcohol abuse / drug abuse / medication + sadm = np.fromfile(fid, UINT8, 1)[0] + patient['smoking'] = scale[sadm % 4] + patient['alcohol_abuse'] = scale[(sadm >> 2) % 4] + patient['drug_abuse'] = scale[(sadm >> 4) % 4] + patient['medication'] = scale[(sadm >> 6) % 4] + patient['weight'] = np.fromfile(fid, UINT8, 1)[0] + if patient['weight'] == 0 or 
patient['weight'] == 255: + patient['weight'] = None + patient['height'] = np.fromfile(fid, UINT8, 1)[0] + if patient['height'] == 0 or patient['height'] == 255: + patient['height'] = None + + # Gender / Handedness / Visual Impairment + ghi = np.fromfile(fid, UINT8, 1)[0] + patient['sex'] = gender[ghi % 4] + patient['handedness'] = handedness[(ghi >> 2) % 4] + patient['visual'] = scale[(ghi >> 4) % 4] + + # Recording identification + meas_id = {} + meas_id['recording_id'] = _edf_str(fid.read(64)).strip() + vhsv = np.fromfile(fid, UINT8, 4) + loc = {} + if vhsv[3] == 0: + loc['vertpre'] = 10 * int(vhsv[0] >> 4) + int(vhsv[0] % 16) + loc['horzpre'] = 10 * int(vhsv[1] >> 4) + int(vhsv[1] % 16) + loc['size'] = 10 * int(vhsv[2] >> 4) + int(vhsv[2] % 16) + else: + loc['vertpre'] = 29 + loc['horzpre'] = 29 + loc['size'] = 29 + loc['version'] = 0 + loc['latitude'] = \ + float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 + loc['longitude'] = \ + float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 + loc['altitude'] = float(np.fromfile(fid, INT32, 1)[0]) / 100 + meas_id['loc'] = loc + + meas_date = np.fromfile(fid, UINT64, 1)[0] + if meas_date != 0: + meas_date = (datetime(1, 1, 1, tzinfo=timezone.utc) + + timedelta(meas_date * pow(2, -32) - 367)) + else: + meas_date = None + + birthday = np.fromfile(fid, UINT64, 1).tolist()[0] + if birthday == 0: + birthday = datetime(1, 1, 1, tzinfo=timezone.utc) + else: + birthday = (datetime(1, 1, 1, tzinfo=timezone.utc) + + timedelta(birthday * pow(2, -32) - 367)) + patient['birthday'] = birthday + if patient['birthday'] != datetime(1, 1, 1, 0, 0, + tzinfo=timezone.utc): + today = datetime.now(tz=timezone.utc) + patient['age'] = today.year - patient['birthday'].year + today = today.replace(year=patient['birthday'].year) + if today < patient['birthday']: + patient['age'] -= 1 + else: + patient['age'] = None + + header_nbytes = np.fromfile(fid, UINT16, 1)[0] * 256 + + fid.seek(6, 1) # 6 bytes reserved + meas_id['equipment'] = np.fromfile(fid, UINT8, 8) + meas_id['ip'] = np.fromfile(fid, UINT8, 6) + patient['headsize'] = np.fromfile(fid, UINT16, 3) + patient['headsize'] = np.asarray(patient['headsize'], np.float32) + patient['headsize'] = np.ma.masked_array( + patient['headsize'], + np.equal(patient['headsize'], 0), None).filled() + ref = np.fromfile(fid, FLOAT32, 3) + gnd = np.fromfile(fid, FLOAT32, 3) + n_records = np.fromfile(fid, INT64, 1)[0] + + # record length in seconds + record_length = np.fromfile(fid, UINT32, 2) + if record_length[0] == 0: + record_length[0] = 1. + warn('Header information is incorrect for record length. 
' + 'Default record length set to 1.') + + nchan = np.fromfile(fid, UINT16, 1)[0] + fid.seek(2, 1) # 2bytes reserved + + # Channels (variable header) + channels = list(range(nchan)) + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] + exclude = _find_exclude_idx(ch_names, exclude) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + + fid.seek(80 * len(channels), 1) # reserved space + fid.seek(6 * len(channels), 1) # phys_dim, obsolete + + """The Physical Dimensions are encoded as int16, according to: + - Units codes : + https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/units.csv + - Decimal factors codes: + https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/DecimalFactors.txt + """ # noqa + units = np.fromfile(fid, UINT16, len(channels)).tolist() + unitcodes = np.array(units[:]) + edf_info['units'] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + if unit == 4275: # microvolts + edf_info['units'].append(1e-6) + elif unit == 4274: # millivolts + edf_info['units'].append(1e-3) + elif unit == 512: # dimensionless + edf_info['units'].append(1) + elif unit == 0: + edf_info['units'].append(1) # unrecognized + else: + warn('Unsupported physical dimension for channel %d ' + '(assuming dimensionless). Please contact the ' + 'MNE-Python developers for support.' % i) + edf_info['units'].append(1) + edf_info['units'] = np.array(edf_info['units'], float) + + ch_names = [ch_names[idx] for idx in sel] + physical_min = np.fromfile(fid, FLOAT64, len(channels)) + physical_max = np.fromfile(fid, FLOAT64, len(channels)) + digital_min = np.fromfile(fid, FLOAT64, len(channels)) + digital_max = np.fromfile(fid, FLOAT64, len(channels)) + + fid.seek(68 * len(channels), 1) # obsolete + lowpass = np.fromfile(fid, FLOAT32, len(channels)) + highpass = np.fromfile(fid, FLOAT32, len(channels)) + notch = np.fromfile(fid, FLOAT32, len(channels)) + + # number of samples per record + n_samps = np.fromfile(fid, INT32, len(channels)) + + # data type + dtype = np.fromfile(fid, INT32, len(channels)) + + channel = {} + channel['xyz'] = [np.fromfile(fid, FLOAT32, 3)[0] + for ch in channels] + + if edf_info['number'] < 2.19: + impedance = np.fromfile(fid, UINT8, + len(channels)).astype(float) + impedance[impedance == 255] = np.nan + channel['impedance'] = pow(2, impedance / 8) + fid.seek(19 * len(channels), 1) # reserved + else: + tmp = np.fromfile(fid, FLOAT32, 5 * len(channels)) + tmp = tmp[::5] + fZ = tmp[:] + impedance = tmp[:] + # channels with no voltage (code 4256) data + ch = [unitcodes & 65504 != 4256][0] + impedance[np.where(ch)] = None + # channel with no impedance (code 4288) data + ch = [unitcodes & 65504 != 4288][0] + fZ[np.where(ch)[0]] = None + + assert fid.tell() == header_nbytes + + # total number of bytes for data + bytes_tot = np.sum([GDFTYPE_BYTE[t] * n_samps[i] + for i, t in enumerate(dtype)]) + + # Populate edf_info + dtype_np, dtype_byte = _check_dtype_byte(dtype) + edf_info.update( + bytes_tot=bytes_tot, ch_names=ch_names, + data_offset=header_nbytes, + dtype_byte=dtype_byte, dtype_np=dtype_np, + digital_min=digital_min, digital_max=digital_max, + exclude=exclude, gnd=gnd, highpass=highpass, sel=sel, + impedance=impedance, lowpass=lowpass, meas_date=meas_date, + meas_id=meas_id, n_records=n_records, n_samps=n_samps, + nchan=nchan, notch=notch, subject_info=patient, + physical_max=physical_max, physical_min=physical_min, + record_length=record_length, ref=ref) + + # EVENT TABLE + # 
----------------------------------------------------------------- + etp = edf_info['data_offset'] + edf_info['n_records'] * \ + edf_info['bytes_tot'] + fid.seek(etp) # skip data to go to event table + etmode = fid.read(1).decode() + if etmode != '': + etmode = np.fromstring(etmode, UINT8).tolist()[0] + + if edf_info['number'] < 1.94: + sr = np.fromfile(fid, UINT8, 3) + event_sr = sr[0] + for i in range(1, len(sr)): + event_sr = event_sr + sr[i] * 2**(i * 8) + n_events = np.fromfile(fid, UINT32, 1)[0] + else: + ne = np.fromfile(fid, UINT8, 3) + n_events = ne[0] + for i in range(1, len(ne)): + n_events = n_events + ne[i] * 2**(i * 8) + event_sr = np.fromfile(fid, FLOAT32, 1)[0] + + pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds + typ = np.fromfile(fid, UINT16, n_events) + + if etmode == 3: + chn = np.fromfile(fid, UINT16, n_events) + dur = np.fromfile(fid, UINT32, n_events) + else: + chn = np.zeros(n_events, dtype=np.uint32) + dur = np.ones(n_events, dtype=np.uint32) + np.clip(dur, 1, np.inf, out=dur) + events = [n_events, pos, typ, chn, dur] + edf_info['event_sfreq'] = event_sr + + edf_info.update(events=events, sel=np.arange(len(edf_info['ch_names']))) + + return edf_info + + +def _check_stim_channel(stim_channel, ch_names, + tal_ch_names=['EDF Annotations', 'BDF Annotations']): + """Check that the stimulus channel exists in the current datafile.""" + DEFAULT_STIM_CH_NAMES = ['status', 'trigger'] + + if stim_channel is None or stim_channel is False: + return [], [] + + if stim_channel is True: # convenient aliases + stim_channel = 'auto' + + elif isinstance(stim_channel, str): + if stim_channel == 'auto': + if 'auto' in ch_names: + warn(RuntimeWarning, "Using `stim_channel='auto'` when auto" + " also corresponds to a channel name is ambiguous." + " Please use `stim_channel=['auto']`.") + else: + valid_stim_ch_names = DEFAULT_STIM_CH_NAMES + else: + valid_stim_ch_names = [stim_channel.lower()] + + elif isinstance(stim_channel, int): + valid_stim_ch_names = [ch_names[stim_channel].lower()] + + elif isinstance(stim_channel, list): + if all([isinstance(s, str) for s in stim_channel]): + valid_stim_ch_names = [s.lower() for s in stim_channel] + elif all([isinstance(s, int) for s in stim_channel]): + valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel] + else: + raise ValueError('Invalid stim_channel') + else: + raise ValueError('Invalid stim_channel') + + # Forbid the synthesis of stim channels from TAL Annotations + tal_ch_names_found = [ch for ch in valid_stim_ch_names + if ch in [t.lower() for t in tal_ch_names]] + if len(tal_ch_names_found): + _msg = ('The synthesis of the stim channel is not supported' + ' since 0.18. Please remove {} from `stim_channel`' + ' and use `mne.events_from_annotations` instead' + ).format(tal_ch_names_found) + raise ValueError(_msg) + + ch_names_low = [ch.lower() for ch in ch_names] + found = list(set(valid_stim_ch_names) & set(ch_names_low)) + + if not found: + return [], [] + else: + stim_channel_idxs = [ch_names_low.index(f) for f in found] + names = [ch_names[idx] for idx in stim_channel_idxs] + return stim_channel_idxs, names + + +def _find_exclude_idx(ch_names, exclude): + """Find indices of all channels to exclude. + + If there are several channels called "A" and we want to exclude "A", then + add (the index of) all "A" channels to the exclusion list. 
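+
+    A hypothetical call, with ``exclude`` given as a regular expression
+    (``re.match`` anchors at the start of each name, so ``'A'`` also
+    matches ``A1`` and ``A2``):
+
+    >>> _find_exclude_idx(['A1', 'A2', 'B1'], 'A')  # doctest:+SKIP
+    [0, 1]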
+ """ + if isinstance(exclude, str): # regex for channel names + indices = [] + for idx, ch in enumerate(ch_names): + if re.match(exclude, ch): + indices.append(idx) + return indices + else: # list of channel names + return [idx for idx, ch in enumerate(ch_names) if ch in exclude] + + +def _find_tal_idx(ch_names): + # Annotations / TAL Channels + accepted_tal_ch_names = ['EDF Annotations', 'BDF Annotations'] + tal_channel_idx = np.where(np.in1d(ch_names, accepted_tal_ch_names))[0] + return tal_channel_idx + + +@fill_doc +def read_raw_edf(input_fname, eog=None, misc=None, stim_channel='auto', + exclude=(), infer_types=False, preload=False, verbose=None): + """Reader function for EDF or EDF+ files. + + Parameters + ---------- + input_fname : str + Path to the EDF or EDF+ file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : 'auto' | str | list of str | int | list of int + Defaults to 'auto', which means that channels named 'status' or + 'trigger' (case insensitive) are set to STIM. If str (or list of str), + all channels matching the name(s) are set to STIM. If int (or list of + ints), channels corresponding to the indices are set to STIM. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. + For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawEDF + The raw instance. + + See Also + -------- + mne.io.read_raw_bdf : Reader function for BDF files. + mne.io.read_raw_gdf : Reader function for GDF files. + mne.export.export_raw : Export function for EDF files. + + Notes + ----- + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'EDF Annotations' are parsed and extracted annotations + are stored in raw.annotations. Use :func:`mne.events_from_annotations` to + obtain events from these annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + + The EDF specification allows optional storage of channel types in the + prefix of the signal label for each channel. For example, ``EEG Fz`` + implies that ``Fz`` is an EEG channel and ``MISC E`` would imply ``E`` is + a MISC channel. However, there is no standard way of specifying all + channel types. 
MNE-Python will try to infer the channel type, when such a + string exists, defaulting to EEG, when there is no prefix or the prefix is + not recognized. + + The following prefix strings are mapped to MNE internal types: + + - 'EEG': 'eeg' + - 'SEEG': 'seeg' + - 'ECOG': 'ecog' + - 'DBS': 'dbs' + - 'EOG': 'eog' + - 'ECG': 'ecg' + - 'EMG': 'emg' + - 'BIO': 'bio' + - 'RESP': 'resp' + - 'MISC': 'misc' + - 'SAO2': 'bio' + + The EDF specification allows storage of subseconds in measurement date. + However, this reader currently sets subseconds to 0 by default. + """ + input_fname = os.path.abspath(input_fname) + ext = os.path.splitext(input_fname)[1][1:].lower() + if ext != 'edf': + raise NotImplementedError(f'Only EDF files are supported, got {ext}.') + return RawEDF(input_fname=input_fname, eog=eog, misc=misc, + stim_channel=stim_channel, exclude=exclude, + infer_types=infer_types, preload=preload, verbose=verbose) + + +@fill_doc +def read_raw_bdf(input_fname, eog=None, misc=None, stim_channel='auto', + exclude=(), infer_types=False, preload=False, verbose=None): + """Reader function for BDF files. + + Parameters + ---------- + input_fname : str + Path to the BDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : 'auto' | str | list of str | int | list of int + Defaults to 'auto', which means that channels named 'status' or + 'trigger' (case insensitive) are set to STIM. If str (or list of str), + all channels matching the name(s) are set to STIM. If int (or list of + ints), channels corresponding to the indices are set to STIM. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. + For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawEDF + The raw instance. + + See Also + -------- + mne.io.read_raw_edf : Reader function for EDF and EDF+ files. + mne.io.read_raw_gdf : Reader function for GDF files. + + Notes + ----- + Biosemi devices trigger codes are encoded in 16-bit format, whereas system + codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of + the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). + To retrieve correct event values (bits 1-16), one could do: + + >>> events = mne.find_events(...) # doctest:+SKIP + >>> events[:, 2] &= (2**16 - 1) # doctest:+SKIP + + The above operation can be carried out directly in :func:`mne.find_events` + using the ``mask`` and ``mask_type`` parameters (see + :func:`mne.find_events` for more details). + + It is also possible to retrieve system codes, but no particular effort has + been made to decode these in MNE. 
In case it is necessary, for instance to
+    check the CMS bit, the following operation can be carried out:
+
+    >>> cms_bit = 20  # doctest:+SKIP
+    >>> cms_high = (events[:, 2] & (1 << cms_bit)) != 0  # doctest:+SKIP
+
+    It is worth noting that in some special cases, it may be necessary to shift
+    event values in order to retrieve correct event triggers. This depends on
+    the triggering device used to perform the synchronization. For instance, in
+    some files events need to be shifted by 8 bits:
+
+    >>> events[:, 2] >>= 8  # doctest:+SKIP
+
+    TAL channels called 'BDF Annotations' are parsed and extracted annotations
+    are stored in raw.annotations. Use :func:`mne.events_from_annotations` to
+    obtain events from these annotations.
+
+    If channels named 'status' or 'trigger' are present, they are considered as
+    STIM channels by default. Use :func:`mne.find_events` to parse events
+    encoded in such analog stim channels.
+    """
+    input_fname = os.path.abspath(input_fname)
+    ext = os.path.splitext(input_fname)[1][1:].lower()
+    if ext != 'bdf':
+        raise NotImplementedError(f'Only BDF files are supported, got {ext}.')
+    return RawEDF(input_fname=input_fname, eog=eog, misc=misc,
+                  stim_channel=stim_channel, exclude=exclude,
+                  infer_types=infer_types, preload=preload, verbose=verbose)
+
+
+@fill_doc
+def read_raw_gdf(input_fname, eog=None, misc=None, stim_channel='auto',
+                 exclude=(), preload=False, verbose=None):
+    """Reader function for GDF files.
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the GDF file.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated EOG
+        channels. Values should correspond to the electrodes in the file.
+        Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated MISC
+        channels. Values should correspond to the electrodes in the file.
+        Default is None.
+    stim_channel : 'auto' | str | list of str | int | list of int
+        Defaults to 'auto', which means that channels named 'status' or
+        'trigger' (case insensitive) are set to STIM. If str (or list of str),
+        all channels matching the name(s) are set to STIM. If int (or list of
+        ints), channels corresponding to the indices are set to STIM.
+    exclude : list of str | str
+        Channel names to exclude. This can help when reading data with
+        different sampling rates to avoid unnecessary resampling. A str is
+        interpreted as a regular expression.
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawGDF
+        The raw instance.
+
+    See Also
+    --------
+    mne.io.read_raw_edf : Reader function for EDF and EDF+ files.
+    mne.io.read_raw_bdf : Reader function for BDF files.
+
+    Notes
+    -----
+    If channels named 'status' or 'trigger' are present, they are considered as
+    STIM channels by default. Use :func:`mne.find_events` to parse events
+    encoded in such analog stim channels.
+    """
+    input_fname = os.path.abspath(input_fname)
+    ext = os.path.splitext(input_fname)[1][1:].lower()
+    if ext != 'gdf':
+        raise NotImplementedError(f'Only GDF files are supported, got {ext}.')
+    return RawGDF(input_fname=input_fname, eog=eog, misc=misc,
+                  stim_channel=stim_channel, exclude=exclude, preload=preload,
+                  verbose=verbose)
+
+
+def _read_annotations_edf(annotations):
+    """Annotation File Reader.
+
+    Parameters
+    ----------
+    annotations : ndarray (n_chans, n_samples) | str
+        Channel data in EDF+ TAL format or path to annotation file.
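+        A TAL entry is an onset, an optional duration separated from it by
+        the byte ``0x15``, and one or more descriptions each terminated by
+        ``0x14``, with ``0x00`` closing the entry; for example,
+        ``b'+180\x14Lights off\x14\x00'`` marks 'Lights off' at 180 s.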
+
+    Returns
+    -------
+    onset : array of float, shape (n_annotations,)
+        The starting time of annotations in seconds after ``orig_time``.
+    duration : array of float, shape (n_annotations,)
+        Durations of the annotations in seconds.
+    description : array of str, shape (n_annotations,)
+        Array of strings containing the description of each annotation. To
+        reject epochs, use descriptions starting with the keyword 'bad'.
+    """
+    pat = '([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00'
+    if isinstance(annotations, str):
+        with open(annotations, encoding='latin-1') as annot_file:
+            triggers = re.findall(pat, annot_file.read())
+    else:
+        tals = bytearray()
+        annotations = np.atleast_2d(annotations)
+        for chan in annotations:
+            this_chan = chan.ravel()
+            if this_chan.dtype == INT32:  # BDF
+                this_chan = this_chan.view(dtype=UINT8)
+                this_chan = this_chan.reshape(-1, 4)
+                # We only keep the first 3 bytes because BDF values
+                # are stored with 24 bits (not 32)
+                this_chan = this_chan[:, :3].ravel()
+                # As ravel() returns a 1D array we can add all values at once
+                tals.extend(this_chan)
+            else:
+                this_chan = chan.astype(np.int64)
+                # Exploit np vectorized processing
+                tals.extend(np.uint8([this_chan % 256, this_chan // 256])
+                            .flatten('F'))
+
+        # use of latin-1 because characters are only encoded for the first 256
+        # code points and utf-8 can trigger an "invalid continuation byte"
+        # error
+        triggers = re.findall(pat, tals.decode('latin-1'))
+
+    events = []
+    offset = 0.
+    for k, ev in enumerate(triggers):
+        onset = float(ev[0]) + offset
+        duration = float(ev[2]) if ev[2] else 0
+        for description in ev[3].split('\x14')[1:]:
+            if description:
+                events.append([onset, duration, description])
+            elif k == 0:
+                # The startdate/time of a file is specified in the EDF+ header
+                # fields 'startdate of recording' and 'starttime of recording'.
+                # These fields must indicate the absolute second in which the
+                # start of the first data record falls. So, the first TAL in
+                # the first data record always starts with +0.X, indicating
+                # that the first data record starts a fraction, X, of a second
+                # after the startdate/time that is specified in the EDF+
+                # header. If X=0, then the .X may be omitted.
+                offset = -onset
+
+    return zip(*events) if events else (list(), list(), list())
+
+
+def _get_edf_default_event_id(descriptions):
+    mapping = {a: n for n, a in enumerate(sorted(set(descriptions)), start=1)}
+    return mapping
+
+
+def _get_annotations_gdf(edf_info, sfreq):
+    onset, duration, desc = list(), list(), list()
+    events = edf_info.get('events', None)
+    # Annotations in GDF: events are stored as the following
+    # list: `events = [n_events, pos, typ, chn, dur]` where pos is the
+    # latency, dur is the duration in samples. 
They both are + # numpy.ndarray + if events is not None and events[1].shape[0] > 0: + onset = events[1] / sfreq + duration = events[4] / sfreq + desc = events[2] + + return onset, duration, desc diff --git a/python/libs/mne/io/edf/tests/__init__.py b/python/libs/mne/io/edf/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/edf/tests/test_edf.py b/python/libs/mne/io/edf/tests/test_edf.py new file mode 100644 index 0000000..41842c3 --- /dev/null +++ b/python/libs/mne/io/edf/tests/test_edf.py @@ -0,0 +1,588 @@ +# -*- coding: utf-8 -*- +# Authors: Teon Brooks +# Martin Billinger +# Alan Leggitt +# Alexandre Barachant +# Stefan Appelhoff +# Joan Massich +# +# License: BSD-3-Clause + +from contextlib import nullcontext +from functools import partial +import os.path as op +import inspect + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_equal, assert_allclose) +from scipy.io import loadmat + +import pytest + +from mne import pick_types, Annotations +from mne.annotations import events_from_annotations, read_annotations +from mne.datasets import testing +from mne.utils import requires_pandas, _record_warnings +from mne.io import read_raw_edf, read_raw_bdf, read_raw_fif, edf, read_raw_gdf +from mne.io.tests.test_raw import _test_raw_reader +from mne.io.edf.edf import (_get_edf_default_event_id, _read_annotations_edf, + _read_ch, _parse_prefilter_string, _edf_str, + _read_edf_header, _read_header) +from mne.io.pick import channel_indices_by_type, get_channel_type_constants +from mne.tests.test_annotations import _assert_annotations_equal + +td_mark = testing._pytest_mark() + +FILE = inspect.getfile(inspect.currentframe()) +data_dir = op.join(op.dirname(op.abspath(FILE)), 'data') +montage_path = op.join(data_dir, 'biosemi.hpts') # XXX: missing reader +bdf_path = op.join(data_dir, 'test.bdf') +edf_path = op.join(data_dir, 'test.edf') +duplicate_channel_labels_path = op.join(data_dir, + 'duplicate_channel_labels.edf') +edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf') +bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat') +edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat') +edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat') +edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf') +edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt') + +data_path = testing.data_path(download=False) +edf_stim_resamp_path = op.join(data_path, 'EDF', 'test_edf_stim_resamp.edf') +edf_overlap_annot_path = op.join(data_path, 'EDF', + 'test_edf_overlapping_annotations.edf') +edf_reduced = op.join(data_path, 'EDF', 'test_reduced.edf') +edf_annot_only = op.join(data_path, 'EDF', 'SC4001EC-Hypnogram.edf') +bdf_stim_channel_path = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf') +bdf_multiple_annotations_path = op.join(data_path, 'BDF', + 'multiple_annotation_chans.bdf') +test_generator_bdf = op.join(data_path, 'BDF', 'test_generator_2.bdf') +test_generator_edf = op.join(data_path, 'EDF', 'test_generator_2.edf') +edf_annot_sub_s_path = op.join(data_path, 'EDF', 'subsecond_starttime.edf') +edf_chtypes_path = op.join(data_path, 'EDF', 'chtypes_edf.edf') + +eog = ['REOG', 'LEOG', 'IEOG'] +misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2'] + + +def test_orig_units(): + """Test exposure of original channel units.""" + raw = read_raw_edf(edf_path, preload=True) + + # Test original units + orig_units = raw._orig_units + assert len(orig_units) == len(raw.ch_names) + 
assert orig_units['A1'] == 'µV' # formerly 'uV' edit by _check_orig_units + + +def test_subject_info(tmp_path): + """Test exposure of original channel units.""" + raw = read_raw_edf(edf_path) + assert raw.info['subject_info'] is None # XXX this is arguably a bug + edf_info = raw._raw_extras[0] + assert edf_info['subject_info'] is not None + want = {'id': 'X', 'sex': 'X', 'birthday': 'X', 'name': 'X'} + for key, val in want.items(): + assert edf_info['subject_info'][key] == val, key + fname = tmp_path / 'test_raw.fif' + raw.save(fname) + raw = read_raw_fif(fname) + assert raw.info['subject_info'] is None # XXX should eventually round-trip + + +def test_bdf_data(): + """Test reading raw bdf files.""" + # XXX BDF data for these is around 0.01 when it should be in the uV range, + # probably some bug + test_scaling = False + raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path, + eog=eog, misc=misc, + exclude=['M2', 'IEOG'], + test_scaling=test_scaling, + ) + assert len(raw_py.ch_names) == 71 + raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path, + montage='biosemi64', eog=eog, misc=misc, + exclude=['M2', 'IEOG'], + test_scaling=test_scaling) + assert len(raw_py.ch_names) == 71 + assert 'RawEDF' in repr(raw_py) + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + data_py, _ = raw_py[picks] + + # this .mat was generated using the EEG Lab Biosemi Reader + raw_eeglab = loadmat(bdf_eeglab_path) + raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts + data_eeglab = raw_eeglab[picks] + # bdf saved as a single, resolution to seven decimal points in matlab + assert_array_almost_equal(data_py, data_eeglab, 8) + + # Manually checking that float coordinates are imported + assert (raw_py.info['chs'][0]['loc']).any() + assert (raw_py.info['chs'][25]['loc']).any() + assert (raw_py.info['chs'][63]['loc']).any() + + +@testing.requires_testing_data +def test_bdf_crop_save_stim_channel(tmp_path): + """Test EDF with various sampling rates.""" + raw = read_raw_bdf(bdf_stim_channel_path) + raw.save(tmp_path / 'test-raw.fif', tmin=1.2, tmax=4.0, overwrite=True) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', [ + edf_reduced, + edf_overlap_annot_path, +]) +@pytest.mark.parametrize('stim_channel', (None, False, 'auto')) +def test_edf_others(fname, stim_channel): + """Test EDF with various sampling rates and overlapping annotations.""" + _test_raw_reader( + read_raw_edf, input_fname=fname, stim_channel=stim_channel, + verbose='error') + + +def test_edf_data_broken(tmp_path): + """Test edf files.""" + raw = _test_raw_reader(read_raw_edf, input_fname=edf_path, + exclude=['Ergo-Left', 'H10'], verbose='error') + raw_py = read_raw_edf(edf_path) + data = raw_py.get_data() + assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names)) + + # Test with number of records not in header (-1). 
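+    # (the EDF header keeps the record count as 8 ASCII bytes at offset
+    # 236-243; overwriting them with '-1' and truncating the payload mimics
+    # a recording that was never finalized, forcing the reader to infer
+    # n_records from the file size)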
+ broken_fname = op.join(tmp_path, 'broken.edf') + with open(edf_path, 'rb') as fid_in: + fid_in.seek(0, 2) + n_bytes = fid_in.tell() + fid_in.seek(0, 0) + rbytes = fid_in.read() + with open(broken_fname, 'wb') as fid_out: + fid_out.write(rbytes[:236]) + fid_out.write(b'-1 ') + fid_out.write(rbytes[244:244 + int(n_bytes * 0.4)]) + with pytest.warns(RuntimeWarning, + match='records .* not match the file size'): + raw = read_raw_edf(broken_fname, preload=True) + read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True) + + # Test with \x00's in the data + with open(broken_fname, 'wb') as fid_out: + fid_out.write(rbytes[:184]) + assert rbytes[184:192] == b'36096 ' + fid_out.write(rbytes[184:192].replace(b' ', b'\x00')) + fid_out.write(rbytes[192:]) + raw_py = read_raw_edf(broken_fname) + data_new = raw_py.get_data() + assert_allclose(data, data_new) + + +def test_duplicate_channel_labels_edf(): + """Test reading edf file with duplicate channel names.""" + EXPECTED_CHANNEL_NAMES = ['EEG F1-Ref-0', 'EEG F2-Ref', 'EEG F1-Ref-1'] + with pytest.warns(RuntimeWarning, match='Channel names are not unique'): + raw = read_raw_edf(duplicate_channel_labels_path, preload=False) + + assert raw.ch_names == EXPECTED_CHANNEL_NAMES + + +def test_parse_annotation(tmp_path): + """Test parsing the tal channel.""" + # test the parser + annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00' + b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00' + b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00' + b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00' + b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' + b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') + annot_file = tmp_path / 'annotations.txt' + with open(annot_file, "wb") as f: + f.write(annot) + + annot = [a for a in bytes(annot)] + annot[1::2] = [a * 256 for a in annot[1::2]] + tal_channel_A = np.array(list(map(sum, zip(annot[0::2], annot[1::2]))), + dtype=np.int64) + + with open(annot_file, 'rb') as fid: + # ch_data = np.fromfile(fid, dtype=' +# Nicolas Barascud +# +# License: BSD-3-Clause + +from datetime import datetime, timezone, timedelta +import os.path as op +import shutil + +import pytest +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_equal) +import numpy as np +import scipy.io as sio + +from mne.datasets import testing +from mne.io import read_raw_gdf +from mne.io.tests.test_raw import _test_raw_reader +from mne import pick_types, find_events, events_from_annotations + +data_path = testing.data_path(download=False) +gdf1_path = str(op.join(data_path, 'GDF', 'test_gdf_1.25')) +gdf2_path = str(op.join(data_path, 'GDF', 'test_gdf_2.20')) +gdf_1ch_path = op.join(data_path, 'GDF', 'test_1ch.gdf') + + +@testing.requires_testing_data +def test_gdf_data(): + """Test reading raw GDF 1.x files.""" + raw = read_raw_gdf(gdf1_path + '.gdf', eog=None, misc=None, preload=True) + picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads') + data, _ = raw[picks] + + # Test Status is added as event + EXPECTED_EVS_ONSETS = raw._raw_extras[0]['events'][1] + EXPECTED_EVS_ID = { + '{}'.format(evs): i for i, evs in enumerate( + [32769, 32770, 33024, 33025, 33026, 33027, 33028, 33029, 33040, + 33041, 33042, 33043, 33044, 33045, 33285, 33286], 1) + } + evs, evs_id = events_from_annotations(raw) + assert_array_equal(evs[:, 0], EXPECTED_EVS_ONSETS) + assert evs_id == EXPECTED_EVS_ID + + # this .npy was generated using the official biosig python package + raw_biosig = np.load(gdf1_path + 
'_biosig.npy') + raw_biosig = raw_biosig * 1e-6 # data are stored in microvolts + data_biosig = raw_biosig[picks] + + # Assert data are almost equal + assert_array_almost_equal(data, data_biosig, 8) + + # Test for events + assert len(raw.annotations.duration == 963) + + # gh-5604 + assert raw.info['meas_date'] is None + + +@testing.requires_testing_data +def test_gdf2_birthday(tmp_path): + """Test reading raw GDF 2.x files.""" + new_fname = tmp_path / 'temp.gdf' + shutil.copyfile(gdf2_path + '.gdf', new_fname) + # go back 44.5 years so the subject should show up as 44 + offset_edf = ( # to their ref + datetime.now(tz=timezone.utc) - + datetime(1, 1, 1, tzinfo=timezone.utc) + ) + offset_44_yr = offset_edf - timedelta(days=int(365 * 44.5)) # 44.5 yr ago + offset_44_yr_days = offset_44_yr.total_seconds() / (24 * 60 * 60) # days + d = (int(offset_44_yr_days) + 367) * 2 ** 32 # with their conversion + with open(new_fname, 'r+b') as fid: + fid.seek(176, 0) + assert np.fromfile(fid, np.uint64, 1)[0] == 0 + fid.seek(176, 0) + fid.write(np.array([d], np.uint64).tobytes()) + fid.seek(176, 0) + assert np.fromfile(fid, np.uint64, 1)[0] == d + raw = read_raw_gdf(new_fname, eog=None, misc=None, preload=True) + assert raw._raw_extras[0]['subject_info']['age'] == 44 + # XXX this is a bug, it should be populated... + assert raw.info['subject_info'] is None + + +@testing.requires_testing_data +def test_gdf2_data(): + """Test reading raw GDF 2.x files.""" + raw = read_raw_gdf(gdf2_path + '.gdf', eog=None, misc=None, preload=True) + assert raw._raw_extras[0]['subject_info']['age'] is None + + picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads') + data, _ = raw[picks] + + # This .mat was generated using the official biosig matlab package + mat = sio.loadmat(gdf2_path + '_biosig.mat') + data_biosig = mat['dat'] * 1e-6 # data are stored in microvolts + data_biosig = data_biosig[picks] + + # Assert data are almost equal + assert_array_almost_equal(data, data_biosig, 8) + + # Find events + events = find_events(raw, verbose=1) + events[:, 2] >>= 8 # last 8 bits are system events in biosemi files + assert_equal(events.shape[0], 2) # 2 events in file + assert_array_equal(events[:, 2], [20, 28]) + + # gh-5604 + assert raw.info['meas_date'] is None + _test_raw_reader(read_raw_gdf, input_fname=gdf2_path + '.gdf', + eog=None, misc=None, + test_scaling=False, # XXX this should be True + ) + + +@testing.requires_testing_data +def test_one_channel_gdf(): + """Test a one-channel GDF file.""" + with pytest.warns(RuntimeWarning, match='different highpass'): + ecg = read_raw_gdf(gdf_1ch_path, preload=True) + assert ecg['ECG'][0].shape == (1, 4500) + assert 150.0 == ecg.info['sfreq'] + + +@testing.requires_testing_data +def test_gdf_exclude_channels(): + """Test reading GDF data with excluded channels.""" + raw = read_raw_gdf(gdf1_path + '.gdf', exclude=('FP1', 'O1')) + assert 'FP1' not in raw.ch_names + assert 'O1' not in raw.ch_names + raw = read_raw_gdf(gdf2_path + '.gdf', exclude=('Fp1', 'O1')) + assert 'Fp1' not in raw.ch_names + assert 'O1' not in raw.ch_names + raw = read_raw_gdf(gdf2_path + '.gdf', exclude=".+z$") + assert 'AFz' not in raw.ch_names + assert 'Cz' not in raw.ch_names + assert 'Pz' not in raw.ch_names + assert 'Oz' not in raw.ch_names diff --git a/python/libs/mne/io/eeglab/__init__.py b/python/libs/mne/io/eeglab/__init__.py new file mode 100644 index 0000000..1573360 --- /dev/null +++ b/python/libs/mne/io/eeglab/__init__.py @@ -0,0 +1,5 @@ +"""EEGLAB module for conversion to FIF.""" + +# 
Author: Mainak Jas + +from .eeglab import read_raw_eeglab, read_epochs_eeglab diff --git a/python/libs/mne/io/eeglab/eeglab.py b/python/libs/mne/io/eeglab/eeglab.py new file mode 100644 index 0000000..6b401cf --- /dev/null +++ b/python/libs/mne/io/eeglab/eeglab.py @@ -0,0 +1,661 @@ +# Authors: Mainak Jas +# Jona Sassenhagen +# Stefan Appelhoff +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np + +from ..pick import _PICK_TYPES_KEYS +from ..utils import _read_segments_file, _find_channels +from ..constants import FIFF +from ..meas_info import create_info +from ..base import BaseRaw +from ...utils import (logger, verbose, warn, fill_doc, Bunch, _check_fname, + _import_pymatreader_funcs) +from ...channels import make_dig_montage +from ...epochs import BaseEpochs +from ...event import read_events +from ...annotations import Annotations, read_annotations + +# just fix the scaling for now, EEGLAB doesn't seem to provide this info +CAL = 1e-6 + + +def _check_eeglab_fname(fname, dataname): + """Check whether the filename is valid. + + Check if the file extension is ``.fdt`` (older ``.dat`` being invalid) or + whether the ``EEG.data`` filename exists. If ``EEG.data`` file is absent + the set file name with .set changed to .fdt is checked. + """ + fmt = str(op.splitext(dataname)[-1]) + if fmt == '.dat': + raise NotImplementedError( + 'Old data format .dat detected. Please update your EEGLAB ' + 'version and resave the data in .fdt format') + elif fmt != '.fdt': + raise IOError('Expected .fdt file format. Found %s format' % fmt) + + basedir = op.dirname(fname) + data_fname = op.join(basedir, dataname) + if not op.exists(data_fname): + fdt_from_set_fname = op.splitext(fname)[0] + '.fdt' + if op.exists(fdt_from_set_fname): + data_fname = fdt_from_set_fname + msg = ('Data file name in EEG.data ({}) is incorrect, the file ' + 'name must have changed on disk, using the correct file ' + 'name ({}).') + warn(msg.format(dataname, op.basename(fdt_from_set_fname))) + elif not data_fname == fdt_from_set_fname: + msg = 'Could not find the .fdt data file, tried {} and {}.' + raise FileNotFoundError(msg.format(data_fname, fdt_from_set_fname)) + return data_fname + + +def _check_load_mat(fname, uint16_codec): + """Check if the mat struct contains 'EEG'.""" + read_mat = _import_pymatreader_funcs('EEGLAB I/O') + eeg = read_mat(fname, uint16_codec=uint16_codec) + if 'ALLEEG' in eeg: + raise NotImplementedError( + 'Loading an ALLEEG array is not supported. 
Please contact '
+            'mne-python developers for more information.')
+    if 'EEG' in eeg:  # fields are contained in EEG structure
+        eeg = eeg['EEG']
+    eeg = eeg.get('EEG', eeg)  # handle nested EEG structure
+    eeg = Bunch(**eeg)
+    eeg.trials = int(eeg.trials)
+    eeg.nbchan = int(eeg.nbchan)
+    eeg.pnts = int(eeg.pnts)
+    return eeg
+
+
+def _to_loc(ll):
+    """Check if location exists."""
+    if isinstance(ll, (int, float)) or len(ll) > 0:
+        return ll
+    else:
+        return np.nan
+
+
+def _eeg_has_montage_information(eeg):
+    try:
+        from scipy.io.matlab import mat_struct
+    except ImportError:  # SciPy < 1.8
+        from scipy.io.matlab.mio5_params import mat_struct
+    if not len(eeg.chanlocs):
+        has_pos = False
+    else:
+        pos_fields = ['X', 'Y', 'Z']
+        if isinstance(eeg.chanlocs[0], mat_struct):
+            has_pos = all(hasattr(eeg.chanlocs[0], fld)
+                          for fld in pos_fields)
+        elif isinstance(eeg.chanlocs[0], np.ndarray):
+            # Old files
+            has_pos = all(fld in eeg.chanlocs[0].dtype.names
+                          for fld in pos_fields)
+        elif isinstance(eeg.chanlocs[0], dict):
+            # new files
+            has_pos = all(fld in eeg.chanlocs[0] for fld in pos_fields)
+        else:
+            has_pos = False  # unknown (sometimes we get [0, 0])
+
+    return has_pos
+
+
+def _get_montage_information(eeg, get_pos):
+    """Get channel name, type and montage information from ['chanlocs']."""
+    ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list()
+    unknown_types = dict()
+    for chanloc in eeg.chanlocs:
+        # channel name
+        ch_names.append(chanloc['labels'])
+
+        # channel type
+        ch_type = 'eeg'
+        try_type = chanloc.get('type', None)
+        if isinstance(try_type, str):
+            try_type = try_type.strip().lower()
+            if try_type in _PICK_TYPES_KEYS:
+                ch_type = try_type
+            else:
+                if try_type in unknown_types:
+                    unknown_types[try_type].append(chanloc['labels'])
+                else:
+                    unknown_types[try_type] = [chanloc['labels']]
+        ch_types.append(ch_type)
+
+        # channel loc
+        if get_pos:
+            loc_x = _to_loc(chanloc['X'])
+            loc_y = _to_loc(chanloc['Y'])
+            loc_z = _to_loc(chanloc['Z'])
+            locs = np.r_[-loc_y, loc_x, loc_z]
+            if not np.any(np.isnan(locs)):
+                pos_ch_names.append(chanloc['labels'])
+                pos.append(locs)
+
+    # warn if unknown types were provided
+    if len(unknown_types):
+        warn('Unknown types found, setting as type EEG:\n' +
+             '\n'.join([f'{key}: {sorted(unknown_types[key])}'
+                        for key in sorted(unknown_types)]))
+
+    if pos_ch_names:
+        montage = make_dig_montage(
+            ch_pos=dict(zip(ch_names, np.array(pos))),
+            coord_frame='head')
+    else:
+        montage = None
+
+    return ch_names, ch_types, montage
+
+
+def _get_info(eeg, eog=()):
+    """Get measurement info."""
+    # add the ch_names and info['chs'][idx]['loc']
+    if not isinstance(eeg.chanlocs, np.ndarray) and eeg.nbchan == 1:
+        eeg.chanlocs = [eeg.chanlocs]
+
+    if isinstance(eeg.chanlocs, dict):
+        eeg.chanlocs = _dol_to_lod(eeg.chanlocs)
+
+    eeg_has_ch_names_info = len(eeg.chanlocs) > 0
+
+    if eeg_has_ch_names_info:
+        has_pos = _eeg_has_montage_information(eeg)
+        ch_names, ch_types, eeg_montage = \
+            _get_montage_information(eeg, has_pos)
+        update_ch_names = False
+    else:  # if eeg.chanlocs is empty, we still need default chan names
+        ch_names = ["EEG %03d" % ii for ii in range(eeg.nbchan)]
+        ch_types = 'eeg'
+        eeg_montage = None
+        update_ch_names = True
+
+    info = create_info(ch_names, sfreq=eeg.srate, ch_types=ch_types)
+
+    eog = _find_channels(ch_names, ch_type='EOG') if eog == 'auto' else eog
+    for idx, ch in enumerate(info['chs']):
+        ch['cal'] = CAL
+        if ch['ch_name'] in eog or idx in eog:
+            ch['coil_type'] = FIFF.FIFFV_COIL_NONE
+            ch['kind'] = 
FIFF.FIFFV_EOG_CH + + return info, eeg_montage, update_ch_names + + +def _set_dig_montage_in_init(self, montage): + """Set EEG sensor configuration and head digitization from when init. + + This is done from the information within fname when + read_raw_eeglab(fname) or read_epochs_eeglab(fname). + """ + if montage is None: + self.set_montage(None) + else: + missing_channels = set(self.ch_names) - set(montage.ch_names) + ch_pos = dict(zip( + list(missing_channels), + np.full((len(missing_channels), 3), np.nan) + )) + self.set_montage( + montage + make_dig_montage(ch_pos=ch_pos, coord_frame='head') + ) + + +@fill_doc +def read_raw_eeglab(input_fname, eog=(), preload=False, + uint16_codec=None, verbose=None): + r"""Read an EEGLAB .set file. + + Parameters + ---------- + input_fname : str + Path to the .set file. If the data is stored in a separate .fdt file, + it is expected to be in the same folder as the .set file. + eog : list | tuple | 'auto' + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(preload)s + Note that preload=False will be effective only if the data is stored + in a separate binary file. + uint16_codec : str | None + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". ``uint16_codec`` allows to specify what codec (for example: + 'latin1' or 'utf-8') should be used when reading character arrays and + can therefore help you solve this problem. + %(verbose)s + + Returns + ------- + raw : instance of RawEEGLAB + A Raw object containing EEGLAB .set data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + return RawEEGLAB(input_fname=input_fname, preload=preload, + eog=eog, verbose=verbose, uint16_codec=uint16_codec) + + +@fill_doc +def read_epochs_eeglab(input_fname, events=None, event_id=None, + eog=(), verbose=None, uint16_codec=None): + r"""Reader function for EEGLAB epochs files. + + Parameters + ---------- + input_fname : str + Path to the .set file. If the data is stored in a separate .fdt file, + it is expected to be in the same folder as the .set file. + events : str | array, shape (n_events, 3) | None + Path to events file. If array, it is the events typically returned + by the read_events function. If some events don't match the events + of interest as specified by event_id, they will be marked as 'IGNORED' + in the drop log. If None, it is constructed from the EEGLAB (.set) file + with each unique event encoded with a different integer. + event_id : int | list of int | dict | None + The id of the event to consider. If dict, the keys can later be used + to access associated events. + Example:: + + {"auditory":1, "visual":3} + + If int, a dict will be created with + the id as string. If a list, all events with the IDs specified + in the list are used. If None, the event_id is constructed from the + EEGLAB (.set) file with each descriptions copied from ``eventtype``. + eog : list | tuple | 'auto' + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(verbose)s + uint16_codec : str | None + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". 
``uint16_codec`` allows to specify what codec (for example: + 'latin1' or 'utf-8') should be used when reading character arrays and + can therefore help you solve this problem. + + Returns + ------- + epochs : instance of Epochs + The epochs. + + See Also + -------- + mne.Epochs : Documentation of attribute and methods. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + epochs = EpochsEEGLAB(input_fname=input_fname, events=events, eog=eog, + event_id=event_id, verbose=verbose, + uint16_codec=uint16_codec) + return epochs + + +@fill_doc +class RawEEGLAB(BaseRaw): + r"""Raw object from EEGLAB .set file. + + Parameters + ---------- + input_fname : str + Path to the .set file. If the data is stored in a separate .fdt file, + it is expected to be in the same folder as the .set file. + eog : list | tuple | 'auto' + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(preload)s + Note that preload=False will be effective only if the data is stored + in a separate binary file. + uint16_codec : str | None + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". ``uint16_codec`` allows to specify what codec (for example: + 'latin1' or 'utf-8') should be used when reading character arrays and + can therefore help you solve this problem. + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + + @verbose + def __init__(self, input_fname, eog=(), + preload=False, uint16_codec=None, verbose=None): # noqa: D102 + input_fname = _check_fname(input_fname, 'read', True, 'input_fname') + eeg = _check_load_mat(input_fname, uint16_codec) + if eeg.trials != 1: + raise TypeError('The number of trials is %d. It must be 1 for raw' + ' files. Please use `mne.io.read_epochs_eeglab` if' + ' the .set file contains epochs.' % eeg.trials) + + last_samps = [eeg.pnts - 1] + info, eeg_montage, _ = _get_info(eeg, eog=eog) + + # read the data + if isinstance(eeg.data, str): + data_fname = _check_eeglab_fname(input_fname, eeg.data) + logger.info('Reading %s' % data_fname) + + super(RawEEGLAB, self).__init__( + info, preload, filenames=[data_fname], last_samps=last_samps, + orig_format='double', verbose=verbose) + else: + if preload is False or isinstance(preload, str): + warn('Data will be preloaded. 
preload=False or a string '
+                     'preload is not supported when the data is stored in '
+                     'the .set file')
+            # can't be done in standard way with preload=True because of
+            # different reading path (.set file)
+            if eeg.nbchan == 1 and len(eeg.data.shape) == 1:
+                n_chan, n_times = [1, eeg.data.shape[0]]
+            else:
+                n_chan, n_times = eeg.data.shape
+            data = np.empty((n_chan, n_times), dtype=float)
+            data[:n_chan] = eeg.data
+            data *= CAL
+            super(RawEEGLAB, self).__init__(
+                info, data, filenames=[input_fname], last_samps=last_samps,
+                orig_format='double', verbose=verbose)
+
+        # create event_ch from annotations
+        annot = read_annotations(input_fname)
+        self.set_annotations(annot)
+        _check_boundary(annot, None)
+
+        _set_dig_montage_in_init(self, eeg_montage)
+
+        latencies = np.round(annot.onset * self.info['sfreq'])
+        _check_latencies(latencies)
+
+    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
+        """Read a chunk of raw data."""
+        _read_segments_file(
+            self, data, idx, fi, start, stop, cals, mult, dtype='<f4')
+
+
+class EpochsEEGLAB(BaseEpochs):
+    r"""Epochs from EEGLAB .set file.
+
+    Parameters are those of :func:`read_epochs_eeglab` above, plus the usual
+    epoching arguments (``tmin``, ``baseline``, ``reject``, ``flat``,
+    ``reject_tmin``, ``reject_tmax``).
+
+    See Also
+    --------
+    mne.Epochs : Documentation of attribute and methods.
+
+    Notes
+    -----
+    .. versionadded:: 0.11.0
+    """
+
+    @verbose
+    def __init__(self, input_fname, events=None, event_id=None, tmin=0,
+                 baseline=None, reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, eog=(), verbose=None,
+                 uint16_codec=None):  # noqa: D102
+        input_fname = _check_fname(input_fname, 'read', True, 'input_fname')
+        eeg = _check_load_mat(input_fname, uint16_codec)
+
+        if not ((events is None and event_id is None) or
+                (events is not None and event_id is not None)):
+            raise ValueError('Both `events` and `event_id` must be '
+                             'None or not None')
+
+        if eeg.trials <= 1:
+            raise ValueError('The file does not seem to contain epochs '
+                             '(trials less than 2). You should try using '
+                             'the read_raw_eeglab function.')
+
+        if events is None and eeg.trials > 1:
+            # first extract the events and construct an event_id dict
+            event_name, event_latencies, unique_ev = list(), list(), list()
+            ev_idx = 0
+            warn_multiple_events = False
+            epochs = _bunchify(eeg.epoch)
+            events = _bunchify(eeg.event)
+            for ep in epochs:
+                if isinstance(ep.eventtype, (int, float)):
+                    ep.eventtype = str(ep.eventtype)
+                if not isinstance(ep.eventtype, str):
+                    event_type = '/'.join([str(et) for et in ep.eventtype])
+                    event_name.append(event_type)
+                    # store latency of only first event
+                    event_latencies.append(events[ev_idx].latency)
+                    ev_idx += len(ep.eventtype)
+                    warn_multiple_events = True
+                else:
+                    event_type = ep.eventtype
+                    event_name.append(ep.eventtype)
+                    event_latencies.append(events[ev_idx].latency)
+                    ev_idx += 1
+
+                if event_type not in unique_ev:
+                    unique_ev.append(event_type)
+
+                # invent an event dict, but use id > 0 so you know it's a trigger
+                event_id = {ev: idx + 1 for idx, ev in enumerate(unique_ev)}
+
+            # warn about multiple events in epoch if necessary
+            if warn_multiple_events:
+                warn('At least one epoch has multiple events. Only the latency'
+                     ' of the first event will be retained.')
+
+            # now fill up the event array
+            events = np.zeros((eeg.trials, 3), dtype=int)
+            for idx in range(0, eeg.trials):
+                if idx == 0:
+                    prev_stim = 0
+                elif (idx > 0 and
+                        event_latencies[idx] - event_latencies[idx - 1] == 1):
+                    prev_stim = event_id[event_name[idx - 1]]
+                events[idx, 0] = event_latencies[idx]
+                events[idx, 1] = prev_stim
+                events[idx, 2] = event_id[event_name[idx]]
+        elif isinstance(events, str):
+            events = read_events(events)
+
+        logger.info('Extracting parameters from %s...' 
% input_fname) + info, eeg_montage, _ = _get_info(eeg, eog=eog) + + for key, val in event_id.items(): + if val not in events[:, 2]: + raise ValueError('No matching events found for %s ' + '(event id %i)' % (key, val)) + + if isinstance(eeg.data, str): + data_fname = _check_eeglab_fname(input_fname, eeg.data) + with open(data_fname, 'rb') as data_fid: + data = np.fromfile(data_fid, dtype=np.float32) + data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), + order="F") + else: + data = eeg.data + + if eeg.nbchan == 1 and len(data.shape) == 2: + data = data[np.newaxis, :] + data = data.transpose((2, 0, 1)).astype('double') + data *= CAL + assert data.shape == (eeg.trials, eeg.nbchan, eeg.pnts) + tmin, tmax = eeg.xmin, eeg.xmax + + super(EpochsEEGLAB, self).__init__( + info, data, events, event_id, tmin, tmax, baseline, + reject=reject, flat=flat, reject_tmin=reject_tmin, + reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) + + # data are preloaded but _bad_dropped is not set so we do it here: + self._bad_dropped = True + + _set_dig_montage_in_init(self, eeg_montage) + + logger.info('Ready.') + + +def _check_boundary(annot, event_id): + if event_id is None: + event_id = dict() + if "boundary" in annot.description and "boundary" not in event_id: + warn("The data contains 'boundary' events, indicating data " + "discontinuities. Be cautious of filtering and epoching around " + "these events.") + + +def _check_latencies(latencies): + if (latencies < -1).any(): + raise ValueError('At least one event sample index is negative. Please' + ' check if EEG.event.sample values are correct.') + if (latencies == -1).any(): + warn("At least one event has a sample index of -1. This usually is " + "a consequence of how eeglab handles event latency after " + "resampling - especially when you had a boundary event at the " + "beginning of the file. Please make sure that the events at " + "the very beginning of your EEGLAB file can be safely dropped " + "(e.g., because they are boundary events).") + + +def _bunchify(items): + if isinstance(items, dict): + items = _dol_to_lod(items) + if len(items) > 0 and isinstance(items[0], dict): + items = [Bunch(**item) for item in items] + return items + + +def _read_annotations_eeglab(eeg, uint16_codec=None): + r"""Create Annotations from EEGLAB file. + + This function reads the event attribute from the EEGLAB + structure and makes an :class:`mne.Annotations` object. + + Parameters + ---------- + eeg : object | str + 'EEG' struct or the path to the (EEGLAB) .set file. + uint16_codec : str | None + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". ``uint16_codec`` allows to specify what codec (for example: + 'latin1' or 'utf-8') should be used when reading character arrays and + can therefore help you solve this problem. + + Returns + ------- + annotations : instance of Annotations + The annotations present in the file. 
+ """ + if isinstance(eeg, str): + eeg = _check_load_mat(eeg, uint16_codec=uint16_codec) + + if not hasattr(eeg, 'event'): + events = [] + elif isinstance(eeg.event, dict) and \ + np.array(eeg.event['latency']).ndim > 0: + events = _dol_to_lod(eeg.event) + elif not isinstance(eeg.event, (np.ndarray, list)): + events = [eeg.event] + else: + events = eeg.event + events = _bunchify(events) + description = [str(event.type) for event in events] + onset = [event.latency - 1 for event in events] + duration = np.zeros(len(onset)) + if len(events) > 0 and hasattr(events[0], 'duration'): + for idx, event in enumerate(events): + # empty duration fields are read as empty arrays + is_empty_array = (isinstance(event.duration, np.ndarray) + and len(event.duration) == 0) + duration[idx] = np.nan if is_empty_array else event.duration + + return Annotations(onset=np.array(onset) / eeg.srate, + duration=duration / eeg.srate, + description=description, + orig_time=None) + + +def _dol_to_lod(dol): + """Convert a dict of lists to a list of dicts.""" + return [{key: dol[key][ii] for key in dol.keys()} + for ii in range(len(dol[list(dol.keys())[0]]))] diff --git a/python/libs/mne/io/eeglab/tests/__init__.py b/python/libs/mne/io/eeglab/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/eeglab/tests/test_eeglab.py b/python/libs/mne/io/eeglab/tests/test_eeglab.py new file mode 100644 index 0000000..24abb7a --- /dev/null +++ b/python/libs/mne/io/eeglab/tests/test_eeglab.py @@ -0,0 +1,456 @@ +# Author: Mainak Jas +# Mikolaj Magnuski +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from copy import deepcopy +import os.path as op +import shutil + +import numpy as np +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_equal, assert_allclose) +import pytest +from scipy import io + +from mne import write_events, read_epochs_eeglab +from mne.channels import read_custom_montage +from mne.io import read_raw_eeglab +from mne.io.eeglab.eeglab import _get_montage_information, _dol_to_lod +from mne.io.tests.test_raw import _test_raw_reader +from mne.datasets import testing +from mne.utils import Bunch +from mne.annotations import events_from_annotations, read_annotations + +base_dir = op.join(testing.data_path(download=False), 'EEGLAB') + +raw_fname_mat = op.join(base_dir, 'test_raw.set') +raw_fname_onefile_mat = op.join(base_dir, 'test_raw_onefile.set') +raw_fname_event_duration = op.join(base_dir, 'test_raw_event_duration.set') +epochs_fname_mat = op.join(base_dir, 'test_epochs.set') +epochs_fname_onefile_mat = op.join(base_dir, 'test_epochs_onefile.set') +raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat] +epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat] +raw_fname_chanloc = op.join(base_dir, 'test_raw_chanloc.set') +raw_fname_2021 = op.join(base_dir, 'test_raw_2021.set') +raw_fname_h5 = op.join(base_dir, 'test_raw_h5.set') +raw_fname_onefile_h5 = op.join(base_dir, 'test_raw_onefile_h5.set') +epochs_fname_h5 = op.join(base_dir, 'test_epochs_h5.set') +epochs_fname_onefile_h5 = op.join(base_dir, 'test_epochs_onefile_h5.set') +raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5] +epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5] + +montage_path = op.join(base_dir, 'test_chans.locs') + + +pymatreader = pytest.importorskip('pymatreader') # module-level + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', [ + raw_fname_mat, + raw_fname_h5, + raw_fname_chanloc, +], ids=op.basename) +def 
test_io_set_raw(fname): + """Test importing EEGLAB .set files.""" + montage = read_custom_montage(montage_path) + montage.ch_names = [ + 'EEG {0:03d}'.format(ii) for ii in range(len(montage.ch_names)) + ] + + kws = dict(reader=read_raw_eeglab, input_fname=fname) + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = _test_raw_reader(**kws) + elif '_h5' in fname: # should be safe enough, and much faster + raw0 = read_raw_eeglab(fname, preload=True) + else: + raw0 = _test_raw_reader(**kws) + + # test that preloading works + if fname.endswith('test_raw_chanloc.set'): + raw0.set_montage(montage, on_missing='ignore') + # crop to check if the data has been properly preloaded; we cannot + # filter as the snippet of raw data is very short + raw0.crop(0, 1) + else: + raw0.set_montage(montage) + raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', + phase='zero') + + # test that using uint16_codec does not break stuff + read_raw_kws = dict(input_fname=fname, preload=False, uint16_codec='ascii') + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage, on_missing='ignore') + else: + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage) + + # Annotations + if fname != raw_fname_chanloc: + assert len(raw0.annotations) == 154 + assert set(raw0.annotations.description) == {'rt', 'square'} + assert_array_equal(raw0.annotations.duration, 0.) + + +@testing.requires_testing_data +def test_io_set_raw_more(tmp_path): + """Test importing EEGLAB .set files.""" + tmp_path = str(tmp_path) + eeg = io.loadmat(raw_fname_mat, struct_as_record=False, + squeeze_me=True)['EEG'] + + # test reading file with one event (read old version) + negative_latency_fname = op.join(tmp_path, 'test_negative_latency.set') + evnts = deepcopy(eeg.event[0]) + evnts.latency = 0 + io.savemat(negative_latency_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, + 'data': 'test_negative_latency.fdt', + 'epoch': eeg.epoch, 'event': evnts, + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + shutil.copyfile(op.join(base_dir, 'test_raw.fdt'), + negative_latency_fname.replace('.set', '.fdt')) + with pytest.warns(RuntimeWarning, match="has a sample index of -1."): + read_raw_eeglab(input_fname=negative_latency_fname, preload=True) + + # test negative event latencies + evnts.latency = -1 + io.savemat(negative_latency_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, + 'data': 'test_negative_latency.fdt', + 'epoch': eeg.epoch, 'event': evnts, + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + with pytest.raises(ValueError, match='event sample index is negative'): + with pytest.warns(RuntimeWarning, match="has a sample index of -1."): + read_raw_eeglab(input_fname=negative_latency_fname, preload=True) + + # test overlapping events + overlap_fname = op.join(tmp_path, 'test_overlap_event.set') + io.savemat(overlap_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, 'data': 'test_overlap_event.fdt', + 'epoch': eeg.epoch, + 'event': [eeg.event[0], eeg.event[0]], + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + shutil.copyfile(op.join(base_dir, 'test_raw.fdt'), + overlap_fname.replace('.set', '.fdt')) + 
read_raw_eeglab(input_fname=overlap_fname, preload=True) + + # test reading file with empty event durations + empty_dur_fname = op.join(tmp_path, 'test_empty_durations.set') + evnts = deepcopy(eeg.event) + for ev in evnts: + ev.duration = np.array([], dtype='float') + + io.savemat(empty_dur_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, + 'data': 'test_negative_latency.fdt', + 'epoch': eeg.epoch, 'event': evnts, + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + shutil.copyfile(op.join(base_dir, 'test_raw.fdt'), + empty_dur_fname.replace('.set', '.fdt')) + raw = read_raw_eeglab(input_fname=empty_dur_fname, preload=True) + assert (raw.annotations.duration == 0).all() + + # test reading file when the EEG.data name is wrong + io.savemat(overlap_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, 'data': 'test_overla_event.fdt', + 'epoch': eeg.epoch, + 'event': [eeg.event[0], eeg.event[0]], + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + with pytest.warns(RuntimeWarning, match="must have changed on disk"): + read_raw_eeglab(input_fname=overlap_fname, preload=True) + + # raise error when both EEG.data and fdt name from set are wrong + overlap_fname = op.join(tmp_path, 'test_ovrlap_event.set') + io.savemat(overlap_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': eeg.nbchan, 'data': 'test_overla_event.fdt', + 'epoch': eeg.epoch, + 'event': [eeg.event[0], eeg.event[0]], + 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, + appendmat=False, oned_as='row') + with pytest.raises(FileNotFoundError, match="not find the .fdt data file"): + read_raw_eeglab(input_fname=overlap_fname, preload=True) + + # test reading file with one channel + one_chan_fname = op.join(tmp_path, 'test_one_channel.set') + io.savemat(one_chan_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, + 'nbchan': 1, 'data': np.random.random((1, 3)), + 'epoch': eeg.epoch, 'event': eeg.epoch, + 'chanlocs': {'labels': 'E1', 'Y': -6.6069, + 'X': 6.3023, 'Z': -2.9423}, + 'times': eeg.times[:3], 'pnts': 3}}, + appendmat=False, oned_as='row') + read_raw_eeglab(input_fname=one_chan_fname, preload=True) + + # test reading file with 3 channels - one without position information + # first, create chanlocs structured array + ch_names = ['F3', 'unknown', 'FPz'] + x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan] + dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')] + nopos_dt = [('labels', 'S10'), ('Z', 'f8')] + chanlocs = np.zeros((3,), dtype=dt) + nopos_chanlocs = np.zeros((3,), dtype=nopos_dt) + for ind, vals in enumerate(zip(ch_names, x, y, z)): + for fld in range(4): + chanlocs[ind][dt[fld][0]] = vals[fld] + if fld in (0, 3): + nopos_chanlocs[ind][dt[fld][0]] = vals[fld] + # In theory this should work and be simpler, but there is an obscure + # SciPy writing bug that pops up sometimes: + # nopos_chanlocs = np.array(chanlocs[['labels', 'Z']]) + + # test reading channel names but not positions when there is no X (only Z) + # field in the EEG.chanlocs structure + nopos_fname = op.join(tmp_path, 'test_no_chanpos.set') + io.savemat(nopos_fname, + {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3, + 'data': np.random.random((3, 2)), 'epoch': eeg.epoch, + 'event': eeg.epoch, 'chanlocs': nopos_chanlocs, + 'times': eeg.times[:2], 'pnts': 2}}, + appendmat=False, oned_as='row') + # load the file + raw = read_raw_eeglab(input_fname=nopos_fname, preload=True) 
+
+    # test that channel names have been loaded but not channel positions
+    for i in range(3):
+        assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
+        assert_array_equal(raw.info['chs'][i]['loc'][:3],
+                           np.array([np.nan, np.nan, np.nan]))
+
+
+@pytest.mark.timeout(60)  # ~60 sec on Travis OSX
+@testing.requires_testing_data
+@pytest.mark.parametrize('fnames', [
+    epochs_mat_fnames,
+    pytest.param(epochs_h5_fnames, marks=[pytest.mark.slowtest]),
+])
+def test_io_set_epochs(fnames):
+    """Test importing EEGLAB .set epochs files."""
+    epochs_fname, epochs_fname_onefile = fnames
+    with pytest.warns(RuntimeWarning, match='multiple events'):
+        epochs = read_epochs_eeglab(epochs_fname)
+    with pytest.warns(RuntimeWarning, match='multiple events'):
+        epochs2 = read_epochs_eeglab(epochs_fname_onefile)
+    # one warning for each read_epochs_eeglab because both files have epochs
+    # associated with multiple events
+    assert_array_equal(epochs.get_data(), epochs2.get_data())
+
+
+@testing.requires_testing_data
+def test_io_set_epochs_events(tmp_path):
+    """Test different combinations of events and event_ids."""
+    tmp_path = str(tmp_path)
+    out_fname = op.join(tmp_path, 'test-eve.fif')
+    events = np.array([[4, 0, 1], [12, 0, 2], [20, 0, 3], [26, 0, 3]])
+    write_events(out_fname, events)
+    event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3}
+    out_fname = op.join(tmp_path, 'test-eve.fif')
+    epochs = read_epochs_eeglab(epochs_fname_mat, events, event_id)
+    assert_equal(len(epochs.events), 4)
+    assert epochs.preload
+    assert epochs._bad_dropped
+    epochs = read_epochs_eeglab(epochs_fname_mat, out_fname, event_id)
+    pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
+                  None, event_id)
+    pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat,
+                  epochs.events, None)
+
+
+@testing.requires_testing_data
+def test_degenerate(tmp_path):
+    """Test some degenerate conditions."""
+    # test if .dat file raises an error
+    tmp_path = str(tmp_path)
+    eeg = io.loadmat(epochs_fname_mat, struct_as_record=False,
+                     squeeze_me=True)['EEG']
+    eeg.data = 'epochs_fname.dat'
+    bad_epochs_fname = op.join(tmp_path, 'test_epochs.set')
+    io.savemat(bad_epochs_fname,
+               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
+                        'nbchan': eeg.nbchan, 'data': eeg.data,
+                        'epoch': eeg.epoch, 'event': eeg.event,
+                        'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
+               appendmat=False, oned_as='row')
+    shutil.copyfile(op.join(base_dir, 'test_epochs.fdt'),
+                    op.join(tmp_path, 'test_epochs.dat'))
+    with pytest.warns(RuntimeWarning, match='multiple events'):
+        pytest.raises(NotImplementedError, read_epochs_eeglab,
+                      bad_epochs_fname)
+
+
+@pytest.mark.parametrize("fname", [
+    raw_fname_mat,
+    raw_fname_onefile_mat,
+    # We don't test the h5 variants here because they are implicitly tested
+    # in test_io_set_raw
+])
+@pytest.mark.filterwarnings('ignore: Complex objects')
+@testing.requires_testing_data
+def test_eeglab_annotations(fname):
+    """Test reading annotations in EEGLAB files."""
+    annotations = read_annotations(fname)
+    assert len(annotations) == 154
+    assert set(annotations.description) == {'rt', 'square'}
+    assert np.all(annotations.duration == 0.)
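
For orientation, a minimal usage sketch of the readers these tests exercise; the paths are hypothetical stand-ins, not files shipped with this patch:

    import mne

    # Continuous data; a separate .fdt file must sit next to the .set file.
    raw = mne.io.read_raw_eeglab('sample.set', preload=True)
    # EEGLAB events arrive as annotations; convert them explicitly if needed.
    events, event_id = mne.events_from_annotations(raw)

    # Epoched .set files go through the dedicated epochs reader instead.
    epochs = mne.read_epochs_eeglab('sample_epochs.set')
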
+
+
+@testing.requires_testing_data
+def test_eeglab_read_annotations():
+    """Test annotations onsets are timestamps (+ validate some)."""
+    annotations = read_annotations(raw_fname_mat)
+    validation_samples = [0, 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
+    expected_onset = np.array([1.00, 1.69, 2.08, 4.70, 7.71, 11.30, 17.18,
+                               20.20, 26.12, 29.14, 35.25, 44.30, 47.15])
+    assert annotations.orig_time is None
+    assert_array_almost_equal(annotations.onset[validation_samples],
+                              expected_onset, decimal=2)
+
+    # test if event durations are imported correctly
+    raw = read_raw_eeglab(raw_fname_event_duration, preload=True)
+    # file contains 3 annotations with 0.5 s (64 samples) duration each
+    assert_allclose(raw.annotations.duration, np.ones(3) * 0.5)
+
+
+@testing.requires_testing_data
+def test_eeglab_event_from_annot():
+    """Test all forms of obtaining annotations."""
+    raw_fname_mat = op.join(base_dir, 'test_raw.set')
+    raw_fname = raw_fname_mat
+    event_id = {'rt': 1, 'square': 2}
+    raw1 = read_raw_eeglab(input_fname=raw_fname, preload=False)
+
+    annotations = read_annotations(raw_fname)
+    assert len(raw1.annotations) == 154
+    raw1.set_annotations(annotations)
+    events_b, _ = events_from_annotations(raw1, event_id=event_id)
+    assert len(events_b) == 154
+
+
+def _assert_array_allclose_nan(left, right):
+    assert_array_equal(np.isnan(left), np.isnan(right))
+    assert_allclose(left[~np.isnan(left)], right[~np.isnan(left)], atol=1e-8)
+
+
+@pytest.fixture(scope='session')
+def one_chanpos_fname(tmp_path_factory):
+    """Test file with 3 channels to exercise EEGLAB reader.
+
+    File characteristics
+    - ch_names: 'F3', 'unknown', 'FPz'
+    - 'FPz' has no position information.
+    - the remaining values are arbitrary
+
+    Notes from when this code was factorized:
+    # test reading file with one event (read old version)
+    """
+    fname = str(tmp_path_factory.mktemp('data') / 'test_chanpos.set')
+    file_content = dict(EEG={
+        'trials': 1, 'nbchan': 3, 'pnts': 3, 'epoch': [], 'event': [],
+        'srate': 128, 'times': np.array([0., 0.1, 0.2]),
+        'data': np.empty([3, 3]),
+        'chanlocs': np.array(
+            [(b'F3', 1., 4., 7.),
+             (b'unknown', 2., 5., 8.),
+             (b'FPz', np.nan, np.nan, np.nan)],
+            dtype=[('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
+        )
+    })
+
+    io.savemat(file_name=fname, mdict=file_content, appendmat=False,
+               oned_as='row')
+
+    return fname
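
A small illustration of the _dol_to_lod helper these tests import from mne.io.eeglab.eeglab; the input dict below is made up. pymatreader returns EEG.chanlocs as a dict of lists, while the reader wants one dict per channel:

    from mne.io.eeglab.eeglab import _dol_to_lod

    dol = {'labels': ['F3', 'FPz'], 'X': [1.0, 2.0]}  # dict of lists
    lod = _dol_to_lod(dol)                            # list of dicts
    assert lod == [{'labels': 'F3', 'X': 1.0}, {'labels': 'FPz', 'X': 2.0}]
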
+
+
+@testing.requires_testing_data
+def test_position_information(one_chanpos_fname):
+    """Test reading file with 3 channels - one without position information."""
+    nan = np.nan
+    EXPECTED_LOCATIONS_FROM_FILE = np.array([
+        [-4., 1., 7., 0., 0., 0., nan, nan, nan, nan, nan, nan],
+        [-5., 2., 8., 0., 0., 0., nan, nan, nan, nan, nan, nan],
+        [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
+    ])
+
+    EXPECTED_LOCATIONS_FROM_MONTAGE = np.array([
+        [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
+        [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
+        [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
+    ])
+
+    raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
+    assert_array_equal(np.array([ch['loc'] for ch in raw.info['chs']]),
+                       EXPECTED_LOCATIONS_FROM_FILE)
+
+    # To accommodate the new behavior so that
+    # read_raw_eeglab(.. montage=montage) and raw.set_montage(montage)
+    # behave the same, we need to flush the montage; otherwise we get
+    # a mix of what is in the montage and what is in the file
+    raw = read_raw_eeglab(
+        input_fname=one_chanpos_fname,
+        preload=True,
+    ).set_montage(None)  # Flush the montage built into input_fname
+
+    _assert_array_allclose_nan(np.array([ch['loc'] for ch in raw.info['chs']]),
+                               EXPECTED_LOCATIONS_FROM_MONTAGE)
+
+    _assert_array_allclose_nan(np.array([ch['loc'] for ch in raw.info['chs']]),
+                               EXPECTED_LOCATIONS_FROM_MONTAGE)
+
+
+@testing.requires_testing_data
+def test_io_set_raw_2021():
+    """Test reading new default file format (no EEG struct)."""
+    assert "EEG" not in io.loadmat(raw_fname_2021)
+    _test_raw_reader(reader=read_raw_eeglab, input_fname=raw_fname_2021,
+                     test_preloading=False, preload=True)
+
+
+@testing.requires_testing_data
+def test_read_single_epoch():
+    """Test reading raw set file as an Epochs instance."""
+    with pytest.raises(ValueError, match='trials less than 2'):
+        read_epochs_eeglab(raw_fname_mat)
+
+
+@testing.requires_testing_data
+def test_get_montage_info_with_ch_type():
+    """Test that the channel types are properly returned."""
+    mat = pymatreader.read_mat(raw_fname_onefile_mat, uint16_codec=None)
+    n = len(mat['EEG']['chanlocs']['labels'])
+    mat['EEG']['chanlocs']['type'] = ['eeg'] * (n - 2) + ['eog'] + ['stim']
+    mat['EEG']['chanlocs'] = _dol_to_lod(mat['EEG']['chanlocs'])
+    mat['EEG'] = Bunch(**mat['EEG'])
+    ch_names, ch_types, montage = _get_montage_information(mat['EEG'], False)
+    assert len(ch_names) == len(ch_types) == n
+    assert ch_types == ['eeg'] * (n - 2) + ['eog'] + ['stim']
+    assert montage is None
+
+    # test unknown type warning
+    mat = pymatreader.read_mat(raw_fname_onefile_mat, uint16_codec=None)
+    n = len(mat['EEG']['chanlocs']['labels'])
+    mat['EEG']['chanlocs']['type'] = ['eeg'] * (n - 2) + ['eog'] + ['unknown']
+    mat['EEG']['chanlocs'] = _dol_to_lod(mat['EEG']['chanlocs'])
+    mat['EEG'] = Bunch(**mat['EEG'])
+    with pytest.warns(RuntimeWarning, match='Unknown types found'):
+        ch_names, ch_types, montage = \
+            _get_montage_information(mat['EEG'], False)
diff --git a/python/libs/mne/io/egi/__init__.py b/python/libs/mne/io/egi/__init__.py
new file mode 100644
index 0000000..dccf8e6
--- /dev/null
+++ b/python/libs/mne/io/egi/__init__.py
@@ -0,0 +1,6 @@
+"""EGI module for conversion to FIF."""
+
+# Author: Denis A. Engemann
+
+from .egi import read_raw_egi
+from .egimff import read_evokeds_mff
diff --git a/python/libs/mne/io/egi/egi.py b/python/libs/mne/io/egi/egi.py
new file mode 100644
index 0000000..26fccd2
--- /dev/null
+++ b/python/libs/mne/io/egi/egi.py
@@ -0,0 +1,275 @@
+# Authors: Denis A. Engemann
+#          Teon Brooks
+#
+# simplified BSD-3 license
+
+import datetime
+import time
+
+import numpy as np
+
+from .egimff import _read_raw_egi_mff
+from .events import _combine_triggers
+from ..base import BaseRaw
+from ..utils import _read_segments_file, _create_chs
+from ..meas_info import _empty_info
+from ..constants import FIFF
+from ...utils import verbose, logger, warn, _validate_type, _check_fname
+
+
+def _read_header(fid):
+    """Read EGI binary header."""
+    version = np.fromfile(fid, '>i4', 1)[0]
+
+    if version > 6 & ~np.bitwise_and(version, 6):
+        version = version.byteswap().astype(np.uint32)
+    else:
+        raise ValueError('Watchout. 
This does not seem to be a simple '
+                         'binary EGI file.')
+
+    def my_fread(*x, **y):
+        return np.fromfile(*x, **y)[0]
+
+    info = dict(
+        version=version,
+        year=my_fread(fid, '>i2', 1),
+        month=my_fread(fid, '>i2', 1),
+        day=my_fread(fid, '>i2', 1),
+        hour=my_fread(fid, '>i2', 1),
+        minute=my_fread(fid, '>i2', 1),
+        second=my_fread(fid, '>i2', 1),
+        millisecond=my_fread(fid, '>i4', 1),
+        samp_rate=my_fread(fid, '>i2', 1),
+        n_channels=my_fread(fid, '>i2', 1),
+        gain=my_fread(fid, '>i2', 1),
+        bits=my_fread(fid, '>i2', 1),
+        value_range=my_fread(fid, '>i2', 1)
+    )
+
+    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
+    precision = np.bitwise_and(version, 6)
+    if precision == 0:
+        raise RuntimeError('Floating point precision is undefined.')
+
+    if unsegmented:
+        info.update(dict(n_categories=0,
+                         n_segments=1,
+                         n_samples=np.fromfile(fid, '>i4', 1)[0],
+                         n_events=np.fromfile(fid, '>i2', 1)[0],
+                         event_codes=[],
+                         category_names=[],
+                         category_lengths=[],
+                         pre_baseline=0))
+        for event in range(info['n_events']):
+            event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1'))
+            info['event_codes'].append(event_codes)
+    else:
+        raise NotImplementedError('Only continuous files are supported')
+    info['unsegmented'] = unsegmented
+    info['dtype'], info['orig_format'] = {2: ('>i2', 'short'),
+                                          4: ('>f4', 'float'),
+                                          6: ('>f8', 'double')}[precision]
+    info['dtype'] = np.dtype(info['dtype'])
+    return info
+
+
+def _read_events(fid, info):
+    """Read events."""
+    events = np.zeros([info['n_events'],
+                       info['n_segments'] * info['n_samples']])
+    fid.seek(36 + info['n_events'] * 4, 0)  # skip header
+    for si in range(info['n_samples']):
+        # skip data channels
+        fid.seek(info['n_channels'] * info['dtype'].itemsize, 1)
+        # read event channels
+        events[:, si] = np.fromfile(fid, info['dtype'], info['n_events'])
+    return events
+
+
+@verbose
+def read_raw_egi(input_fname, eog=None, misc=None,
+                 include=None, exclude=None, preload=False,
+                 channel_naming='E%d', verbose=None):
+    """Read EGI simple binary as raw object.
+
+    .. note:: This function attempts to create a synthetic trigger channel.
+              See the Notes section below.
+
+    Parameters
+    ----------
+    input_fname : path-like
+        Path to the raw file. Files with an extension .mff are automatically
+        considered to be EGI's native MFF format files.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Default is None.
+    include : None | list
+        The event channels to be ignored when creating the synthetic
+        trigger. Defaults to None.
+        Note. Overrides ``exclude`` parameter.
+    exclude : None | list
+        The event channels to be ignored when creating the synthetic
+        trigger. Defaults to None. If None, channels that have more than
+        one event and the ``sync`` and ``TREV`` channels will be
+        ignored.
+    %(preload)s
+
+        .. versionadded:: 0.11
+    channel_naming : str
+        Channel naming convention for the data channels. Defaults to 'E%%d'
+        (resulting in channel names 'E1', 'E2', 'E3'...). The effective default
+        prior to 0.14.0 was 'EEG %%03d'.
+
+        .. versionadded:: 0.14.0
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawEGI
+        A Raw object containing EGI data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+
+    Notes
+    -----
+    The trigger channel names are based on the arbitrary, user-dependent event
+    codes used. However, this function will attempt to generate a **synthetic
+    trigger channel** named ``STI 014`` in accordance with the general
+    Neuromag / MNE naming pattern.
+
+    The event_id assignment equals ``np.arange(n_events) + 1``. The resulting
+    ``event_id`` mapping is stored as an attribute on the resulting raw object
+    but will be ignored when saving to a fiff. Note. The trigger channel is
+    artificially constructed based on timestamps received by the Netstation.
+    As a consequence, triggers have only short durations.
+
+    This step will fail if events are not mutually exclusive.
+    """
+    _validate_type(input_fname, 'path-like', 'input_fname')
+    input_fname = str(input_fname)
+    if input_fname.endswith('.mff'):
+        return _read_raw_egi_mff(input_fname, eog, misc, include,
+                                 exclude, preload, channel_naming, verbose)
+    return RawEGI(input_fname, eog, misc, include, exclude, preload,
+                  channel_naming, verbose)
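
For orientation, a minimal usage sketch of the reader defined above; the file path is a hypothetical stand-in, and .mff directories are dispatched to _read_raw_egi_mff automatically:

    import mne

    raw = mne.io.read_raw_egi('recording.raw', preload=True)
    print(raw.event_id)  # event-code -> integer mapping described in the Notes
    events = mne.find_events(raw, stim_channel='STI 014')
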
+
+
+class RawEGI(BaseRaw):
+    """Raw object from EGI simple binary file."""
+
+    @verbose
+    def __init__(self, input_fname, eog=None, misc=None,
+                 include=None, exclude=None, preload=False,
+                 channel_naming='E%d', verbose=None):  # noqa: D102
+        input_fname = _check_fname(input_fname, 'read', True, 'input_fname')
+        if eog is None:
+            eog = []
+        if misc is None:
+            misc = []
+        with open(input_fname, 'rb') as fid:  # 'rb' important for py3k
+            logger.info('Reading EGI header from %s...' % input_fname)
+            egi_info = _read_header(fid)
+            logger.info('    Reading events ...')
+            egi_events = _read_events(fid, egi_info)  # update info + jump
+            if egi_info['value_range'] != 0 and egi_info['bits'] != 0:
+                cal = egi_info['value_range'] / 2. ** egi_info['bits']
+            else:
+                cal = 1e-6
+
+        logger.info('    Assembling measurement info ...')
+
+        event_codes = []
+        if egi_info['n_events'] > 0:
+            event_codes = list(egi_info['event_codes'])
+            if include is None:
+                exclude_list = ['sync', 'TREV'] if exclude is None else exclude
+                exclude_inds = [i for i, k in enumerate(event_codes) if k in
+                                exclude_list]
+                more_excludes = []
+                if exclude is None:
+                    for ii, event in enumerate(egi_events):
+                        if event.sum() <= 1 and event_codes[ii]:
+                            more_excludes.append(ii)
+                if len(exclude_inds) + len(more_excludes) == len(event_codes):
+                    warn('Did not find any event code with more than one '
+                         'event.', RuntimeWarning)
+                else:
+                    exclude_inds.extend(more_excludes)
+
+                exclude_inds.sort()
+                include_ = [i for i in np.arange(egi_info['n_events']) if
+                            i not in exclude_inds]
+                include_names = [k for i, k in enumerate(event_codes)
+                                 if i in include_]
+            else:
+                include_ = [i for i, k in enumerate(event_codes)
+                            if k in include]
+                include_names = include
+
+            for kk, v in [('include', include_names), ('exclude', exclude)]:
+                if isinstance(v, list):
+                    for k in v:
+                        if k not in event_codes:
+                            raise ValueError('Could not find event named "%s"' % k)
+                elif v is not None:
+                    raise ValueError('`%s` must be None or of type list' % kk)
+
+            event_ids = np.arange(len(include_)) + 1
+            logger.info('    Synthesizing trigger channel "STI 014" ...')
+            logger.info('    Excluding events {%s} ...' 
% + ", ".join([k for i, k in enumerate(event_codes) + if i not in include_])) + egi_info['new_trigger'] = _combine_triggers( + egi_events[include_], remapping=event_ids) + self.event_id = dict(zip([e for e in event_codes if e in + include_names], event_ids)) + else: + # No events + self.event_id = None + egi_info['new_trigger'] = None + info = _empty_info(egi_info['samp_rate']) + my_time = datetime.datetime( + egi_info['year'], egi_info['month'], egi_info['day'], + egi_info['hour'], egi_info['minute'], egi_info['second']) + my_timestamp = time.mktime(my_time.timetuple()) + info['meas_date'] = (my_timestamp, 0) + ch_names = [channel_naming % (i + 1) for i in + range(egi_info['n_channels'])] + ch_names.extend(list(egi_info['event_codes'])) + if egi_info['new_trigger'] is not None: + ch_names.append('STI 014') # our new_trigger + nchan = len(ch_names) + cals = np.repeat(cal, nchan) + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) + sti_ch_idx = [i for i, name in enumerate(ch_names) if + name.startswith('STI') or name in event_codes] + for idx in sti_ch_idx: + chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, 'cal': 1., + 'kind': FIFF.FIFFV_STIM_CH, + 'coil_type': FIFF.FIFFV_COIL_NONE, + 'unit': FIFF.FIFF_UNIT_NONE}) + info['chs'] = chs + info._unlocked = False + info._update_redundant() + super(RawEGI, self).__init__( + info, preload, orig_format=egi_info['orig_format'], + filenames=[input_fname], last_samps=[egi_info['n_samples'] - 1], + raw_extras=[egi_info], verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + egi_info = self._raw_extras[fi] + dtype = egi_info['dtype'] + n_chan_read = egi_info['n_channels'] + egi_info['n_events'] + offset = 36 + egi_info['n_events'] * 4 + trigger_ch = egi_info['new_trigger'] + _read_segments_file(self, data, idx, fi, start, stop, cals, mult, + dtype=dtype, n_channels=n_chan_read, offset=offset, + trigger_ch=trigger_ch) diff --git a/python/libs/mne/io/egi/egimff.py b/python/libs/mne/io/egi/egimff.py new file mode 100644 index 0000000..ea5f3d7 --- /dev/null +++ b/python/libs/mne/io/egi/egimff.py @@ -0,0 +1,949 @@ +"""EGI NetStation Load Function.""" + +from collections import OrderedDict +import datetime +import math +import os.path as op +import re +from xml.dom.minidom import parse + +import numpy as np + +from .events import _read_events, _combine_triggers +from .general import (_get_signalfname, _get_ep_info, _extract, _get_blocks, + _get_gains, _block_r) +from ..base import BaseRaw +from ..constants import FIFF +from ..meas_info import _empty_info, create_info, _ensure_meas_date_none_or_dt +from ..proj import setup_proj +from ..utils import _create_chs, _mult_cal_one +from ...annotations import Annotations +from ...utils import verbose, logger, warn, _check_option, _check_fname +from ...evoked import EvokedArray + + +def _read_mff_header(filepath): + """Read mff header.""" + all_files = _get_signalfname(filepath) + eeg_file = all_files['EEG']['signal'] + eeg_info_file = all_files['EEG']['info'] + + info_filepath = op.join(filepath, 'info.xml') # add with filepath + tags = ['mffVersion', 'recordTime'] + version_and_date = _extract(tags, filepath=info_filepath) + version = "" + if len(version_and_date['mffVersion']): + version = version_and_date['mffVersion'][0] + + fname = op.join(filepath, eeg_file) + signal_blocks = _get_blocks(fname) + epochs = _get_ep_info(filepath) + summaryinfo = 
dict(eeg_fname=eeg_file, + info_fname=eeg_info_file) + summaryinfo.update(signal_blocks) + # sanity check and update relevant values + record_time = version_and_date['recordTime'][0] + # e.g., + # 2018-07-30T10:47:01.021673-04:00 + # 2017-09-20T09:55:44.072000000+01:00 + g = re.match( + r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}', # noqa: E501 + record_time) + if g is None: + raise RuntimeError('Could not parse recordTime %r' % (record_time,)) + frac = g.groups()[0] + assert len(frac) in (6, 9) and all(f.isnumeric() for f in frac) # regex + div = 1000 if len(frac) == 6 else 1000000 + for key in ('last_samps', 'first_samps'): + # convert from times in µS to samples + for ei, e in enumerate(epochs[key]): + if e % div != 0: + raise RuntimeError('Could not parse epoch time %s' % (e,)) + epochs[key][ei] = e // div + epochs[key] = np.array(epochs[key], np.uint64) + # I guess they refer to times in milliseconds? + # What we really need to do here is: + # epochs[key] *= signal_blocks['sfreq'] + # epochs[key] //= 1000 + # But that multiplication risks an overflow, so let's only multiply + # by what we need to (e.g., a sample rate of 500 means we can multiply + # by 1 and divide by 2 rather than multiplying by 500 and dividing by + # 1000) + numerator = signal_blocks['sfreq'] + denominator = 1000 + this_gcd = math.gcd(numerator, denominator) + numerator = numerator // this_gcd + denominator = denominator // this_gcd + with np.errstate(over='raise'): + epochs[key] *= numerator + epochs[key] //= denominator + # Should be safe to cast to int now, which makes things later not + # upbroadcast to float + epochs[key] = epochs[key].astype(np.int64) + n_samps_block = signal_blocks['samples_block'].sum() + n_samps_epochs = (epochs['last_samps'] - epochs['first_samps']).sum() + bad = (n_samps_epochs != n_samps_block or + not (epochs['first_samps'] < epochs['last_samps']).all() or + not (epochs['first_samps'][1:] >= epochs['last_samps'][:-1]).all()) + if bad: + raise RuntimeError('EGI epoch first/last samps could not be parsed:\n' + '%s\n%s' % (list(epochs['first_samps']), + list(epochs['last_samps']))) + summaryinfo.update(epochs) + # index which samples in raw are actually readable from disk (i.e., not + # in a skip) + disk_samps = np.full(epochs['last_samps'][-1], -1) + offset = 0 + for first, last in zip(epochs['first_samps'], epochs['last_samps']): + n_this = last - first + disk_samps[first:last] = np.arange(offset, offset + n_this) + offset += n_this + summaryinfo['disk_samps'] = disk_samps + + # Add the sensor info. 
+ sensor_layout_file = op.join(filepath, 'sensorLayout.xml') + sensor_layout_obj = parse(sensor_layout_file) + summaryinfo['device'] = (sensor_layout_obj.getElementsByTagName('name') + [0].firstChild.data) + sensors = sensor_layout_obj.getElementsByTagName('sensor') + chan_type = list() + chan_unit = list() + n_chans = 0 + numbers = list() # used for identification + for sensor in sensors: + sensortype = int(sensor.getElementsByTagName('type')[0] + .firstChild.data) + if sensortype in [0, 1]: + sn = sensor.getElementsByTagName('number')[0].firstChild.data + sn = sn.encode() + numbers.append(sn) + chan_type.append('eeg') + chan_unit.append('uV') + n_chans = n_chans + 1 + if n_chans != summaryinfo['n_channels']: + raise RuntimeError('Number of defined channels (%d) did not match the ' + 'expected channels (%d)' + % (n_chans, summaryinfo['n_channels'])) + + # Check presence of PNS data + pns_names = [] + if 'PNS' in all_files: + pns_fpath = op.join(filepath, all_files['PNS']['signal']) + pns_blocks = _get_blocks(pns_fpath) + pns_samples = pns_blocks['samples_block'] + signal_samples = signal_blocks['samples_block'] + same_blocks = (np.array_equal(pns_samples[:-1], + signal_samples[:-1]) and + pns_samples[-1] in (signal_samples[-1] - np.arange(2))) + if not same_blocks: + raise RuntimeError('PNS and signals samples did not match:\n' + '%s\nvs\n%s' + % (list(pns_samples), list(signal_samples))) + + pns_file = op.join(filepath, 'pnsSet.xml') + pns_obj = parse(pns_file) + sensors = pns_obj.getElementsByTagName('sensor') + pns_types = [] + pns_units = [] + for sensor in sensors: + # sensor number: + # sensor.getElementsByTagName('number')[0].firstChild.data + name = sensor.getElementsByTagName('name')[0].firstChild.data + unit_elem = sensor.getElementsByTagName('unit')[0].firstChild + unit = '' + if unit_elem is not None: + unit = unit_elem.data + + if name == 'ECG': + ch_type = 'ecg' + elif 'EMG' in name: + ch_type = 'emg' + else: + ch_type = 'bio' + pns_types.append(ch_type) + pns_units.append(unit) + pns_names.append(name) + + summaryinfo.update(pns_types=pns_types, pns_units=pns_units, + pns_fname=all_files['PNS']['signal'], + pns_sample_blocks=pns_blocks) + summaryinfo.update(pns_names=pns_names, version=version, + date=version_and_date['recordTime'][0], + chan_type=chan_type, chan_unit=chan_unit, + numbers=numbers) + + return summaryinfo + + +class _FixedOffset(datetime.tzinfo): + """Fixed offset in minutes east from UTC. + + Adapted from the official Python documentation. + """ + + def __init__(self, offset): + self._offset = datetime.timedelta(minutes=offset) + + def utcoffset(self, dt): + return self._offset + + def tzname(self, dt): + return 'MFF' + + def dst(self, dt): + return datetime.timedelta(0) + + +def _read_header(input_fname): + """Obtain the headers from the file package mff. + + Parameters + ---------- + input_fname : str + Path for the file + + Returns + ------- + info : dict + Main headers set. + """ + mff_hdr = _read_mff_header(input_fname) + with open(input_fname + '/signal1.bin', 'rb') as fid: + version = np.fromfile(fid, np.int32, 1)[0] + ''' + the datetime.strptime .f directive (milleseconds) + will only accept up to 6 digits. if there are more than + six millesecond digits in the provided timestamp string + (i.e. 
because of trailing zeros, as in test_egi_pns.mff) + then slice both the first 26 elements and the last 6 + elements of the timestamp string to truncate the + milleseconds to 6 digits and extract the timezone, + and then piece these together and assign back to mff_hdr['date'] + ''' + if len(mff_hdr['date']) > 32: + dt, tz = [mff_hdr['date'][:26], mff_hdr['date'][-6:]] + mff_hdr['date'] = dt + tz + + time_n = (datetime.datetime.strptime( + mff_hdr['date'], '%Y-%m-%dT%H:%M:%S.%f%z')) + + info = dict( + version=version, + meas_dt_local=time_n, + utc_offset=time_n.strftime('%z'), + gain=0, + bits=0, + value_range=0) + info.update(n_categories=0, n_segments=1, n_events=0, event_codes=[], + category_names=[], category_lengths=[], pre_baseline=0) + info.update(mff_hdr) + return info + + +def _get_eeg_calibration_info(filepath, egi_info): + """Calculate calibration info for EEG channels.""" + gains = _get_gains(op.join(filepath, egi_info['info_fname'])) + if egi_info['value_range'] != 0 and egi_info['bits'] != 0: + cals = [egi_info['value_range'] / 2 ** egi_info['bits']] * \ + len(egi_info['chan_type']) + else: + cal_scales = {'uV': 1e-6, 'V': 1} + cals = [cal_scales[t] for t in egi_info['chan_unit']] + if 'gcal' in gains: + cals *= gains['gcal'] + return cals + + +def _read_locs(filepath, chs, egi_info): + """Read channel locations.""" + from ...channels.montage import make_dig_montage + fname = op.join(filepath, 'coordinates.xml') + if not op.exists(fname): + return chs, None + reference_names = ('VREF', 'Vertex Reference') + dig_ident_map = { + 'Left periauricular point': 'lpa', + 'Right periauricular point': 'rpa', + 'Nasion': 'nasion', + } + numbers = np.array(egi_info['numbers']) + coordinates = parse(fname) + sensors = coordinates.getElementsByTagName('sensor') + ch_pos = OrderedDict() + hsp = list() + nlr = dict() + for sensor in sensors: + name_element = sensor.getElementsByTagName('name')[0].firstChild + name = '' if name_element is None else name_element.data + nr = sensor.getElementsByTagName('number')[0].firstChild.data.encode() + coords = [float(sensor.getElementsByTagName(coord)[0].firstChild.data) + for coord in 'xyz'] + loc = np.array(coords) / 100 # cm -> m + # create dig entry + if name in dig_ident_map: + nlr[dig_ident_map[name]] = loc + else: + if name in reference_names: + ch_pos['EEG000'] = loc + # add location to channel entry + id_ = np.flatnonzero(numbers == nr) + if len(id_) == 0: + hsp.append(loc) + else: + ch_pos[chs[id_[0]]['ch_name']] = loc + mon = make_dig_montage(ch_pos=ch_pos, hsp=hsp, **nlr) + return chs, mon + + +def _add_pns_channel_info(chs, egi_info, ch_names): + """Add info for PNS channels to channel info dict.""" + for i_ch, ch_name in enumerate(egi_info['pns_names']): + idx = ch_names.index(ch_name) + ch_type = egi_info['pns_types'][i_ch] + type_to_kind_map = {'ecg': FIFF.FIFFV_ECG_CH, + 'emg': FIFF.FIFFV_EMG_CH + } + ch_kind = type_to_kind_map.get(ch_type, FIFF.FIFFV_BIO_CH) + ch_unit = FIFF.FIFF_UNIT_V + ch_cal = 1e-6 + if egi_info['pns_units'][i_ch] != 'uV': + ch_unit = FIFF.FIFF_UNIT_NONE + ch_cal = 1.0 + chs[idx].update( + cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, + unit=ch_unit) + return chs + + +@verbose +def _read_raw_egi_mff(input_fname, eog=None, misc=None, + include=None, exclude=None, preload=False, + channel_naming='E%d', verbose=None): + """Read EGI mff binary as raw object. + + .. note:: This function attempts to create a synthetic trigger channel. + See notes below. 
+ + Parameters + ---------- + input_fname : str + Path to the raw file. + eog : list or tuple + Names of channels or list of indices that should be designated + EOG channels. Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated + MISC channels. Default is None. + include : None | list + The event channels to be ignored when creating the synthetic + trigger. Defaults to None. + Note. Overrides `exclude` parameter. + exclude : None | list + The event channels to be ignored when creating the synthetic + trigger. Defaults to None. If None, channels that have more than + one event and the ``sync`` and ``TREV`` channels will be + ignored. + %(preload)s + channel_naming : str + Channel naming convention for the data channels. Defaults to 'E%%d' + (resulting in channel names 'E1', 'E2', 'E3'...). The effective default + prior to 0.14.0 was 'EEG %%03d'. + %(verbose)s + + Returns + ------- + raw : instance of RawMff + A Raw object containing EGI mff data. + + Notes + ----- + The trigger channel names are based on the arbitrary user dependent event + codes used. However this function will attempt to generate a synthetic + trigger channel named ``STI 014`` in accordance with the general + Neuromag / MNE naming pattern. + + The event_id assignment equals ``np.arange(n_events) + 1``. The resulting + ``event_id`` mapping is stored as attribute to the resulting raw object but + will be ignored when saving to a fiff. Note. The trigger channel is + artificially constructed based on timestamps received by the Netstation. + As a consequence, triggers have only short durations. + + This step will fail if events are not mutually exclusive. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + .. versionadded:: 0.15.0 + """ + return RawMff(input_fname, eog, misc, include, exclude, + preload, channel_naming, verbose) + + +class RawMff(BaseRaw): + """RawMff class.""" + + @verbose + def __init__(self, input_fname, eog=None, misc=None, + include=None, exclude=None, preload=False, + channel_naming='E%d', verbose=None): + """Init the RawMff class.""" + input_fname = _check_fname(input_fname, 'read', True, 'input_fname', + need_dir=True) + logger.info('Reading EGI MFF Header from %s...' 
% input_fname) + egi_info = _read_header(input_fname) + if eog is None: + eog = [] + if misc is None: + misc = np.where(np.array( + egi_info['chan_type']) != 'eeg')[0].tolist() + + logger.info(' Reading events ...') + egi_events, egi_info = _read_events(input_fname, egi_info) + cals = _get_eeg_calibration_info(input_fname, egi_info) + logger.info(' Assembling measurement info ...') + if egi_info['n_events'] > 0: + event_codes = list(egi_info['event_codes']) + if include is None: + exclude_list = ['sync', 'TREV'] if exclude is None else exclude + exclude_inds = [i for i, k in enumerate(event_codes) if k in + exclude_list] + more_excludes = [] + if exclude is None: + for ii, event in enumerate(egi_events): + if event.sum() <= 1 and event_codes[ii]: + more_excludes.append(ii) + if len(exclude_inds) + len(more_excludes) == len(event_codes): + warn('Did not find any event code with more than one ' + 'event.', RuntimeWarning) + else: + exclude_inds.extend(more_excludes) + + exclude_inds.sort() + include_ = [i for i in np.arange(egi_info['n_events']) if + i not in exclude_inds] + include_names = [k for i, k in enumerate(event_codes) + if i in include_] + else: + include_ = [i for i, k in enumerate(event_codes) + if k in include] + include_names = include + + for kk, v in [('include', include_names), ('exclude', exclude)]: + if isinstance(v, list): + for k in v: + if k not in event_codes: + raise ValueError( + f'Could not find event named {repr(k)}') + elif v is not None: + raise ValueError('`%s` must be None or of type list' % kk) + logger.info(' Synthesizing trigger channel "STI 014" ...') + logger.info(' Excluding events {%s} ...' % + ", ".join([k for i, k in enumerate(event_codes) + if i not in include_])) + events_ids = np.arange(len(include_)) + 1 + egi_info['new_trigger'] = _combine_triggers( + egi_events[include_], remapping=events_ids) + self.event_id = dict(zip([e for e in event_codes if e in + include_names], events_ids)) + if egi_info['new_trigger'] is not None: + egi_events = np.vstack([egi_events, egi_info['new_trigger']]) + assert egi_events.shape[1] == egi_info['last_samps'][-1] + else: + # No events + self.event_id = None + egi_info['new_trigger'] = None + event_codes = [] + + meas_dt_utc = (egi_info['meas_dt_local'] + .astimezone(datetime.timezone.utc)) + info = _empty_info(egi_info['sfreq']) + info['meas_date'] = _ensure_meas_date_none_or_dt(meas_dt_utc) + info['utc_offset'] = egi_info['utc_offset'] + info['device_info'] = dict(type=egi_info['device']) + + # First: EEG + ch_names = [channel_naming % (i + 1) for i in + range(egi_info['n_channels'])] + + # Second: Stim + ch_names.extend(list(egi_info['event_codes'])) + if egi_info['new_trigger'] is not None: + ch_names.append('STI 014') # channel for combined events + cals = np.concatenate( + [cals, np.repeat(1, len(event_codes) + 1 + len(misc) + len(eog))]) + + # Third: PNS + ch_names.extend(egi_info['pns_names']) + cals = np.concatenate( + [cals, np.repeat(1, len(egi_info['pns_names']))]) + + # Actually create channels as EEG, then update stim and PNS + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) + chs, mon = _read_locs(input_fname, chs, egi_info) + sti_ch_idx = [i for i, name in enumerate(ch_names) if + name.startswith('STI') or name in event_codes] + for idx in sti_ch_idx: + chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, + 'cal': cals[idx], + 'kind': FIFF.FIFFV_STIM_CH, + 'coil_type': FIFF.FIFFV_COIL_NONE, + 'unit': 
FIFF.FIFF_UNIT_NONE})
+        chs = _add_pns_channel_info(chs, egi_info, ch_names)
+        info['chs'] = chs
+        info._unlocked = False
+        info._update_redundant()
+        if mon is not None:
+            info.set_montage(mon, on_missing='ignore')
+        file_bin = op.join(input_fname, egi_info['eeg_fname'])
+        egi_info['egi_events'] = egi_events
+
+        # Check how many channels to read are from EEG
+        keys = ('eeg', 'sti', 'pns')
+        idx = dict()
+        idx['eeg'] = np.where(
+            [ch['kind'] == FIFF.FIFFV_EEG_CH for ch in chs])[0]
+        idx['sti'] = np.where(
+            [ch['kind'] == FIFF.FIFFV_STIM_CH for ch in chs])[0]
+        idx['pns'] = np.where(
+            [ch['kind'] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH,
+                            FIFF.FIFFV_BIO_CH) for ch in chs])[0]
+        # By construction this should always be true, but check anyway
+        if not np.array_equal(
+                np.concatenate([idx[key] for key in keys]),
+                np.arange(len(chs))):
+            raise ValueError('Currently interlacing EEG and PNS channels '
+                             'is not supported')
+        egi_info['kind_bounds'] = [0]
+        for key in keys:
+            egi_info['kind_bounds'].append(len(idx[key]))
+        egi_info['kind_bounds'] = np.cumsum(egi_info['kind_bounds'])
+        assert egi_info['kind_bounds'][0] == 0
+        assert egi_info['kind_bounds'][-1] == info['nchan']
+        first_samps = [0]
+        last_samps = [egi_info['last_samps'][-1] - 1]
+
+        annot = dict(onset=list(), duration=list(), description=list())
+        if len(idx['pns']):
+            # PNS Data is present and should be read:
+            egi_info['pns_filepath'] = op.join(
+                input_fname, egi_info['pns_fname'])
+            # Check for PNS bug immediately
+            pns_samples = np.sum(
+                egi_info['pns_sample_blocks']['samples_block'])
+            eeg_samples = np.sum(egi_info['samples_block'])
+            if pns_samples == eeg_samples - 1:
+                warn('This file has the EGI PSG sample bug')
+                annot['onset'].append(last_samps[-1] / egi_info['sfreq'])
+                annot['duration'].append(1 / egi_info['sfreq'])
+                annot['description'].append('BAD_EGI_PSG')
+            elif pns_samples != eeg_samples:
+                raise RuntimeError(
+                    'PNS samples (%d) did not match EEG samples (%d)'
+                    % (pns_samples, eeg_samples))
+
+        self._filenames = [file_bin]
+        self._raw_extras = [egi_info]
+
+        super(RawMff, self).__init__(
+            info, preload=preload, orig_format='float', filenames=[file_bin],
+            first_samps=first_samps, last_samps=last_samps,
+            raw_extras=[egi_info], verbose=verbose)
+
+        # Annotate acquisition skips
+        for first, prev_last in zip(egi_info['first_samps'][1:],
+                                    egi_info['last_samps'][:-1]):
+            gap = first - prev_last
+            assert gap >= 0
+            if gap:
+                annot['onset'].append((prev_last - 0.5) / egi_info['sfreq'])
+                annot['duration'].append(gap / egi_info['sfreq'])
+                annot['description'].append('BAD_ACQ_SKIP')
+
+        if len(annot['onset']):
+            self.set_annotations(Annotations(**annot, orig_time=None))
+
+    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
+        """Read a chunk of data."""
+        logger.debug(f'Reading MFF {start:6d} ... {stop:6d} ...')
+        dtype = '<f4'  # data are stored as little-endian four-byte floats
+        egi_info = self._raw_extras[fi]
+        one = np.zeros((egi_info['kind_bounds'][-1], stop - start))
+
+        # info about the binary file structure
+        n_channels = egi_info['n_channels']
+        samples_block = egi_info['samples_block']
+
+        # Check how many channels of each kind need to be read
+        bounds = egi_info['kind_bounds']
+        idx = np.arange(one.shape[0])[idx]  # support slice or array picks
+        eeg_out = np.where(idx < bounds[1])[0]
+        eeg_one = idx[eeg_out, np.newaxis]
+        eeg_in = idx[eeg_out]
+        stim_out = np.where((idx >= bounds[1]) & (idx < bounds[2]))[0]
+        stim_one = idx[stim_out]
+        stim_in = idx[stim_out] - bounds[1]
+        pns_out = np.where((idx >= bounds[2]) & (idx < bounds[3]))[0]
+        pns_in = idx[pns_out] - bounds[2]
+        pns_one = idx[pns_out, np.newaxis]
+        del eeg_out, stim_out, pns_out
+
+        # take into account events (already extended to correct size)
+        one[stim_one, :] = egi_info['egi_events'][stim_in, start:stop]
+
+        # Convert start and stop to limits in terms of the data
+        # actually on disk, plus an indexer (disk_use_idx) that populates
+        # the potentially larger `data` with it, taking skips into account
+        disk_samps = egi_info['disk_samps'][start:stop]
+        disk_use_idx = np.where(disk_samps > -1)[0]
+        # short circuit in case we don't need any samples
+        if not len(disk_use_idx):
+            _mult_cal_one(data, one, idx, cals, mult)
+            return
+
+        start = disk_samps[disk_use_idx[0]]
+        stop = disk_samps[disk_use_idx[-1]] + 1
+        assert len(disk_use_idx) == stop - start
+
+        # Get starting/stopping block/samples
+        block_samples_offset = np.cumsum(samples_block)
+        offset_blocks = np.sum(block_samples_offset <= start)
+        offset_samples = start - (block_samples_offset[offset_blocks - 1]
+                                  if offset_blocks > 0 else 0)
+
+        # TODO: Refactor this reading with the PNS reading in a single function
+        # (DRY)
+        samples_to_read = stop - start
+        with open(self._filenames[fi], 'rb', buffering=0) as fid:
+            # Go to starting block
+            current_block = 0
+            current_block_info = None
+            current_data_sample = 0
+            while current_block < offset_blocks:
+                this_block_info = _block_r(fid)
+                if this_block_info is not None:
+                    current_block_info = this_block_info
+                fid.seek(current_block_info['block_size'], 1)
+                current_block += 1
+
+            # Start reading samples
+            while samples_to_read > 0:
+                logger.debug(f'    Reading from block {current_block}')
+                this_block_info = _block_r(fid)
+                current_block += 1
+                if this_block_info is not None:
+                    current_block_info = this_block_info
+
+                to_read = (current_block_info['nsamples'] *
+                           current_block_info['nc'])
+                block_data = np.fromfile(fid, dtype, to_read)
+                block_data = block_data.reshape(n_channels, -1, order='C')
+
+                # Compute indexes
+                samples_read = block_data.shape[1]
+                logger.debug(f'    Read {samples_read} samples')
+                logger.debug(f'    Offset {offset_samples} samples')
+                if offset_samples > 0:
+                    # First block read, skip to the offset:
+                    block_data = block_data[:, offset_samples:]
+                    samples_read = samples_read - offset_samples
+                    offset_samples = 0
+                if samples_to_read < samples_read:
+                    # Last block to read, skip the last samples
+                    block_data = block_data[:, :samples_to_read]
+                    samples_read = samples_to_read
+                logger.debug(f'    Keep {samples_read} samples')
+
+                s_start = current_data_sample
+                s_end = s_start + samples_read
+
+                one[eeg_one, disk_use_idx[s_start:s_end]] = block_data[eeg_in]
+                samples_to_read = samples_to_read - samples_read
+                current_data_sample = current_data_sample + samples_read
+
+        if len(pns_one) > 0:
+            # PNS Data is present and should be read:
+            pns_filepath = egi_info['pns_filepath']
+            pns_info = egi_info['pns_sample_blocks']
+            n_channels = pns_info['n_channels']
+            samples_block = pns_info['samples_block']
+
+            # Get starting/stopping block/samples
+            block_samples_offset = np.cumsum(samples_block)
+            offset_blocks = np.sum(block_samples_offset < start)
+            offset_samples = start - (block_samples_offset[offset_blocks - 1]
+                                      if offset_blocks > 0 else 0)
+
+            samples_to_read = stop - start
+            with open(pns_filepath, 'rb', buffering=0) as fid:
+                # Check file size
+                fid.seek(0, 2)
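+                # tell() after seeking to the end gives the size on disk; it
+                # is compared against the read position below to detect the
+                # one-sample-short EGI PSG recording bug.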
file_size = fid.tell() + fid.seek(0) + # Go to starting block + current_block = 0 + current_block_info = None + current_data_sample = 0 + while current_block < offset_blocks: + this_block_info = _block_r(fid) + if this_block_info is not None: + current_block_info = this_block_info + fid.seek(current_block_info['block_size'], 1) + current_block += 1 + + # Start reading samples + while samples_to_read > 0: + if samples_to_read == 1 and fid.tell() == file_size: + # We are in the presence of the EEG bug + # fill with zeros and break the loop + one[pns_one, -1] = 0 + break + + this_block_info = _block_r(fid) + if this_block_info is not None: + current_block_info = this_block_info + + to_read = (current_block_info['nsamples'] * + current_block_info['nc']) + block_data = np.fromfile(fid, dtype, to_read) + block_data = block_data.reshape(n_channels, -1, order='C') + + # Compute indexes + samples_read = block_data.shape[1] + if offset_samples > 0: + # First block read, skip to the offset: + block_data = block_data[:, offset_samples:] + samples_read = samples_read - offset_samples + offset_samples = 0 + + if samples_to_read < samples_read: + # Last block to read, skip the last samples + block_data = block_data[:, :samples_to_read] + samples_read = samples_to_read + + s_start = current_data_sample + s_end = s_start + samples_read + + one[pns_one, disk_use_idx[s_start:s_end]] = \ + block_data[pns_in] + samples_to_read = samples_to_read - samples_read + current_data_sample = current_data_sample + samples_read + + # do the calibration + _mult_cal_one(data, one, idx, cals, mult) + + +@verbose +def read_evokeds_mff(fname, condition=None, channel_naming='E%d', + baseline=None, verbose=None): + """Read averaged MFF file as EvokedArray or list of EvokedArray. + + Parameters + ---------- + fname : str + File path to averaged MFF file. Should end in .mff. + condition : int or str | list of int or str | None + The index (indices) or category (categories) from which to read in + data. Averaged MFF files can contain separate averages for different + categories. These can be indexed by the block number or the category + name. If ``condition`` is a list or None, a list of EvokedArray objects + is returned. + channel_naming : str + Channel naming convention for EEG channels. Defaults to 'E%%d' + (resulting in channel names 'E1', 'E2', 'E3'...). + baseline : None (default) or tuple of length 2 + The time interval to apply baseline correction. If None do not apply + it. If baseline is (a, b) the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used and if b is None then b + is set to the end of the interval. If baseline is equal to (None, None) + all the time interval is used. Correction is applied by computing mean + of the baseline period and subtracting it from the data. The baseline + (a, b) includes both endpoints, i.e. all timepoints t such that + a <= t <= b. + %(verbose)s + + Returns + ------- + evoked : EvokedArray or list of EvokedArray + The evoked dataset(s); one EvokedArray if condition is int or str, + or list of EvokedArray if condition is None or list. + + Raises + ------ + ValueError + If ``fname`` has file extension other than '.mff'. + ValueError + If the MFF file specified by ``fname`` is not averaged. + ValueError + If no categories.xml file in MFF directory specified by ``fname``. + + See Also + -------- + Evoked, EvokedArray, create_info + + Notes + ----- + .. 
versionadded:: 0.22 + """ + mffpy = _import_mffpy() + # Confirm `fname` is a path to an MFF file + if not fname.endswith('.mff'): + raise ValueError('fname must be an MFF file with extension ".mff".') + # Confirm the input MFF is averaged + mff = mffpy.Reader(fname) + try: + flavor = mff.mff_flavor + except AttributeError: # < 6.3 + flavor = mff.flavor + if flavor not in ('averaged', 'segmented'): # old, new names + raise ValueError(f'{fname} is a {flavor} MFF file. ' + 'fname must be the path to an averaged MFF file.') + # Check for categories.xml file + if 'categories.xml' not in mff.directory.listdir(): + raise ValueError('categories.xml not found in MFF directory. ' + f'{fname} may not be an averaged MFF file.') + return_list = True + if condition is None: + categories = mff.categories.categories + condition = list(categories.keys()) + elif not isinstance(condition, list): + condition = [condition] + return_list = False + logger.info(f'Reading {len(condition)} evoked datasets from {fname} ...') + output = [_read_evoked_mff(fname, c, channel_naming=channel_naming, + verbose=verbose).apply_baseline(baseline) + for c in condition] + return output if return_list else output[0] + + +def _read_evoked_mff(fname, condition, channel_naming='E%d', verbose=None): + """Read evoked data from MFF file.""" + import mffpy + egi_info = _read_header(fname) + mff = mffpy.Reader(fname) + categories = mff.categories.categories + + if isinstance(condition, str): + # Condition is interpreted as category name + category = _check_option('condition', condition, categories, + extra='provided as category name') + epoch = mff.epochs[category] + elif isinstance(condition, int): + # Condition is interpreted as epoch index + try: + epoch = mff.epochs[condition] + except IndexError: + raise ValueError(f'"condition" parameter ({condition}), provided ' + 'as epoch index, is out of range for available ' + f'epochs ({len(mff.epochs)}).') + category = epoch.name + else: + raise TypeError('"condition" parameter must be either int or str.') + + # Read in signals from the target epoch + data = mff.get_physical_samples_from_epoch(epoch) + eeg_data, t0 = data['EEG'] + if 'PNSData' in data: + pns_data, t0 = data['PNSData'] + all_data = np.vstack((eeg_data, pns_data)) + ch_types = egi_info['chan_type'] + egi_info['pns_types'] + else: + all_data = eeg_data + ch_types = egi_info['chan_type'] + all_data *= 1e-6 # convert to volts + + # Load metadata into info object + # Exclude info['meas_date'] because record time info in + # averaged MFF is the time of the averaging, not true record time. 
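+    # For reference, a typical call to the public reader above (the path is
+    # hypothetical and mffpy must be installed):
+    #
+    #     evokeds = read_evokeds_mff('/path/to/averaged.mff',
+    #                                baseline=(None, 0))
+    #     evoked = read_evokeds_mff('/path/to/averaged.mff',
+    #                               condition='Category 1')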
+ ch_names = [channel_naming % (i + 1) for i in + range(mff.num_channels['EEG'])] + ch_names.extend(egi_info['pns_names']) + info = create_info(ch_names, mff.sampling_rates['EEG'], ch_types) + with info._unlock(): + info['device_info'] = dict(type=egi_info['device']) + info['nchan'] = sum(mff.num_channels.values()) + + # Add individual channel info + # Get calibration info for EEG channels + cals = _get_eeg_calibration_info(fname, egi_info) + # Initialize calibration for PNS channels, will be updated later + cals = np.concatenate([cals, np.repeat(1, len(egi_info['pns_names']))]) + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ()) + chs, mon = _read_locs(fname, chs, egi_info) + # Update PNS channel info + chs = _add_pns_channel_info(chs, egi_info, ch_names) + with info._unlock(): + info['chs'] = chs + if mon is not None: + info.set_montage(mon, on_missing='ignore') + + # Add bad channels to info + info['description'] = category + try: + channel_status = categories[category][0]['channelStatus'] + except KeyError: + warn(f'Channel status data not found for condition {category}. ' + 'No channels will be marked as bad.', category=UserWarning) + channel_status = None + bads = [] + if channel_status: + for entry in channel_status: + if entry['exclusion'] == 'badChannels': + if entry['signalBin'] == 1: + # Add bad EEG channels + for ch in entry['channels']: + bads.append(channel_naming % ch) + elif entry['signalBin'] == 2: + # Add bad PNS channels + for ch in entry['channels']: + bads.append(egi_info['pns_names'][ch - 1]) + info['bads'] = bads + + # Add EEG reference to info + # Initialize 'custom_ref_applied' to False + with info._unlock(): + info['custom_ref_applied'] = False + try: + fp = mff.directory.filepointer('history') + except (ValueError, FileNotFoundError): # old (<=0.6.3) vs new mffpy + pass + else: + with fp: + history = mffpy.XML.from_file(fp) + for entry in history.entries: + if entry['method'] == 'Montage Operations Tool': + if 'Average Reference' in entry['settings']: + # Average reference has been applied + projector, info = setup_proj(info) + else: + # Custom reference has been applied that is not an average + info['custom_ref_applied'] = True + + # Get nave from categories.xml + try: + nave = categories[category][0]['keys']['#seg']['data'] + except KeyError: + warn(f'Number of averaged epochs not found for condition {category}. ' + 'nave will default to 1.', category=UserWarning) + nave = 1 + + # Let tmin default to 0 + return EvokedArray(all_data, info, tmin=0., comment=category, + nave=nave, verbose=verbose) + + +def _import_mffpy(why='read averaged .mff files'): + """Import and return module mffpy.""" + try: + import mffpy + except ImportError as exp: + msg = f'mffpy is required to {why}, got:\n{exp}' + raise ImportError(msg) + + return mffpy diff --git a/python/libs/mne/io/egi/events.py b/python/libs/mne/io/egi/events.py new file mode 100644 index 0000000..e2fffd3 --- /dev/null +++ b/python/libs/mne/io/egi/events.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# +# License: BSD-3-Clause + +from datetime import datetime +from glob import glob +from os.path import basename, join, splitext +from xml.etree.ElementTree import parse + +import numpy as np + +from ...utils import logger + + +def _read_events(input_fname, info): + """Read events for the record. + + Parameters + ---------- + input_fname : str + The file path. + info : dict + Header info array. 
+ """ + n_samples = info['last_samps'][-1] + mff_events, event_codes = _read_mff_events(input_fname, info['sfreq']) + info['n_events'] = len(event_codes) + info['event_codes'] = event_codes + events = np.zeros([info['n_events'], info['n_segments'] * n_samples]) + for n, event in enumerate(event_codes): + for i in mff_events[event]: + events[n][i] = n + 1 + return events, info + + +def _read_mff_events(filename, sfreq): + """Extract the events. + + Parameters + ---------- + filename : str + File path. + sfreq : float + The sampling frequency + """ + orig = {} + for xml_file in glob(join(filename, '*.xml')): + xml_type = splitext(basename(xml_file))[0] + orig[xml_type] = _parse_xml(xml_file) + xml_files = orig.keys() + xml_events = [x for x in xml_files if x[:7] == 'Events_'] + for item in orig['info']: + if 'recordTime' in item: + start_time = _ns2py_time(item['recordTime']) + break + markers = [] + code = [] + for xml in xml_events: + for event in orig[xml][2:]: + event_start = _ns2py_time(event['beginTime']) + start = (event_start - start_time).total_seconds() + if event['code'] not in code: + code.append(event['code']) + marker = {'name': event['code'], + 'start': start, + 'start_sample': int(np.fix(start * sfreq)), + 'end': start + float(event['duration']) / 1e9, + 'chan': None, + } + markers.append(marker) + events_tims = dict() + for ev in code: + trig_samp = list(c['start_sample'] for n, + c in enumerate(markers) if c['name'] == ev) + events_tims.update({ev: trig_samp}) + return events_tims, code + + +def _parse_xml(xml_file): + """Parse XML file.""" + xml = parse(xml_file) + root = xml.getroot() + return _xml2list(root) + + +def _xml2list(root): + """Parse XML item.""" + output = [] + for element in root: + + if len(element) > 0: + if element[0].tag != element[-1].tag: + output.append(_xml2dict(element)) + else: + output.append(_xml2list(element)) + + elif element.text: + text = element.text.strip() + if text: + tag = _ns(element.tag) + output.append({tag: text}) + + return output + + +def _ns(s): + """Remove namespace, but only if there is a namespace to begin with.""" + if '}' in s: + return '}'.join(s.split('}')[1:]) + else: + return s + + +def _xml2dict(root): + """Use functions instead of Class. + + remove namespace based on + http://stackoverflow.com/questions/2148119 + """ + output = {} + if root.items(): + output.update(dict(root.items())) + + for element in root: + if len(element) > 0: + if len(element) == 1 or element[0].tag != element[1].tag: + one_dict = _xml2dict(element) + else: + one_dict = {_ns(element[0].tag): _xml2list(element)} + + if element.items(): + one_dict.update(dict(element.items())) + output.update({_ns(element.tag): one_dict}) + + elif element.items(): + output.update({_ns(element.tag): dict(element.items())}) + + else: + output.update({_ns(element.tag): element.text}) + return output + + +def _ns2py_time(nstime): + """Parse times.""" + nsdate = nstime[0:10] + nstime0 = nstime[11:26] + nstime00 = nsdate + " " + nstime0 + pytime = datetime.strptime(nstime00, '%Y-%m-%d %H:%M:%S.%f') + return pytime + + +def _combine_triggers(data, remapping=None): + """Combine binary triggers.""" + new_trigger = np.zeros(data.shape[1]) + if data.astype(bool).sum(axis=0).max() > 1: # ensure no overlaps + logger.info(' Found multiple events at the same time ' + 'sample. 
Cannot create trigger channel.')
+        return
+    if remapping is None:
+        remapping = np.arange(len(data)) + 1
+    for d, event_id in zip(data, remapping):
+        idx = d.nonzero()
+        if np.any(idx):
+            new_trigger[idx] += event_id
+    return new_trigger
diff --git a/python/libs/mne/io/egi/general.py b/python/libs/mne/io/egi/general.py
new file mode 100644
index 0000000..b8212ed
--- /dev/null
+++ b/python/libs/mne/io/egi/general.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+#
+# License: BSD-3-Clause
+
+import os
+from xml.dom.minidom import parse
+import re
+
+import numpy as np
+
+from ...utils import _pl
+
+
+def _extract(tags, filepath=None, obj=None):
+    """Extract info from XML."""
+    if obj is not None:
+        fileobj = obj
+    elif filepath is not None:
+        fileobj = parse(filepath)
+    else:
+        raise ValueError('There is no object or file to extract data from')
+    infoxml = dict()
+    for tag in tags:
+        value = fileobj.getElementsByTagName(tag)
+        infoxml[tag] = []
+        for i in range(len(value)):
+            infoxml[tag].append(value[i].firstChild.data)
+    return infoxml
+
+
+def _get_gains(filepath):
+    """Parse gains."""
+    file_obj = parse(filepath)
+    objects = file_obj.getElementsByTagName('calibration')
+    gains = dict()
+    for ob in objects:
+        value = ob.getElementsByTagName('type')
+        if value[0].firstChild.data == 'GCAL':
+            data_g = _extract(['ch'], obj=ob)['ch']
+            gains.update(gcal=np.asarray(data_g, dtype=np.float64))
+        elif value[0].firstChild.data == 'ICAL':
+            data_g = _extract(['ch'], obj=ob)['ch']
+            gains.update(ical=np.asarray(data_g, dtype=np.float64))
+    return gains
+
+
+def _get_ep_info(filepath):
+    """Get epoch info."""
+    epochfile = filepath + '/epochs.xml'
+    epochlist = parse(epochfile)
+    epochs = epochlist.getElementsByTagName('epoch')
+    keys = ('first_samps', 'last_samps', 'first_blocks', 'last_blocks')
+    epoch_info = {key: list() for key in keys}
+    for epoch in epochs:
+        ep_begin = int(epoch.getElementsByTagName('beginTime')[0]
+                       .firstChild.data)
+        ep_end = int(epoch.getElementsByTagName('endTime')[0].firstChild.data)
+        first_block = int(epoch.getElementsByTagName('firstBlock')[0]
+                          .firstChild.data)
+        last_block = int(epoch.getElementsByTagName('lastBlock')[0]
+                         .firstChild.data)
+        epoch_info['first_samps'].append(ep_begin)
+        epoch_info['last_samps'].append(ep_end)
+        epoch_info['first_blocks'].append(first_block)
+        epoch_info['last_blocks'].append(last_block)
+    # Don't turn into ndarray here, keep native int because it can deal with
+    # huge numbers (could use np.uint64 but it's more work)
+    return epoch_info
+
+
+def _get_blocks(filepath):
+    """Get info from meta data blocks."""
+    binfile = os.path.join(filepath)
+    n_blocks = 0
+    samples_block = []
+    header_sizes = []
+    n_channels = []
+    sfreq = []
+    # Meta data consists of (each field a 4-byte little-endian int, as read
+    # by _block_r):
+    # * 4 bytes of flag (1 for meta data, 0 for data)
+    # * 4 bytes of header size
+    # * 4 bytes of block size
+    # * 4 bytes of n_channels
+    # * n_channels * 4 bytes of offsets
+    # * n_channels * 4 bytes of sigfreqs?
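+    # The scan below visits every block in the file; _block_r returns None
+    # for a plain data block (flag 0), in which case the previous block's
+    # header and per-block sample count are reused.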
+    with open(binfile, 'rb') as fid:
+        fid.seek(0, 2)  # go to end of file
+        file_length = fid.tell()
+        block_size = file_length
+        fid.seek(0)
+        position = 0
+        while position < file_length:
+            block = _block_r(fid)
+            if block is None:
+                samples_block.append(samples_block[n_blocks - 1])
+                n_blocks += 1
+                fid.seek(block_size, 1)
+                position = fid.tell()
+                continue
+            block_size = block['block_size']
+            header_size = block['header_size']
+            header_sizes.append(header_size)
+            samples_block.append(block['nsamples'])
+            n_blocks += 1
+            fid.seek(block_size, 1)
+            sfreq.append(block['sfreq'])
+            n_channels.append(block['nc'])
+            position = fid.tell()
+
+    if any([n != n_channels[0] for n in n_channels]):
+        raise RuntimeError("Not all the blocks have the same number of "
+                           "channels.")
+    if any([f != sfreq[0] for f in sfreq]):
+        raise RuntimeError("Not all the blocks have the same sampling "
+                           "frequency.")
+    if len(samples_block) < 1:
+        raise RuntimeError("There seems to be no data")
+    samples_block = np.array(samples_block)
+    signal_blocks = dict(n_channels=n_channels[0], sfreq=sfreq[0],
+                         n_blocks=n_blocks, samples_block=samples_block,
+                         header_sizes=header_sizes)
+    return signal_blocks
+
+
+def _get_signalfname(filepath):
+    """Get filenames."""
+    listfiles = os.listdir(filepath)
+    binfiles = list(f for f in listfiles if 'signal' in f and
+                    f[-4:] == '.bin' and f[0] != '.')
+    all_files = {}
+    infofiles = list()
+    for binfile in binfiles:
+        bin_num_str = re.search(r'\d+', binfile).group()
+        infofile = 'info' + bin_num_str + '.xml'
+        infofiles.append(infofile)
+        infobjfile = os.path.join(filepath, infofile)
+        infobj = parse(infobjfile)
+        if len(infobj.getElementsByTagName('EEG')):
+            signal_type = 'EEG'
+        elif len(infobj.getElementsByTagName('PNSData')):
+            signal_type = 'PNS'
+        all_files[signal_type] = {
+            'signal': 'signal{}.bin'.format(bin_num_str),
+            'info': infofile}
+    if 'EEG' not in all_files:
+        raise FileNotFoundError(
+            'Could not find any EEG data in the %d file%s found in %s:\n%s'
+            % (len(infofiles), _pl(infofiles), filepath, '\n'.join(infofiles)))
+    return all_files
+
+
+def _block_r(fid):
+    """Read meta data."""
+    if np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] != 1:  # not metadata
+        return None
+    header_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0]
+    block_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0]
+    hl = int(block_size / 4)
+    nc = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0]
+    nsamples = int(hl / nc)
+    np.fromfile(fid, dtype=np.dtype('i4'), count=nc)  # sigoffset
+    sigfreq = np.fromfile(fid, dtype=np.dtype('i4'), count=nc)
+    depth = sigfreq[0] & 0xFF
+    if depth != 32:
+        raise ValueError('I do not know how to read this MFF (depth != 32)')
+    sfreq = sigfreq[0] >> 8
+    count = int(header_size / 4 - (4 + 2 * nc))
+    np.fromfile(fid, dtype=np.dtype('i4'), count=count)  # sigoffset
+    block = dict(nc=nc,
+                 hl=hl,
+                 nsamples=nsamples,
+                 block_size=block_size,
+                 header_size=header_size,
+                 sfreq=sfreq)
+    return block
diff --git a/python/libs/mne/io/egi/tests/__init__.py b/python/libs/mne/io/egi/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/python/libs/mne/io/egi/tests/test_egi.py b/python/libs/mne/io/egi/tests/test_egi.py
new file mode 100644
index 0000000..5c62170
--- /dev/null
+++ b/python/libs/mne/io/egi/tests/test_egi.py
@@ -0,0 +1,458 @@
+# -*- coding: utf-8 -*-
+# Authors: Denis A.
Engemann +# simplified BSD-3 license + + +from pathlib import Path +import os.path as op +import os +import shutil +from datetime import datetime, timezone + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose +import pytest +from scipy import io as sio + +from mne import find_events, pick_types +from mne.io import read_raw_egi, read_evokeds_mff, read_raw_fif +from mne.io.constants import FIFF +from mne.io.egi.egi import _combine_triggers +from mne.io.tests.test_raw import _test_raw_reader +from mne.utils import requires_version, object_diff +from mne.datasets.testing import data_path, requires_testing_data + +base_dir = op.join(op.dirname(op.abspath(__file__)), 'data') +egi_fname = op.join(base_dir, 'test_egi.raw') +egi_txt_fname = op.join(base_dir, 'test_egi.txt') +testing_path = data_path(download=False) +egi_path = op.join(testing_path, 'EGI') +egi_mff_fname = op.join(egi_path, 'test_egi.mff') +egi_mff_pns_fname = op.join(egi_path, 'test_egi_pns.mff') +egi_pause_fname = op.join(egi_path, 'test_egi_multiepoch_paused.mff') +egi_eprime_pause_fname = op.join(egi_path, 'test_egi_multiepoch_eprime.mff') +egi_pause_w1337_fname = op.join(egi_path, 'w1337_20191014_105416.mff') +egi_mff_evoked_fname = op.join(egi_path, 'test_egi_evoked.mff') +egi_txt_evoked_cat1_fname = op.join(egi_path, 'test_egi_evoked_cat1.txt') +egi_txt_evoked_cat2_fname = op.join(egi_path, 'test_egi_evoked_cat2.txt') + +# absolute event times from NetStation +egi_pause_events = {'AM40': [7.224, 11.928, 14.413, 16.848], + 'bgin': [6.121, 8.434, 13.369, 15.815, 18.094], + 'FIX+': [6.225, 10.929, 13.414, 15.849], + 'ITI+': [8.293, 12.997, 15.482, 17.918]} +# absolute epoch times +egi_pause_skips = [(1304000.0, 1772000.0), (8660000.0, 12296000.0)] + +egi_eprime_pause_events = {'AM40': [6.049, 8.434, 10.936, 13.321], + 'bgin': [4.902, 7.381, 9.901, 12.268, 14.619], + 'FIX+': [5.050, 7.435, 9.937, 12.322], + 'ITI+': [7.185, 9.503, 12.005, 14.391]} +egi_eprime_pause_skips = [(1344000.0, 1804000.0)] + +egi_pause_w1337_events = None +egi_pause_w1337_skips = [(21956000.0, 40444000.0), (60936000.0, 89332000.0)] + + +@requires_testing_data +@pytest.mark.parametrize('fname, skip_times, event_times', [ + (egi_pause_fname, egi_pause_skips, egi_pause_events), + (egi_eprime_pause_fname, egi_eprime_pause_skips, egi_eprime_pause_events), + (egi_pause_w1337_fname, egi_pause_w1337_skips, egi_pause_w1337_events), +]) +def test_egi_mff_pause(fname, skip_times, event_times): + """Test EGI MFF with pauses.""" + if fname == egi_pause_w1337_fname: + # too slow to _test_raw_reader + raw = read_raw_egi(fname).load_data() + else: + with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + raw = _test_raw_reader(read_raw_egi, input_fname=fname, + test_scaling=False, # XXX probably some bug + test_rank='less', + ) + assert raw.info['sfreq'] == 250. 
# true for all of these files + assert len(raw.annotations) == len(skip_times) + + # assert event onsets match expected times + if event_times is None: + with pytest.raises(ValueError, match='Consider using .*events_from'): + find_events(raw) + else: + events = find_events(raw) + for event_type in event_times.keys(): + ns_samples = np.floor(np.array(event_times[event_type]) * + raw.info['sfreq']) + assert_array_equal( + events[events[:, 2] == raw.event_id[event_type], 0], + ns_samples) + + # read some data from the middle of the skip, assert it's all zeros + stim_picks = pick_types(raw.info, meg=False, stim=True, exclude=()) + other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), stim_picks) + for ii, annot in enumerate(raw.annotations): + assert annot['description'] == 'BAD_ACQ_SKIP' + start, stop = raw.time_as_index( + [annot['onset'], annot['onset'] + annot['duration']]) + data, _ = raw[:, start:stop] + assert_array_equal(data[other_picks], 0.) + if event_times is not None: + assert raw.ch_names[-1] == 'STI 014' + assert not np.array_equal(data[stim_picks], 0.) + + # assert skips match expected onset and duration + skip = ((start + 1) / raw.info['sfreq'] * 1e6, + (stop + 1) / raw.info['sfreq'] * 1e6) + assert skip == skip_times[ii] + + +@requires_testing_data +@pytest.mark.parametrize('fname', [ + egi_pause_fname, + egi_eprime_pause_fname, + egi_pause_w1337_fname, +]) +def test_egi_mff_pause_chunks(fname, tmp_path): + """Test that on-demand of all short segments works (via I/O).""" + fname_temp = tmp_path / 'test_raw.fif' + raw_data = read_raw_egi(fname, preload=True).get_data() + raw = read_raw_egi(fname) + with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + raw.save(fname_temp) + del raw + raw_data_2 = read_raw_fif(fname_temp).get_data() + assert_allclose(raw_data, raw_data_2) + + +@requires_testing_data +def test_io_egi_mff(): + """Test importing EGI MFF simple binary files.""" + raw = read_raw_egi(egi_mff_fname, include=None) + assert ('RawMff' in repr(raw)) + include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7'] + raw = _test_raw_reader(read_raw_egi, input_fname=egi_mff_fname, + include=include, channel_naming='EEG %03d', + test_scaling=False, # XXX probably some bug + ) + assert raw.info['sfreq'] == 1000. + # The ref here is redundant, but we don't currently have a way in + # DigMontage to mark that a given channel is actually the ref so... 
+ assert len(raw.info['dig']) == 133 # 129 eeg + 1 ref + 3 cardinal points + assert raw.info['dig'][0]['ident'] == 1 # EEG channel E1 + assert raw.info['dig'][3]['ident'] == 0 # Reference channel + assert raw.info['dig'][-1]['ident'] == 129 # Reference channel + ref_loc = raw.info['dig'][3]['r'] + eeg_picks = pick_types(raw.info, eeg=True) + assert len(eeg_picks) == 129 + for i in eeg_picks: + loc = raw.info['chs'][i]['loc'] + assert loc[:3].any(), loc[:3] + assert_array_equal(loc[3:6], ref_loc, err_msg=f'{i}') + assert raw.info['device_info']['type'] == 'HydroCel GSN 128 1.0' + + assert 'eeg' in raw + eeg_chan = [c for c in raw.ch_names if 'EEG' in c] + assert len(eeg_chan) == 129 + assert 'STI 014' in raw.ch_names + + events = find_events(raw, stim_channel='STI 014') + assert len(events) == 8 + assert np.unique(events[:, 1])[0] == 0 + assert np.unique(events[:, 0])[0] != 0 + assert np.unique(events[:, 2])[0] != 0 + + with pytest.raises(ValueError, match='Could not find event'): + read_raw_egi(egi_mff_fname, include=['Foo']) + with pytest.raises(ValueError, match='Could not find event'): + read_raw_egi(egi_mff_fname, exclude=['Bar']) + for ii, k in enumerate(include, 1): + assert k in raw.event_id + assert raw.event_id[k] == ii + + +def test_io_egi(): + """Test importing EGI simple binary files.""" + # test default + with open(egi_txt_fname) as fid: + data = np.loadtxt(fid) + t = data[0] + data = data[1:] + data *= 1e-6 # µV + + with pytest.warns(RuntimeWarning, match='Did not find any event code'): + raw = read_raw_egi(egi_fname, include=None) + + # The reader should accept a Path, too. + with pytest.warns(RuntimeWarning, match='Did not find any event code'): + raw = read_raw_egi(Path(egi_fname), include=None) + + assert 'RawEGI' in repr(raw) + data_read, t_read = raw[:256] + assert_allclose(t_read, t) + assert_allclose(data_read, data, atol=1e-10) + + include = ['TRSP', 'XXX1'] + raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname, + include=include, test_rank='less', + test_scaling=False, # XXX probably some bug + ) + + assert 'eeg' in raw + + eeg_chan = [c for c in raw.ch_names if c.startswith('E')] + assert len(eeg_chan) == 256 + picks = pick_types(raw.info, eeg=True) + assert len(picks) == 256 + assert 'STI 014' in raw.ch_names + + events = find_events(raw, stim_channel='STI 014') + assert len(events) == 2 # ground truth + assert np.unique(events[:, 1])[0] == 0 + assert np.unique(events[:, 0])[0] != 0 + assert np.unique(events[:, 2])[0] != 0 + triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]]) + + # test trigger functionality + triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]]) + events_ids = [12, 24] + new_trigger = _combine_triggers(triggers, events_ids) + assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24])) + + pytest.raises(ValueError, read_raw_egi, egi_fname, include=['Foo'], + preload=False) + pytest.raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'], + preload=False) + for ii, k in enumerate(include, 1): + assert (k in raw.event_id) + assert (raw.event_id[k] == ii) + + +@requires_testing_data +def test_io_egi_pns_mff(tmp_path): + """Test importing EGI MFF with PNS data.""" + raw = read_raw_egi(egi_mff_pns_fname, include=None, preload=True, + verbose='error') + assert ('RawMff' in repr(raw)) + pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True) + assert len(pns_chans) == 7 + names = [raw.ch_names[x] for x in pns_chans] + pns_names = ['Resp. Temperature', + 'Resp. Pressure', + 'ECG', + 'Body Position', + 'Resp. Effort Chest', + 'Resp. 
Effort Abdomen', + 'EMG-Leg'] + _test_raw_reader(read_raw_egi, input_fname=egi_mff_pns_fname, + channel_naming='EEG %03d', verbose='error', + test_rank='less', + test_scaling=False, # XXX probably some bug + ) + assert names == pns_names + mat_names = [ + 'Resp_Temperature', + 'Resp_Pressure', + 'ECG', + 'Body_Position', + 'Resp_Effort_Chest', + 'Resp_Effort_Abdomen', + 'EMGLeg' + ] + egi_fname_mat = op.join(testing_path, 'EGI', 'test_egi_pns.mat') + mc = sio.loadmat(egi_fname_mat) + for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names): + print('Testing {}'.format(ch_name)) + mc_key = [x for x in mc.keys() if mat_name in x][0] + cal = raw.info['chs'][ch_idx]['cal'] + mat_data = mc[mc_key] * cal + raw_data = raw[ch_idx][0] + assert_array_equal(mat_data, raw_data) + + # EEG missing + new_mff = tmp_path / 'temp.mff' + shutil.copytree(egi_mff_pns_fname, new_mff) + read_raw_egi(new_mff, verbose='error') + os.remove(op.join(new_mff, 'info1.xml')) + os.remove(op.join(new_mff, 'signal1.bin')) + with pytest.raises(FileNotFoundError, match='Could not find any EEG'): + read_raw_egi(new_mff, verbose='error') + + +@requires_testing_data +@pytest.mark.parametrize('preload', (True, False)) +def test_io_egi_pns_mff_bug(preload): + """Test importing EGI MFF with PNS data (BUG).""" + egi_fname_mff = op.join(testing_path, 'EGI', 'test_egi_pns_bug.mff') + with pytest.warns(RuntimeWarning, match='EGI PSG sample bug'): + raw = read_raw_egi(egi_fname_mff, include=None, preload=preload, + verbose='warning') + assert len(raw.annotations) == 1 + assert_allclose(raw.annotations.duration, [0.004]) + assert_allclose(raw.annotations.onset, [13.948]) + egi_fname_mat = op.join(testing_path, 'EGI', 'test_egi_pns.mat') + mc = sio.loadmat(egi_fname_mat) + pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True) + pns_names = ['Resp. Temperature'[:15], + 'Resp. Pressure', + 'ECG', + 'Body Position', + 'Resp. Effort Chest'[:15], + 'Resp. 
Effort Abdomen'[:15], + 'EMG-Leg'] + mat_names = [ + 'Resp_Temperature'[:15], + 'Resp_Pressure', + 'ECG', + 'Body_Position', + 'Resp_Effort_Chest'[:15], + 'Resp_Effort_Abdomen'[:15], + 'EMGLeg' + + ] + for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names): + print('Testing {}'.format(ch_name)) + mc_key = [x for x in mc.keys() if mat_name in x][0] + cal = raw.info['chs'][ch_idx]['cal'] + mat_data = mc[mc_key] * cal + mat_data[:, -1] = 0 # The MFF has one less sample, the last one + raw_data = raw[ch_idx][0] + assert_array_equal(mat_data, raw_data) + + +@requires_testing_data +def test_io_egi_crop_no_preload(): + """Test crop non-preloaded EGI MFF data (BUG).""" + raw = read_raw_egi(egi_mff_fname, preload=False) + raw.crop(17.5, 20.5) + raw.load_data() + raw_preload = read_raw_egi(egi_mff_fname, preload=True) + raw_preload.crop(17.5, 20.5) + raw_preload.load_data() + assert_allclose(raw._data, raw_preload._data) + + +@requires_version('mffpy', '0.5.7') +@requires_testing_data +@pytest.mark.parametrize('idx, cond, tmax, signals, bads', [ + (0, 'Category 1', 0.016, egi_txt_evoked_cat1_fname, + ['E8', 'E11', 'E17', 'E28', 'ECG']), + (1, 'Category 2', 0.0, egi_txt_evoked_cat2_fname, + ['E257', 'EMG']) +]) +def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): + """Test reading evoked MFF file.""" + # Test reading all conditions from evokeds + evokeds = read_evokeds_mff(egi_mff_evoked_fname) + assert len(evokeds) == 2 + # Test reading list of conditions from evokeds + evokeds = read_evokeds_mff(egi_mff_evoked_fname, condition=[0, 1]) + assert len(evokeds) == 2 + # Test invalid condition + with pytest.raises(ValueError) as exc_info: + read_evokeds_mff(egi_mff_evoked_fname, condition='Invalid Condition') + message = "Invalid value for the 'condition' parameter provided as " \ + "category name. Allowed values are 'Category 1' and " \ + "'Category 2', but got 'Invalid Condition' instead." + assert str(exc_info.value) == message + with pytest.raises(ValueError) as exc_info: + read_evokeds_mff(egi_mff_evoked_fname, condition=2) + message = '"condition" parameter (2), provided as epoch index, ' \ + 'is out of range for available epochs (2).' + assert str(exc_info.value) == message + with pytest.raises(TypeError) as exc_info: + read_evokeds_mff(egi_mff_evoked_fname, condition=1.2) + message = '"condition" parameter must be either int or str.' 
+ assert str(exc_info.value) == message + # Test reading evoked data from single condition + evoked_cond = read_evokeds_mff(egi_mff_evoked_fname, condition=cond) + evoked_idx = read_evokeds_mff(egi_mff_evoked_fname, condition=idx) + for evoked in [evoked_cond, evoked_idx]: + assert evoked.comment == cond + assert evoked.nave == 3 + assert evoked.tmin == 0.0 + assert evoked.tmax == tmax + # Check signal data + data = np.loadtxt(signals, ndmin=2).T * 1e-6 # convert to volts + assert_allclose(evoked_cond.data, data, atol=1e-12) + assert_allclose(evoked_idx.data, data, atol=1e-12) + # Check info + assert object_diff(evoked_cond.info, evoked_idx.info) == '' + assert evoked_cond.info['description'] == cond + assert evoked_cond.info['bads'] == bads + assert len(evoked_cond.info['ch_names']) == 259 + assert 'ECG' in evoked_cond.info['ch_names'] + assert 'EMG' in evoked_cond.info['ch_names'] + assert 'ecg' in evoked_cond + assert 'emg' in evoked_cond + pick_eeg = pick_types(evoked_cond.info, eeg=True, exclude=[]) + assert len(pick_eeg) == 257 + assert evoked_cond.info['nchan'] == 259 + assert evoked_cond.info['sfreq'] == 250.0 + assert not evoked_cond.info['custom_ref_applied'] + assert len(evoked_cond.info['dig']) == 261 + assert evoked_cond.info['device_info']['type'] == 'HydroCel GSN 256 1.0' + + +@requires_version('mffpy', '0.5.7') +@requires_testing_data +def test_read_evokeds_mff_bad_input(): + """Test errors are thrown when reading invalid input file.""" + # Test file that is not an MFF + with pytest.raises(ValueError) as exc_info: + read_evokeds_mff(egi_fname) + message = 'fname must be an MFF file with extension ".mff".' + assert str(exc_info.value) == message + # Test continuous MFF + with pytest.raises(ValueError) as exc_info: + read_evokeds_mff(egi_mff_fname) + message = f'{egi_mff_fname} is a continuous MFF file. ' \ + 'fname must be the path to an averaged MFF file.' 
+ assert str(exc_info.value) == message + + +@requires_testing_data +def test_egi_coord_frame(): + """Test that EGI coordinate frame is changed to head.""" + info = read_raw_egi(egi_mff_fname).info + want_idents = ( + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + ) + for ii, want in enumerate(want_idents): + d = info['dig'][ii] + assert d['kind'] == FIFF.FIFFV_POINT_CARDINAL + assert d['ident'] == want + loc = d['r'] + if ii == 0: + assert 0.05 < -loc[0] < 0.1, 'LPA' + assert_allclose(loc[1:], 0, atol=1e-7, err_msg='LPA') + elif ii == 1: + assert 0.05 < loc[1] < 0.11, 'Nasion' + assert_allclose(loc[::2], 0, atol=1e-7, err_msg='Nasion') + else: + assert ii == 2 + assert 0.05 < loc[0] < 0.1, 'RPA' + assert_allclose(loc[1:], 0, atol=1e-7, err_msg='RPA') + for d in info['dig'][3:]: + assert d['kind'] == FIFF.FIFFV_POINT_EEG + + +@requires_testing_data +@pytest.mark.parametrize('fname, timestamp, utc_offset', [ + (egi_mff_fname, '2017-02-23T11:35:13.220824+01:00', '+0100'), + (egi_mff_pns_fname, '2017-09-20T09:55:44.072000+01:00', '+0100'), + (egi_eprime_pause_fname, '2018-07-30T10:46:09.621673-04:00', '-0400'), + (egi_pause_w1337_fname, '2019-10-14T10:54:27.395210-07:00', '-0700'), +]) +def test_meas_date(fname, timestamp, utc_offset): + """Test meas date conversion.""" + raw = read_raw_egi(fname, verbose='warning') + dt = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f%z') + measdate = dt.astimezone(timezone.utc) + hour_local = int(dt.strftime('%H')) + hour_utc = int(raw.info['meas_date'].strftime('%H')) + local_utc_diff = hour_local - hour_utc + assert raw.info['meas_date'] == measdate + assert raw.info['utc_offset'] == utc_offset + assert local_utc_diff == int(utc_offset[:-2]) diff --git a/python/libs/mne/io/eximia/__init__.py b/python/libs/mne/io/eximia/__init__.py new file mode 100644 index 0000000..b6cd940 --- /dev/null +++ b/python/libs/mne/io/eximia/__init__.py @@ -0,0 +1,7 @@ +"""Eximia module for conversion to FIF.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +from .eximia import read_raw_eximia diff --git a/python/libs/mne/io/eximia/eximia.py b/python/libs/mne/io/eximia/eximia.py new file mode 100644 index 0000000..b454357 --- /dev/null +++ b/python/libs/mne/io/eximia/eximia.py @@ -0,0 +1,92 @@ +# Authors: Eric Larson +# Federico Raimondo +# +# License: BSD-3-Clause + +import os.path as op + +from ..base import BaseRaw +from ..utils import _read_segments_file, _file_size +from ..meas_info import create_info +from ...utils import logger, verbose, warn, fill_doc, _check_fname + + +@fill_doc +def read_raw_eximia(fname, preload=False, verbose=None): + """Reader for an eXimia EEG file. + + Parameters + ---------- + fname : str + Path to the eXimia data file (.nxe). + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawEximia + A Raw object containing eXimia data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawEximia(fname, preload, verbose) + + +@fill_doc +class RawEximia(BaseRaw): + """Raw object from an Eximia EEG file. + + Parameters + ---------- + fname : str + Path to the eXimia data file (.nxe). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
+ """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') + data_name = op.basename(fname) + logger.info('Loading %s' % data_name) + # Create vhdr and vmrk files so that we can use mne_brain_vision2fiff + n_chan = 64 + sfreq = 1450. + # data are multiplexed int16 + ch_names = ['GateIn', 'Trig1', 'Trig2', 'EOG'] + ch_types = ['stim', 'stim', 'stim', 'eog'] + cals = [0.0015259021896696422, 0.0015259021896696422, + 0.0015259021896696422, 0.3814755474174106] + ch_names += ('Fp1 Fpz Fp2 AF1 AFz AF2 ' + 'F7 F3 F1 Fz F2 F4 F8 ' + 'FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 ' + 'T7 C5 C3 C1 Cz C2 C4 C6 T8 ' + 'TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 ' + 'P9 P7 P3 P1 Pz P2 P4 P8 ' + 'P10 PO3 POz PO4 O1 Oz O2 Iz'.split()) + n_eeg = len(ch_names) - len(cals) + cals += [0.07629510948348212] * n_eeg + ch_types += ['eeg'] * n_eeg + assert len(ch_names) == n_chan + info = create_info(ch_names, sfreq, ch_types) + n_bytes = _file_size(fname) + n_samples, extra = divmod(n_bytes, (n_chan * 2)) + if extra != 0: + warn('Incorrect number of samples in file (%s), the file is ' + 'likely truncated' % (n_samples,)) + for ch, cal in zip(info['chs'], cals): + ch['cal'] = cal + super(RawEximia, self).__init__( + info, preload=preload, last_samps=(n_samples - 1,), + filenames=[fname], orig_format='short') + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=' +# simplified BSD-3 license +import os.path as op + +from numpy.testing import assert_array_equal +from scipy import io as sio + +from mne.io import read_raw_eximia +from mne.io.tests.test_raw import _test_raw_reader +from mne.datasets.testing import data_path, requires_testing_data + +testing_path = data_path(download=False) + + +@requires_testing_data +def test_eximia_nxe(): + """Test reading Eximia NXE files.""" + fname = op.join(testing_path, 'eximia', 'test_eximia.nxe') + raw = read_raw_eximia(fname, preload=True) + assert 'RawEximia' in repr(raw) + _test_raw_reader(read_raw_eximia, fname=fname, + test_scaling=False, # XXX probably a scaling problem + ) + fname_mat = op.join(testing_path, 'eximia', 'test_eximia.mat') + mc = sio.loadmat(fname_mat) + m_data = mc['data'] + m_header = mc['header'] + assert raw._data.shape == m_data.shape + assert m_header['Fs'][0, 0][0, 0] == raw.info['sfreq'] + m_names = [x[0][0] for x in m_header['label'][0, 0]] + m_names = list( + map(lambda x: x.replace('GATE', 'GateIn').replace('TRIG', 'Trig'), + m_names)) + assert raw.ch_names == m_names + m_ch_types = [x[0][0] for x in m_header['chantype'][0, 0]] + m_ch_types = list( + map(lambda x: x.replace('unknown', 'stim').replace('trigger', 'stim'), + m_ch_types)) + types_dict = {2: 'eeg', 3: 'stim', 202: 'eog'} + ch_types = [types_dict[raw.info['chs'][x]['kind']] + for x in range(len(raw.ch_names))] + assert ch_types == m_ch_types + + assert_array_equal(m_data, raw._data) diff --git a/python/libs/mne/io/fieldtrip/__init__.py b/python/libs/mne/io/fieldtrip/__init__.py new file mode 100644 index 0000000..2085c93 --- /dev/null +++ b/python/libs/mne/io/fieldtrip/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause + +from .fieldtrip import (read_evoked_fieldtrip, read_epochs_fieldtrip, + read_raw_fieldtrip) diff --git a/python/libs/mne/io/fieldtrip/fieldtrip.py b/python/libs/mne/io/fieldtrip/fieldtrip.py new 
file mode 100644 index 0000000..1a5dda5 --- /dev/null +++ b/python/libs/mne/io/fieldtrip/fieldtrip.py @@ -0,0 +1,181 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause + +import numpy as np + +from .utils import _create_info, _set_tmin, _create_events, \ + _create_event_metadata, _validate_ft_struct +from ...utils import _check_fname, _import_pymatreader_funcs +from ..array.array import RawArray +from ...epochs import EpochsArray +from ...evoked import EvokedArray + + +def read_raw_fieldtrip(fname, info, data_name='data'): + """Load continuous (raw) data from a FieldTrip preprocessing structure. + + This function expects to find single trial raw data (FT_DATATYPE_RAW) in + the structure data_name is pointing at. + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. + + Parameters + ---------- + fname : str + Path and filename of the .mat file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + data_name : str + Name of heading dict/ variable name under which the data was originally + saved in MATLAB. + + Returns + ------- + raw : instance of RawArray + A Raw Object containing the loaded data. + """ + read_mat = _import_pymatreader_funcs('FieldTrip I/O') + fname = _check_fname(fname, overwrite='read', must_exist=True) + + ft_struct = read_mat(fname, + ignore_fields=['previous'], + variable_names=[data_name]) + + # load data and set ft_struct to the heading dictionary + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data = np.array(ft_struct['trial']) # create the main data array + + if data.ndim > 2: + data = np.squeeze(data) + + if data.ndim == 1: + data = data[np.newaxis, ...] + + if data.ndim != 2: + raise RuntimeError('The data you are trying to load does not seem to ' + 'be raw data') + + raw = RawArray(data, info) # create an MNE RawArray + return raw + + +def read_epochs_fieldtrip(fname, info, data_name='data', + trialinfo_column=0): + """Load epoched data from a FieldTrip preprocessing structure. + + This function expects to find epoched data in the structure data_name is + pointing at. + + .. warning:: Only epochs with the same amount of channels and samples are + supported! + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. + + Parameters + ---------- + fname : str + Path and filename of the .mat file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + data_name : str + Name of heading dict/ variable name under which the data was originally + saved in MATLAB. 
+ trialinfo_column : int + Column of the trialinfo matrix to use for the event codes. + + Returns + ------- + epochs : instance of EpochsArray + An EpochsArray containing the loaded data. + """ + read_mat = _import_pymatreader_funcs('FieldTrip I/O') + ft_struct = read_mat(fname, + ignore_fields=['previous'], + variable_names=[data_name]) + + # load data and set ft_struct to the heading dictionary + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data = np.array(ft_struct['trial']) # create the epochs data array + events = _create_events(ft_struct, trialinfo_column) + if events is not None: + metadata = _create_event_metadata(ft_struct) + else: + metadata = None + tmin = _set_tmin(ft_struct) # create start time + + epochs = EpochsArray(data=data, info=info, tmin=tmin, + events=events, metadata=metadata, proj=False) + return epochs + + +def read_evoked_fieldtrip(fname, info, comment=None, + data_name='data'): + """Load evoked data from a FieldTrip timelocked structure. + + This function expects to find timelocked data in the structure data_name is + pointing at. + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. + + Parameters + ---------- + fname : str + Path and filename of the .mat file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + comment : str + Comment on dataset. Can be the condition. + data_name : str + Name of heading dict/ variable name under which the data was originally + saved in MATLAB. + + Returns + ------- + evoked : instance of EvokedArray + An EvokedArray containing the loaded data. 
+ """ + read_mat = _import_pymatreader_funcs('FieldTrip I/O') + ft_struct = read_mat(fname, + ignore_fields=['previous'], + variable_names=[data_name]) + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data_evoked = ft_struct['avg'] # create evoked data + + evoked = EvokedArray(data_evoked, info, comment=comment) + return evoked diff --git a/python/libs/mne/io/fieldtrip/tests/__init__.py b/python/libs/mne/io/fieldtrip/tests/__init__.py new file mode 100644 index 0000000..047bd9a --- /dev/null +++ b/python/libs/mne/io/fieldtrip/tests/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause diff --git a/python/libs/mne/io/fieldtrip/tests/helpers.py b/python/libs/mne/io/fieldtrip/tests/helpers.py new file mode 100644 index 0000000..1bda5a0 --- /dev/null +++ b/python/libs/mne/io/fieldtrip/tests/helpers.py @@ -0,0 +1,223 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause +from functools import partial +import os + +import numpy as np + +import mne +from mne.io.constants import FIFF +from mne.utils import object_diff + + +info_ignored_fields = ('file_id', 'hpi_results', 'hpi_meas', 'meas_id', + 'meas_date', 'highpass', 'lowpass', 'subject_info', + 'hpi_subsystem', 'experimenter', 'description', + 'proj_id', 'proj_name', 'line_freq', 'gantry_angle', + 'dev_head_t', 'bads', 'ctf_head_t', 'dev_ctf_t') + +ch_ignore_fields = ('logno', 'cal', 'range', 'scanno', 'coil_type', 'kind', + 'loc', 'coord_frame', 'unit') + +info_long_fields = ('hpi_meas', 'projs') + +system_to_reader_fn_dict = {'neuromag306': mne.io.read_raw_fif, + 'CNT': partial(mne.io.read_raw_cnt), + 'CTF': partial(mne.io.read_raw_ctf, + clean_names=True), + 'BTI': partial(mne.io.read_raw_bti, + head_shape_fname=None, + rename_channels=False, + sort_by_ch_name=False), + 'EGI': mne.io.read_raw_egi, + 'eximia': mne.io.read_raw_eximia} + +ignore_channels_dict = {'BTI': ['MUz', 'MLx', 'MLy', 'MUx', 'MUy', 'MLz']} + +drop_extra_chans_dict = {'EGI': ['STI 014', 'DIN1', 'DIN3', + 'DIN7', 'DIN4', 'DIN5', 'DIN2'], + 'eximia': ['GateIn', 'Trig1', 'Trig2']} + +system_decimal_accuracy_dict = {'CNT': 2} + +pandas_not_found_warning_msg = 'The Pandas library is not installed. Not ' \ + 'returning the original trialinfo matrix as ' \ + 'metadata.' + +testing_path = mne.datasets.testing.data_path(download=False) + + +def _remove_ignored_ch_fields(info): + if 'chs' in info: + for cur_ch in info['chs']: + for cur_field in ch_ignore_fields: + if cur_field in cur_ch: + del cur_ch[cur_field] + + +def _remove_long_info_fields(info): + for cur_field in info_long_fields: + if cur_field in info: + del info[cur_field] + + +def _remove_ignored_info_fields(info): + for cur_field in info_ignored_fields: + if cur_field in info: + del info[cur_field] + + _remove_ignored_ch_fields(info) + _remove_bad_dig_fields(info) + + +def _remove_bad_dig_fields(info): + # The reference location appears to be lost, so we cannot add it. + # Similarly, fiducial locations do not appear to be stored, so we + # cannot add those, either. Same with HPI coils. 
+ if info['dig'] is not None: + with info._unlock(): + info['dig'] = [d for d in info['dig'] + if d['kind'] == FIFF.FIFFV_POINT_EEG and + d['ident'] != 0] # ref + + +def get_data_paths(system): + """Return common paths for all tests.""" + return testing_path / 'fieldtrip' / 'ft_test_data' / system + + +def get_cfg_local(system): + """Return cfg_local field for the system.""" + from pymatreader import read_mat + cfg_local = read_mat(os.path.join(get_data_paths(system), 'raw_v7.mat'), + ['cfg_local'])['cfg_local'] + + return cfg_local + + +def get_raw_info(system): + """Return the info dict of the raw data.""" + cfg_local = get_cfg_local(system) + + raw_data_file = os.path.join(testing_path, cfg_local['file_name']) + reader_function = system_to_reader_fn_dict[system] + + info = reader_function(raw_data_file, preload=False).info + with info._unlock(): + info['comps'] = [] + return info + + +def get_raw_data(system, drop_extra_chs=False): + """Find, load and process the raw data.""" + cfg_local = get_cfg_local(system) + + raw_data_file = os.path.join(testing_path, cfg_local['file_name']) + reader_function = system_to_reader_fn_dict[system] + + raw_data = reader_function(raw_data_file, preload=True) + crop = min(cfg_local['crop'], np.max(raw_data.times)) + if system == 'eximia': + crop -= 0.5 * (1.0 / raw_data.info['sfreq']) + raw_data.crop(0, crop) + raw_data.del_proj('all') + with raw_data.info._unlock(): + raw_data.info['comps'] = [] + raw_data.drop_channels(cfg_local['removed_chan_names']) + + if system in ['EGI']: + raw_data._data[0:-1, :] = raw_data._data[0:-1, :] * 1e6 + + if system in ['CNT']: + raw_data._data = raw_data._data * 1e6 + + if system in ignore_channels_dict: + raw_data.drop_channels(ignore_channels_dict[system]) + + if system in drop_extra_chans_dict and drop_extra_chs: + raw_data.drop_channels(drop_extra_chans_dict[system]) + + return raw_data + + +def get_epochs(system): + """Find, load and process the epoched data.""" + cfg_local = get_cfg_local(system) + raw_data = get_raw_data(system) + + if cfg_local['eventtype'] in raw_data.ch_names: + stim_channel = cfg_local['eventtype'] + else: + stim_channel = 'STI 014' + + if system == 'CNT': + events, event_id = mne.events_from_annotations(raw_data) + events[:, 0] = events[:, 0] + 1 + else: + events = mne.find_events(raw_data, stim_channel=stim_channel, + shortest_event=1) + + if isinstance(cfg_local['eventvalue'], np.ndarray): + event_id = list(cfg_local['eventvalue'].astype('int')) + else: + event_id = [int(cfg_local['eventvalue'])] + + event_id = [id for id in event_id if id in events[:, 2]] + + epochs = mne.Epochs(raw_data, events=events, + event_id=event_id, + tmin=-cfg_local['prestim'], + tmax=cfg_local['poststim'], baseline=None) + + return epochs + + +def get_evoked(system): + """Find, load and process the avg data.""" + epochs = get_epochs(system) + return epochs.average(picks=np.arange(len(epochs.ch_names))) + + +def check_info_fields(expected, actual, has_raw_info, ignore_long=True): + """ + Check if info fields are equal. + + Some fields are ignored. 
+ """ + expected = expected.info.copy() + actual = actual.info.copy() + + if not has_raw_info: + _remove_ignored_info_fields(expected) + _remove_ignored_info_fields(actual) + + _remove_long_info_fields(expected) + _remove_long_info_fields(actual) + + # we annoyingly have two ways of representing this, so just always use + # an empty list here + for obj in (expected, actual): + if obj['dig'] is None: + with obj._unlock(): + obj['dig'] = [] + + d = object_diff(actual, expected, allclose=True) + assert d == '', d + + +def check_data(expected, actual, system): + """Check data for equality.""" + decimal = 7 + if system in system_decimal_accuracy_dict: + decimal = system_decimal_accuracy_dict[system] + + np.testing.assert_almost_equal(expected, actual, decimal=decimal) + + +def assert_warning_in_record(warning_message, warn_record): + """Assert that a warning message is in the records.""" + all_messages = [str(w.message) for w in warn_record] + assert warning_message in all_messages diff --git a/python/libs/mne/io/fieldtrip/tests/test_fieldtrip.py b/python/libs/mne/io/fieldtrip/tests/test_fieldtrip.py new file mode 100644 index 0000000..81fd123 --- /dev/null +++ b/python/libs/mne/io/fieldtrip/tests/test_fieldtrip.py @@ -0,0 +1,313 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause + +from contextlib import nullcontext +import copy +import itertools +import os.path + +import pytest +import numpy as np + +import mne +from mne.datasets import testing +from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events +from mne.utils import _check_pandas_installed, _record_warnings +from mne.io.fieldtrip.tests.helpers import ( + check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, + pandas_not_found_warning_msg, get_raw_info, check_data, + assert_warning_in_record) + +# missing: KIT: biggest problem here is that the channels do not have the same +# names. +# EGI: no calibration done in FT. so data is VERY different + +all_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia'] +all_systems_epochs = ['neuromag306', 'CTF', 'CNT'] +all_versions = ['v7', 'v73'] +use_info = [True, False] +all_test_params_raw = list(itertools.product(all_systems_raw, all_versions, + use_info)) +all_test_params_epochs = list(itertools.product(all_systems_epochs, + all_versions, + use_info)) +# just for speed we skip some slowest ones -- the coverage should still +# be sufficient +for obj in (all_test_params_epochs, all_test_params_raw): + for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]: + obj.pop(obj.index(key)) + for ki, key in enumerate(obj): + if key[1] == 'v73': + obj[ki] = pytest.param(*obj[ki], marks=pytest.mark.slowtest) + +no_info_warning = {'expected_warning': RuntimeWarning, + 'match': NOINFO_WARNING} + +pymatreader = pytest.importorskip('pymatreader') # module-level +testing_path = mne.datasets.testing.data_path(download=False) + + +@pytest.mark.slowtest +@testing.requires_testing_data +# Reading the sample CNT data results in a RuntimeWarning because it cannot +# parse the measurement date. We need to ignore that warning. 
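+# (The second filter below additionally silences a similar RuntimeWarning
+# about the number of bytes read.)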
+@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') +@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') +@pytest.mark.parametrize('cur_system, version, use_info', + all_test_params_epochs) +def test_read_evoked(cur_system, version, use_info): + """Test comparing reading an Evoked object and the FieldTrip version.""" + test_data_folder_ft = get_data_paths(cur_system) + mne_avg = get_evoked(cur_system) + if use_info: + info = get_raw_info(cur_system) + ctx = nullcontext() + else: + info = None + ctx = pytest.warns(**no_info_warning) + + cur_fname = os.path.join(test_data_folder_ft, + 'averaged_%s.mat' % (version,)) + with ctx: + avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info) + + mne_data = mne_avg.data[:, :-1] + ft_data = avg_ft.data + + check_data(mne_data, ft_data, cur_system) + check_info_fields(mne_avg, avg_ft, use_info) + + +@testing.requires_testing_data +# Reading the sample CNT data results in a RuntimeWarning because it cannot +# parse the measurement date. We need to ignore that warning. +@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') +@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') +@pytest.mark.parametrize('cur_system, version, use_info', + all_test_params_epochs) +# Strange, non-deterministic Pandas errors: +# "ValueError: cannot expose native-only dtype 'g' in non-native +# byte order '<' via buffer interface" +@pytest.mark.skipif(os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true', + reason='Pandas problem on Azure CI') +def test_read_epochs(cur_system, version, use_info, monkeypatch): + """Test comparing reading an Epochs object and the FieldTrip version.""" + pandas = _check_pandas_installed(strict=False) + has_pandas = pandas is not False + test_data_folder_ft = get_data_paths(cur_system) + mne_epoched = get_epochs(cur_system) + if use_info: + info = get_raw_info(cur_system) + ctx = nullcontext() + else: + info = None + ctx = pytest.warns(**no_info_warning) + + cur_fname = os.path.join(test_data_folder_ft, + 'epoched_%s.mat' % (version,)) + if has_pandas: + with ctx: + epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info) + assert isinstance(epoched_ft.metadata, pandas.DataFrame) + else: + with _record_warnings() as warn_record: + epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info) + assert epoched_ft.metadata is None + assert_warning_in_record(pandas_not_found_warning_msg, warn_record) + if info is None: + assert_warning_in_record(NOINFO_WARNING, warn_record) + + mne_data = mne_epoched.get_data()[:, :, :-1] + ft_data = epoched_ft.get_data() + + check_data(mne_data, ft_data, cur_system) + check_info_fields(mne_epoched, epoched_ft, use_info) + read_mat = pymatreader.read_mat + + # weird sfreq + def modify_mat(fname, variable_names=None, ignore_fields=None): + out = read_mat(fname, variable_names, ignore_fields) + if 'fsample' in out['data']: + out['data']['fsample'] = np.repeat(out['data']['fsample'], 2) + return out + + monkeypatch.setattr(pymatreader, 'read_mat', modify_mat) + with pytest.warns(RuntimeWarning, match='multiple'): + mne.io.read_epochs_fieldtrip(cur_fname, info) + + +@testing.requires_testing_data +# Reading the sample CNT data results in a RuntimeWarning because it cannot +# parse the measurement date. We need to ignore that warning. 
+@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') +@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') +@pytest.mark.parametrize('cur_system, version, use_info', all_test_params_raw) +def test_raw(cur_system, version, use_info): + """Test comparing reading a raw fiff file and the FieldTrip version.""" + # Load the raw fiff file with mne + test_data_folder_ft = get_data_paths(cur_system) + raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True) + if use_info: + info = get_raw_info(cur_system) + if cur_system in ('BTI', 'eximia'): + ctx = pytest.warns(RuntimeWarning, match='cannot be found in') + else: + ctx = nullcontext() + else: + info = None + ctx = pytest.warns(**no_info_warning) + + cur_fname = os.path.join(test_data_folder_ft, + 'raw_%s.mat' % (version,)) + + with ctx: + raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info) + + if cur_system == 'BTI' and not use_info: + raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA', + 'MyA', 'MxaA', 'MzaA']) + + if cur_system == 'eximia' and not use_info: + raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE']) + + # Check that the data was loaded correctly + check_data(raw_fiff_mne.get_data(), + raw_fiff_ft.get_data(), + cur_system) + + # Check info field + check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info) + + +@testing.requires_testing_data +def test_load_epoched_as_raw(): + """Test whether exception is thrown when loading epochs as raw.""" + test_data_folder_ft = get_data_paths('neuromag306') + info = get_raw_info('neuromag306') + cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') + + with pytest.raises(RuntimeError): + mne.io.read_raw_fieldtrip(cur_fname, info) + + +@testing.requires_testing_data +def test_invalid_trialinfocolumn(): + """Test for exceptions when using wrong values for trialinfo parameter.""" + test_data_folder_ft = get_data_paths('neuromag306') + info = get_raw_info('neuromag306') + cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') + + with pytest.raises(ValueError): + mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=-1) + + with pytest.raises(ValueError): + mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=3) + + +@testing.requires_testing_data +def test_create_events(): + """Test 2dim trialinfo fields.""" + test_data_folder_ft = get_data_paths('neuromag306') + cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') + original_data = pymatreader.read_mat(cur_fname, ['data', ]) + + new_data = copy.deepcopy(original_data) + new_data['trialinfo'] = np.array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + + with pytest.raises(ValueError): + _create_events(new_data, -1) + + for cur_col in np.arange(4): + evts = _create_events(new_data, cur_col) + assert np.all(evts[:, 2] == cur_col + 1) + + with pytest.raises(ValueError): + _create_events(new_data, 4) + + +@testing.requires_testing_data +@pytest.mark.parametrize('version', all_versions) +def test_one_channel_elec_bug(version): + """Test if loading data having only one elec in the elec field works.""" + fname = os.path.join(testing_path, 'fieldtrip', + 'one_channel_elec_bug_data_%s.mat' % (version, )) + + with pytest.warns(**no_info_warning): + mne.io.read_raw_fieldtrip(fname, info=None) + + +@testing.requires_testing_data +# Reading the sample CNT data results in a RuntimeWarning because it cannot +# parse the measurement date. We need to ignore that warning. 
+@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') +@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') +@pytest.mark.parametrize('version', all_versions) +@pytest.mark.parametrize('type', ['averaged', 'epoched', 'raw']) +def test_throw_exception_on_cellarray(version, type): + """Test for a meaningful exception when the data is a cell array.""" + fname = os.path.join(get_data_paths('cellarray'), + '%s_%s.mat' % (type, version)) + + info = get_raw_info('CNT') + + with pytest.raises(RuntimeError, match='Loading of data in cell arrays ' + 'is not supported'): + if type == 'averaged': + mne.read_evoked_fieldtrip(fname, info) + elif type == 'epoched': + mne.read_epochs_fieldtrip(fname, info) + elif type == 'raw': + mne.io.read_raw_fieldtrip(fname, info) + + +@testing.requires_testing_data +def test_with_missing_channels(): + """Test _create_info when channels are missing from info.""" + cur_system = 'neuromag306' + test_data_folder_ft = get_data_paths(cur_system) + info = get_raw_info(cur_system) + del info['chs'][1:20] + info._update_redundant() + + with pytest.warns(RuntimeWarning): + mne.io.read_raw_fieldtrip( + os.path.join(test_data_folder_ft, 'raw_v7.mat'), info) + mne.read_evoked_fieldtrip( + os.path.join(test_data_folder_ft, 'averaged_v7.mat'), info) + mne.read_epochs_fieldtrip( + os.path.join(test_data_folder_ft, 'epoched_v7.mat'), info) + + +@testing.requires_testing_data +@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') +@pytest.mark.filterwarnings('ignore: Cannot guess the correct type') +def test_throw_error_on_non_uniform_time_field(): + """Test if an error is thrown when time fields are not uniform.""" + fname = os.path.join(testing_path, 'fieldtrip', 'not_uniform_time.mat') + + with pytest.raises(RuntimeError, match='Loading data with non-uniform ' + 'times per epoch is not supported'): + mne.io.read_epochs_fieldtrip(fname, info=None) + + +@testing.requires_testing_data +@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') +def test_throw_error_when_importing_old_ft_version_data(): + """Test if an error is thrown if the data was saved with an old version.""" + fname = os.path.join(testing_path, 'fieldtrip', 'old_version.mat') + + with pytest.raises(RuntimeError, match='This file was created with ' + 'an old version of FieldTrip. You ' + 'can convert the data to the new ' + 'version by loading it into ' + 'FieldTrip and applying ' + 'ft_selectdata with an ' + 'empty cfg structure on it. ' + 'Otherwise you can supply ' + 'the Info field.'): + mne.io.read_epochs_fieldtrip(fname, info=None) diff --git a/python/libs/mne/io/fieldtrip/utils.py b/python/libs/mne/io/fieldtrip/utils.py new file mode 100644 index 0000000..2f190ce --- /dev/null +++ b/python/libs/mne/io/fieldtrip/utils.py @@ -0,0 +1,348 @@ +# -*- coding: UTF-8 -*- +# Authors: Thomas Hartmann +# Dirk Gütlin +# +# License: BSD-3-Clause +import numpy as np + +from .._digitization import DigPoint +from ..constants import FIFF +from ..meas_info import create_info +from ..pick import pick_info +from ...transforms import rotation3d_align_z_axis +from ...utils import warn, _check_pandas_installed + +_supported_megs = ['neuromag306'] + +_unit_dict = {'m': 1, + 'cm': 1e-2, + 'mm': 1e-3, + 'V': 1, + 'mV': 1e-3, + 'uV': 1e-6, + 'T': 1, + 'T/m': 1, + 'T/cm': 1e2} + +NOINFO_WARNING = 'Importing FieldTrip data without an info dict from the ' \ + 'original file. Channel locations, orientations and types ' \ + 'will be incorrect. 
The imported data cannot be used for ' \
+                 'source analysis, channel interpolation etc.'
+
+
+def _validate_ft_struct(ft_struct):
+    """Run validation checks on the ft_structure."""
+    if isinstance(ft_struct, list):
+        raise RuntimeError('Loading of data in cell arrays is not supported')
+
+
+def _create_info(ft_struct, raw_info):
+    """Create MNE info structure from a FieldTrip structure."""
+    if raw_info is None:
+        warn(NOINFO_WARNING)
+
+    sfreq = _set_sfreq(ft_struct)
+    ch_names = ft_struct['label']
+    if raw_info:
+        info = raw_info.copy()
+        missing_channels = set(ch_names) - set(info['ch_names'])
+        if missing_channels:
+            warn('The following channels are present in the FieldTrip data '
+                 'but cannot be found in the provided info: %s.\n'
+                 'These channels will be removed from the resulting data!'
+                 % (str(missing_channels), ))
+
+            missing_chan_idx = [ch_names.index(ch) for ch in missing_channels]
+            new_chs = [ch for ch in ch_names if ch not in missing_channels]
+            ch_names = new_chs
+            ft_struct['label'] = ch_names
+
+            if 'trial' in ft_struct:
+                ft_struct['trial'] = _remove_missing_channels_from_trial(
+                    ft_struct['trial'],
+                    missing_chan_idx
+                )
+
+            if 'avg' in ft_struct:
+                if ft_struct['avg'].ndim == 2:
+                    ft_struct['avg'] = np.delete(ft_struct['avg'],
+                                                 missing_chan_idx,
+                                                 axis=0)
+
+        with info._unlock():
+            info['sfreq'] = sfreq
+        ch_idx = [info['ch_names'].index(ch) for ch in ch_names]
+        pick_info(info, ch_idx, copy=False)
+    else:
+        info = create_info(ch_names, sfreq)
+        chs, dig = _create_info_chs_dig(ft_struct)
+        with info._unlock(update_redundant=True):
+            info.update(chs=chs, dig=dig)
+
+    return info
+
+
+def _remove_missing_channels_from_trial(trial, missing_chan_idx):
+    if isinstance(trial, list):
+        for idx_trial in range(len(trial)):
+            trial[idx_trial] = _remove_missing_channels_from_trial(
+                trial[idx_trial], missing_chan_idx
+            )
+    elif isinstance(trial, np.ndarray):
+        if trial.ndim == 2:
+            trial = np.delete(trial,
+                              missing_chan_idx,
+                              axis=0)
+    else:
+        raise ValueError('"trial" field of the FieldTrip structure '
+                         'has an unknown format.')
+
+    return trial
+
+
+def _create_info_chs_dig(ft_struct):
+    """Create the chs info field from the FieldTrip structure."""
+    all_channels = ft_struct['label']
+    ch_defaults = dict(coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+                       cal=1.0,
+                       range=1.0,
+                       unit_mul=FIFF.FIFF_UNITM_NONE,
+                       loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]),
+                       unit=FIFF.FIFF_UNIT_V)
+    try:
+        elec = ft_struct['elec']
+    except KeyError:
+        elec = None
+
+    try:
+        grad = ft_struct['grad']
+    except KeyError:
+        grad = None
+
+    if elec is None and grad is None:
+        warn('The supplied FieldTrip structure does not have an elec or grad '
+             'field. No channel locations will be extracted and the kind of '
+             'channel might be inaccurate.')
+    if 'chanpos' not in (elec or grad or {'chanpos': None}):
+        raise RuntimeError(
+            'This file was created with an old version of FieldTrip. You can '
+            'convert the data to the new version by loading it into FieldTrip '
+            'and applying ft_selectdata with an empty cfg structure on it. '
+            'Otherwise you can supply the Info field.')
+
+    chs = list()
+    dig = list()
+    counter = 0
+    for idx_chan, cur_channel_label in enumerate(all_channels):
+        cur_ch = ch_defaults.copy()
+        cur_ch['ch_name'] = cur_channel_label
+        cur_ch['logno'] = idx_chan + 1
+        cur_ch['scanno'] = idx_chan + 1
+        if elec and cur_channel_label in elec['label']:
+            cur_ch = _process_channel_eeg(cur_ch, elec)
+            # Ref gets ident=0 and we don't have it, so start at 1
+            counter += 1
+            d = DigPoint(
+                r=cur_ch['loc'][:3], coord_frame=FIFF.FIFFV_COORD_HEAD,
+                kind=FIFF.FIFFV_POINT_EEG, ident=counter)
+            dig.append(d)
+        elif grad and cur_channel_label in grad['label']:
+            cur_ch = _process_channel_meg(cur_ch, grad)
+        else:
+            if cur_channel_label.startswith('EOG'):
+                cur_ch['kind'] = FIFF.FIFFV_EOG_CH
+                cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
+            elif cur_channel_label.startswith('ECG'):
+                cur_ch['kind'] = FIFF.FIFFV_ECG_CH
+                cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
+            elif cur_channel_label.startswith('STI'):
+                cur_ch['kind'] = FIFF.FIFFV_STIM_CH
+                cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
+            else:
+                warn('Cannot guess the correct type of channel %s. Making '
+                     'it a MISC channel.' % (cur_channel_label,))
+                cur_ch['kind'] = FIFF.FIFFV_MISC_CH
+                cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
+
+        chs.append(cur_ch)
+
+    return chs, dig
+
+
+def _set_sfreq(ft_struct):
+    """Set the sample frequency."""
+    try:
+        sfreq = ft_struct['fsample']
+    except KeyError:
+        try:
+            time = ft_struct['time']
+        except KeyError:
+            raise ValueError('No source for sfreq found')
+        else:
+            t1, t2 = float(time[0]), float(time[1])
+            sfreq = 1 / (t2 - t1)
+    try:
+        sfreq = float(sfreq)
+    except TypeError:
+        warn('FieldTrip structure contained multiple sample rates, trying the '
+             f'first of:\n{sfreq} Hz')
+        sfreq = float(sfreq.ravel()[0])
+    return sfreq
+
+
+def _set_tmin(ft_struct):
+    """Set the start time before the event in evoked data if possible."""
+    times = ft_struct['time']
+    time_check = all(times[i][0] == times[i - 1][0]
+                     for i, x in enumerate(times))
+    if time_check:
+        tmin = times[0][0]
+    else:
+        raise RuntimeError('Loading data with non-uniform '
+                           'times per epoch is not supported')
+    return tmin
+
+
+def _create_events(ft_struct, trialinfo_column):
+    """Create an event matrix from the FieldTrip structure."""
+    if 'trialinfo' not in ft_struct:
+        return None
+
+    event_type = ft_struct['trialinfo']
+    event_number = range(len(event_type))
+
+    if trialinfo_column < 0:
+        raise ValueError('trialinfo_column must be non-negative')
+
+    available_ti_cols = 1
+    if event_type.ndim == 2:
+        available_ti_cols = event_type.shape[1]
+
+    if trialinfo_column > (available_ti_cols - 1):
+        raise ValueError('trialinfo_column is higher than the number of '
+                         'columns in trialinfo.')
+
+    event_trans_val = np.zeros(len(event_type))
+
+    if event_type.ndim == 2:
+        event_type = event_type[:, trialinfo_column]
+
+    events = np.vstack([np.array(event_number), event_trans_val,
+                        event_type]).astype('int').T
+
+    return events
+
+
+def _create_event_metadata(ft_struct):
+    """Create event metadata from trialinfo."""
+    pandas = _check_pandas_installed(strict=False)
+    if not pandas:
+        warn('The Pandas library is not installed. Not returning the original '
+             'trialinfo matrix as metadata.')
+        return None
+
+    metadata = pandas.DataFrame(ft_struct['trialinfo'])
+
+    return metadata
+
+
+def _process_channel_eeg(cur_ch, elec):
+    """Convert EEG channel from FieldTrip to MNE.
+
+    Parameters
+    ----------
+    cur_ch: dict
+        Channel specific dictionary to populate.
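+        It arrives pre-filled with the ``ch_defaults`` values set up in
+        ``_create_info_chs_dig``.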
+ + elec: dict + elec dict as loaded from the FieldTrip structure + + Returns + ------- + cur_ch: dict + The original dict (cur_ch) with the added information + """ + all_labels = np.asanyarray(elec['label']) + chan_idx_in_elec = np.where(all_labels == cur_ch['ch_name'])[0][0] + position = np.squeeze(elec['chanpos'][chan_idx_in_elec, :]) + # chanunit = elec['chanunit'][chan_idx_in_elec] # not used/needed yet + position_unit = elec['unit'] + + position = position * _unit_dict[position_unit] + cur_ch['loc'] = np.hstack((position, np.zeros((9,)))) + cur_ch['unit'] = FIFF.FIFF_UNIT_V + cur_ch['kind'] = FIFF.FIFFV_EEG_CH + cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG + cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + + return cur_ch + + +def _process_channel_meg(cur_ch, grad): + """Convert MEG channel from FieldTrip to MNE. + + Parameters + ---------- + cur_ch: dict + Channel specific dictionary to populate. + + grad: dict + grad dict as loaded from the FieldTrip structure + + Returns + ------- + dict: The original dict (cur_ch) with the added information + """ + all_labels = np.asanyarray(grad['label']) + chan_idx_in_grad = np.where(all_labels == cur_ch['ch_name'])[0][0] + gradtype = grad['type'] + chantype = grad['chantype'][chan_idx_in_grad] + position_unit = grad['unit'] + position = np.squeeze(grad['chanpos'][chan_idx_in_grad, :]) + position = position * _unit_dict[position_unit] + + if gradtype == 'neuromag306' and 'tra' in grad and 'coilpos' in grad: + # Try to regenerate original channel pos. + idx_in_coilpos = np.where(grad['tra'][chan_idx_in_grad, :] != 0)[0] + cur_coilpos = grad['coilpos'][idx_in_coilpos, :] + cur_coilpos = cur_coilpos * _unit_dict[position_unit] + cur_coilori = grad['coilori'][idx_in_coilpos, :] + if chantype == 'megmag': + position = cur_coilpos[0] - 0.0003 * cur_coilori[0] + if chantype == 'megplanar': + tmp_pos = cur_coilpos - 0.0003 * cur_coilori + position = np.average(tmp_pos, axis=0) + + original_orientation = np.squeeze(grad['chanori'][chan_idx_in_grad, :]) + try: + orientation = rotation3d_align_z_axis(original_orientation).T + except AssertionError: + orientation = np.eye(3) + assert orientation.shape == (3, 3) + orientation = orientation.flatten() + # chanunit = grad['chanunit'][chan_idx_in_grad] # not used/needed yet + + cur_ch['loc'] = np.hstack((position, orientation)) + cur_ch['kind'] = FIFF.FIFFV_MEG_CH + if chantype == 'megmag': + cur_ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER + cur_ch['unit'] = FIFF.FIFF_UNIT_T + elif chantype == 'megplanar': + cur_ch['coil_type'] = FIFF.FIFFV_COIL_VV_PLANAR_T1 + cur_ch['unit'] = FIFF.FIFF_UNIT_T_M + elif chantype == 'refmag': + cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + cur_ch['unit'] = FIFF.FIFF_UNIT_T + elif chantype == 'refgrad': + cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + cur_ch['unit'] = FIFF.FIFF_UNIT_T + elif chantype == 'meggrad': + cur_ch['coil_type'] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM + cur_ch['unit'] = FIFF.FIFF_UNIT_T + else: + raise RuntimeError('Unexpected coil type: %s.' 
% ( + chantype,)) + + cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + + return cur_ch diff --git a/python/libs/mne/io/fiff/__init__.py b/python/libs/mne/io/fiff/__init__.py new file mode 100644 index 0000000..0df2dc2 --- /dev/null +++ b/python/libs/mne/io/fiff/__init__.py @@ -0,0 +1,4 @@ +"""FIF raw data reader.""" + +from .raw import Raw +from .raw import read_raw_fif diff --git a/python/libs/mne/io/fiff/raw.py b/python/libs/mne/io/fiff/raw.py new file mode 100644 index 0000000..05c53a8 --- /dev/null +++ b/python/libs/mne/io/fiff/raw.py @@ -0,0 +1,484 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Martin Luessi +# Denis Engemann +# Teon Brooks +# +# License: BSD-3-Clause + +import copy +import os +import os.path as op + +import numpy as np + +from ..constants import FIFF +from ..open import fiff_open, _fiff_get_fid, _get_next_fname +from ..meas_info import read_meas_info +from ..tree import dir_tree_find +from ..tag import read_tag, read_tag_info +from ..base import (BaseRaw, _RawShell, _check_raw_compatibility, + _check_maxshield) +from ..utils import _mult_cal_one + +from ...annotations import Annotations, _read_annotations_fif + +from ...event import AcqParserFIF +from ...utils import (check_fname, logger, verbose, warn, fill_doc, _file_like, + _on_missing, _check_fname) + + +@fill_doc +class Raw(BaseRaw): + """Raw data in FIF format. + + Parameters + ---------- + fname : str | file-like + The raw filename to load. For files that have automatically been split, + the split part will be automatically loaded. Filenames not ending with + ``raw.fif``, ``raw_sss.fif``, ``raw_tsss.fif``, ``_meg.fif``, + ``_eeg.fif``, or ``_ieeg.fif`` (with or without an optional additional + ``.gz`` extension) will generate a warning. If a file-like object is + provided, preloading must be used. + + .. versionchanged:: 0.18 + Support for file-like objects. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(preload)s + %(on_split_missing)s + %(verbose)s + + Attributes + ---------- + %(info_not_none)s + ch_names : list of string + List of channels' names. + n_times : int + Total number of time points in the raw file. + times : ndarray + Time vector in seconds. Starts from 0, independently of `first_samp` + value. Time interval between consecutive time samples is equal to the + inverse of the sampling frequency. + preload : bool + Indicates whether raw data are in memory. + %(verbose)s + """ + + @verbose + def __init__(self, fname, allow_maxshield=False, preload=False, + on_split_missing='raise', verbose=None): # noqa: D102 + raws = [] + do_check_ext = not _file_like(fname) + next_fname = fname + while next_fname is not None: + raw, next_fname, buffer_size_sec = \ + self._read_raw_file(next_fname, allow_maxshield, + preload, do_check_ext) + do_check_ext = False + raws.append(raw) + if next_fname is not None: + if not op.exists(next_fname): + msg = ( + f'Split raw file detected but next file {next_fname} ' + 'does not exist. 
Ensure all files were transferred ' + 'properly and that split and original files were not ' + 'manually renamed on disk (split files should be ' + 'renamed by loading and re-saving with MNE-Python to ' + 'preserve proper filename linkage).') + _on_missing(on_split_missing, msg, name='on_split_missing') + break + if _file_like(fname): + # avoid serialization error when copying file-like + fname = None # noqa + + _check_raw_compatibility(raws) + super(Raw, self).__init__( + copy.deepcopy(raws[0].info), False, + [r.first_samp for r in raws], [r.last_samp for r in raws], + [r.filename for r in raws], [r._raw_extras for r in raws], + raws[0].orig_format, None, buffer_size_sec=buffer_size_sec, + verbose=verbose) + + # combine annotations + self.set_annotations(raws[0].annotations, emit_warning=False) + + # Add annotations for in-data skips + for extra in self._raw_extras: + mask = [ent is None for ent in extra['ent']] + start = extra['bounds'][:-1][mask] + stop = extra['bounds'][1:][mask] - 1 + duration = (stop - start + 1.) / self.info['sfreq'] + annot = Annotations(onset=(start / self.info['sfreq']), + duration=duration, + description='BAD_ACQ_SKIP', + orig_time=self.info['meas_date']) + + self._annotations += annot + + if preload: + self._preload_data(preload) + else: + self.preload = False + # If using a file-like object, fix the filenames to be representative + # strings now instead of the file-like objects + self._filenames = [_get_fname_rep(fname) for fname in self._filenames] + + @verbose + def _read_raw_file(self, fname, allow_maxshield, preload, + do_check_ext=True, verbose=None): + """Read in header information from a raw file.""" + logger.info('Opening raw data file %s...' % fname) + + # Read in the whole file if preload is on and .fif.gz (saves time) + if not _file_like(fname): + if do_check_ext: + endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', + '_meg.fif', '_eeg.fif', '_ieeg.fif') + endings += tuple([f'{e}.gz' for e in endings]) + check_fname(fname, 'raw', endings) + # filename + fname = _check_fname(fname, 'read', True, 'fname') + ext = os.path.splitext(fname)[1].lower() + whole_file = preload if '.gz' in ext else False + del ext + else: + # file-like + if not preload: + raise ValueError('preload must be used with file-like objects') + whole_file = True + fname_rep = _get_fname_rep(fname) + ff, tree, _ = fiff_open(fname, preload=whole_file) + with ff as fid: + # Read the measurement info + + info, meas = read_meas_info(fid, tree, clean_bads=True) + annotations = _read_annotations_fif(fid, tree) + + # Locate the data of interest + raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA) + if len(raw_node) == 0: + raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA) + if (len(raw_node) == 0): + raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA) + if (len(raw_node) == 0): + raise ValueError('No raw data in %s' % fname_rep) + _check_maxshield(allow_maxshield) + with info._unlock(): + info['maxshield'] = True + del meas + + if len(raw_node) == 1: + raw_node = raw_node[0] + + # Process the directory + directory = raw_node['directory'] + nent = raw_node['nent'] + nchan = int(info['nchan']) + first = 0 + first_samp = 0 + first_skip = 0 + + # Get first sample tag if it is there + if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE: + tag = read_tag(fid, directory[first].pos) + first_samp = int(tag.data) + first += 1 + _check_entry(first, nent) + + # Omit initial skip + if directory[first].kind == FIFF.FIFF_DATA_SKIP: + # This first skip can be applied only after we know the 
bufsize
+                tag = read_tag(fid, directory[first].pos)
+                first_skip = int(tag.data)
+                first += 1
+                _check_entry(first, nent)
+
+            raw = _RawShell()
+            raw.filename = fname
+            raw.first_samp = first_samp
+            if info['meas_date'] is None and annotations is not None:
+                # we need to adjust annotations.onset, because when there is
+                # no meas date set_annotations assumes that the origin of time
+                # is the first available sample (it ignores first_samp)
+                annotations.onset -= first_samp / info['sfreq']
+            raw.set_annotations(annotations)
+
+            # Go through the remaining tags in the directory
+            raw_extras = list()
+            nskip = 0
+            orig_format = None
+
+            for k in range(first, nent):
+                ent = directory[k]
+                # There can be skips in the data (e.g., if the user unclicked
+                # and re-clicked the button)
+                if ent.kind == FIFF.FIFF_DATA_SKIP:
+                    tag = read_tag(fid, ent.pos)
+                    nskip = int(tag.data)
+                elif ent.kind == FIFF.FIFF_DATA_BUFFER:
+                    # Figure out the number of samples in this buffer
+                    if ent.type == FIFF.FIFFT_DAU_PACK16:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_SHORT:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_FLOAT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_DOUBLE:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_INT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                        nsamp = ent.size // (16 * nchan)
+                    else:
+                        raise ValueError('Cannot handle data buffers of type '
+                                         '%d' % ent.type)
+                    if orig_format is None:
+                        if ent.type == FIFF.FIFFT_DAU_PACK16:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_SHORT:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_DOUBLE:
+                            orig_format = 'double'
+                        elif ent.type == FIFF.FIFFT_INT:
+                            orig_format = 'int'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                            orig_format = 'double'
+
+                    # Do we have an initial skip pending?
+                    if first_skip > 0:
+                        first_samp += nsamp * first_skip
+                        raw.first_samp = first_samp
+                        first_skip = 0
+
+                    # Do we have a skip pending?
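+                    # (the skip is recorded below as a single gap entry of
+                    # nskip * nsamp samples with ent=None, i.e. no file data)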
+ if nskip > 0: + raw_extras.append(dict( + ent=None, first=first_samp, nsamp=nskip * nsamp, + last=first_samp + nskip * nsamp - 1)) + first_samp += nskip * nsamp + nskip = 0 + + # Add a data buffer + raw_extras.append(dict(ent=ent, first=first_samp, + last=first_samp + nsamp - 1, + nsamp=nsamp)) + first_samp += nsamp + + next_fname = _get_next_fname(fid, fname_rep, tree) + + # reformat raw_extras to be a dict of list/ndarray rather than + # list of dict (faster access) + raw_extras = {key: [r[key] for r in raw_extras] + for key in raw_extras[0]} + for key in raw_extras: + if key != 'ent': # dict or None + raw_extras[key] = np.array(raw_extras[key], int) + if not np.array_equal(raw_extras['last'][:-1], + raw_extras['first'][1:] - 1): + raise RuntimeError('FIF file appears to be broken') + bounds = np.cumsum(np.concatenate( + [raw_extras['first'][:1], raw_extras['nsamp']])) + raw_extras['bounds'] = bounds + assert len(raw_extras['bounds']) == len(raw_extras['ent']) + 1 + # store the original buffer size + buffer_size_sec = np.median(raw_extras['nsamp']) / info['sfreq'] + del raw_extras['first'] + del raw_extras['last'] + del raw_extras['nsamp'] + + raw.last_samp = first_samp - 1 + raw.orig_format = orig_format + + # Add the calibration factors + cals = np.zeros(info['nchan']) + for k in range(info['nchan']): + cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] + + raw._cals = cals + raw._raw_extras = raw_extras + logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( + raw.first_samp, raw.last_samp, + float(raw.first_samp) / info['sfreq'], + float(raw.last_samp) / info['sfreq'])) + + raw.info = info + + logger.info('Ready.') + + return raw, next_fname, buffer_size_sec + + @property + def _dtype(self): + """Get the dtype to use to store data from disk.""" + if self._dtype_ is not None: + return self._dtype_ + dtype = None + for raw_extra, filename in zip(self._raw_extras, self._filenames): + for ent in raw_extra['ent']: + if ent is not None: + with _fiff_get_fid(filename) as fid: + fid.seek(ent.pos, 0) + tag = read_tag_info(fid) + if tag is not None: + if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT, + FIFF.FIFFT_COMPLEX_DOUBLE): + dtype = np.complex128 + else: + dtype = np.float64 + if dtype is not None: + break + if dtype is not None: + break + if dtype is None: + raise RuntimeError('bug in reading') + self._dtype_ = dtype + return dtype + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + n_bad = 0 + with _fiff_get_fid(self._filenames[fi]) as fid: + bounds = self._raw_extras[fi]['bounds'] + ents = self._raw_extras[fi]['ent'] + nchan = self._raw_extras[fi]['orig_nchan'] + use = (stop > bounds[:-1]) & (start < bounds[1:]) + offset = 0 + for ei in np.where(use)[0]: + first = bounds[ei] + last = bounds[ei + 1] + nsamp = last - first + ent = ents[ei] + first_pick = max(start - first, 0) + last_pick = min(nsamp, stop - first) + picksamp = last_pick - first_pick + # only read data if it exists + if ent is not None: + one = read_tag(fid, ent.pos, + shape=(nsamp, nchan), + rlims=(first_pick, last_pick)).data + try: + one.shape = (picksamp, nchan) + except AttributeError: # one is None + n_bad += picksamp + else: + _mult_cal_one(data[:, offset:(offset + picksamp)], + one.T, idx, cals, mult) + offset += picksamp + if n_bad: + warn(f'FIF raw buffer could not be read, acquisition error ' + f'likely: {n_bad} samples set to zero') + assert offset == stop - start + + def fix_mag_coil_types(self): + """Fix Elekta magnetometer coil 
types. + + Returns + ------- + raw : instance of Raw + The raw object. Operates in place. + + Notes + ----- + This function changes magnetometer coil types 3022 (T1: SQ20483N) and + 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition + records in the info structure. + + Neuromag Vectorview systems can contain magnetometers with two + different coil sizes (3022 and 3023 vs. 3024). The systems + incorporating coils of type 3024 were introduced last and are used at + the majority of MEG sites. At some sites with 3024 magnetometers, + the data files have still defined the magnetometers to be of type + 3022 to ensure compatibility with older versions of Neuromag software. + In the MNE software as well as in the present version of Neuromag + software coil type 3024 is fully supported. Therefore, it is now safe + to upgrade the data files to use the true coil type. + + .. note:: The effect of the difference between the coil sizes on the + current estimates computed by the MNE software is very small. + Therefore the use of mne_fix_mag_coil_types is not mandatory. + """ + from ...channels import fix_mag_coil_types + fix_mag_coil_types(self.info) + return self + + @property + def acqparser(self): + """The AcqParserFIF for the measurement info. + + See Also + -------- + mne.AcqParserFIF + """ + if getattr(self, '_acqparser', None) is None: + self._acqparser = AcqParserFIF(self.info) + return self._acqparser + + +def _get_fname_rep(fname): + if not _file_like(fname): + return fname + else: + return 'File-like' + + +def _check_entry(first, nent): + """Sanity check entries.""" + if first >= nent: + raise IOError('Could not read data, perhaps this is a corrupt file') + + +@fill_doc +def read_raw_fif(fname, allow_maxshield=False, preload=False, + on_split_missing='raise', verbose=None): + """Reader function for Raw FIF data. + + Parameters + ---------- + fname : str | file-like + The raw filename to load. For files that have automatically been split, + the split part will be automatically loaded. Filenames should end + with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif, + raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided, + preloading must be used. + + .. versionchanged:: 0.18 + Support for file-like objects. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(preload)s + %(on_split_missing)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + A Raw object containing FIF data. + + Notes + ----- + .. versionadded:: 0.9.0 + + When reading a FIF file, note that the first N seconds annotated + ``BAD_ACQ_SKIP`` are **skipped**. They are removed from ``raw.times`` and + ``raw.n_times`` parameters but ``raw.first_samp`` and ``raw.first_time`` + are updated accordingly. 
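+
+    A minimal usage sketch (``'sample_raw.fif'`` stands in for a real file
+    path)::
+
+        raw = read_raw_fif('sample_raw.fif', preload=True)
+        data, times = raw[:, :]  # calibrated data and matching time vector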
+ """ + return Raw(fname=fname, allow_maxshield=allow_maxshield, + preload=preload, verbose=verbose, + on_split_missing=on_split_missing) diff --git a/python/libs/mne/io/fiff/tests/__init__.py b/python/libs/mne/io/fiff/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/fiff/tests/test_raw_fiff.py b/python/libs/mne/io/fiff/tests/test_raw_fiff.py new file mode 100644 index 0000000..9655f2a --- /dev/null +++ b/python/libs/mne/io/fiff/tests/test_raw_fiff.py @@ -0,0 +1,1816 @@ +# -*- coding: utf-8 -*- +# Author: Alexandre Gramfort +# Denis Engemann +# +# License: BSD-3-Clause + +from copy import deepcopy +from pathlib import Path +from functools import partial +from io import BytesIO +import os +import os.path as op +import pathlib +import pickle +import shutil +import sys + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose) +import pytest + +from mne.datasets import testing +from mne.filter import filter_data +from mne.io.constants import FIFF +from mne.io import RawArray, concatenate_raws, read_raw_fif, base +from mne.io.open import read_tag, read_tag_info +from mne.io.tag import _read_tag_header +from mne.io.tests.test_raw import _test_concat, _test_raw_reader +from mne import (concatenate_events, find_events, equalize_channels, + compute_proj_raw, pick_types, pick_channels, create_info, + pick_info) +from mne.utils import (requires_pandas, assert_object_equal, _dt_to_stamp, + requires_mne, run_subprocess, _record_warnings, + assert_and_remove_boundary_annot) +from mne.annotations import Annotations + +testing_path = testing.data_path(download=False) +data_dir = op.join(testing_path, 'MEG', 'sample') +fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif') +ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif') +skip_fname = op.join(testing_path, 'misc', 'intervalrecording_raw.fif') + +base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data') +test_fif_fname = op.join(base_dir, 'test_raw.fif') +test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz') +ctf_fname = op.join(base_dir, 'test_ctf_raw.fif') +ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif') +fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif') +bad_file_works = op.join(base_dir, 'test_bads.txt') +bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt') +hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt') +hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') + + +@testing.requires_testing_data +def test_acq_skip(tmp_path): + """Test treatment of acquisition skips.""" + raw = read_raw_fif(skip_fname, preload=True) + picks = [1, 2, 10] + assert len(raw.times) == 17000 + annotations = raw.annotations + assert len(annotations) == 3 # there are 3 skips + assert_allclose(annotations.onset, [14, 19, 23]) + assert_allclose(annotations.duration, [2., 2., 3.]) # inclusive! 
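+    # with reject_by_annotation='omit' the skipped stretches must be cut out
+    # and only the four kept segments returned, concatenated in order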
+    data, times = raw.get_data(
+        picks, reject_by_annotation='omit', return_times=True)
+    expected_data, expected_times = zip(raw[picks, :2000],
+                                        raw[picks, 4000:7000],
+                                        raw[picks, 9000:11000],
+                                        raw[picks, 14000:17000])
+    expected_times = np.concatenate(list(expected_times), axis=-1)
+    assert_allclose(times, expected_times)
+    expected_data = list(expected_data)
+    assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)
+
+    # Check that acquisition skips are handled properly in filtering
+    kwargs = dict(l_freq=None, h_freq=50., fir_design='firwin')
+    raw_filt = raw.copy().filter(picks=picks, **kwargs)
+    for data in expected_data:
+        filter_data(data, raw.info['sfreq'], copy=False, **kwargs)
+    data = raw_filt.get_data(picks, reject_by_annotation='omit')
+    assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)
+
+    # Check that acquisition skips are handled properly during I/O
+    fname = tmp_path / 'test_raw.fif'
+    raw.save(fname, fmt=raw.orig_format)
+    # First: the file size should not increase much (the original data is
+    # missing 7 of 17 buffers, so writing them out would increase the file
+    # size quite a bit).
+    orig_size = op.getsize(skip_fname)
+    new_size = op.getsize(fname)
+    max_size = int(1.05 * orig_size)  # almost the same + annotations
+    assert new_size < max_size, (new_size, max_size)
+    raw_read = read_raw_fif(fname)
+    assert raw_read.annotations is not None
+    assert_allclose(raw.times, raw_read.times)
+    assert_allclose(raw_read[:][0], raw[:][0], atol=1e-17)
+    # Saving with a bad buffer length emits warning
+    raw.pick_channels(raw.ch_names[:2])
+    with _record_warnings() as w:
+        raw.save(fname, buffer_size_sec=0.5, overwrite=True)
+    assert len(w) == 0
+    with pytest.warns(RuntimeWarning, match='did not fit evenly'):
+        raw.save(fname, buffer_size_sec=2., overwrite=True)
+
+
+def test_fix_types():
+    """Test fixing of channel types."""
+    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
+                          (ctf_fname, False)):
+        raw = read_raw_fif(fname)
+        mag_picks = pick_types(raw.info, meg='mag')
+        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
+        # we don't actually have any files suffering from this problem, so
+        # fake it
+        if change:
+            for ii in mag_picks:
+                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
+        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        raw.fix_mag_coil_types()
+        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        if not change:
+            assert_array_equal(orig_types, new_types)
+        else:
+            assert_array_equal(orig_types[other_picks],
+                               new_types[other_picks])
+            assert ((orig_types[mag_picks] != new_types[mag_picks]).all())
+            assert ((new_types[mag_picks] ==
+                     FIFF.FIFFV_COIL_VV_MAG_T3).all())
+
+
+def test_concat(tmp_path):
+    """Test RawFIF concatenation."""
+    # we trim the file to save lots of memory and some time
+    raw = read_raw_fif(test_fif_fname)
+    raw.crop(0, 2.)
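+    # save the cropped copy to disk and run the shared concatenation test
+    # from test_raw.py on it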
+ test_name = tmp_path / 'test_raw.fif' + raw.save(test_name) + # now run the standard test + _test_concat(partial(read_raw_fif), test_name) + + +@testing.requires_testing_data +def test_hash_raw(): + """Test hashing raw objects.""" + raw = read_raw_fif(fif_fname) + pytest.raises(RuntimeError, raw.__hash__) + raw = read_raw_fif(fif_fname).crop(0, 0.5) + raw_size = raw._size + raw.load_data() + raw_load_size = raw._size + assert (raw_size < raw_load_size) + raw_2 = read_raw_fif(fif_fname).crop(0, 0.5) + raw_2.load_data() + assert hash(raw) == hash(raw_2) + # do NOT use assert_equal here, failing output is terrible + assert pickle.dumps(raw) == pickle.dumps(raw_2) + + raw_2._data[0, 0] -= 1 + assert hash(raw) != hash(raw_2) + + +@testing.requires_testing_data +def test_maxshield(): + """Test maxshield warning.""" + with pytest.warns(RuntimeWarning, match='Internal Active Shielding') as w: + read_raw_fif(ms_fname, allow_maxshield=True) + assert ('test_raw_fiff.py' in w[0].filename) + + +@testing.requires_testing_data +def test_subject_info(tmp_path): + """Test reading subject information.""" + raw = read_raw_fif(fif_fname).crop(0, 1) + assert (raw.info['subject_info'] is None) + # fake some subject data + keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex', + 'hand'] + vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1] + subject_info = dict() + for key, val in zip(keys, vals): + subject_info[key] = val + raw.info['subject_info'] = subject_info + out_fname = tmp_path / 'test_subj_info_raw.fif' + raw.save(out_fname, overwrite=True) + raw_read = read_raw_fif(out_fname) + for key in keys: + assert subject_info[key] == raw_read.info['subject_info'][key] + assert raw.info['meas_date'] == raw_read.info['meas_date'] + + for key in ['secs', 'usecs', 'version']: + assert raw.info['meas_id'][key] == raw_read.info['meas_id'][key] + assert_array_equal(raw.info['meas_id']['machid'], + raw_read.info['meas_id']['machid']) + + +@testing.requires_testing_data +def test_copy_append(): + """Test raw copying and appending combinations.""" + raw = read_raw_fif(fif_fname, preload=True).copy() + raw_full = read_raw_fif(fif_fname) + raw_full.append(raw) + data = raw_full[:, :][0] + assert data.shape[1] == 2 * raw._data.shape[1] + + +@testing.requires_testing_data +def test_output_formats(tmp_path): + """Test saving and loading raw data using multiple formats.""" + formats = ['short', 'int', 'single', 'double'] + tols = [1e-4, 1e-7, 1e-7, 1e-15] + + # let's fake a raw file with different formats + raw = read_raw_fif(test_fif_fname).crop(0, 1) + + temp_file = tmp_path / 'raw.fif' + for ii, (fmt, tol) in enumerate(zip(formats, tols)): + # Let's test the overwriting error throwing while we're at it + if ii > 0: + pytest.raises(IOError, raw.save, temp_file, fmt=fmt) + raw.save(temp_file, fmt=fmt, overwrite=True) + raw2 = read_raw_fif(temp_file) + raw2_data = raw2[:, :][0] + assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25) + assert raw2.orig_format == fmt + + +def _compare_combo(raw, new, times, n_times): + """Compare data.""" + for ti in times: # let's do a subset of points for speed + orig = raw[:, ti % n_times][0] + # these are almost_equals because of possible dtype differences + assert_allclose(orig, new[:, ti][0]) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_multiple_files(tmp_path): + """Test loading multiple files simultaneously.""" + # split file + raw = read_raw_fif(fif_fname).crop(0, 10) + raw.load_data() + raw.load_data() # test no operation + 
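+    # compute per-part [tmin, tmax] boundaries (in seconds) so the recording
+    # can be saved below as several ~3 s files and re-concatenated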
split_size = 3. # in seconds + sfreq = raw.info['sfreq'] + nsamp = (raw.last_samp - raw.first_samp) + tmins = np.round(np.arange(0., nsamp, split_size * sfreq)) + tmaxs = np.concatenate((tmins[1:] - 1, [nsamp])) + tmaxs /= sfreq + tmins /= sfreq + assert raw.n_times == len(raw.times) + + # going in reverse order so the last fname is the first file (need later) + raws = [None] * len(tmins) + for ri in range(len(tmins) - 1, -1, -1): + fname = tmp_path / ('test_raw_split-%d_raw.fif' % ri) + raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri]) + raws[ri] = read_raw_fif(fname) + assert (len(raws[ri].times) == + int(round((tmaxs[ri] - tmins[ri]) * + raw.info['sfreq'])) + 1) # + 1 b/c inclusive + events = [find_events(r, stim_channel='STI 014') for r in raws] + last_samps = [r.last_samp for r in raws] + first_samps = [r.first_samp for r in raws] + + # test concatenation of split file + pytest.raises(ValueError, concatenate_raws, raws, True, events[1:]) + all_raw_1, events1 = concatenate_raws(raws, preload=False, + events_list=events) + assert_allclose(all_raw_1.times, raw.times) + assert raw.first_samp == all_raw_1.first_samp + assert raw.last_samp == all_raw_1.last_samp + assert_allclose(raw[:, :][0], all_raw_1[:, :][0]) + raws[0] = read_raw_fif(fname) + all_raw_2 = concatenate_raws(raws, preload=True) + assert_allclose(raw[:, :][0], all_raw_2[:, :][0]) + + # test proper event treatment for split files + events2 = concatenate_events(events, first_samps, last_samps) + events3 = find_events(all_raw_2, stim_channel='STI 014') + assert_array_equal(events1, events2) + assert_array_equal(events1, events3) + + # test various methods of combining files + raw = read_raw_fif(fif_fname, preload=True) + n_times = raw.n_times + # make sure that all our data match + times = list(range(0, 2 * n_times, 999)) + # add potentially problematic points + times.extend([n_times - 1, n_times, 2 * n_times - 1]) + + raw_combo0 = concatenate_raws([read_raw_fif(f) + for f in [fif_fname, fif_fname]], + preload=True) + _compare_combo(raw, raw_combo0, times, n_times) + raw_combo = concatenate_raws([read_raw_fif(f) + for f in [fif_fname, fif_fname]], + preload=False) + _compare_combo(raw, raw_combo, times, n_times) + raw_combo = concatenate_raws([read_raw_fif(f) + for f in [fif_fname, fif_fname]], + preload='memmap8.dat') + _compare_combo(raw, raw_combo, times, n_times) + assert raw[:, :][0].shape[1] * 2 == raw_combo0[:, :][0].shape[1] + assert raw_combo0[:, :][0].shape[1] == raw_combo0.n_times + + # with all data preloaded, result should be preloaded + raw_combo = read_raw_fif(fif_fname, preload=True) + raw_combo.append(read_raw_fif(fif_fname, preload=True)) + assert (raw_combo.preload is True) + assert raw_combo.n_times == raw_combo._data.shape[1] + _compare_combo(raw, raw_combo, times, n_times) + + # with any data not preloaded, don't set result as preloaded + raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True), + read_raw_fif(fif_fname, preload=False)]) + assert (raw_combo.preload is False) + assert_array_equal(find_events(raw_combo, stim_channel='STI 014'), + find_events(raw_combo0, stim_channel='STI 014')) + _compare_combo(raw, raw_combo, times, n_times) + + # user should be able to force data to be preloaded upon concat + raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False), + read_raw_fif(fif_fname, preload=True)], + preload=True) + assert (raw_combo.preload is True) + _compare_combo(raw, raw_combo, times, n_times) + + raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False), + 
read_raw_fif(fif_fname, preload=True)], + preload='memmap3.dat') + _compare_combo(raw, raw_combo, times, n_times) + + raw_combo = concatenate_raws([ + read_raw_fif(fif_fname, preload=True), + read_raw_fif(fif_fname, preload=True)], preload='memmap4.dat') + _compare_combo(raw, raw_combo, times, n_times) + + raw_combo = concatenate_raws([ + read_raw_fif(fif_fname, preload=False), + read_raw_fif(fif_fname, preload=False)], preload='memmap5.dat') + _compare_combo(raw, raw_combo, times, n_times) + + # verify that combining raws with different projectors throws an exception + raw.add_proj([], remove_existing=True) + pytest.raises(ValueError, raw.append, + read_raw_fif(fif_fname, preload=True)) + + # now test event treatment for concatenated raw files + events = [find_events(raw, stim_channel='STI 014'), + find_events(raw, stim_channel='STI 014')] + last_samps = [raw.last_samp, raw.last_samp] + first_samps = [raw.first_samp, raw.first_samp] + events = concatenate_events(events, first_samps, last_samps) + events2 = find_events(raw_combo0, stim_channel='STI 014') + assert_array_equal(events, events2) + + # check out the len method + assert len(raw) == raw.n_times + assert len(raw) == raw.last_samp - raw.first_samp + 1 + + +@testing.requires_testing_data +@pytest.mark.parametrize('on_mismatch', ('ignore', 'warn', 'raise')) +def test_concatenate_raws(on_mismatch): + """Test error handling during raw concatenation.""" + raw = read_raw_fif(fif_fname).crop(0, 10) + raws = [raw, raw.copy()] + raws[1].info['dev_head_t']['trans'] += 0.1 + kws = dict(raws=raws, on_mismatch=on_mismatch) + + if on_mismatch == 'ignore': + concatenate_raws(**kws) + elif on_mismatch == 'warn': + with pytest.warns(RuntimeWarning, match='different head positions'): + concatenate_raws(**kws) + elif on_mismatch == 'raise': + with pytest.raises(ValueError, match='different head positions'): + concatenate_raws(**kws) + + +@testing.requires_testing_data +@pytest.mark.parametrize('mod', ( + 'meg', + pytest.param('raw', marks=[ + pytest.mark.filterwarnings( + 'ignore:.*naming conventions.*:RuntimeWarning'), + pytest.mark.slowtest]), +)) +def test_split_files(tmp_path, mod, monkeypatch): + """Test writing and reading of split raw files.""" + raw_1 = read_raw_fif(fif_fname, preload=True) + # Test a very close corner case + + assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2) # samp rate + split_fname = tmp_path / f'split_raw_{mod}.fif' + # intended filenames + split_fname_elekta_part2 = tmp_path / f'split_raw_{mod}-1.fif' + split_fname_bids_part1 = tmp_path / f'split_raw_split-01_{mod}.fif' + split_fname_bids_part2 = tmp_path / f'split_raw_split-02_{mod}.fif' + raw_1.set_annotations(Annotations([2.], [5.5], 'test')) + + # Check that if BIDS is used and no split is needed it defaults to + # simple writing without _split- entity. 
+ raw_1.save(split_fname, split_naming='bids', verbose=True) + assert op.isfile(split_fname) + assert not op.isfile(split_fname_bids_part1) + for split_naming in ('neuromag', 'bids'): + with pytest.raises(FileExistsError, match='Destination file'): + raw_1.save(split_fname, split_naming=split_naming, verbose=True) + os.remove(split_fname) + with open(split_fname_bids_part1, 'w'): + pass + with pytest.raises(FileExistsError, match='Destination file'): + raw_1.save(split_fname, split_naming='bids', verbose=True) + assert not op.isfile(split_fname) + raw_1.save(split_fname, split_naming='neuromag', verbose=True) # okay + os.remove(split_fname) + os.remove(split_fname_bids_part1) + + raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB', + verbose=True) + + # check that the filenames match the intended pattern + assert op.isfile(split_fname) + assert op.isfile(split_fname_elekta_part2) + # check that filenames are being formatted correctly for BIDS + raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB', + split_naming='bids', overwrite=True, verbose=True) + assert op.isfile(split_fname_bids_part1) + assert op.isfile(split_fname_bids_part2) + + annot = Annotations(np.arange(20), np.ones((20,)), 'test') + raw_1.set_annotations(annot) + split_fname = op.join(tmp_path, 'split_raw.fif') + raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB') + raw_2 = read_raw_fif(split_fname) + assert_allclose(raw_2.buffer_size_sec, 1., atol=1e-2) # samp rate + assert_allclose(raw_1.annotations.onset, raw_2.annotations.onset) + assert_allclose(raw_1.annotations.duration, raw_2.annotations.duration, + rtol=0.001 / raw_2.info['sfreq']) + assert_array_equal(raw_1.annotations.description, + raw_2.annotations.description) + + data_1, times_1 = raw_1[:, :] + data_2, times_2 = raw_2[:, :] + assert_array_equal(data_1, data_2) + assert_array_equal(times_1, times_2) + + raw_bids = read_raw_fif(split_fname_bids_part1) + data_bids, times_bids = raw_bids[:, :] + assert_array_equal(data_1, data_bids) + assert_array_equal(times_1, times_bids) + del raw_bids + # split missing behaviors + os.remove(split_fname_bids_part2) + with pytest.raises(ValueError, match='manually renamed'): + read_raw_fif(split_fname_bids_part1, on_split_missing='raise') + with pytest.warns(RuntimeWarning, match='Split raw file detected'): + read_raw_fif(split_fname_bids_part1, on_split_missing='warn') + read_raw_fif(split_fname_bids_part1, on_split_missing='ignore') + + # test the case where we only end up with one buffer to write + # (GH#3210). These tests rely on writing meas info and annotations + # taking up a certain number of bytes, so if we change those functions + # somehow, the numbers below for e.g. split_size might need to be + # adjusted. 
+ raw_crop = raw_1.copy().crop(0, 5) + raw_crop.set_annotations(Annotations([2.], [5.5], 'test'), + emit_warning=False) + with pytest.raises(ValueError, + match='after writing measurement information'): + raw_crop.save(split_fname, split_size='1MB', # too small a size + buffer_size_sec=1., overwrite=True) + with pytest.raises(ValueError, + match='too large for the given split size'): + raw_crop.save(split_fname, + split_size=3003000, # still too small, now after Info + buffer_size_sec=1., overwrite=True) + # just barely big enough here; the right size to write exactly one buffer + # at a time so we hit GH#3210 if we aren't careful + raw_crop.save(split_fname, split_size='4.5MB', + buffer_size_sec=1., overwrite=True) + raw_read = read_raw_fif(split_fname) + assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20) + + # Check our buffer arithmetic + + # 1 buffer required + raw_crop = raw_1.copy().crop(0, 1) + raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True) + raw_read = read_raw_fif(split_fname) + assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (301,)) + assert_allclose(raw_crop[:][0], raw_read[:][0]) + # 2 buffers required + raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True) + raw_read = read_raw_fif(split_fname) + assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (151, 150)) + assert_allclose(raw_crop[:][0], raw_read[:][0]) + # 2 buffers required + raw_crop.save(split_fname, + buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'], + overwrite=True) + raw_read = read_raw_fif(split_fname) + assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (300, 1)) + assert_allclose(raw_crop[:][0], raw_read[:][0]) + raw_crop.save(split_fname, + buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'], + overwrite=True) + raw_read = read_raw_fif(split_fname) + assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (299, 2)) + assert_allclose(raw_crop[:][0], raw_read[:][0]) + + # proper ending + assert op.isdir(tmp_path) + with pytest.raises(ValueError, match='must end with an underscore'): + raw_crop.save( + tmp_path / 'test.fif', split_naming='bids', verbose='error') + + # reserved file is deleted + fname = tmp_path / 'test_raw.fif' + monkeypatch.setattr(base, '_write_raw_fid', _err) + with pytest.raises(RuntimeError, match='Killed mid-write'): + raw_1.save(fname, split_size='10MB', split_naming='bids') + assert op.isfile(fname) + assert not op.isfile(tmp_path / 'test_split-01_raw.fif') + + +def _err(*args, **kwargs): + raise RuntimeError('Killed mid-write') + + +def _no_write_file_name(fid, kind, data): + assert kind == FIFF.FIFF_REF_FILE_NAME # the only string we actually write + return + + +def test_split_numbers(tmp_path, monkeypatch): + """Test handling of split files using numbers instead of names.""" + monkeypatch.setattr(base, 'write_string', _no_write_file_name) + raw = read_raw_fif(test_fif_fname).pick('eeg') + # gh-8339 + dashes_fname = tmp_path / 'sub-1_ses-2_task-3_raw.fif' + raw.save(dashes_fname, split_size='5MB', + buffer_size_sec=1.) 
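+    # the FIFF_REF_FILE_NAME tag is suppressed in this test, so the reader
+    # must derive the '-1.fif' part name from the split number alone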
+ assert op.isfile(dashes_fname) + next_fname = str(dashes_fname)[:-4] + '-1.fif' + assert op.isfile(next_fname) + raw_read = read_raw_fif(dashes_fname) + assert_allclose(raw.times, raw_read.times) + assert_allclose(raw.get_data(), raw_read.get_data(), atol=1e-16) + + +def test_load_bad_channels(tmp_path): + """Test reading/writing of bad channels.""" + # Load correctly marked file (manually done in mne_process_raw) + raw_marked = read_raw_fif(fif_bad_marked_fname) + correct_bads = raw_marked.info['bads'] + raw = read_raw_fif(test_fif_fname) + # Make sure it starts clean + assert_array_equal(raw.info['bads'], []) + + # Test normal case + raw.load_bad_channels(bad_file_works) + # Write it out, read it in, and check + raw.save(tmp_path / 'foo_raw.fif') + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') + assert correct_bads == raw_new.info['bads'] + # Reset it + raw.info['bads'] = [] + + # Test bad case + pytest.raises(ValueError, raw.load_bad_channels, bad_file_wrong) + + # Test forcing the bad case + with pytest.warns(RuntimeWarning, match='1 bad channel'): + raw.load_bad_channels(bad_file_wrong, force=True) + + # write it out, read it in, and check + raw.save(tmp_path / 'foo_raw.fif', overwrite=True) + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') + assert correct_bads == raw_new.info['bads'] + + # Check that bad channels are cleared + raw.load_bad_channels(None) + raw.save(tmp_path / 'foo_raw.fif', overwrite=True) + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') + assert raw_new.info['bads'] == [] + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_io_raw(tmp_path): + """Test IO for raw data (Neuromag).""" + rng = np.random.RandomState(0) + # test unicode io + for chars in [u'äöé', 'a']: + with read_raw_fif(fif_fname) as r: + assert ('Raw' in repr(r)) + assert (op.basename(fif_fname) in repr(r)) + r.info['description'] = chars + temp_file = tmp_path / 'raw.fif' + r.save(temp_file, overwrite=True) + with read_raw_fif(temp_file) as r2: + desc2 = r2.info['description'] + assert desc2 == chars + + # Let's construct a simple test for IO first + raw = read_raw_fif(fif_fname).crop(0, 3.5) + raw.load_data() + # put in some data that we know the values of + data = rng.randn(raw._data.shape[0], raw._data.shape[1]) + raw._data[:, :] = data + # save it somewhere + fname = tmp_path / 'test_copy_raw.fif' + raw.save(fname, buffer_size_sec=1.0) + # read it in, make sure the whole thing matches + raw = read_raw_fif(fname) + assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20) + # let's read portions across the 1-sec tag boundary, too + inds = raw.time_as_index([1.75, 2.25]) + sl = slice(inds[0], inds[1]) + assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20) + + +@pytest.mark.parametrize('fname_in, fname_out', [ + (test_fif_fname, 'raw.fif'), + pytest.param(test_fif_gz_fname, 'raw.fif.gz', marks=pytest.mark.slowtest), + (ctf_fname, 'raw.fif')]) +def test_io_raw_additional(fname_in, fname_out, tmp_path): + """Test IO for raw data (Neuromag + CTF + gz).""" + fname_out = tmp_path / fname_out + raw = read_raw_fif(fname_in).crop(0, 2) + + nchan = raw.info['nchan'] + ch_names = raw.info['ch_names'] + meg_channels_idx = [k for k in range(nchan) + if ch_names[k][0] == 'M'] + n_channels = 100 + meg_channels_idx = meg_channels_idx[:n_channels] + start, stop = raw.time_as_index([0, 5], use_rounding=True) + data, times = raw[meg_channels_idx, start:(stop + 1)] + meg_ch_names = [ch_names[k] for k in meg_channels_idx] + + # Set up pick list: MEG + STI 014 - bad channels + 
include = ['STI 014'] + include += meg_ch_names + picks = pick_types(raw.info, meg=True, eeg=False, stim=True, + misc=True, ref_meg=True, include=include, + exclude='bads') + + # Writing with drop_small_buffer True + raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3, + drop_small_buffer=True, overwrite=True) + raw2 = read_raw_fif(fname_out) + + sel = pick_channels(raw2.ch_names, meg_ch_names) + data2, times2 = raw2[sel, :] + assert (times2.max() <= 3) + + # Writing + raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True) + + if fname_in in (fif_fname, fif_fname + '.gz'): + assert len(raw.info['dig']) == 146 + + raw2 = read_raw_fif(fname_out) + + sel = pick_channels(raw2.ch_names, meg_ch_names) + data2, times2 = raw2[sel, :] + + assert_allclose(data, data2, rtol=1e-6, atol=1e-20) + assert_allclose(times, times2) + assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5) + + # check transformations + for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']: + if raw.info[trans] is None: + assert (raw2.info[trans] is None) + else: + assert_array_equal(raw.info[trans]['trans'], + raw2.info[trans]['trans']) + + # check transformation 'from' and 'to' + if trans.startswith('dev'): + from_id = FIFF.FIFFV_COORD_DEVICE + else: + from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD + if trans[4:8] == 'head': + to_id = FIFF.FIFFV_COORD_HEAD + else: + to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD + for raw_ in [raw, raw2]: + assert raw_.info[trans]['from'] == from_id + assert raw_.info[trans]['to'] == to_id + + if fname_in == fif_fname or fname_in == fif_fname + '.gz': + assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r']) + + # test warnings on bad filenames + raw_badname = tmp_path / 'test-bad-name.fif.gz' + with pytest.warns(RuntimeWarning, match='raw.fif'): + raw.save(raw_badname) + with pytest.warns(RuntimeWarning, match='raw.fif'): + read_raw_fif(raw_badname) + + +@testing.requires_testing_data +@pytest.mark.parametrize('dtype', ('complex128', 'complex64')) +def test_io_complex(tmp_path, dtype): + """Test IO with complex data types.""" + rng = np.random.RandomState(0) + n_ch = 5 + raw = read_raw_fif(fif_fname).crop(0, 1).pick(np.arange(n_ch)).load_data() + data_orig = raw.get_data() + imag_rand = np.array(1j * rng.randn(n_ch, len(raw.times)), dtype=dtype) + raw_cp = raw.copy() + raw_cp._data = np.array(raw_cp._data, dtype) + raw_cp._data += imag_rand + with pytest.warns(RuntimeWarning, match='Saving .* complex data.'): + raw_cp.save(tmp_path / 'raw.fif', overwrite=True) + + raw2 = read_raw_fif(tmp_path / 'raw.fif') + raw2_data, _ = raw2[:] + assert_allclose(raw2_data, raw_cp._data) + # with preloading + raw2 = read_raw_fif(tmp_path / 'raw.fif', preload=True) + raw2_data, _ = raw2[:] + assert_allclose(raw2_data, raw_cp._data) + assert_allclose(data_orig, raw_cp._data.real) + + +@testing.requires_testing_data +def test_getitem(): + """Test getitem/indexing of Raw.""" + for preload in [False, True, 'memmap.dat']: + raw = read_raw_fif(fif_fname, preload=preload) + data, times = raw[0, :] + data1, times1 = raw[0] + assert_array_equal(data, data1) + assert_array_equal(times, times1) + data, times = raw[0:2, :] + data1, times1 = raw[0:2] + assert_array_equal(data, data1) + assert_array_equal(times, times1) + data1, times1 = raw[[0, 1]] + assert_array_equal(data, data1) + assert_array_equal(times, times1) + assert_array_equal(raw[raw.ch_names[0]][0][0], raw[0][0][0]) + assert_array_equal( + raw[-10:-1, :][0], + raw[len(raw.ch_names) - 10:len(raw.ch_names) - 1, :][0]) + with 
pytest.raises(ValueError, match='No appropriate channels'): + raw[slice(-len(raw.ch_names) - 1), slice(None)] + with pytest.raises(ValueError, match='must be'): + raw[-1000] + + +@testing.requires_testing_data +def test_proj(tmp_path): + """Test SSP proj operations.""" + for proj in [True, False]: + raw = read_raw_fif(fif_fname, preload=False) + if proj: + raw.apply_proj() + assert (all(p['active'] == proj for p in raw.info['projs'])) + + data, times = raw[0:2, :] + data1, times1 = raw[0:2] + assert_array_equal(data, data1) + assert_array_equal(times, times1) + + # test adding / deleting proj + if proj: + pytest.raises(ValueError, raw.add_proj, [], + {'remove_existing': True}) + pytest.raises(ValueError, raw.del_proj, 0) + else: + projs = deepcopy(raw.info['projs']) + n_proj = len(raw.info['projs']) + raw.del_proj(0) + assert len(raw.info['projs']) == n_proj - 1 + raw.add_proj(projs, remove_existing=False) + # Test that already existing projections are not added. + assert len(raw.info['projs']) == n_proj + raw.add_proj(projs[:-1], remove_existing=True) + assert len(raw.info['projs']) == n_proj - 1 + + # test apply_proj() with and without preload + for preload in [True, False]: + raw = read_raw_fif(fif_fname, preload=preload) + data, times = raw[:, 0:2] + raw.apply_proj() + data_proj_1 = np.dot(raw._projector, data) + + # load the file again without proj + raw = read_raw_fif(fif_fname, preload=preload) + + # write the file with proj. activated, make sure proj has been applied + raw.save(tmp_path / 'raw.fif', proj=True, overwrite=True) + raw2 = read_raw_fif(tmp_path / 'raw.fif') + data_proj_2, _ = raw2[:, 0:2] + assert_allclose(data_proj_1, data_proj_2) + assert (all(p['active'] for p in raw2.info['projs'])) + + # read orig file with proj. active + raw2 = read_raw_fif(fif_fname, preload=preload) + raw2.apply_proj() + data_proj_2, _ = raw2[:, 0:2] + assert_allclose(data_proj_1, data_proj_2) + assert (all(p['active'] for p in raw2.info['projs'])) + + # test that apply_proj works + raw.apply_proj() + data_proj_2, _ = raw[:, 0:2] + assert_allclose(data_proj_1, data_proj_2) + assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2)) + + # Test that picking removes projectors ... + raw = read_raw_fif(fif_fname) + n_projs = len(raw.info['projs']) + raw.pick_types(meg=False, eeg=True) + assert len(raw.info['projs']) == n_projs - 3 + + # ... but only if it doesn't apply to any channels in the dataset anymore. + raw = read_raw_fif(fif_fname) + n_projs = len(raw.info['projs']) + raw.pick_types(meg='mag', eeg=True) + assert len(raw.info['projs']) == n_projs + + # I/O roundtrip of an MEG projector with a Raw that only contains EEG + # data. + out_fname = tmp_path / 'test_raw.fif' + raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002) + proj = raw.info['projs'][-1] + raw.pick_types(meg=False, eeg=True) + raw.add_proj(proj) # Restore, because picking removed it! + raw._data.fill(0) + raw._data[-1] = 1. 
+ raw.save(out_fname) + raw = read_raw_fif(out_fname, preload=False) + raw.apply_proj() + assert_allclose(raw[:, :][0][:1], raw[0, :][0]) + + +@testing.requires_testing_data +@pytest.mark.parametrize('preload', [False, True, 'memmap.dat']) +def test_preload_modify(preload, tmp_path): + """Test preloading and modifying data.""" + rng = np.random.RandomState(0) + raw = read_raw_fif(fif_fname, preload=preload) + + nsamp = raw.last_samp - raw.first_samp + 1 + picks = pick_types(raw.info, meg='grad', exclude='bads') + + data = rng.randn(len(picks), nsamp // 2) + + try: + raw[picks, :nsamp // 2] = data + except RuntimeError: + if not preload: + return + else: + raise + + tmp_fname = tmp_path / 'raw.fif' + raw.save(tmp_fname, overwrite=True) + + raw_new = read_raw_fif(tmp_fname) + data_new, _ = raw_new[picks, :nsamp // 2] + + assert_allclose(data, data_new) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_filter(): + """Test filtering (FIR and IIR) and Raw.apply_function interface.""" + raw = read_raw_fif(fif_fname).crop(0, 7) + raw.load_data() + sig_dec_notch = 12 + sig_dec_notch_fit = 12 + picks_meg = pick_types(raw.info, meg=True, exclude='bads') + picks = picks_meg[:4] + + trans = 2.0 + filter_params = dict(picks=picks, filter_length='auto', + h_trans_bandwidth=trans, l_trans_bandwidth=trans, + fir_design='firwin') + raw_lp = raw.copy().filter(None, 8.0, **filter_params) + raw_hp = raw.copy().filter(16.0, None, **filter_params) + raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params) + raw_bs = raw.copy().filter(16.0, 8.0, **filter_params) + + data, _ = raw[picks, :] + + lp_data, _ = raw_lp[picks, :] + hp_data, _ = raw_hp[picks, :] + bp_data, _ = raw_bp[picks, :] + bs_data, _ = raw_bs[picks, :] + + tols = dict(atol=1e-20, rtol=1e-5) + assert_allclose(bs_data, lp_data + hp_data, **tols) + assert_allclose(data, lp_data + bp_data + hp_data, **tols) + assert_allclose(data, bp_data + bs_data, **tols) + + filter_params_iir = dict(picks=picks, n_jobs=2, method='iir', + iir_params=dict(output='ba')) + raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir) + raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir) + raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir) + del filter_params_iir + lp_data_iir, _ = raw_lp_iir[picks, :] + hp_data_iir, _ = raw_hp_iir[picks, :] + bp_data_iir, _ = raw_bp_iir[picks, :] + summation = lp_data_iir + hp_data_iir + bp_data_iir + assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100], 11) + + # make sure we didn't touch other channels + data, _ = raw[picks_meg[4:], :] + bp_data, _ = raw_bp[picks_meg[4:], :] + assert_array_equal(data, bp_data) + bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :] + assert_array_equal(data, bp_data_iir) + + # ... 
and that inplace changes are inplace + raw_copy = raw.copy() + assert np.may_share_memory(raw._data, raw._data) + assert not np.may_share_memory(raw_copy._data, raw._data) + # this could be assert_array_equal but we do this to mirror the call below + assert (raw._data[0] == raw_copy._data[0]).all() + raw_copy.filter(None, 20., n_jobs=2, **filter_params) + assert not (raw._data[0] == raw_copy._data[0]).all() + assert_array_equal(raw.copy().filter(None, 20., **filter_params)._data, + raw_copy._data) + + # do a very simple check on line filtering + raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params) + data_bs, _ = raw_bs[picks, :] + raw_notch = raw.copy().notch_filter( + 60.0, picks=picks, n_jobs=2, method='fir', + trans_bandwidth=2 * trans) + data_notch, _ = raw_notch[picks, :] + assert_array_almost_equal(data_bs, data_notch, sig_dec_notch) + + # now use the sinusoidal fitting + assert raw.times[-1] < 10 # catch error with filter_length > n_times + raw_notch = raw.copy().notch_filter( + None, picks=picks, n_jobs=2, method='spectrum_fit', + filter_length='10s') + data_notch, _ = raw_notch[picks, :] + data, _ = raw[picks, :] + assert_array_almost_equal(data, data_notch, sig_dec_notch_fit) + + # filter should set the "lowpass" and "highpass" parameters + raw = RawArray(np.random.randn(3, 1000), + create_info(3, 1000., ['eeg'] * 2 + ['stim'])) + with raw.info._unlock(): + raw.info['lowpass'] = raw.info['highpass'] = None + for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'): + print(kind) + h_freq = l_freq = None + if kind in ('lowpass', 'bandpass'): + h_freq = 70 + if kind in ('highpass', 'bandpass'): + l_freq = 30 + if kind == 'bandstop': + l_freq, h_freq = 70, 30 + assert (raw.info['lowpass'] is None) + assert (raw.info['highpass'] is None) + kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20, + filter_length='auto', phase='zero', fir_design='firwin') + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1), + **kwargs) + assert (raw.info['lowpass'] is None) + assert (raw.info['highpass'] is None) + raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs) + wanted_h = h_freq if kind != 'bandstop' else None + wanted_l = l_freq if kind != 'bandstop' else None + assert raw_filt.info['lowpass'] == wanted_h + assert raw_filt.info['highpass'] == wanted_l + # Using all data channels should still set the params (GH#3259) + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2), + **kwargs) + assert raw_filt.info['lowpass'] == wanted_h + assert raw_filt.info['highpass'] == wanted_l + + +def test_filter_picks(): + """Test filtering default channel picks.""" + ch_types = ['mag', 'grad', 'eeg', 'seeg', 'dbs', 'misc', 'stim', 'ecog', + 'hbo', 'hbr'] + info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256) + raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info) + + # -- Deal with meg mag grad and fnirs exceptions + ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'dbs', 'ecog') + + # -- Filter data channels + for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'hbo', 'hbr'): + picks = {ch: ch == ch_type for ch in ch_types} + picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False + picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False + raw_ = raw.copy().pick_types(**picks) + raw_.filter(10, 30, fir_design='firwin') + + # -- Error if no data channel + for ch_type in ('misc', 'stim'): + picks = {ch: ch == ch_type for ch in ch_types} + raw_ = raw.copy().pick_types(**picks) + pytest.raises(ValueError, 
raw_.filter, 10, 30)
+
+
+@testing.requires_testing_data
+def test_crop():
+    """Test cropping raw files."""
+    # split a concatenated file to test a difficult case
+    raw = concatenate_raws([read_raw_fif(f)
+                            for f in [fif_fname, fif_fname]])
+    split_size = 10.  # in seconds
+    sfreq = raw.info['sfreq']
+    nsamp = (raw.last_samp - raw.first_samp + 1)
+
+    # do an annoying case (off-by-one splitting)
+    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
+    tmins = np.sort(tmins)
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.copy().crop(tmin, tmax)
+        if ri < len(tmins) - 1:
+            assert_allclose(
+                raws[ri].times,
+                raw.copy().crop(tmin, tmins[ri + 1], include_tmax=False).times)
+        assert raws[ri]
+    all_raw_2 = concatenate_raws(raws, preload=False)
+    assert raw.first_samp == all_raw_2.first_samp
+    assert raw.last_samp == all_raw_2.last_samp
+    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
+
+    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+
+    # going in reverse order so the last fname is the first file (need it later)
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.copy().crop(tmin, tmax)
+    # test concatenation of split file
+    all_raw_1 = concatenate_raws(raws, preload=False)
+
+    all_raw_2 = raw.copy().crop(0, None)
+    for ar in [all_raw_1, all_raw_2]:
+        assert raw.first_samp == ar.first_samp
+        assert raw.last_samp == ar.last_samp
+        assert_array_equal(raw[:, :][0], ar[:, :][0])
+
+    # test shape consistency of cropped raw
+    data = np.zeros((1, 1002001))
+    info = create_info(1, 1000)
+    raw = RawArray(data, info)
+    for tmin in range(0, 1001, 100):
+        raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2)
+        assert raw1[:][0].shape == (1, 2001)
+
+    # degenerate
+    with pytest.raises(ValueError, match='No samples.*when include_tmax=Fals'):
+        raw.crop(0, 0, include_tmax=False)
+
+
+@testing.requires_testing_data
+def test_resample_equiv():
+    """Test resample (with I/O and multiple files)."""
+    raw = read_raw_fif(fif_fname).crop(0, 1)
+    raw_preload = raw.copy().load_data()
+    for r in (raw, raw_preload):
+        r.resample(r.info['sfreq'] / 4.)
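+    # lazy and preloaded paths must produce identical data after resampling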
+    assert_allclose(raw._data, raw_preload._data)
+
+
+@pytest.mark.slowtest
+@testing.requires_testing_data
+@pytest.mark.parametrize('preload, n, npad', [
+    (True, 512, 'auto'),
+    (False, 512, 0),
+])
+def test_resample(tmp_path, preload, n, npad):
+    """Test resample (with I/O and multiple files)."""
+    raw = read_raw_fif(fif_fname)
+    raw.crop(0, raw.times[n - 1])
+    assert len(raw.times) == n
+    if preload:
+        raw.load_data()
+    raw_resamp = raw.copy()
+    sfreq = raw.info['sfreq']
+    # test parallel on upsample
+    raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad)
+    assert raw_resamp.n_times == len(raw_resamp.times)
+    raw_resamp.save(tmp_path / 'raw_resamp-raw.fif')
+    raw_resamp = read_raw_fif(tmp_path / 'raw_resamp-raw.fif', preload=True)
+    assert sfreq == raw_resamp.info['sfreq'] / 2
+    assert raw.n_times == raw_resamp.n_times // 2
+    assert raw_resamp.get_data().shape[1] == raw_resamp.n_times
+    assert raw.get_data().shape[0] == raw_resamp._data.shape[0]
+    # test non-parallel on downsample
+    raw_resamp.resample(sfreq, n_jobs=1, npad=npad)
+    assert raw_resamp.info['sfreq'] == sfreq
+    assert raw.get_data().shape == raw_resamp._data.shape
+    assert raw.first_samp == raw_resamp.first_samp
+    assert raw.last_samp == raw_resamp.last_samp
+    # upsampling then downsampling doubles resampling error, but this still
+    # works (hooray). Note that the stim channels had to be sub-sampled
+    # without filtering to be accurately preserved
+    # note we have to treat MEG and EEG+STIM channels differently (tols)
+    assert_allclose(raw.get_data()[:306, 200:-200],
+                    raw_resamp._data[:306, 200:-200],
+                    rtol=1e-2, atol=1e-12)
+    assert_allclose(raw.get_data()[306:, 200:-200],
+                    raw_resamp._data[306:, 200:-200],
+                    rtol=1e-2, atol=1e-7)
+
+    # now check multiple file support w/resampling, as order of operations
+    # (concat, resample) should not affect our data
+    raw1 = raw.copy()
+    raw2 = raw.copy()
+    raw3 = raw.copy()
+    raw4 = raw.copy()
+    raw1 = concatenate_raws([raw1, raw2])
+    raw1.resample(10., npad=npad)
+    raw3.resample(10., npad=npad)
+    raw4.resample(10., npad=npad)
+    raw3 = concatenate_raws([raw3, raw4])
+    assert_array_equal(raw1._data, raw3._data)
+    assert_array_equal(raw1._first_samps, raw3._first_samps)
+    assert_array_equal(raw1._last_samps, raw3._last_samps)
+    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
+    assert raw1.first_samp == raw3.first_samp
+    assert raw1.last_samp == raw3.last_samp
+    assert raw1.info['sfreq'] == raw3.info['sfreq']
+
+    # smoke test crop after resample
+    raw4.crop(tmin=raw4.times[1], tmax=raw4.times[-1])
+
+    # test resampling of stim channel
+
+    # basic decimation
+    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(8., npad=npad)._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation of multiple stim channels
+    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
+    assert_allclose(raw.resample(8., npad=npad, verbose='error')._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0],
+                     [1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation that could potentially drop events if the decimation is
+    # done naively
+    stim = [0, 0, 0, 1, 1, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(4., npad=npad)._data,
+                    [[0, 1, 1, 0]])
+
+    # two events are merged in this case (warning)
+    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    with pytest.warns(RuntimeWarning, match='become unreliable'):
+
raw.resample(8., npad=npad) + + # events are dropped in this case (warning) + stim = [0, 1, 1, 0, 0, 1, 1, 0] + raw = RawArray([stim], create_info(1, len(stim), ['stim'])) + with pytest.warns(RuntimeWarning, match='become unreliable'): + raw.resample(4., npad=npad) + + # test resampling events: this should no longer give a warning + # we often have first_samp != 0, include it here too + stim = [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1] # an event at end + # test is on half the sfreq, but should work with trickier ones too + o_sfreq, sfreq_ratio = len(stim), 0.5 + n_sfreq = o_sfreq * sfreq_ratio + first_samp = len(stim) // 2 + raw = RawArray([stim], create_info(1, o_sfreq, ['stim']), + first_samp=first_samp) + events = find_events(raw) + raw, events = raw.resample(n_sfreq, events=events, npad=npad) + # Try index into raw.times with resampled events: + raw.times[events[:, 0] - raw.first_samp] + n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py + # NB np.round used for rounding event times, which has 0.5 as corner case: + # https://docs.scipy.org/doc/numpy/reference/generated/numpy.around.html + assert_array_equal( + events, + np.array([[np.round(1 * sfreq_ratio) + n_fsamp, 0, 1], + [np.round(10 * sfreq_ratio) + n_fsamp, 0, 1], + [np.minimum(np.round(15 * sfreq_ratio), + raw._data.shape[1] - 1) + n_fsamp, 0, 1]])) + + # test copy flag + stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] + raw = RawArray([stim], create_info(1, len(stim), ['stim'])) + raw_resampled = raw.copy().resample(4., npad=npad) + assert (raw_resampled is not raw) + raw_resampled = raw.resample(4., npad=npad) + assert (raw_resampled is raw) + + # resample should still work even when no stim channel is present + raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg'])) + with raw.info._unlock(): + raw.info['lowpass'] = 50. + raw.resample(10, npad=npad) + assert raw.info['lowpass'] == 5. 
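+    # (100 samples at 100 Hz -> 1 s of data -> 10 samples at 10 Hz)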
+ assert len(raw) == 10 + + +def test_resample_stim(): + """Test stim_picks argument.""" + data = np.ones((2, 1000)) + info = create_info(2, 1000., ('eeg', 'misc')) + raw = RawArray(data, info) + raw.resample(500., stim_picks='misc') + + +@testing.requires_testing_data +def test_hilbert(): + """Test computation of analytic signal using hilbert.""" + raw = read_raw_fif(fif_fname, preload=True) + picks_meg = pick_types(raw.info, meg=True, exclude='bads') + picks = picks_meg[:4] + + raw_filt = raw.copy() + raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto', + h_trans_bandwidth='auto', filter_length='auto', + phase='zero', fir_window='blackman', fir_design='firwin') + raw_filt_2 = raw_filt.copy() + + raw2 = raw.copy() + raw3 = raw.copy() + raw.apply_hilbert(picks, n_fft='auto') + raw2.apply_hilbert(picks, n_fft='auto', envelope=True) + + # Test custom n_fft + raw_filt.apply_hilbert(picks, n_fft='auto') + n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000))) + raw_filt_2.apply_hilbert(picks, n_fft=n_fft) + assert raw_filt._data.shape == raw_filt_2._data.shape + assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50], + atol=1e-13, rtol=1e-2) + with pytest.raises(ValueError, match='n_fft.*must be at least the number'): + raw3.apply_hilbert(picks, n_fft=raw3.n_times - 100) + + env = np.abs(raw._data[picks, :]) + assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13) + + +@testing.requires_testing_data +def test_raw_copy(): + """Test Raw copy.""" + raw = read_raw_fif(fif_fname, preload=True) + data, _ = raw[:, :] + copied = raw.copy() + copied_data, _ = copied[:, :] + assert_array_equal(data, copied_data) + assert sorted(raw.__dict__.keys()) == sorted(copied.__dict__.keys()) + + raw = read_raw_fif(fif_fname, preload=False) + data, _ = raw[:, :] + copied = raw.copy() + copied_data, _ = copied[:, :] + assert_array_equal(data, copied_data) + assert sorted(raw.__dict__.keys()) == sorted(copied.__dict__.keys()) + + +@requires_pandas +def test_to_data_frame(): + """Test raw Pandas exporter.""" + from pandas import Timedelta + raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data() + _, times = raw[0, :10] + df = raw.to_data_frame(index='time') + assert ((df.columns == raw.ch_names).all()) + assert_array_equal(np.round(times * 1e3), df.index.values[:10]) + df = raw.to_data_frame(index=None) + assert ('time' in df.columns) + assert_array_equal(df.values[:, 1], raw._data[0] * 1e13) + assert_array_equal(df.values[:, 3], raw._data[2] * 1e15) + # test long format + df_long = raw.to_data_frame(long_format=True) + assert(len(df_long) == raw.get_data().size) + expected = ('time', 'channel', 'ch_type', 'value') + assert set(expected) == set(df_long.columns) + # test bad time format + with pytest.raises(ValueError, match='not a valid time format. 
Valid'):
+        raw.to_data_frame(time_format='foo')
+    # test time format error handling
+    raw.set_meas_date(None)
+    with pytest.warns(RuntimeWarning, match='Cannot convert to Datetime when'):
+        df = raw.to_data_frame(time_format='datetime')
+    assert isinstance(df['time'].iloc[0], Timedelta)
+
+
+@requires_pandas
+@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta', 'datetime'))
+def test_to_data_frame_time_format(time_format):
+    """Test time conversion in the raw Pandas exporter."""
+    from pandas import Timedelta, Timestamp
+    raw = read_raw_fif(test_fif_fname, preload=True)
+    # test time_format
+    df = raw.to_data_frame(time_format=time_format)
+    dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta,
+              'datetime': Timestamp}
+    assert isinstance(df['time'].iloc[0], dtypes[time_format])
+
+
+def test_add_channels():
+    """Test raw splitting / re-appending channel types."""
+    rng = np.random.RandomState(0)
+    raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data()
+    raw_nopre = read_raw_fif(test_fif_fname, preload=False)
+    raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
+    raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
+    raw_meg = raw.copy().pick_types(meg=True, eeg=False)
+    raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
+    raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
+    assert (
+        all(ch in raw_new.ch_names
+            for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
+    )
+    raw_new = raw_meg.copy().add_channels([raw_eeg])
+
+    assert all(ch in raw_new.ch_names for ch in raw_eeg_meg.ch_names)
+    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
+    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
+    assert (all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
+
+    # Testing force updates
+    raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
+    orig_head_t = raw_arr_info['dev_head_t']
+    raw_arr = rng.randn(2, raw_eeg.n_times)
+    raw_arr = RawArray(raw_arr, raw_arr_info)
+    # This should error because of conflicts in Info
+    raw_arr.info['dev_head_t'] = orig_head_t
+    with pytest.raises(ValueError, match='mutually inconsistent dev_head_t'):
+        raw_meg.copy().add_channels([raw_arr])
+    raw_meg.copy().add_channels([raw_arr], force_update_info=True)
+    # Make sure that values didn't get overwritten
+    assert_object_equal(raw_arr.info['dev_head_t'], orig_head_t)
+    # Make sure all variants work
+    for simult in (False, True):  # simultaneous adding or not
+        raw_new = raw_meg.copy()
+        if simult:
+            raw_new.add_channels([raw_eeg, raw_stim])
+        else:
+            raw_new.add_channels([raw_eeg])
+            raw_new.add_channels([raw_stim])
+        for other in (raw_meg, raw_stim, raw_eeg):
+            assert_allclose(
+                raw_new.copy().pick_channels(other.ch_names).get_data(),
+                other.get_data())
+
+    # Now test errors
+    raw_badsf = raw_eeg.copy()
+    with raw_badsf.info._unlock():
+        raw_badsf.info['sfreq'] = 3.1415927
+    raw_eeg.crop(.5)
+
+    pytest.raises(RuntimeError, raw_meg.add_channels, [raw_nopre])
+    pytest.raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
+    pytest.raises(AssertionError, raw_meg.add_channels, [raw_eeg])
+    pytest.raises(ValueError, raw_meg.add_channels, [raw_meg])
+    pytest.raises(TypeError, raw_meg.add_channels, raw_badsf)
+
+
+@testing.requires_testing_data
+def test_save(tmp_path):
+    """Test saving raw."""
+    temp_fname = tmp_path / 'test_raw.fif'
+    shutil.copyfile(fif_fname, temp_fname)
+    raw = read_raw_fif(temp_fname, preload=False)
+    # can't write over file being read
+    with pytest.raises(ValueError, match='to the same file'):
+
raw.save(temp_fname) + raw.load_data() + # can't overwrite file without overwrite=True + with pytest.raises(IOError, match='file exists'): + raw.save(fif_fname) + + # test abspath support and annotations + orig_time = _dt_to_stamp(raw.info['meas_date'])[0] + raw._first_time + annot = Annotations([10], [5], ['test'], orig_time=orig_time) + raw.set_annotations(annot) + annot = raw.annotations + new_fname = tmp_path / 'break_raw.fif' + raw.save(new_fname, overwrite=True) + new_raw = read_raw_fif(new_fname, preload=False) + pytest.raises(ValueError, new_raw.save, new_fname) + assert_array_almost_equal(annot.onset, new_raw.annotations.onset) + assert_array_equal(annot.duration, new_raw.annotations.duration) + assert_array_equal(annot.description, new_raw.annotations.description) + assert annot.orig_time == new_raw.annotations.orig_time + + # test set_meas_date(None) + raw.set_meas_date(None) + raw.save(new_fname, overwrite=True) + new_raw = read_raw_fif(new_fname, preload=False) + assert new_raw.info['meas_date'] is None + + +@testing.requires_testing_data +def test_annotation_crop(tmp_path): + """Test annotation sync after cropping and concatenating.""" + annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test']) + raw = read_raw_fif(fif_fname, preload=False) + raw.set_annotations(annot) + r1 = raw.copy().crop(2.5, 7.5) + r2 = raw.copy().crop(12.5, 17.5) + r3 = raw.copy().crop(10., 12.) + raw = concatenate_raws([r1, r2, r3]) # segments reordered + assert_and_remove_boundary_annot(raw, 2) + onsets = raw.annotations.onset + durations = raw.annotations.duration + # 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s + assert_array_almost_equal(onsets[:3], [47.95, 52.95, 56.46], decimal=2) + assert_array_almost_equal([2., 2.5, 1.], durations[:3], decimal=2) + + # test annotation clipping + orig_time = _dt_to_stamp(raw.info['meas_date']) + orig_time = orig_time[0] + orig_time[1] * 1e-6 + raw._first_time - 1. + annot = Annotations([0., raw.times[-1]], [2., 2.], 'test', orig_time) + with pytest.warns(RuntimeWarning, match='Limited .* expanding outside'): + raw.set_annotations(annot) + assert_allclose(raw.annotations.duration, + [1., 1. + 1. 
/ raw.info['sfreq']], atol=1e-3) + + # make sure we can overwrite the file we loaded when preload=True + new_fname = tmp_path / 'break_raw.fif' + raw.save(new_fname) + new_raw = read_raw_fif(new_fname, preload=True) + new_raw.save(new_fname, overwrite=True) + + +@testing.requires_testing_data +def test_with_statement(): + """Test with statement.""" + for preload in [True, False]: + with read_raw_fif(fif_fname, preload=preload) as raw_: + print(raw_) + + +def test_compensation_raw(tmp_path): + """Test Raw compensation.""" + raw_3 = read_raw_fif(ctf_comp_fname) + assert raw_3.compensation_grade == 3 + data_3, times = raw_3[:, :] + + # data come with grade 3 + for ii in range(2): + raw_3_new = raw_3.copy() + if ii == 0: + raw_3_new.load_data() + raw_3_new.apply_gradient_compensation(3) + assert raw_3_new.compensation_grade == 3 + data_new, times_new = raw_3_new[:, :] + assert_array_equal(times, times_new) + assert_array_equal(data_3, data_new) + + # change to grade 0 + raw_0 = raw_3.copy().apply_gradient_compensation(0) + assert raw_0.compensation_grade == 0 + data_0, times_new = raw_0[:, :] + assert_array_equal(times, times_new) + assert (np.mean(np.abs(data_0 - data_3)) > 1e-12) + # change to grade 1 + raw_1 = raw_0.copy().apply_gradient_compensation(1) + assert raw_1.compensation_grade == 1 + data_1, times_new = raw_1[:, :] + assert_array_equal(times, times_new) + assert (np.mean(np.abs(data_1 - data_3)) > 1e-12) + pytest.raises(ValueError, raw_1.apply_gradient_compensation, 33) + raw_bad = raw_0.copy() + raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error')) + raw_bad.apply_proj() + pytest.raises(RuntimeError, raw_bad.apply_gradient_compensation, 1) + # with preload + tols = dict(rtol=1e-12, atol=1e-25) + raw_1_new = raw_3.copy().load_data().apply_gradient_compensation(1) + assert raw_1_new.compensation_grade == 1 + data_1_new, times_new = raw_1_new[:, :] + assert_array_equal(times, times_new) + assert (np.mean(np.abs(data_1_new - data_3)) > 1e-12) + assert_allclose(data_1, data_1_new, **tols) + # change back + raw_3_new = raw_1.copy().apply_gradient_compensation(3) + data_3_new, times_new = raw_3_new[:, :] + assert_allclose(data_3, data_3_new, **tols) + raw_3_new = raw_1.copy().load_data().apply_gradient_compensation(3) + data_3_new, times_new = raw_3_new[:, :] + assert_allclose(data_3, data_3_new, **tols) + + for load in (False, True): + for raw in (raw_0, raw_1): + raw_3_new = raw.copy() + if load: + raw_3_new.load_data() + raw_3_new.apply_gradient_compensation(3) + assert raw_3_new.compensation_grade == 3 + data_3_new, times_new = raw_3_new[:, :] + assert_array_equal(times, times_new) + assert (np.mean(np.abs(data_3_new - data_1)) > 1e-12) + assert_allclose(data_3, data_3_new, **tols) + + # Try IO with compensation + temp_file = tmp_path / 'raw.fif' + raw_3.save(temp_file, overwrite=True) + for preload in (True, False): + raw_read = read_raw_fif(temp_file, preload=preload) + assert raw_read.compensation_grade == 3 + data_read, times_new = raw_read[:, :] + assert_array_equal(times, times_new) + assert_allclose(data_3, data_read, **tols) + raw_read.apply_gradient_compensation(1) + data_read, times_new = raw_read[:, :] + assert_array_equal(times, times_new) + assert_allclose(data_1, data_read, **tols) + + # Now save the file that has modified compensation + # and make sure the compensation is the same as it was, + # but that we can undo it + + # These channels have norm 1e-11/1e-12, so atol=1e-18 isn't awesome, + # but it's due to the single precision of the 
info['comps'] leading + # to inexact inversions with saving/loading (casting back to single) + # in between (e.g., 1->3->1 will degrade like this) + looser_tols = dict(rtol=1e-6, atol=1e-18) + raw_1.save(temp_file, overwrite=True) + for preload in (True, False): + raw_read = read_raw_fif(temp_file, preload=preload, verbose=True) + assert raw_read.compensation_grade == 1 + data_read, times_new = raw_read[:, :] + assert_array_equal(times, times_new) + assert_allclose(data_1, data_read, **looser_tols) + raw_read.apply_gradient_compensation(3, verbose=True) + data_read, times_new = raw_read[:, :] + assert_array_equal(times, times_new) + assert_allclose(data_3, data_read, **looser_tols) + + +@requires_mne +def test_compensation_raw_mne(tmp_path): + """Test Raw compensation by comparing with MNE-C.""" + def compensate_mne(fname, grad): + tmp_fname = tmp_path / 'mne_ctf_test_raw.fif' + cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname, + '--grad', str(grad), '--projoff', '--filteroff'] + run_subprocess(cmd) + return read_raw_fif(tmp_fname, preload=True) + + for grad in [0, 2, 3]: + raw_py = read_raw_fif(ctf_comp_fname, preload=True) + raw_py.apply_gradient_compensation(grad) + raw_c = compensate_mne(ctf_comp_fname, grad) + assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17) + assert raw_py.info['nchan'] == raw_c.info['nchan'] + for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']): + for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit', + 'coord_frame', 'kind'): + assert ch_py[key] == ch_c[key] + for key in ('loc', 'unit_mul', 'range', 'cal'): + assert_allclose(ch_py[key], ch_c[key]) + + +@testing.requires_testing_data +def test_drop_channels_mixin(): + """Test channels-dropping functionality.""" + raw = read_raw_fif(fif_fname, preload=True) + drop_ch = raw.ch_names[:3] + ch_names = raw.ch_names[3:] + + ch_names_orig = raw.ch_names + dummy = raw.copy().drop_channels(drop_ch) + assert ch_names == dummy.ch_names + assert ch_names_orig == raw.ch_names + assert len(ch_names_orig) == raw._data.shape[0] + + raw.drop_channels(drop_ch) + assert ch_names == raw.ch_names + assert len(ch_names) == len(raw._cals) + assert len(ch_names) == raw._data.shape[0] + + # Test that dropping all channels a projector applies to will lead to the + # removal of said projector. 
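+    # (this fails without preloaded data, hence the load_data() call below)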
+ raw = read_raw_fif(fif_fname) + n_projs = len(raw.info['projs']) + eeg_names = raw.info['projs'][-1]['data']['col_names'] + with pytest.raises(RuntimeError, match='loaded'): + raw.copy().apply_proj().drop_channels(eeg_names) + raw.load_data().drop_channels(eeg_names) # EEG proj + assert len(raw.info['projs']) == n_projs - 1 + + +@testing.requires_testing_data +@pytest.mark.parametrize('preload', (True, False)) +def test_pick_channels_mixin(preload): + """Test channel-picking functionality.""" + raw = read_raw_fif(fif_fname, preload=preload) + raw_orig = raw.copy() + ch_names = raw.ch_names[:3] + + ch_names_orig = raw.ch_names + dummy = raw.copy().pick_channels(ch_names) + assert ch_names == dummy.ch_names + assert ch_names_orig == raw.ch_names + assert len(ch_names_orig) == raw.get_data().shape[0] + + raw.pick_channels(ch_names) # copy is False + assert ch_names == raw.ch_names + assert len(ch_names) == len(raw._cals) + assert len(ch_names) == raw.get_data().shape[0] + with pytest.raises(ValueError, match='must be'): + raw.pick_channels(ch_names[0]) + + assert_allclose(raw[:][0], raw_orig[:3][0]) + + +@testing.requires_testing_data +def test_equalize_channels(): + """Test equalization of channels.""" + raw1 = read_raw_fif(fif_fname, preload=True) + + raw2 = raw1.copy() + ch_names = raw1.ch_names[2:] + raw1.drop_channels(raw1.ch_names[:1]) + raw2.drop_channels(raw2.ch_names[1:2]) + my_comparison = [raw1, raw2] + my_comparison = equalize_channels(my_comparison) + for e in my_comparison: + assert ch_names == e.ch_names + + +def test_memmap(tmp_path): + """Test some interesting memmapping cases.""" + # concatenate_raw + memmaps = [str(tmp_path / str(ii)) for ii in range(3)] + raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) + assert raw_0._data.filename == memmaps[0] + raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1]) + assert raw_1._data.filename == memmaps[1] + raw_0.append(raw_1, preload=memmaps[2]) + assert raw_0._data.filename == memmaps[2] + # add_channels + orig_data = raw_0[:][0] + new_ch_info = pick_info(raw_0.info, [0]) + new_ch_info['chs'][0]['ch_name'] = 'foo' + new_ch_info._update_redundant() + new_data = np.linspace(0, 1, len(raw_0.times))[np.newaxis] + ch = RawArray(new_data, new_ch_info) + raw_0.add_channels([ch]) + if sys.platform == 'darwin': + assert not hasattr(raw_0._data, 'filename') + else: + assert raw_0._data.filename == memmaps[2] + assert_allclose(orig_data, raw_0[:-1][0], atol=1e-7) + assert_allclose(new_data, raw_0[-1][0], atol=1e-7) + + # now let's see if .copy() actually works; it does, but eventually + # we should make it optionally memmap to a new filename rather than + # create an in-memory version (filename=None) + raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) + assert raw_0._data.filename == memmaps[0] + assert raw_0._data[:1, 3:5].all() + raw_1 = raw_0.copy() + assert isinstance(raw_1._data, np.memmap) + assert raw_1._data.filename is None + raw_0._data[:] = 0. + assert not raw_0._data.any() + assert raw_1._data[:1, 3:5].all() + # other things like drop_channels and crop work but do not use memmapping, + # eventually we might want to add support for some of these as users + # require them. 
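The memmap contract exercised by test_memmap above is worth a standalone sketch. This is illustrative only: 'some_raw.fif' is an assumed input file, the .dat paths are scratch targets of our choosing, and `_data`/`filename` are the same private attributes the test itself inspects.

    import os.path as op
    import tempfile

    import numpy as np
    from mne.io import read_raw_fif

    tmp = tempfile.mkdtemp()
    mm0, mm1, mm2 = (op.join(tmp, f'data{ii}.dat') for ii in range(3))

    # passing a filename as preload backs the data with an on-disk np.memmap
    raw_0 = read_raw_fif('some_raw.fif', preload=mm0)  # assumed input file
    assert isinstance(raw_0._data, np.memmap)
    assert raw_0._data.filename == mm0

    # append() accepts a fresh memmap target for the concatenated result
    raw_1 = read_raw_fif('some_raw.fif', preload=mm1)
    raw_0.append(raw_1, preload=mm2)
    assert raw_0._data.filename == mm2

    # copy() currently materializes the copy in memory (filename is None)
    assert raw_0.copy()._data.filename is None

Nothing here relies on behavior the test does not already pin down; the in-memory copy() is the same limitation the final comment of test_memmap flags.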
+ + +# These are slow on Azure Windows so let's do a subset +@pytest.mark.parametrize('kind', [ + 'file', + pytest.param('bytes', marks=pytest.mark.slowtest), +]) +@pytest.mark.parametrize('preload', [ + True, + pytest.param(str, marks=pytest.mark.slowtest), +]) +@pytest.mark.parametrize('split', [ + False, + pytest.param(True, marks=pytest.mark.slowtest), +]) +def test_file_like(kind, preload, split, tmp_path): + """Test handling with file-like objects.""" + if split: + fname = tmp_path / 'test_raw.fif' + read_raw_fif(test_fif_fname).save(fname, split_size='5MB') + assert op.isfile(fname) + assert op.isfile(str(fname)[:-4] + '-1.fif') + else: + fname = test_fif_fname + if preload is str: + preload = str(tmp_path / 'memmap') + with open(str(fname), 'rb') as file_fid: + fid = BytesIO(file_fid.read()) if kind == 'bytes' else file_fid + assert not fid.closed + assert not file_fid.closed + with pytest.raises(ValueError, match='preload must be used with file'): + read_raw_fif(fid) + assert not fid.closed + assert not file_fid.closed + # Use test_preloading=False but explicitly pass the preload type + # so that we don't bother testing preload=False + kwargs = dict(fname=fid, preload=preload, on_split_missing='ignore', + test_preloading=False, test_kwargs=False) + _test_raw_reader(read_raw_fif, **kwargs) + assert not fid.closed + assert not file_fid.closed + assert file_fid.closed + + +def test_str_like(): + """Test handling with str-like objects.""" + fname = pathlib.Path(test_fif_fname) + raw_path = read_raw_fif(fname, preload=True) + raw_str = read_raw_fif(test_fif_fname, preload=True) + assert_allclose(raw_path._data, raw_str._data) + + +@pytest.mark.parametrize('fname', [ + test_fif_fname, + testing._pytest_param(fif_fname), + testing._pytest_param(ms_fname), +]) +def test_bad_acq(fname): + """Test handling of acquisition errors.""" + # see gh-7844 + raw = read_raw_fif(fname, allow_maxshield='yes').load_data() + with open(fname, 'rb') as fid: + for ent in raw._raw_extras[0]['ent']: + fid.seek(ent.pos, 0) + tag = _read_tag_header(fid) + # hack these, others (kind, type) should be correct + tag.pos, tag.next = ent.pos, ent.next + assert tag == ent + + +@testing.requires_testing_data +@pytest.mark.skipif(sys.platform not in ('darwin', 'linux'), + reason='Needs proper symlinking') +def test_split_symlink(tmp_path): + """Test split files with symlinks.""" + # regression test for gh-9221 + (tmp_path / 'first').mkdir() + first = tmp_path / 'first' / 'test_raw.fif' + raw = read_raw_fif(fif_fname).pick('meg').load_data() + raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True) + second = str(first)[:-4] + '-1.fif' + assert op.isfile(second) + assert not op.isfile(str(first)[:-4] + '-2.fif') + (tmp_path / 'a').mkdir() + (tmp_path / 'b').mkdir() + new_first = tmp_path / 'a' / 'test_raw.fif' + new_second = tmp_path / 'b' / 'test_raw-1.fif' + shutil.move(first, new_first) + shutil.move(second, new_second) + os.symlink(new_first, first) + os.symlink(new_second, second) + raw_new = read_raw_fif(first) + assert_allclose(raw_new.get_data(), raw.get_data()) + + +@testing.requires_testing_data +def test_corrupted(tmp_path): + """Test that a corrupted file can still be read.""" + # Must be a file written by Neuromag, not us, since we don't write the dir + # at the end, so use the skip one (straight from acq). 
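+    # strategy: read the tag directory pointer from the start of the file,
+    # truncate a copy at that offset, and confirm the data still load without
+    # the directory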
+ raw = read_raw_fif(skip_fname) + with open(skip_fname, 'rb') as fid: + tag = read_tag_info(fid) + tag = read_tag(fid) + dirpos = int(tag.data) + assert dirpos == 12641532 + fid.seek(0) + data = fid.read(dirpos) + bad_fname = tmp_path / 'test_raw.fif' + with open(bad_fname, 'wb') as fid: + fid.write(data) + with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'): + raw_bad = read_raw_fif(bad_fname) + assert_allclose(raw.get_data(), raw_bad.get_data()) + + +@testing.requires_testing_data +def test_expand_user(tmp_path, monkeypatch): + """Test that we're expanding `~` before reading and writing.""" + monkeypatch.setenv('HOME', str(tmp_path)) + monkeypatch.setenv('USERPROFILE', str(tmp_path)) # Windows + + path_in = Path(fif_fname) + path_out = tmp_path / path_in.name + path_home = Path('~') / path_in.name + + shutil.copyfile( + src=path_in, + dst=path_out + ) + + raw = read_raw_fif(fname=path_home, preload=True) + raw.save(fname=path_home, overwrite=True) diff --git a/python/libs/mne/io/hitachi/__init__.py b/python/libs/mne/io/hitachi/__init__.py new file mode 100644 index 0000000..cdd39ba --- /dev/null +++ b/python/libs/mne/io/hitachi/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Author: Eric Larson +# +# License: BSD-3-Clause + +from .hitachi import read_raw_hitachi diff --git a/python/libs/mne/io/hitachi/hitachi.py b/python/libs/mne/io/hitachi/hitachi.py new file mode 100644 index 0000000..5e1bab6 --- /dev/null +++ b/python/libs/mne/io/hitachi/hitachi.py @@ -0,0 +1,286 @@ +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import datetime as dt +import re + +import numpy as np + +from ..base import BaseRaw +from ..constants import FIFF +from ..meas_info import create_info +from ..nirx.nirx import _read_csv_rows_cols +from ..utils import _mult_cal_one +from ...utils import (logger, verbose, fill_doc, warn, _check_fname, + _check_option) + + +@fill_doc +def read_raw_hitachi(fname, preload=False, verbose=None): + """Reader for a Hitachi fNIRS recording. + + Parameters + ---------- + fname : str + Path to the Hitachi CSV file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawHitachi + A Raw object containing Hitachi data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + %(hitachi_notes)s + """ + return RawHitachi(fname, preload, verbose=verbose) + + +def _check_bad(cond, msg): + if cond: + raise RuntimeError(f'Could not parse file: {msg}') + + +@fill_doc +class RawHitachi(BaseRaw): + """Raw object from a Hitachi fNIRS file. + + Parameters + ---------- + fname : str + Path to the Hitachi CSV file. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
+ + Notes + ----- + %(hitachi_notes)s + """ + + @verbose + def __init__(self, fname, preload=False, *, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') + logger.info('Loading %s' % fname) + + raw_extra = dict(fname=fname) + info_extra = dict() + subject_info = dict() + ch_wavelengths = dict() + fnirs_wavelengths = [None, None] + meas_date = age = ch_names = sfreq = None + with open(fname, 'rb') as fid: + lines = fid.read() + lines = lines.decode('latin-1').rstrip('\r\n') + oldlen = len(lines) + assert len(lines) == oldlen + bounds = [0] + end = '\n' if '\n' in lines else '\r' + bounds.extend(a.end() for a in re.finditer(end, lines)) + bounds.append(len(lines)) + lines = lines.split(end) + assert len(bounds) == len(lines) + 1 + line = lines[0].rstrip(',\r\n') + _check_bad(line != 'Header', 'no header found') + li = 0 + mode = None + for li, line in enumerate(lines[1:], 1): + # Newer format has some blank lines + if len(line) == 0: + continue + parts = line.rstrip(',\r\n').split(',') + if len(parts) == 0: # some header lines are blank + continue + kind, parts = parts[0], parts[1:] + if len(parts) == 0: + parts = [''] # some fields (e.g., Comment) meaningfully blank + if kind == 'File Version': + logger.info(f'Reading Hitachi fNIRS file version {parts[0]}') + elif kind == 'AnalyzeMode': + _check_bad( + parts != ['Continuous'], f'not continuous data ({parts})') + elif kind == 'Sampling Period[s]': + sfreq = 1 / float(parts[0]) + elif kind == 'Exception': + raise NotImplementedError(kind) + elif kind == 'Comment': + info_extra['description'] = parts[0] + elif kind == 'ID': + subject_info['his_id'] = parts[0] + elif kind == 'Name': + if len(parts): + name = parts[0].split(' ') + if len(name): + subject_info['first_name'] = name[0] + subject_info['last_name'] = ' '.join(name[1:]) + elif kind == 'Age': + age = int(parts[0].rstrip('y')) + elif kind == 'Mode': + mode = parts[0] + elif kind in ('HPF[Hz]', 'LPF[Hz]'): + try: + freq = float(parts[0]) + except ValueError: + pass + else: + info_extra[{'HPF[Hz]': 'highpass', + 'LPF[Hz]': 'lowpass'}[kind]] = freq + elif kind == 'Date': + # 5/17/04 5:14 + try: + mdy, HM = parts[0].split(' ') + H, M = HM.split(':') + if len(H) == 1: + H = f'0{H}' + mdyHM = ' '.join([mdy, ':'.join([H, M])]) + for fmt in ('%m/%d/%y %H:%M', '%Y/%m/%d %H:%M'): + try: + meas_date = dt.datetime.strptime(mdyHM, fmt) + except Exception: + pass + else: + break + else: + raise RuntimeError # unknown format + except Exception: + warn('Extraction of measurement date failed. ' + 'Please report this as a github issue. 
' + 'The date is being set to January 1st, 2000, ' + f'instead of {repr(parts[0])}') + elif kind == 'Sex': + try: + subject_info['sex'] = dict( + female=FIFF.FIFFV_SUBJ_SEX_FEMALE, + male=FIFF.FIFFV_SUBJ_SEX_MALE)[parts[0].lower()] + except KeyError: + pass + elif kind == 'Wave[nm]': + fnirs_wavelengths[:] = [int(part) for part in parts] + elif kind == 'Wave Length': + ch_regex = re.compile(r'^(.*)\(([0-9\.]+)\)$') + for ent in parts: + _, v = ch_regex.match(ent).groups() + ch_wavelengths[ent] = float(v) + elif kind == 'Data': + break + fnirs_wavelengths = np.array(fnirs_wavelengths, int) + assert len(fnirs_wavelengths) == 2 + ch_names = lines[li + 1].rstrip(',\r\n').split(',') + # cull to correct ones + raw_extra['keep_mask'] = ~np.in1d(ch_names, ['Probe1', 'Time']) + # set types + ch_names = [ch_name for ci, ch_name in enumerate(ch_names) + if raw_extra['keep_mask'][ci]] + ch_types = ['fnirs_cw_amplitude' if ch_name.startswith('CH') + else 'stim' + for ch_name in ch_names] + # get locations + nirs_names = [ch_name for ch_name, ch_type in zip(ch_names, ch_types) + if ch_type == 'fnirs_cw_amplitude'] + n_nirs = len(nirs_names) + assert n_nirs % 2 == 0 + names = { + '3x3': 'ETG-100', + '3x5': 'ETG-7000', + '4x4': 'ETG-7000', + '3x11': 'ETG-4000', + } + _check_option('Hitachi mode', mode, sorted(names)) + n_row, n_col = [int(x) for x in mode.split('x')] + logger.info(f'Constructing pairing matrix for {names[mode]} ({mode})') + pairs = _compute_pairs(n_row, n_col, n=1 + (mode == '3x3')) + assert n_nirs == len(pairs) * 2 + locs = np.zeros((len(ch_names), 12)) + idxs = np.where(np.array(ch_types, 'U') == 'fnirs_cw_amplitude')[0] + for ii, idx in enumerate(idxs): + ch_name = ch_names[idx] + # Use the actual/accurate wavelength in loc + acc_freq = ch_wavelengths[ch_name] + locs[idx][9] = acc_freq + # Rename channel based on standard naming scheme, using the + # nominal wavelength + sidx, didx = pairs[ii // 2] + nom_freq = fnirs_wavelengths[np.argmin(np.abs( + acc_freq - fnirs_wavelengths))] + ch_names[idx] = f'S{sidx + 1}_D{didx + 1} {nom_freq}' + + # figure out bounds + bounds = raw_extra['bounds'] = bounds[li + 2:] + last_samp = len(bounds) - 2 + + if age is not None and meas_date is not None: + subject_info['birthday'] = (meas_date.year - age, + meas_date.month, + meas_date.day) + if meas_date is None: + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0) + meas_date = meas_date.replace(tzinfo=dt.timezone.utc) + if subject_info: + info_extra['subject_info'] = subject_info + + # Create mne structure + info = create_info(ch_names, sfreq, ch_types=ch_types) + with info._unlock(): + info.update(info_extra) + info['meas_date'] = meas_date + for li, loc in enumerate(locs): + info['chs'][li]['loc'][:] = loc + + super().__init__( + info, preload, filenames=[fname], last_samps=[last_samp], + raw_extras=[raw_extra], verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + this_data = _read_csv_rows_cols( + self._raw_extras[fi]['fname'], + start, stop, self._raw_extras[fi]['keep_mask'], + self._raw_extras[fi]['bounds'], sep=',', + replace=lambda x: + x.replace('\r', '\n') + .replace('\n\n', '\n') + .replace('\n', ',') + .replace(':', '')).T + _mult_cal_one(data, this_data, idx, cals, mult) + return data + + +def _compute_pairs(n_rows, n_cols, n=1): + n_tot = n_rows * n_cols + sd_idx = (np.arange(n_tot) // 2).reshape(n_rows, n_cols) + d_bool = np.empty((n_rows, n_cols), bool) + for ri in range(n_rows): + d_bool[ri] = np.arange(ri, ri 
+ n_cols) % 2 + pairs = list() + for ri in range(n_rows): + # First iterate over connections within the row + for ci in range(n_cols - 1): + pair = (sd_idx[ri, ci], sd_idx[ri, ci + 1]) + if d_bool[ri, ci]: # reverse + pair = pair[::-1] + pairs.append(pair) + # Next iterate over row-row connections, if applicable + if ri >= n_rows - 1: + continue + for ci in range(n_cols): + pair = (sd_idx[ri, ci], sd_idx[ri + 1, ci]) + if d_bool[ri, ci]: + pair = pair[::-1] + pairs.append(pair) + if n > 1: + assert n == 2 # only one supported for now + pairs = np.array(pairs, int) + second = pairs + pairs.max(axis=0) + 1 + pairs = np.r_[pairs, second] + pairs = tuple(tuple(row) for row in pairs) + return tuple(pairs) diff --git a/python/libs/mne/io/hitachi/tests/test_hitachi.py b/python/libs/mne/io/hitachi/tests/test_hitachi.py new file mode 100644 index 0000000..7a91bfb --- /dev/null +++ b/python/libs/mne/io/hitachi/tests/test_hitachi.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import datetime as dt + +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_array_less + +from mne.channels import make_standard_montage +from mne.io import read_raw_hitachi +from mne.io.hitachi.hitachi import _compute_pairs +from mne.io.tests.test_raw import _test_raw_reader +from mne.preprocessing.nirs import (source_detector_distances, + optical_density, tddr, beer_lambert_law, + scalp_coupling_index) + + +CONTENTS = dict() +CONTENTS['1.18'] = b"""\ +Header,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +File Version,1.18,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Patient Information,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +ID,TestID,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Name,Test,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Comment,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Age, 45y,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Sex,Female,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Analyze Information,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +AnalyzeMode,Continuous,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Pre Time[s],9,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Post Time[s],7,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Recovery Time[s],12,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Base Time[s],10,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Fitting Degree,1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +HPF[Hz],No Filter,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +LPF[Hz],0.1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Moving Average[s],0.1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Measure Information,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Date,5/17/04 5:14,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Mode,3x5,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Wave[nm],695,830,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Wave Length,CH1(700.1),CH1(829.1),CH2(699.3),CH2(827.9),CH3(699.3),CH3(827.9),CH4(698.6),CH4(828.1),CH5(700.1),CH5(829.1),CH6(698.5),CH6(828.2),CH7(699.3),CH7(827.9),CH8(699.8),CH8(828.5),CH9(698.6),CH9(828.1),CH10(698.5),CH10(828.2),CH11(698.5),CH11(828.2),CH12(699.8),CH12(828.5),CH13(699.8),CH13(828.5),CH14(699.0),CH14(828.2),CH15(698.5),CH15(828.2),CH16(699.5),CH16(828.1),CH17(699.8),CH17(828.5),CH18(699.5),CH18(828.5),CH19(699.0),CH19(828.2),CH20(699.5),CH20(828.1),CH21(699.5),CH21(828.1),CH22(699.5),CH22(828.5),,,,, +Analog 
Gain,30.117647,30.117647,30.117647,30.117647,94.117647,94.117647,94.117647,94.117647,10.27451,10.27451,30.117647,30.117647,59.607843,59.607843,94.117647,94.117647,110.588235,110.588235,10.27451,10.27451,59.607843,59.607843,59.607843,59.607843,110.588235,110.588235,10.27451,10.27451,41.176471,41.176471,59.607843,59.607843,9.333333,9.333333,110.588235,110.588235,41.176471,41.176471,41.176471,41.176471,9.333333,9.333333,9.333333,9.333333,,,,, +Digital Gain,13.38,2.82,44.57,7.52,29.46,4.12,36.56,4.84,6.67,1.37,22.61,3.28,79.19,10.88,16.37,2.78,39.97,4.09,100,38.02,36.58,3.95,61.77,7.67,100,15.23,31.52,7.15,53.46,5.05,13.61,2.07,63.74,11.16,14.35,2.05,58.64,8.93,14.53,1.99,8.16,1.82,27.84,5.97,,,,, +Sampling Period[s],0.1,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +StimType,BLOCK,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Stim Time[s],20,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +F1,15,F2,15,F3,15,F4,15,F5,15,F6,15,F7,15,F8,15,F9,15,M,15,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Repeat Count,3,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Exception Ch,0,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Data,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Probe1,CH1(700.1),CH1(829.1),CH2(699.3),CH2(827.9),CH3(699.3),CH3(827.9),CH4(698.6),CH4(828.1),CH5(700.1),CH5(829.1),CH6(698.5),CH6(828.2),CH7(699.3),CH7(827.9),CH8(699.8),CH8(828.5),CH9(698.6),CH9(828.1),CH10(698.5),CH10(828.2),CH11(698.5),CH11(828.2),CH12(699.8),CH12(828.5),CH13(699.8),CH13(828.5),CH14(699.0),CH14(828.2),CH15(698.5),CH15(828.2),CH16(699.5),CH16(828.1),CH17(699.8),CH17(828.5),CH18(699.5),CH18(828.5),CH19(699.0),CH19(828.2),CH20(699.5),CH20(828.1),CH21(699.5),CH21(828.1),CH22(699.5),CH22(828.5),Mark,Time,BodyMovement,RemovalMark,PreScan +1,1.99371338,1.95037842,2.24243164,2.17483521,2.2052002,2.16384888,2.18017578,2.10418701,1.75735474,1.63879395,1.99432373,1.88644409,2.1307373,2.05368042,2.08358765,1.96212769,1.89682007,1.97311401,0.6993103,1.8006897,2.01049805,1.86462402,1.95968628,1.87988281,1.59408569,1.97891235,1.9392395,1.73034668,2.18307495,2.00424194,1.90841675,1.87286377,1.89331055,1.79077148,1.82418823,1.82601929,2.44125366,2.05291748,2.09381104,2.03796387,1.92672729,1.90353394,2.10266113,2.0401001,0,14:08.2,0,0,1 +2,1.97433472,1.94091797,2.31689453,2.16079712,2.22290039,2.15209961,2.12936401,2.08328247,1.75445557,1.63269043,2.01126099,1.88186646,2.11425781,2.04345703,2.07015991,1.95236206,1.90826416,1.95266724,0.71853638,1.78970337,2.03872681,1.8572998,1.95602417,1.86798096,1.59118652,1.94122314,1.93054199,1.72439575,2.21435547,2.00531006,1.90200806,1.86508179,1.87957764,1.78085327,1.81564331,1.81365967,2.33200073,2.02987671,2.0980835,2.02682495,1.92520142,1.8939209,2.10327148,2.01843262,0,14:08.4,0,0,1 
+3,1.97616577,1.93359375,2.2819519,2.15866089,2.19512939,2.15301514,2.10266113,2.07931519,1.74468994,1.62582397,2.04330444,1.87774658,2.06222534,2.0425415,2.06008911,1.95098877,1.94946289,1.94793701,0.69549561,1.76803589,1.9909668,1.85028076,1.93374634,1.86599731,1.60140991,1.92367554,1.92260742,1.72225952,2.16796875,1.99523926,1.89208984,1.86080933,1.88095093,1.78207397,1.80541992,1.81091309,2.32818604,2.01675415,2.10418701,2.01965332,1.92001343,1.88995361,2.08648682,2.01065063,0,14:08.4,0,0,1 +4,1.97799683,1.93695068,2.25875854,2.16461182,2.23007202,2.15774536,2.12554932,2.08068848,1.73980713,1.62155151,2.02072144,1.87957764,2.09320068,2.0401001,2.06558228,1.95327759,1.90414429,1.953125,0.66207886,1.77108765,2.01797485,1.85043335,1.98562622,1.86843872,1.48071289,1.93771362,1.92520142,1.71554565,2.18002319,1.98654175,1.89208984,1.85958862,1.88766479,1.78253174,1.81396484,1.80923462,2.34085083,2.01660156,2.09869385,2.0211792,1.92108154,1.89041138,2.07946777,2.01278687,0,14:08.6,0,0,1 +5,1.97219849,1.93847656,2.315979,2.16430664,2.20230103,2.15713501,2.07458496,2.07824707,1.73614502,1.62399292,2.00408936,1.87652588,2.1005249,2.03887939,2.07061768,1.95220947,1.89361572,1.95129395,0.72784424,1.79977417,2.01309204,1.85089111,1.88674927,1.86035156,1.40136719,1.97387695,1.91711426,1.71966553,2.16247559,1.99234009,1.8737793,1.86035156,1.8989563,1.78543091,1.83700562,1.80877686,2.39151001,2.02896118,2.08709717,2.02224731,1.92138672,1.89025879,2.09014893,2.01431274,0,14:08.6,0,0,1 +6,1.96807861,1.93969727,2.27905273,2.15988159,2.22793579,2.15789795,2.10266113,2.08618164,1.73950195,1.62445068,2.01156616,1.87728882,2.08847046,2.03811646,2.06420898,1.95617676,1.91879272,1.95541382,0.7019043,1.77963257,1.98974609,1.85195923,1.93954468,1.86630249,1.57897949,2.00317383,1.91040039,1.72515869,2.24868774,1.99157715,1.89849854,1.86218262,1.90658569,1.78634644,1.82495117,1.81427002,2.45651245,2.03353882,2.10083008,2.02758789,1.92184448,1.89331055,2.09686279,2.01980591,0,14:08.8,0,0,1 +7,1.98776245,1.94198608,2.2895813,2.15759277,2.21405029,2.16812134,2.15332031,2.09991455,1.74499512,1.62765503,2.01919556,1.87911987,2.2026062,2.04589844,2.05276489,1.96105957,1.93862915,1.96105957,0.67474365,1.78604126,1.99249268,1.84967041,1.89849854,1.86676025,1.39373779,1.95846558,1.90979004,1.72698975,2.27798462,1.99798584,1.91055298,1.86172485,1.91375732,1.78375244,1.78024292,1.8157959,2.30819702,2.05245972,2.1043396,2.03079224,1.925354,1.89697266,2.11288452,2.02545166,0,14:08.9,0,0,1 +8,1.98364258,1.94488525,2.28744507,2.16598511,2.20123291,2.17254639,2.13745117,2.10357666,1.75003052,1.63162231,2.00790405,1.87637329,2.20916748,2.05444336,2.05459595,1.95983887,1.90536499,1.96777344,0.72494507,1.78741455,1.98318481,1.85516357,1.89605713,1.86798096,1.4453125,1.95663452,1.91436768,1.73355103,2.22427368,2.00180054,1.89605713,1.86599731,1.86950684,1.79077148,1.79962158,1.82327271,2.31536865,2.06726074,2.09564209,2.03521729,1.92459106,1.90078735,2.10281372,2.03353882,0,14:08.9,0,0,1 +9,1.97967529,1.95297241,2.34939575,2.18048096,2.22961426,2.1711731,2.10784912,2.10617065,1.75308228,1.6368103,2.00836182,1.87728882,2.07595825,2.04437256,2.04742432,1.96411133,1.92672729,1.97036743,0.65322876,1.78192139,2.00515747,1.85562134,1.99584961,1.86828613,1.41708374,1.91955566,1.92321777,1.73690796,2.25341797,1.99813843,1.89788818,1.86904907,1.8699646,1.79336548,1.80130005,1.82952881,2.44827271,2.08602905,2.10388184,2.03964233,1.9291687,1.90505981,2.0980835,2.0401001,0,14:09.1,0,0,1 
+10,1.96777344,1.95663452,2.3046875,2.17712402,2.26348877,2.17590332,2.14691162,2.11624146,1.75506592,1.64108276,2.01538086,1.88278198,2.21374512,2.04696655,2.07824707,1.96838379,1.94854736,1.97387695,0.64300537,1.80709839,2.03277588,1.86340332,1.96380615,1.87942505,1.38870239,1.95419312,1.92611694,1.74087524,2.30636597,2.00012207,1.90124512,1.87103271,1.88766479,1.79397583,1.81411743,1.83273315,2.38845825,2.09106445,2.12188721,2.04544067,1.93237305,1.90994263,2.12310791,2.04696655,0,14:09.1,0,0,1 +11,1.98181152,1.96182251,2.2883606,2.1812439,2.25265503,2.18231201,2.12814331,2.11181641,1.75842285,1.64596558,2.0135498,1.88995361,2.15209961,2.05230713,2.07077026,1.97387695,1.86767578,1.98471069,0.69610596,1.80511475,1.9960022,1.87103271,1.90963745,1.8838501,1.41983032,1.98654175,1.91070557,1.74255371,2.22869873,2.00119019,1.8977356,1.87103271,1.87271118,1.80007935,1.83288574,1.83670044,2.45758057,2.09213257,2.08786011,2.04910278,1.93267822,1.90963745,2.10998535,2.05307007,0,14:09.2,0,0,1 +12,1.9909668,1.95526123,2.2756958,2.17941284,2.26486206,2.17056274,2.14797974,2.09945679,1.75415039,1.64199829,2.02407837,1.8913269,2.07015991,2.05383301,2.05596924,1.96411133,1.85272217,1.96365356,0.62423706,1.79443359,1.99417114,1.86538696,1.96487427,1.87637329,1.50161743,1.97616577,1.92626953,1.73583984,2.29568481,2.00210571,1.90765381,1.86904907,1.87347412,1.79473877,1.81488037,1.82327271,2.35809326,2.07244873,2.09411621,2.03353882,1.92932129,1.90338135,2.10510254,2.03475952,0,14:09.4,0,0,1 +13,1.98287964,1.94839478,2.24395752,2.18048096,2.22961426,2.16156006,2.13409424,2.08709717,1.74362183,1.62902832,2.0111084,1.89331055,2.10952759,2.05001831,2.06558228,1.95846558,1.92581177,1.95098877,0.67565918,1.77017212,1.96060181,1.85882568,1.95159912,1.8762207,1.68884277,1.97921753,1.92855835,1.73416138,2.23876953,2.00210571,1.90200806,1.86828613,1.88858032,1.78710938,1.83029175,1.81137085,2.3526001,2.04116821,2.10769653,2.02423096,1.92565918,1.90002441,2.09457397,2.01904297,0,14:09.5,0,0,1 +14,1.99539185,1.93984985,2.27966309,2.16644287,2.20947266,2.16186523,2.12966919,2.08984375,1.73339844,1.61865234,2.02865601,1.89239502,2.11593628,2.05337524,2.05307007,1.95632935,1.9682312,1.95175171,0.69198608,1.7729187,1.95541382,1.85348511,1.95953369,1.86935425,1.55059814,1.9203186,1.91192627,1.73065186,2.30255127,1.99172974,1.89743042,1.86325073,1.87255859,1.7855835,1.83547974,1.81137085,2.35107422,2.0413208,2.09243774,2.01477051,1.92810059,1.89575195,2.10174561,2.01538086,0,14:09.6,0,0,1 +15,1.97799683,1.9380188,2.25616455,2.15927124,2.23983765,2.16186523,2.15667725,2.0942688,1.72332764,1.61178589,2.04833984,1.88995361,2.12112427,2.04421997,2.0703125,1.9569397,1.98425293,1.9581604,0.6690979,1.75369263,1.97433472,1.8536377,1.90307617,1.87408447,1.5020752,1.92214966,1.90536499,1.7276001,2.22579956,1.98196411,1.90704346,1.86431885,1.88552856,1.78817749,1.8157959,1.81396484,2.34634399,2.02667236,2.05535889,2.01370239,1.92642212,1.8963623,2.10540771,2.01812744,0,14:09.6,0,0,1 +16,1.97402954,1.94320679,2.28927612,2.16308594,2.22045898,2.15759277,2.13287354,2.09533691,1.72103882,1.61392212,2.03170776,1.89239502,2.12036133,2.0539856,2.05886841,1.95785522,1.96426392,1.95388794,0.64727783,1.75918579,2.00241089,1.85623169,1.95159912,1.86813354,1.42822266,1.94442749,1.91574097,1.73110962,2.2265625,1.98608398,1.90155029,1.86447144,1.87133789,1.7880249,1.82418823,1.81488037,2.38815308,2.03323364,2.06680298,2.01141357,1.92764282,1.89865112,2.11410522,2.02316284,0,14:09.8,0,0,1 
+17,1.97616577,1.94488525,2.28408813,2.16262817,2.22442627,2.16323853,2.10845947,2.09335327,1.72180176,1.61437988,2.01812744,1.89025879,2.14065552,2.04421997,2.05230713,1.95846558,1.93649292,1.95465088,0.62026978,1.77139282,1.99203491,1.85516357,1.91833496,1.8598938,1.5234375,1.96426392,1.92260742,1.73278809,2.18643188,1.98486328,1.91436768,1.86691284,1.87942505,1.79458618,1.82723999,1.82144165,2.24609375,2.04086304,2.08587646,2.01477051,1.92611694,1.90170288,2.10418701,2.03155518,0,14:09.8,0,0,1 +18,1.98242188,1.94839478,2.23770142,2.17681885,2.24349976,2.16690063,2.14187622,2.10189819,1.72317505,1.6166687,2.01126099,1.89300537,2.2442627,2.05917358,2.07855225,1.95892334,2.00881958,1.96578979,0.66619873,1.79229736,2.00164795,1.85974121,1.94900513,1.86813354,1.49673462,1.97021484,1.93161011,1.73477173,2.26104736,1.99493408,1.9052124,1.86889648,1.85714722,1.79290771,1.80648804,1.82571411,2.33688354,2.03872681,2.0715332,2.02072144,1.92749023,1.9039917,2.11380005,2.03704834,0,14:09.9,0,0,1 +19,1.99203491,1.95053101,2.21984863,2.16567993,2.24914551,2.16644287,2.17529297,2.11242676,1.72943115,1.62200928,2.01705933,1.89071655,2.08297729,2.05413818,2.06710815,1.96289063,1.95388794,1.97113037,0.65765381,1.79656982,1.99569702,1.86813354,1.95297241,1.88034058,1.61651611,1.96380615,1.91329956,1.7388916,2.31140137,1.99768066,1.91375732,1.87347412,1.88522339,1.80053711,1.8321228,1.83013916,2.43652344,2.05963135,2.0715332,2.02224731,1.93084717,1.9078064,2.11685181,2.04299927,0,14:10.1,0,0,1 +20,1.99111938,1.95373535,2.26196289,2.18078613,2.1987915,2.16827393,2.13729858,2.11776733,1.73477173,1.6267395,2.01141357,1.88583374,2.09472656,2.05413818,2.0602417,1.97067261,1.92123413,1.97341919,0.67626953,1.81976318,1.98623657,1.8711853,1.99081421,1.88323975,1.51351929,1.94259644,1.91070557,1.74301147,2.34313965,1.99783325,1.90933228,1.87561035,1.91986084,1.80526733,1.83883667,1.83425903,2.3789978,2.07061768,2.10906982,2.02911377,1.93069458,1.91040039,2.11120605,2.05444336,0,14:10.1,0,0,1 +21,1.99432373,1.95632935,2.26348877,2.17254639,2.22091675,2.17041016,2.1395874,2.11257935,1.73446655,1.62948608,2.0085144,1.88919067,2.13272095,2.06359863,2.07229614,1.96777344,1.91635132,1.97753906,0.67993164,1.78771973,2.01278687,1.87179565,1.92855835,1.88446045,1.44363403,1.96456909,1.90948486,1.74346924,2.24441528,2.0022583,1.90689087,1.87637329,1.91253662,1.80480957,1.81213379,1.83670044,2.32421875,2.07244873,2.08938599,2.03018188,1.93405151,1.91101074,2.12799072,2.05245972,0,14:10.3,0,0,1 +22,1.97540283,1.94702148,2.29507446,2.16796875,2.23678589,2.16369629,2.15377808,2.09854126,1.73431396,1.62582397,2.02285767,1.88613892,2.14736938,2.05108643,2.06466675,1.95739746,1.98852539,1.95831299,0.66726685,1.80206299,1.99890137,1.87103271,1.95175171,1.87332153,1.48300171,1.93161011,1.91070557,1.73553467,2.3236084,1.99890137,1.89224243,1.87210083,1.90490723,1.79199219,1.80984497,1.82266235,2.44430542,2.059021,2.09091187,2.02682495,1.92596436,1.90322876,2.11151123,2.0300293,0,14:10.3,0,0,1 +23,1.98196411,1.94076538,2.25891113,2.17376709,2.22000122,2.162323,2.11456299,2.07885742,1.73431396,1.62353516,2.02423096,1.88400269,2.14614868,2.0489502,2.04788208,1.95053101,1.9392395,1.93847656,0.6968689,1.80007935,1.99325562,1.86325073,1.92306519,1.86325073,1.52069092,1.96899414,1.89575195,1.73110962,2.38296509,1.99050903,1.90322876,1.86798096,1.88522339,1.79397583,1.81793213,1.81259155,2.34802246,2.05993652,2.09609985,2.02072144,1.92993164,1.89910889,2.09411621,2.01675415,0,14:10.5,0,0,1 
+24,1.97601318,1.93710327,2.27630615,2.16629028,2.22366333,2.15438843,2.13867188,2.08312988,1.73614502,1.62216187,2.00546265,1.88217163,2.11608887,2.0501709,2.04437256,1.94610596,1.88034058,1.93847656,0.68283081,1.78573608,1.99447632,1.86126709,1.93969727,1.86203003,1.6204834,1.95159912,1.90353394,1.72515869,2.29537964,1.99157715,1.90353394,1.86798096,1.90856934,1.78466797,1.77825928,1.81182861,2.35046387,2.06130981,2.10220337,2.02468872,1.92657471,1.89956665,2.10189819,2.01812744,0,14:10.6,0,0,1 +25,1.97372437,1.9380188,2.26913452,2.16491699,2.20046997,2.15499878,2.13699341,2.0916748,1.73629761,1.62216187,2.02514648,1.88156128,2.17895508,2.03781128,2.06069946,1.94992065,1.95129395,1.9493103,0.65811157,1.78390503,2.00195313,1.85958862,1.96380615,1.86416626,1.69540405,1.92749023,1.91375732,1.72927856,2.30285645,1.98608398,1.90231323,1.86950684,1.90658569,1.79290771,1.80221558,1.81259155,2.39135742,2.05551147,2.09152222,2.02423096,1.92901611,1.90032959,2.10449219,2.02301025,0,14:10.6,0,0,1 +26,1.98699951,1.9418335,2.2845459,2.16461182,2.21847534,2.15362549,2.14553833,2.0916748,1.73446655,1.62445068,2.02957153,1.88095093,2.18887329,2.05001831,2.05780029,1.94961548,1.97113037,1.95617676,0.6690979,1.77810669,1.98165894,1.86172485,1.93984985,1.86752319,1.46759033,1.93847656,1.90689087,1.73477173,2.2215271,1.99356079,1.9078064,1.86767578,1.87561035,1.79550171,1.78695679,1.8145752,2.38891602,2.06634521,2.0803833,2.02377319,1.93099976,1.8989563,2.11105347,2.02850342,0,14:10.8,0,0,1 +27,1.99172974,1.94488525,2.28012085,2.16674805,2.24365234,2.15255737,2.14172363,2.1055603,1.7350769,1.62643433,2.02728271,1.88201904,2.13470459,2.03796387,2.06558228,1.95373535,1.9128418,1.9631958,0.67138672,1.78161621,1.99951172,1.86965942,1.90719604,1.87133789,1.49520874,1.96914673,1.90155029,1.73812866,2.16400146,2.00546265,1.89971924,1.86859131,1.89453125,1.79473877,1.82540894,1.81884766,2.40783691,2.07489014,2.08694458,2.02423096,1.92840576,1.90185547,2.10632324,2.03353882,0,14:10.8,0,0,1 +28,1.9732666,1.95175171,2.25753784,2.16812134,2.240448,2.16156006,2.11791992,2.10235596,1.73721313,1.63009644,1.99203491,1.88873291,2.20687866,2.04162598,2.0489502,1.95770264,1.93862915,1.95953369,0.67749023,1.79397583,2.0211792,1.86920166,1.85409546,1.87423706,1.53579712,1.94671631,1.90414429,1.74194336,2.23876953,1.99981689,1.91268921,1.86859131,1.88369751,1.79840088,1.83441162,1.82449341,2.38571167,2.07824707,2.09381104,2.02697754,1.92687988,1.90490723,2.10479736,2.04147339,0,14:11.0,0,0,1 +29,1.96792603,1.95770264,2.28240967,2.16888428,2.22106934,2.16705322,2.1295166,2.10174561,1.74163818,1.63330078,1.99935913,1.8901062,2.05825806,2.04925537,2.0401001,1.95861816,1.91741943,1.96105957,0.70831299,1.81396484,1.99874878,1.86752319,1.96304321,1.87301636,1.42089844,1.96243286,1.90505981,1.74209595,2.13973999,2.00485229,1.90628052,1.87057495,1.88659668,1.80053711,1.83898926,1.82998657,2.44888306,2.07855225,2.1055603,2.02880859,1.93206787,1.90536499,2.11380005,2.04437256,0,14:11.1,0,0,1 +30,1.97982788,1.95861816,2.2505188,2.16903687,2.2227478,2.16842651,2.12631226,2.11807251,1.74026489,1.63619995,1.99783325,1.89453125,2.05108643,2.04086304,2.06497192,1.96716309,1.87042236,1.97525024,0.67626953,1.82174683,2.00271606,1.87316895,1.97113037,1.87850952,1.46469116,1.95175171,1.92520142,1.74423218,2.26837158,1.99874878,1.9090271,1.87347412,1.90673828,1.80084229,1.83151245,1.82739258,2.51907349,2.09030151,2.10220337,2.03170776,1.93618774,1.9102478,2.11425781,2.05169678,0,14:11.2,0,0,1 
+31,1.97662354,1.96273804,2.19848633,2.17575073,2.19696045,2.16384888,2.14111328,2.12295532,1.74194336,1.63970947,2.01950073,1.89575195,2.13088989,2.06619263,2.04589844,1.96792603,1.90551758,1.98348999,0.65628052,1.81152344,2.01904297,1.87957764,1.92276001,1.88323975,1.38900757,1.93771362,1.90887451,1.75201416,2.27508545,2.00576782,1.91711426,1.87973022,1.88964844,1.80877686,1.82907104,1.83349609,2.44598389,2.09075928,2.10540771,2.0401001,1.93710327,1.91833496,2.11593628,2.05657959,0,14:11.2,0,0,1 +32,1.98669434,1.96029663,2.29675293,2.18917847,2.212677,2.16842651,2.14202881,2.11395264,1.74453735,1.64199829,2.00759888,1.89605713,2.14096069,2.05078125,2.0463562,1.96517944,2.01156616,1.97677612,0.64559937,1.80419922,1.98120117,1.88049316,1.91513062,1.88598633,1.56158447,1.95755005,1.91253662,1.75048828,2.18658447,2.00912476,1.90185547,1.88064575,1.88568115,1.80496216,1.8296814,1.83319092,2.41394043,2.10067749,2.10159302,2.0324707,1.9354248,1.9140625,2.11898804,2.05200195,0,14:11.3,0,0,1 +33,1.98532104,1.953125,2.27600098,2.18566895,2.20947266,2.15698242,2.13500977,2.0954895,1.74285889,1.63894653,2.01202393,1.88949585,2.19451904,2.06787109,2.05490112,1.95678711,1.97540283,1.96029663,0.66055298,1.81503296,1.97738647,1.87210083,1.97067261,1.87454224,1.6456604,1.96868896,1.90658569,1.74728394,2.25372314,2.01553345,1.90567017,1.87515259,1.90261841,1.79962158,1.82418823,1.81686401,2.33856201,2.09381104,2.09152222,2.02682495,1.93237305,1.90582275,2.10144043,2.03186035,0,14:11.5,0,0,1 +34,1.98028564,1.94595337,2.23480225,2.17849731,2.20993042,2.15255737,2.09884644,2.08557129,1.74606323,1.63528442,2.02789307,1.88995361,2.24090576,2.05276489,2.06481934,1.94869995,1.96670532,1.93939209,0.71411133,1.82739258,2.00210571,1.87133789,1.8951416,1.87179565,1.40487671,1.97174072,1.92230225,1.73446655,2.30804443,1.99539185,1.90155029,1.87393188,1.90307617,1.79489136,1.84265137,1.81015015,2.44552612,2.05978394,2.08709717,2.02377319,1.927948,1.902771,2.09716797,2.02133179,0,14:11.6,0,0,1 +35,1.97540283,1.9418335,2.27020264,2.17758179,2.19360352,2.15652466,2.16308594,2.08129883,1.74377441,1.63543701,2.00958252,1.88842773,2.11303711,2.06390381,2.06314087,1.9493103,1.9619751,1.93954468,0.69076538,1.81472778,2.03689575,1.86843872,1.91299438,1.87408447,1.51916504,1.91619873,1.91848755,1.73812866,2.23510742,1.98944092,1.90612793,1.87515259,1.91253662,1.78939819,1.82876587,1.81427002,2.47390747,2.06634521,2.10739136,2.02651978,1.92840576,1.90383911,2.09747314,2.02224731,0,14:11.7,0,0,1 +36,1.98654175,1.94610596,2.27264404,2.17437744,2.22106934,2.15866089,2.12554932,2.08435059,1.74606323,1.63543701,1.9984436,1.88232422,2.15118408,2.07473755,2.06695557,1.94976807,1.91421509,1.94625854,0.6652832,1.7930603,2.01431274,1.8661499,1.9052124,1.86920166,1.55166626,1.90185547,1.90719604,1.74057007,2.18048096,1.99295044,1.90582275,1.87240601,1.87057495,1.79382324,1.81945801,1.81213379,2.37976074,2.06634521,2.12615967,2.02682495,1.92977905,1.90292358,2.10525513,2.02194214,0,14:11.8,0,0,1 +37,1.97418213,1.95037842,2.25952148,2.17269897,2.22961426,2.15362549,2.0614624,2.09396362,1.74377441,1.63726807,1.98318481,1.88934326,2.14096069,2.05230713,2.06344604,1.953125,1.89468384,1.9468689,0.64910889,1.81091309,1.97463989,1.86981201,1.90811157,1.87866211,1.52664185,1.94763184,1.89559937,1.73934937,2.22137451,2.00042725,1.90795898,1.87728882,1.88369751,1.79885864,1.82128906,1.80984497,2.44415283,2.081604,2.10021973,2.02728271,1.93084717,1.90689087,2.11395264,2.02575684,0,14:11.8,0,0,1 
+38,1.97113037,1.95098877,2.28057861,2.17910767,2.20901489,2.1546936,2.10067749,2.09320068,1.74591064,1.63955688,1.97174072,1.89056396,2.1446228,2.04574585,2.05657959,1.95877075,1.92642212,1.95083618,0.63217163,1.81213379,2.00714111,1.87301636,1.98638916,1.87606812,1.45858765,1.99295044,1.90216064,1.74407959,2.17819214,1.99295044,1.92062378,1.87606812,1.88461304,1.80175781,1.82876587,1.81533813,2.4307251,2.07778931,2.09899902,2.02865601,1.92947388,1.90719604,2.11364746,2.03216553,0,14:11.9,0,0,1 +39,1.98394775,1.95678711,2.25265503,2.18460083,2.21923828,2.15988159,2.15713501,2.09335327,1.74743652,1.64108276,2.04437256,1.89544678,2.12524414,2.05551147,2.05307007,1.96258545,1.99295044,1.95983887,0.64346313,1.82022095,2.00897217,1.87362671,1.93954468,1.87927246,1.52832031,1.95571899,1.9203186,1.75460815,2.2946167,2.006073,1.91299438,1.87805176,1.88018799,1.80877686,1.82983398,1.822052,2.30957031,2.07626343,2.09136963,2.03262329,1.93527222,1.91085815,2.12005615,2.04284668,0,14:12.1,0,0,1 +40,1.99996948,1.95755005,2.28988647,2.18170166,2.22320557,2.17285156,2.16186523,2.10845947,1.74118042,1.63909912,2.05337524,1.90185547,2.13424683,2.06466675,2.07107544,1.96563721,1.96090698,1.96670532,0.71044922,1.79550171,2.01095581,1.8762207,1.97921753,1.8901062,1.50222778,1.93222046,1.92138672,1.75964355,2.3059082,2.01477051,1.91619873,1.87973022,1.90139771,1.80679321,1.83731079,1.82510376,2.34085083,2.08114624,2.08953857,2.02774048,1.93740845,1.91421509,2.11639404,2.04940796,0,14:12.2,0,0,1 +41,1.98394775,1.95785522,2.25570679,2.17987061,2.19512939,2.17559814,2.11608887,2.11883545,1.7388916,1.64154053,2.0324707,1.90444946,2.1333313,2.06680298,2.06741333,1.97052002,1.94564819,1.97097778,0.71166992,1.81335449,1.99691772,1.87667847,1.94152832,1.8901062,1.54220581,1.95541382,1.91833496,1.7666626,2.22686768,2.00790405,1.92260742,1.87988281,1.88781738,1.81121826,1.83670044,1.83380127,2.39196777,2.09289551,2.10189819,2.02896118,1.9380188,1.91558838,2.1182251,2.05627441,0,14:12.2,0,0,1 +42,1.96655273,1.96670532,2.2428894,2.18521118,2.23602295,2.1824646,2.16140747,2.11669922,1.74179077,1.64825439,2.03186035,1.90765381,2.07855225,2.06985474,2.05505371,1.97662354,1.98059082,1.97189331,0.6741333,1.84951782,1.97174072,1.88201904,1.95968628,1.89102173,1.41387939,1.98577881,1.92245483,1.77017212,2.16629028,2.0199585,1.9229126,1.88522339,1.87713623,1.81396484,1.81137085,1.8371582,2.3600769,2.11273193,2.07809448,2.03613281,1.94168091,1.92138672,2.12341309,2.06573486,0,14:12.4,0,0,1 +43,1.97341919,1.96578979,2.24502563,2.19055176,2.23724365,2.17605591,2.15484619,2.12188721,1.746521,1.6494751,2.01507568,1.90429688,2.10388184,2.06283569,2.07687378,1.97143555,1.96487427,1.97555542,0.68862915,1.82952881,2.03384399,1.88674927,1.96395874,1.8850708,1.54769897,1.99737549,1.92306519,1.76605225,2.21679688,2.0211792,1.91802979,1.88659668,1.87820435,1.81503296,1.82281494,1.84005737,2.43560791,2.10571289,2.10769653,2.03689575,1.94198608,1.91970825,2.11730957,2.06207275,0,14:12.5,0,0,1 +44,1.97677612,1.95495605,2.31628418,2.1824646,2.24975586,2.16705322,2.16384888,2.10510254,1.74972534,1.64962769,2.00683594,1.89987183,2.18933105,2.05993652,2.05795288,1.96350098,1.96136475,1.95556641,0.67184448,1.83441162,2.01568604,1.88369751,1.93725586,1.88537598,1.53778076,1.9644165,1.9078064,1.76193237,2.23556519,2.01507568,1.92657471,1.88308716,1.90872192,1.80892944,1.80892944,1.824646,2.41073608,2.10205078,2.10174561,2.02835083,1.93664551,1.91375732,2.10754395,2.04376221,0,14:12.6,0,0,1 
+45,1.98364258,1.95449829,2.40234375,2.18063354,2.24273682,2.16293335,2.12936401,2.08892822,1.75170898,1.6468811,2.03491211,1.89651489,2.09945679,2.06298828,2.04452515,1.9619751,1.95175171,1.94580078,0.67153931,1.84738159,1.99142456,1.88156128,1.97402954,1.88873291,1.5562439,1.93710327,1.91986084,1.75445557,2.24014282,2.00073242,1.91513062,1.88217163,1.9128418,1.80236816,1.81152344,1.81549072,2.42279053,2.09197998,2.10281372,2.02880859,1.93405151,1.91238403,2.10678101,2.03369141,0,14:12.6,0,0,1 +46,1.96777344,1.95617676,2.30743408,2.1812439,2.23892212,2.15713501,2.13638306,2.0930481,1.74987793,1.64794922,2.03643799,1.89727783,2.1005249,2.06054688,2.05734253,1.96029663,1.96228027,1.95129395,0.72982788,1.83746338,2.02377319,1.88827515,1.97128296,1.88751221,1.50405884,1.9102478,1.89865112,1.7515564,2.27020264,2.01141357,1.93069458,1.87820435,1.88690186,1.80007935,1.82373047,1.81625366,2.35076904,2.10906982,2.08908081,2.03033447,1.93267822,1.90689087,2.10128784,2.03399658,0,14:12.8,0,0,1 +47,1.96929932,1.95877075,2.28775024,2.18338013,2.19100952,2.16186523,2.16033936,2.09518433,1.75170898,1.64901733,2.00866699,1.89712524,2.13317871,2.05780029,2.06939697,1.95587158,1.93664551,1.95556641,0.70632935,1.85668945,2.01766968,1.89147949,1.90383911,1.88262939,1.52236938,1.93008423,1.92367554,1.75460815,2.26470947,2.0149231,1.92520142,1.87896729,1.86187744,1.80343628,1.81396484,1.82006836,2.41210938,2.11471558,2.10922241,2.03659058,1.93893433,1.90811157,2.12219238,2.03598022,0,14:12.8,0,0,1 +48,1.99050903,1.96456909,2.27706909,2.19543457,2.17453003,2.15866089,2.17346191,2.10296631,1.75491333,1.6519165,1.98883057,1.89880371,2.10327148,2.05596924,2.06604004,1.95922852,1.9720459,1.9581604,0.70800781,1.84875488,1.98745728,1.88720703,1.85241699,1.88262939,1.64047241,1.95999146,1.92306519,1.7565918,2.19161987,2.00561523,1.90505981,1.87606812,1.89590454,1.8057251,1.81045532,1.82006836,2.48703003,2.1194458,2.10891724,2.03399658,1.93939209,1.90689087,2.11242676,2.04162598,0,14:12.9,0,0,1 +49,1.97280884,1.97250366,2.30514526,2.1875,2.18338013,2.16217041,2.16903687,2.09976196,1.7539978,1.65435791,2.02072144,1.90002441,2.09899902,2.04727173,2.05581665,1.96090698,2.02911377,1.95846558,0.68664551,1.86218262,1.99523926,1.89331055,1.92123413,1.87957764,1.65481567,1.98486328,1.90444946,1.75323486,2.19009399,2.01766968,1.9342041,1.87744141,1.91238403,1.80587769,1.84326172,1.82693481,2.46429443,2.12585449,2.11929321,2.0350647,1.94122314,1.90887451,2.09945679,2.04971313,0,14:13.1,0,0,1 +50,1.98562622,1.97647095,2.25646973,2.19741821,2.22045898,2.16873169,2.15682983,2.10876465,1.75262451,1.6569519,2.00668335,1.90261841,2.12188721,2.04498291,2.06100464,1.96395874,1.95648193,1.96670532,0.69229126,1.85089111,2.00088501,1.89285278,1.9354248,1.88446045,1.55502319,1.96472168,1.90673828,1.76055908,2.2605896,2.01889038,1.91513062,1.88034058,1.94366455,1.80252075,1.84631348,1.82983398,2.37731934,2.12509155,2.11257935,2.03948975,1.94213867,1.91162109,2.11624146,2.05383301,0,14:13.2,0,0,1 +51,1.98532104,1.978302,2.27981567,2.19924927,2.24197388,2.17575073,2.18032837,2.11700439,1.74530029,1.65390015,2.02804565,1.91070557,2.18261719,2.06604004,2.06130981,1.97311401,1.95709229,1.97128296,0.67932129,1.84173584,2.03796387,1.90048218,1.94915771,1.89086914,1.46484375,1.97113037,1.92382813,1.76513672,2.27050781,2.02255249,1.91848755,1.88430786,1.91436768,1.81274414,1.84127808,1.83654785,2.46307373,2.11654663,2.09915161,2.04284668,1.93908691,1.91619873,2.12753296,2.05993652,0,14:13.2,0,0,1 
+52,1.97509766,1.97967529,2.36450195,2.19345093,2.20993042,2.17163086,2.16583252,2.12280273,1.73919678,1.64871216,2.03323364,1.91452026,2.16796875,2.07000732,2.07672119,1.978302,1.98638916,1.97525024,0.7093811,1.84326172,2.03903198,1.89804077,1.99325562,1.90338135,1.72515869,1.95877075,1.92169189,1.77062988,2.31582642,2.03323364,1.9090271,1.88720703,1.88232422,1.82006836,1.83380127,1.84494019,2.45056152,2.12860107,2.10067749,2.04116821,1.93969727,1.91940308,2.13150024,2.06924438,0,14:13.3,0,0,1 +53,1.99325562,1.97662354,2.32818604,2.2052002,2.22793579,2.1786499,2.16842651,2.11837769,1.74057007,1.64794922,2.04315186,1.91970825,2.12310791,2.07366943,2.07748413,1.97845459,1.97799683,1.97662354,0.70541382,1.82373047,2.01553345,1.90078735,1.95404053,1.90170288,1.62277222,1.94091797,1.91558838,1.77001953,2.30239868,2.02728271,1.93084717,1.88735962,1.88156128,1.81793213,1.84570313,1.84280396,2.46688843,2.13165283,2.10281372,2.0413208,1.94015503,1.91955566,2.12234497,2.06802368,0,14:13.5,0,0,1 +54,1.99813843,1.96731567,2.29202271,2.2076416,2.23937988,2.17666626,2.13287354,2.10449219,1.73568726,1.64352417,2.03887939,1.91711426,2.06069946,2.07778931,2.08129883,1.96777344,1.93588257,1.94976807,0.71517944,1.83792114,2.02056885,1.89315796,1.92520142,1.89743042,1.54418945,1.96350098,1.90139771,1.76101685,2.27981567,2.02011108,1.92138672,1.88781738,1.90750122,1.81045532,1.81747437,1.82617188,2.29568481,2.1105957,2.10891724,2.03613281,1.93557739,1.91497803,2.10845947,2.04650879,0,14:13.6,0,0,1 +55,1.97982788,1.95541382,2.26745605,2.19268799,2.1913147,2.16308594,2.12265015,2.09365845,1.7350769,1.6394043,2.02392578,1.91116333,2.11090088,2.07733154,2.07366943,1.96212769,1.9631958,1.94335938,0.66879272,1.82617188,2.03765869,1.88583374,1.925354,1.88858032,1.53167725,1.97021484,1.90078735,1.75140381,2.27142334,2.01004028,1.92474365,1.88491821,1.8951416,1.80236816,1.8145752,1.81838989,2.31750488,2.08572388,2.10067749,2.02957153,1.93252563,1.91040039,2.10586548,2.03338623,0,14:13.6,0,0,1 +56,1.9859314,1.95953369,2.30621338,2.18582153,2.26013184,2.16308594,2.12921143,2.08862305,1.7338562,1.6394043,2.02148438,1.90628052,2.16705322,2.06985474,2.07275391,1.96228027,1.90734863,1.94320679,0.67001343,1.84280396,2.00927734,1.88934326,1.93099976,1.88140869,1.48971558,1.95343018,1.90475464,1.7527771,2.2303772,2.00912476,1.92123413,1.88354492,1.87469482,1.80419922,1.83670044,1.81594849,2.43453979,2.09838867,2.10510254,2.03552246,1.93344116,1.90979004,2.10449219,2.03201294,0,14:13.8,0,0,1 +57,1.97692871,1.95877075,2.25662231,2.19192505,2.18582153,2.16278076,2.12554932,2.09823608,1.7364502,1.63955688,2.00866699,1.90658569,2.16339111,2.05917358,2.07611084,1.96426392,1.92504883,1.94625854,0.67138672,1.83792114,2.00088501,1.88934326,1.97357178,1.88339233,1.52603149,1.91513062,1.90689087,1.75064087,2.23968506,2.01004028,1.92642212,1.88079834,1.90872192,1.80130005,1.8208313,1.8196106,2.35809326,2.11914063,2.10494995,2.03186035,1.93527222,1.90917969,2.10693359,2.03475952,0,14:13.9,0,0,1 +58,1.96884155,1.96380615,2.2869873,2.18612671,2.19711304,2.16278076,2.17208862,2.09838867,1.7364502,1.64108276,2.0401001,1.902771,2.16567993,2.06344604,2.08374023,1.96090698,1.95404053,1.95205688,0.68054199,1.82876587,1.99401855,1.88980103,1.92932129,1.88278198,1.5574646,1.94778442,1.90444946,1.75033569,2.27706909,2.01431274,1.92123413,1.88522339,1.89453125,1.80648804,1.8347168,1.82128906,2.36129761,2.09823608,2.10357666,2.03796387,1.9342041,1.91268921,2.11395264,2.0401001,0,14:13.9,0,0,1 
+59,1.99127197,1.96868896,2.30560303,2.19406128,2.20077515,2.16506958,2.1282959,2.10098267,1.73904419,1.64550781,2.02056885,1.90383911,2.18032837,2.06710815,2.05780029,1.9581604,1.94717407,1.96212769,0.68649292,1.83181763,1.98196411,1.89056396,1.97280884,1.8888855,1.50177002,1.95922852,1.90307617,1.75338745,2.15057373,2.01873779,1.90261841,1.88278198,1.8901062,1.80679321,1.81869507,1.82510376,2.41439819,2.10342407,2.11669922,2.03872681,1.9342041,1.91101074,2.12295532,2.0463562,0,14:14.1,0,0,1 +60,1.99523926,1.97143555,2.33352661,2.19512939,2.22671509,2.16888428,2.11273193,2.10479736,1.7414856,1.65023804,2.02011108,1.90368652,2.15698242,2.06314087,2.06939697,1.96411133,1.98226929,1.96411133,0.68206787,1.85348511,1.97357178,1.89147949,1.96670532,1.89239502,1.45980835,1.9569397,1.90994263,1.75567627,2.19894409,2.01599121,1.91940308,1.88644409,1.87759399,1.80664063,1.8296814,1.83166504,2.4055481,2.14035034,2.10906982,2.04696655,1.93634033,1.91757202,2.12265015,2.05444336,0,14:14.2,0,0,1 +""" # noqa: E501 + + +CONTENTS['1.25'] = b"""\ +Header +File Version,1.25 +Patient Information +ID,REDACTED +Name, +Comment,sentence,, +Birth Date,1999/09/09 +Age, 99y +Sex,Male +Analyze Information +AnalyzeMode,Continuous +Pre Time[s],9.0 +Post Time[s],7.0 +Recovery Time[s],12.0 +Base Time[s],5 +Fitting Degree,1 +HPF[Hz],No Filter +LPF[Hz],No Filter +Moving Average[s],0.1 +Measure Information +Date,2020/02/02 11:20 +Probe Type,adult +Mode,3x11 +Wave[nm],695,830 +Wave Length,CH1(703.6),CH1(829.0),CH2(703.9),CH2(829.3),CH3(703.9),CH3(829.3),CH4(703.9),CH4(828.8),CH5(703.9),CH5(828.8),CH6(703.1),CH6(828.8),CH7(703.1),CH7(828.8),CH8(702.9),CH8(829.0),CH9(702.9),CH9(829.0),CH10(703.6),CH10(829.0),CH11(703.6),CH11(829.0),CH12(703.4),CH12(829.0),CH13(703.9),CH13(829.3),CH14(703.6),CH14(828.5),CH15(703.9),CH15(828.8),CH16(704.1),CH16(829.8),CH17(703.1),CH17(828.8),CH18(704.4),CH18(829.0),CH19(702.9),CH19(829.0),CH20(703.9),CH20(828.8),CH21(703.6),CH21(829.0),CH22(703.4),CH22(829.0),CH23(703.4),CH23(829.0),CH24(703.6),CH24(828.5),CH25(703.6),CH25(828.5),CH26(704.1),CH26(829.8),CH27(704.1),CH27(829.8),CH28(704.4),CH28(829.0),CH29(704.4),CH29(829.0),CH30(703.9),CH30(828.8),CH31(703.9),CH31(828.8),CH32(704.1),CH32(828.8),CH33(703.4),CH33(829.0),CH34(703.1),CH34(828.8),CH35(703.6),CH35(828.5),CH36(704.4),CH36(828.8),CH37(704.1),CH37(829.8),CH38(703.6),CH38(828.5),CH39(704.4),CH39(829.0),CH40(703.9),CH40(829.0),CH41(703.9),CH41(828.8),CH42(704.4),CH42(829.0),CH43(704.1),CH43(828.8),CH44(703.1),CH44(828.8),CH45(703.1),CH45(828.8),CH46(704.4),CH46(828.8),CH47(704.4),CH47(828.8),CH48(703.6),CH48(828.5),CH49(703.6),CH49(828.5),CH50(703.9),CH50(829.0),CH51(703.9),CH51(829.0),CH52(704.4),CH52(829.0) +Analog 
Gain,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,6.58823500,6.58823500,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,6.58823500,6.58823500,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,6.58823500,6.58823500,2.74509800,2.74509800,1.25490200,1.25490200,4.07843100,4.07843100,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,2.74509800,2.74509800,2.74509800,2.74509800,4.07843100,4.07843100,4.07843100,4.07843100,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200,1.25490200 +Digital Gain,5.99000000,3.61000000,1.93000000,1.12000000,5.43000000,3.20000000,7.11000000,4.02000000,11.82000000,6.06000000,16.42000000,7.48000000,15.82000000,7.56000000,12.13000000,6.80000000,14.62000000,7.78000000,5.35000000,2.57000000,10.27000000,5.01000000,52.84000000,26.95000000,9.75000000,6.12000000,6.97000000,3.99000000,11.31000000,5.91000000,2.26000000,1.15000000,9.49000000,4.58000000,3.21000000,2.25000000,2.97000000,1.65000000,12.34000000,6.77000000,33.81000000,17.23000000,7.10000000,2.92000000,12.78000000,7.02000000,17.47000000,9.94000000,16.93000000,9.55000000,6.87000000,3.47000000,5.96000000,3.07000000,20.56000000,13.42000000,3.67000000,2.53000000,17.39000000,8.72000000,2.96000000,1.61000000,14.25000000,5.67000000,8.87000000,4.21000000,21.68000000,11.89000000,12.24000000,6.47000000,28.07000000,16.60000000,13.95000000,7.05000000,9.37000000,5.53000000,18.15000000,11.62000000,39.72000000,19.87000000,20.78000000,9.56000000,91.12000000,43.86000000,17.86000000,7.84000000,5.47000000,3.40000000,4.83000000,3.38000000,7.61000000,5.15000000,4.65000000,2.86000000,1.33000000,0.88000000,16.06000000,11.46000000,11.94000000,6.47000000,25.92000000,12.83000000,4.35000000,2.44000000 +Sampling Period[s],0.1 +StimType,STIM +Stim Time[s] +A,15,B,15,C,15,D,15,E,15,F,15,G,15,H,15,I,15,J,15 +Repeat Count,20 +Exception Ch,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + + + + + + +Data 
+Probe1,CH1(703.6),CH1(829.0),CH2(703.9),CH2(829.3),CH3(703.9),CH3(829.3),CH4(703.9),CH4(828.8),CH5(703.9),CH5(828.8),CH6(703.1),CH6(828.8),CH7(703.1),CH7(828.8),CH8(702.9),CH8(829.0),CH9(702.9),CH9(829.0),CH10(703.6),CH10(829.0),CH11(703.6),CH11(829.0),CH12(703.4),CH12(829.0),CH13(703.9),CH13(829.3),CH14(703.6),CH14(828.5),CH15(703.9),CH15(828.8),CH16(704.1),CH16(829.8),CH17(703.1),CH17(828.8),CH18(704.4),CH18(829.0),CH19(702.9),CH19(829.0),CH20(703.9),CH20(828.8),CH21(703.6),CH21(829.0),CH22(703.4),CH22(829.0),CH23(703.4),CH23(829.0),CH24(703.6),CH24(828.5),CH25(703.6),CH25(828.5),CH26(704.1),CH26(829.8),CH27(704.1),CH27(829.8),CH28(704.4),CH28(829.0),CH29(704.4),CH29(829.0),CH30(703.9),CH30(828.8),CH31(703.9),CH31(828.8),CH32(704.1),CH32(828.8),CH33(703.4),CH33(829.0),CH34(703.1),CH34(828.8),CH35(703.6),CH35(828.5),CH36(704.4),CH36(828.8),CH37(704.1),CH37(829.8),CH38(703.6),CH38(828.5),CH39(704.4),CH39(829.0),CH40(703.9),CH40(829.0),CH41(703.9),CH41(828.8),CH42(704.4),CH42(829.0),CH43(704.1),CH43(828.8),CH44(703.1),CH44(828.8),CH45(703.1),CH45(828.8),CH46(704.4),CH46(828.8),CH47(704.4),CH47(828.8),CH48(703.6),CH48(828.5),CH49(703.6),CH49(828.5),CH50(703.9),CH50(829.0),CH51(703.9),CH51(829.0),CH52(704.4),CH52(829.0),Mark,Time,BodyMovement,RemovalMark,PreScan +1,2.01614380,2.01263428,2.01354980,2.01248169,2.02392578,2.01644897,2.02453613,2.01934814,2.01873779,2.01919556,2.02087402,2.01889038,2.02621460,2.03781128,2.02423096,2.02011108,2.00881958,2.00637817,2.02087402,2.01644897,2.02209473,2.00988770,2.03155518,2.01919556,2.02148438,2.01370239,2.01324463,2.00973511,2.00973511,2.01477051,2.02117920,2.02636719,2.00805664,2.01583862,2.02529907,2.03033447,2.00424194,1.99935913,2.01202393,2.01568604,2.01583862,2.01095581,2.02545166,2.01721191,2.02758789,2.02957153,2.00622559,2.00073242,2.00759888,2.00485229,2.02041626,2.03094482,2.01782227,2.01675415,2.00820923,2.01110840,2.01354980,2.00637817,2.01538086,2.00271606,2.01156616,2.01217651,2.01736450,1.99386597,2.03353882,2.02362061,2.03598022,2.02728271,2.00866699,1.99676514,2.00714111,2.00744629,2.02087402,2.01492310,2.01202393,2.01644897,2.00057983,1.99584961,2.01370239,1.99050903,1.99783325,1.99371338,2.01431274,1.99920654,1.99630737,1.97814941,2.02545166,2.01126099,2.02102661,2.00790405,2.00912476,1.99813843,2.01156616,1.99981689,1.99829102,1.99523926,2.01141357,2.00424194,2.00164795,1.98730469,2.03948975,2.00637817,2.00958252,1.99615479,0,15:40:49.15,0,0,1 
+2,2.02087402,2.02194214,2.01782227,2.02026367,2.02743530,2.02453613,2.02789307,2.02758789,2.02255249,2.02880859,2.02819824,2.02880859,2.03445435,2.04879761,2.02850342,2.02865601,2.01583862,2.01477051,2.02529907,2.02499390,2.03033447,2.02178955,2.03308105,2.02636719,2.02545166,2.02224731,2.01690674,2.01751709,2.01522827,2.02423096,2.02590942,2.03536987,2.01507568,2.02850342,2.03033447,2.04040527,2.00729370,2.00546265,2.01431274,2.02407837,2.02224731,2.02346802,2.02972412,2.02682495,2.03186035,2.03720093,2.01171875,2.00851440,2.00897217,2.01370239,2.02468872,2.04177856,2.02224731,2.02606201,2.01339722,2.02499390,2.01766968,2.01583862,2.01690674,2.00851440,2.01568604,2.02117920,2.02087402,2.00164795,2.03689575,2.03247070,2.04132080,2.03613281,2.01110840,2.00332642,2.01293945,2.01583862,2.02560425,2.02224731,2.01828003,2.02621460,2.00637817,2.00607300,2.01431274,1.99890137,2.00714111,1.99813843,2.01721191,2.00942993,1.99874878,1.98593140,2.02941895,2.02087402,2.02743530,2.01690674,2.01263428,2.00668335,2.01690674,2.00592041,2.00210571,2.00210571,2.01568604,2.01110840,2.00378418,1.99447632,2.04177856,2.01278687,2.01217651,2.00210571,0,15:40:49.26,0,0,1 +3,2.01614380,2.01263428,2.01354980,2.01248169,2.02392578,2.01644897,2.02453613,2.01934814,2.01873779,2.01919556,2.02087402,2.01889038,2.02621460,2.03781128,2.02423096,2.02011108,2.00881958,2.00637817,2.02087402,2.01644897,2.02209473,2.00988770,2.03155518,2.01919556,2.02148438,2.01370239,2.01324463,2.00973511,2.00973511,2.01477051,2.02117920,2.02636719,2.00805664,2.01583862,2.02529907,2.03033447,2.00424194,1.99935913,2.01202393,2.01568604,2.01583862,2.01095581,2.02545166,2.01721191,2.02758789,2.02957153,2.00622559,2.00073242,2.00759888,2.00485229,2.02041626,2.03094482,2.01782227,2.01675415,2.00820923,2.01110840,2.01354980,2.00637817,2.01538086,2.00271606,2.01156616,2.01217651,2.01736450,1.99386597,2.03353882,2.02362061,2.03598022,2.02728271,2.00866699,1.99676514,2.00714111,2.00744629,2.02087402,2.01492310,2.01202393,2.01644897,2.00057983,1.99584961,2.01370239,1.99050903,1.99783325,1.99371338,2.01431274,1.99920654,1.99630737,1.97814941,2.02545166,2.01126099,2.02102661,2.00790405,2.00912476,1.99813843,2.01156616,1.99981689,1.99829102,1.99523926,2.01141357,2.00424194,2.00164795,1.98730469,2.03948975,2.00637817,2.00958252,1.99615479,0,15:40:49.15,0,0,1 +4,2.02087402,2.02194214,2.01782227,2.02026367,2.02743530,2.02453613,2.02789307,2.02758789,2.02255249,2.02880859,2.02819824,2.02880859,2.03445435,2.04879761,2.02850342,2.02865601,2.01583862,2.01477051,2.02529907,2.02499390,2.03033447,2.02178955,2.03308105,2.02636719,2.02545166,2.02224731,2.01690674,2.01751709,2.01522827,2.02423096,2.02590942,2.03536987,2.01507568,2.02850342,2.03033447,2.04040527,2.00729370,2.00546265,2.01431274,2.02407837,2.02224731,2.02346802,2.02972412,2.02682495,2.03186035,2.03720093,2.01171875,2.00851440,2.00897217,2.01370239,2.02468872,2.04177856,2.02224731,2.02606201,2.01339722,2.02499390,2.01766968,2.01583862,2.01690674,2.00851440,2.01568604,2.02117920,2.02087402,2.00164795,2.03689575,2.03247070,2.04132080,2.03613281,2.01110840,2.00332642,2.01293945,2.01583862,2.02560425,2.02224731,2.01828003,2.02621460,2.00637817,2.00607300,2.01431274,1.99890137,2.00714111,1.99813843,2.01721191,2.00942993,1.99874878,1.98593140,2.02941895,2.02087402,2.02743530,2.01690674,2.01263428,2.00668335,2.01690674,2.00592041,2.00210571,2.00210571,2.01568604,2.01110840,2.00378418,1.99447632,2.04177856,2.01278687,2.01217651,2.00210571,0,15:40:49.26,0,0,1 
+5,2.01614380,2.01263428,2.01354980,2.01248169,2.02392578,2.01644897,2.02453613,2.01934814,2.01873779,2.01919556,2.02087402,2.01889038,2.02621460,2.03781128,2.02423096,2.02011108,2.00881958,2.00637817,2.02087402,2.01644897,2.02209473,2.00988770,2.03155518,2.01919556,2.02148438,2.01370239,2.01324463,2.00973511,2.00973511,2.01477051,2.02117920,2.02636719,2.00805664,2.01583862,2.02529907,2.03033447,2.00424194,1.99935913,2.01202393,2.01568604,2.01583862,2.01095581,2.02545166,2.01721191,2.02758789,2.02957153,2.00622559,2.00073242,2.00759888,2.00485229,2.02041626,2.03094482,2.01782227,2.01675415,2.00820923,2.01110840,2.01354980,2.00637817,2.01538086,2.00271606,2.01156616,2.01217651,2.01736450,1.99386597,2.03353882,2.02362061,2.03598022,2.02728271,2.00866699,1.99676514,2.00714111,2.00744629,2.02087402,2.01492310,2.01202393,2.01644897,2.00057983,1.99584961,2.01370239,1.99050903,1.99783325,1.99371338,2.01431274,1.99920654,1.99630737,1.97814941,2.02545166,2.01126099,2.02102661,2.00790405,2.00912476,1.99813843,2.01156616,1.99981689,1.99829102,1.99523926,2.01141357,2.00424194,2.00164795,1.98730469,2.03948975,2.00637817,2.00958252,1.99615479,0,15:40:49.15,0,0,1 +6,2.02087402,2.02194214,2.01782227,2.02026367,2.02743530,2.02453613,2.02789307,2.02758789,2.02255249,2.02880859,2.02819824,2.02880859,2.03445435,2.04879761,2.02850342,2.02865601,2.01583862,2.01477051,2.02529907,2.02499390,2.03033447,2.02178955,2.03308105,2.02636719,2.02545166,2.02224731,2.01690674,2.01751709,2.01522827,2.02423096,2.02590942,2.03536987,2.01507568,2.02850342,2.03033447,2.04040527,2.00729370,2.00546265,2.01431274,2.02407837,2.02224731,2.02346802,2.02972412,2.02682495,2.03186035,2.03720093,2.01171875,2.00851440,2.00897217,2.01370239,2.02468872,2.04177856,2.02224731,2.02606201,2.01339722,2.02499390,2.01766968,2.01583862,2.01690674,2.00851440,2.01568604,2.02117920,2.02087402,2.00164795,2.03689575,2.03247070,2.04132080,2.03613281,2.01110840,2.00332642,2.01293945,2.01583862,2.02560425,2.02224731,2.01828003,2.02621460,2.00637817,2.00607300,2.01431274,1.99890137,2.00714111,1.99813843,2.01721191,2.00942993,1.99874878,1.98593140,2.02941895,2.02087402,2.02743530,2.01690674,2.01263428,2.00668335,2.01690674,2.00592041,2.00210571,2.00210571,2.01568604,2.01110840,2.00378418,1.99447632,2.04177856,2.01278687,2.01217651,2.00210571,0,15:40:49.26,0,0,1 +7,2.01614380,2.01263428,2.01354980,2.01248169,2.02392578,2.01644897,2.02453613,2.01934814,2.01873779,2.01919556,2.02087402,2.01889038,2.02621460,2.03781128,2.02423096,2.02011108,2.00881958,2.00637817,2.02087402,2.01644897,2.02209473,2.00988770,2.03155518,2.01919556,2.02148438,2.01370239,2.01324463,2.00973511,2.00973511,2.01477051,2.02117920,2.02636719,2.00805664,2.01583862,2.02529907,2.03033447,2.00424194,1.99935913,2.01202393,2.01568604,2.01583862,2.01095581,2.02545166,2.01721191,2.02758789,2.02957153,2.00622559,2.00073242,2.00759888,2.00485229,2.02041626,2.03094482,2.01782227,2.01675415,2.00820923,2.01110840,2.01354980,2.00637817,2.01538086,2.00271606,2.01156616,2.01217651,2.01736450,1.99386597,2.03353882,2.02362061,2.03598022,2.02728271,2.00866699,1.99676514,2.00714111,2.00744629,2.02087402,2.01492310,2.01202393,2.01644897,2.00057983,1.99584961,2.01370239,1.99050903,1.99783325,1.99371338,2.01431274,1.99920654,1.99630737,1.97814941,2.02545166,2.01126099,2.02102661,2.00790405,2.00912476,1.99813843,2.01156616,1.99981689,1.99829102,1.99523926,2.01141357,2.00424194,2.00164795,1.98730469,2.03948975,2.00637817,2.00958252,1.99615479,0,15:40:49.15,0,0,1 
+8,2.02087402,2.02194214,2.01782227,2.02026367,2.02743530,2.02453613,2.02789307,2.02758789,2.02255249,2.02880859,2.02819824,2.02880859,2.03445435,2.04879761,2.02850342,2.02865601,2.01583862,2.01477051,2.02529907,2.02499390,2.03033447,2.02178955,2.03308105,2.02636719,2.02545166,2.02224731,2.01690674,2.01751709,2.01522827,2.02423096,2.02590942,2.03536987,2.01507568,2.02850342,2.03033447,2.04040527,2.00729370,2.00546265,2.01431274,2.02407837,2.02224731,2.02346802,2.02972412,2.02682495,2.03186035,2.03720093,2.01171875,2.00851440,2.00897217,2.01370239,2.02468872,2.04177856,2.02224731,2.02606201,2.01339722,2.02499390,2.01766968,2.01583862,2.01690674,2.00851440,2.01568604,2.02117920,2.02087402,2.00164795,2.03689575,2.03247070,2.04132080,2.03613281,2.01110840,2.00332642,2.01293945,2.01583862,2.02560425,2.02224731,2.01828003,2.02621460,2.00637817,2.00607300,2.01431274,1.99890137,2.00714111,1.99813843,2.01721191,2.00942993,1.99874878,1.98593140,2.02941895,2.02087402,2.02743530,2.01690674,2.01263428,2.00668335,2.01690674,2.00592041,2.00210571,2.00210571,2.01568604,2.01110840,2.00378418,1.99447632,2.04177856,2.01278687,2.01217651,2.00210571,0,15:40:49.26,0,0,1 +9,2.01614380,2.01263428,2.01354980,2.01248169,2.02392578,2.01644897,2.02453613,2.01934814,2.01873779,2.01919556,2.02087402,2.01889038,2.02621460,2.03781128,2.02423096,2.02011108,2.00881958,2.00637817,2.02087402,2.01644897,2.02209473,2.00988770,2.03155518,2.01919556,2.02148438,2.01370239,2.01324463,2.00973511,2.00973511,2.01477051,2.02117920,2.02636719,2.00805664,2.01583862,2.02529907,2.03033447,2.00424194,1.99935913,2.01202393,2.01568604,2.01583862,2.01095581,2.02545166,2.01721191,2.02758789,2.02957153,2.00622559,2.00073242,2.00759888,2.00485229,2.02041626,2.03094482,2.01782227,2.01675415,2.00820923,2.01110840,2.01354980,2.00637817,2.01538086,2.00271606,2.01156616,2.01217651,2.01736450,1.99386597,2.03353882,2.02362061,2.03598022,2.02728271,2.00866699,1.99676514,2.00714111,2.00744629,2.02087402,2.01492310,2.01202393,2.01644897,2.00057983,1.99584961,2.01370239,1.99050903,1.99783325,1.99371338,2.01431274,1.99920654,1.99630737,1.97814941,2.02545166,2.01126099,2.02102661,2.00790405,2.00912476,1.99813843,2.01156616,1.99981689,1.99829102,1.99523926,2.01141357,2.00424194,2.00164795,1.98730469,2.03948975,2.00637817,2.00958252,1.99615479,0,15:40:49.15,0,0,1 +10,2.02087402,2.02194214,2.01782227,2.02026367,2.02743530,2.02453613,2.02789307,2.02758789,2.02255249,2.02880859,2.02819824,2.02880859,2.03445435,2.04879761,2.02850342,2.02865601,2.01583862,2.01477051,2.02529907,2.02499390,2.03033447,2.02178955,2.03308105,2.02636719,2.02545166,2.02224731,2.01690674,2.01751709,2.01522827,2.02423096,2.02590942,2.03536987,2.01507568,2.02850342,2.03033447,2.04040527,2.00729370,2.00546265,2.01431274,2.02407837,2.02224731,2.02346802,2.02972412,2.02682495,2.03186035,2.03720093,2.01171875,2.00851440,2.00897217,2.01370239,2.02468872,2.04177856,2.02224731,2.02606201,2.01339722,2.02499390,2.01766968,2.01583862,2.01690674,2.00851440,2.01568604,2.02117920,2.02087402,2.00164795,2.03689575,2.03247070,2.04132080,2.03613281,2.01110840,2.00332642,2.01293945,2.01583862,2.02560425,2.02224731,2.01828003,2.02621460,2.00637817,2.00607300,2.01431274,1.99890137,2.00714111,1.99813843,2.01721191,2.00942993,1.99874878,1.98593140,2.02941895,2.02087402,2.02743530,2.01690674,2.01263428,2.00668335,2.01690674,2.00592041,2.00210571,2.00210571,2.01568604,2.01110840,2.00378418,1.99447632,2.04177856,2.01278687,2.01217651,2.00210571,0,15:40:49.26,0,0,1 +""" # noqa: E501 + + 
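A quick sketch of how these byte-string fixtures are meant to be consumed: each mimics a real Hitachi ETG CSV export (a header section ending at the `Data` line, a `Probe1` row naming the channels, then one comma-separated sample per row), and the reader is pointed at such a file on disk. This mirrors what `test_hitachi_basic` below does via pytest, assuming the vendored `mne` package is importable:

    # Write the 1.18 fixture to disk, then parse it with the new reader.
    from mne.io import read_raw_hitachi
    with open('test.csv', 'wb') as fid:
        fid.write(CONTENTS['1.18'])
    raw = read_raw_hitachi('test.csv', preload=True)
    assert raw.info['sfreq'] == 10  # from 'Sampling Period[s],0.1' in the header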
+@pytest.mark.parametrize('preload', (True, False))
+@pytest.mark.parametrize('version, n_ch, n_times, lowpass, sex, date, end', [
+    ('1.18', 48, 60, 0.1, 2, (2004, 5, 17, 5, 14, 0, 0), None),
+    ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\r'),
+    ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\n'),
+    ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\r\n'),
+])
+def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date,
+                       end, tmp_path):
+    """Test basic reading of Hitachi files."""
+    fname = tmp_path / 'test.csv'
+    contents = CONTENTS[version]
+    if end is not None:
+        contents = contents.replace(b'\r', b'\n').replace(b'\n\n', b'\n')
+        contents = contents.replace(b'\n', end)
+    with open(fname, 'wb') as fid:
+        fid.write(contents)  # write the variant with the requested line ends
+    raw = read_raw_hitachi(fname, preload=preload, verbose=True)
+    data = raw.get_data()
+    assert data.shape == (n_ch, n_times)
+    assert raw.info['sfreq'] == 10
+    assert raw.info['lowpass'] == lowpass
+    assert raw.info['subject_info']['sex'] == sex
+    assert np.isfinite(raw.get_data()).all()
+    assert raw.info['meas_date'] == dt.datetime(*date, tzinfo=dt.timezone.utc)
+    # bad distances (zero)
+    distances = source_detector_distances(raw.info)
+    want = [0.] * (n_ch - 4)
+    assert_allclose(distances, want, atol=0.)
+    raw_od_bad = optical_density(raw)
+    with pytest.warns(RuntimeWarning, match='will be zero'):
+        beer_lambert_law(raw_od_bad, ppf=6)
+    # bad distances (too big)
+    if version == '1.18':
+        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 9)), [])[:-1]
+        have = 'P7 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 F3'.split()
+        assert len(need) == len(have)
+        mon = make_standard_montage('standard_1020')
+        mon.rename_channels(dict(zip(have, need)))
+        raw.set_montage(mon)
+        raw_od_bad = optical_density(raw)
+        with pytest.warns(RuntimeWarning, match='greater than 10 cm'):
+            beer_lambert_law(raw_od_bad, ppf=6)
+    # good distances
+    mon = make_standard_montage('standard_1020')
+    if version == '1.18':
+        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 9)), [])[:-1]
+        have = 'F3 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 P7'.split()
+    else:
+        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 18)), [])[:-1]
+        have = ('FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 '
+                'T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10 '
+                'TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10').split()
+    assert len(need) == len(have)
+    mon.rename_channels(dict(zip(have, need)))
+    raw.set_montage(mon)
+    distances = source_detector_distances(raw.info)
+    want = [0.03] * (n_ch - 4)
+    assert_allclose(distances, want, atol=0.01)
+    test_rank = 'less' if n_times < n_ch else True
+    _test_raw_reader(read_raw_hitachi, fname=fname,
+                     boundary_decimal=1, test_rank=test_rank)  # low fs
+
+    # TODO: eventually we should refactor these to be in
+    # mne/io/tests/test_raw.py and run them for all fNIRS readers
+
+    # OD
+    raw_od = optical_density(raw)
+    assert np.isfinite(raw_od.get_data()).all()
+    sci = scalp_coupling_index(raw_od, verbose='error')
+    lo, mi, hi = np.percentile(sci, [5, 50, 95])
+    if version == '1.18':
+        assert -0.1 < lo < 0.1  # not great
+        assert 0.4 < mi < 0.5
+        assert 0.8 < hi < 0.9
+    else:
+        assert 0.99 <= lo <= hi <= 1
+    # TDDR
+    raw_tddr = tddr(raw_od)
+    data = raw_tddr.get_data('fnirs')
+    assert np.isfinite(data).all()
+    peaks = np.ptp(data, axis=-1)
+    assert_array_less(1e-4, peaks, err_msg='TDDR too small')
+    assert_array_less(peaks, 1, err_msg='TDDR too big')
+    # HbO/HbR
+    raw_tddr.set_montage(mon)
+    raw_h = beer_lambert_law(raw_tddr, ppf=6)
+    data = raw_h.get_data('fnirs')
+    assert np.isfinite(data).all()
+    assert data.shape == (n_ch - 4, n_times)
+    peaks = np.ptp(data, axis=-1)
+    assert_array_less(1e-10, peaks, err_msg='Beer-Lambert too small')
+    assert_array_less(peaks, 1e-5, err_msg='Beer-Lambert too big')
+
+
+# From Hitachi 2 Homer
+KNOWN_PAIRS = {
+    (3, 3, 2): (
+        (0, 0), (1, 0), (0, 1), (2, 0), (1, 2),
+        (2, 1), (2, 2), (3, 1), (2, 3), (4, 2),
+        (3, 3), (4, 3), (5, 4), (6, 4), (5, 5),
+        (7, 4), (6, 6), (7, 5), (7, 6), (8, 5),
+        (7, 7), (9, 6), (8, 7), (9, 7)),
+    (3, 5, 1): (
+        (0, 0), (1, 0), (1, 1), (2, 1), (0, 2),
+        (3, 0), (1, 3), (4, 1), (2, 4), (3, 2),
+        (3, 3), (4, 3), (4, 4), (5, 2), (3, 5),
+        (6, 3), (4, 6), (7, 4), (5, 5), (6, 5),
+        (6, 6), (7, 6)),
+    (4, 4, 1): (
+        (0, 0), (1, 0), (1, 1), (0, 2), (2, 0),
+        (1, 3), (3, 1), (2, 2), (2, 3), (3, 3),
+        (4, 2), (2, 4), (5, 3), (3, 5), (4, 4),
+        (5, 4), (5, 5), (4, 6), (6, 4), (5, 7),
+        (7, 5), (6, 6), (6, 7), (7, 7)),
+    (3, 11, 1): (
+        (0, 0), (1, 0), (1, 1), (2, 1), (2, 2),
+        (3, 2), (3, 3), (4, 3), (4, 4), (5, 4),
+        (0, 5), (6, 0), (1, 6), (7, 1), (2, 7),
+        (8, 2), (3, 8), (9, 3), (4, 9), (10, 4),
+        (5, 10), (6, 5), (6, 6), (7, 6), (7, 7),
+        (8, 7), (8, 8), (9, 8), (9, 9), (10, 9),
+        (10, 10), (11, 5), (6, 11), (12, 6), (7, 12),
+        (13, 7), (8, 13), (14, 8), (9, 14), (15, 9),
+        (10, 15), (16, 10), (11, 11), (12, 11), (12, 12),
+        (13, 12), (13, 13), (14, 13), (14, 14), (15, 14),
+        (15, 15), (16, 15)),
+}
+
+
+@pytest.mark.parametrize('n_rows, n_cols, n', list(KNOWN_PAIRS))
+def test_compute_pairs(n_rows, n_cols, n):
+    """Test computation of S-D pairings."""
+    want = KNOWN_PAIRS[(n_rows, n_cols, n)]
+    got = _compute_pairs(n_rows, n_cols, n)
+    assert got == want
diff --git a/python/libs/mne/io/kit/__init__.py b/python/libs/mne/io/kit/__init__.py
new file mode 100644
index 0000000..a520b91
--- /dev/null
+++ b/python/libs/mne/io/kit/__init__.py
@@ -0,0 +1,8 @@
+"""KIT module for reading raw data."""
+
+# Author: Teon Brooks
+#
+# License: BSD-3-Clause
+
+from .kit import read_raw_kit, read_epochs_kit
+from .coreg import read_mrk
diff --git a/python/libs/mne/io/kit/constants.py b/python/libs/mne/io/kit/constants.py
new file mode 100644
index 0000000..144dc58
--- /dev/null
+++ b/python/libs/mne/io/kit/constants.py
@@ -0,0 +1,260 @@
+"""KIT constants."""
+
+# Authors: Teon Brooks
+#          Christian Brodbeck
+#
+# License: BSD-3-Clause
+
+from ..constants import FIFF
+from ...utils import BunchConst
+
+
+KIT = BunchConst()
+
+# byte values
+KIT.SHORT = 2
+KIT.INT = 4
+KIT.DOUBLE = 8
+
+# channel parameters
+KIT.CALIB_FACTOR = 1.0  # mne_manual p.272
+KIT.RANGE = 1.  # mne_manual p.272
# mne_manual p.272 +KIT.UNIT_MUL = FIFF.FIFF_UNITM_NONE # default is 0 mne_manual p.273 +KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200] + +KIT.HPFS = { + 1: (0, 1, 3, 3), + 2: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30), + 3: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30), + 4: (0, 1, 3, 10, 30, 100, 200, 500), +} +KIT.LPFS = { + 1: (10, 20, 50, 100, 200, 500, 1000, 2000), + 2: (10, 20, 50, 100, 200, 500, 1000, 2000), + 3: (10, 20, 50, 100, 200, 500, 1000, 10000), + 4: (10, 30, 100, 300, 1000, 2000, 5000, 10000), +} +KIT.BEFS = { + 1: (0, 50, 60, 60), + 2: (0, 0, 0), + 3: (0, 60, 50, 50), +} + +# Map FLL-Type to filter options (high, low, band) +KIT.FLL_SETTINGS = { + 0: (1, 1, 1), # Hanger Type #1 + 10: (1, 1, 1), # Hanger Type #2 + 20: (1, 1, 1), # Hanger Type #2 + 50: (2, 1, 1), # Hanger Type #3 + 60: (2, 1, 1), # Hanger Type #3 + 100: (3, 3, 3), # Low Band Kapper Type + 101: (1, 3, 2), # Berlin (DC, 200 Hz, Through) + 120: (3, 3, 3), # Low Band Kapper Type + 200: (4, 4, 3), # High Band Kapper Type + 300: (2, 2, 2), # Kapper Type +} + +# channel types +KIT.CHANNEL_MAGNETOMETER = 1 +KIT.CHANNEL_MAGNETOMETER_REFERENCE = 0x101 +KIT.CHANNEL_AXIAL_GRADIOMETER = 2 +KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE = 0x102 +KIT.CHANNEL_PLANAR_GRADIOMETER = 3 +KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE = 0x103 +KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER = 4 +KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE = 0x104 +KIT.CHANNEL_TRIGGER = -1 +KIT.CHANNEL_EEG = -2 +KIT.CHANNEL_ECG = -3 +KIT.CHANNEL_ETC = -4 +KIT.CHANNEL_NULL = 0 +KIT.CHANNELS_MEG = ( + KIT.CHANNEL_MAGNETOMETER, + KIT.CHANNEL_MAGNETOMETER_REFERENCE, + KIT.CHANNEL_AXIAL_GRADIOMETER, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE, + KIT.CHANNEL_PLANAR_GRADIOMETER, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE, +) +KIT.CHANNELS_REFERENCE = ( + KIT.CHANNEL_MAGNETOMETER_REFERENCE, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE, +) +KIT.CHANNELS_MISC = ( + KIT.CHANNEL_TRIGGER, + KIT.CHANNEL_EEG, + KIT.CHANNEL_ECG, + KIT.CHANNEL_ETC, +) +KIT.CHANNEL_NAME_NCHAR = { + KIT.CHANNEL_MAGNETOMETER: 6, + KIT.CHANNEL_AXIAL_GRADIOMETER: 6, + KIT.CHANNEL_TRIGGER: 32, + KIT.CHANNEL_EEG: 8, + KIT.CHANNEL_ECG: 32, + KIT.CHANNEL_ETC: 32, +} +KIT.CH_TO_FIFF_COIL = { + # KIT.CHANNEL_MAGNETOMETER: FIFF.???, + KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_COIL_KIT_REF_MAG, + KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_COIL_KIT_GRAD, + # KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.???, + # KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.???, + # KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.???, + # KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.???, + # KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.???, + KIT.CHANNEL_TRIGGER: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_EEG: FIFF.FIFFV_COIL_EEG, + KIT.CHANNEL_ECG: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_ETC: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_NULL: FIFF.FIFFV_COIL_NONE, +} +KIT.CH_TO_FIFF_KIND = { + KIT.CHANNEL_MAGNETOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + 
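+    # The MEG sensor types above all resolve to MEG/reference-MEG FIFF
+    # kinds; the analog channel types below resolve to EEG/ECG/misc kinds.
+    # A minimal lookup sketch (illustrative only):
+    #   KIT.CH_TO_FIFF_KIND[KIT.CHANNEL_EEG] is FIFF.FIFFV_EEG_CH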
KIT.CHANNEL_TRIGGER: FIFF.FIFFV_MISC_CH, + KIT.CHANNEL_EEG: FIFF.FIFFV_EEG_CH, + KIT.CHANNEL_ECG: FIFF.FIFFV_ECG_CH, + KIT.CHANNEL_ETC: FIFF.FIFFV_MISC_CH, + KIT.CHANNEL_NULL: FIFF.FIFFV_MISC_CH, +} +KIT.CH_LABEL = { + KIT.CHANNEL_TRIGGER: 'TRIGGER', + KIT.CHANNEL_EEG: 'EEG', + KIT.CHANNEL_ECG: 'ECG', + KIT.CHANNEL_ETC: 'MISC', + KIT.CHANNEL_NULL: 'MISC', +} + +# Acquisition modes +KIT.CONTINUOUS = 1 +KIT.EVOKED = 2 +KIT.EPOCHS = 3 + +# coreg constants +KIT.DIG_POINTS = 10000 + +# Known KIT systems +# ----------------- +# KIT recording system is encoded in the SQD file as integer: +KIT.SYSTEM_MQ_ADULT = 345 # Macquarie Dept of Cognitive Science, 2006 - +KIT.SYSTEM_MQ_CHILD = 403 # Macquarie Dept of Cognitive Science, 2006 - +KIT.SYSTEM_AS = 260 # Academia Sinica at Taiwan +KIT.SYSTEM_AS_2008 = 261 # Academia Sinica, 2008 or 2009 - +KIT.SYSTEM_NYU_2008 = 32 # NYU-NY, July 7, 2008 - +KIT.SYSTEM_NYU_2009 = 33 # NYU-NY, January 24, 2009 - +KIT.SYSTEM_NYU_2010 = 34 # NYU-NY, January 22, 2010 - +KIT.SYSTEM_NYU_2019 = 35 # NYU-NY, September 18, 2019 - +KIT.SYSTEM_NYUAD_2011 = 440 # NYU-AD initial launch May 20, 2011 - +KIT.SYSTEM_NYUAD_2012 = 441 # NYU-AD more channels July 11, 2012 - +KIT.SYSTEM_NYUAD_2014 = 442 # NYU-AD move to NYUAD campus Nov 20, 2014 - +KIT.SYSTEM_UMD_2004 = 51 # UMD Marie Mount Hall, October 1, 2004 - +KIT.SYSTEM_UMD_2014_07 = 52 # UMD update to 16 bit ADC, July 4, 2014 - +KIT.SYSTEM_UMD_2014_12 = 53 # UMD December 4, 2014 - +KIT.SYSTEM_UMD_2019_09 = 54 # UMD September 3, 2019 - +KIT.SYSTEM_YOKOGAWA_2017_01 = 1001 # Kanazawa (until 2017) +KIT.SYSTEM_YOKOGAWA_2018_01 = 10020 # Kanazawa (since 2018) +KIT.SYSTEM_YOKOGAWA_2020_08 = 10021 # Kanazawa (since August 2020) +KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008 = 124 + +# Sensor layouts for plotting +KIT_LAYOUT = { + KIT.SYSTEM_AS: None, + KIT.SYSTEM_AS_2008: 'KIT-AS-2008', + KIT.SYSTEM_MQ_ADULT: 'KIT-160', + KIT.SYSTEM_MQ_CHILD: 'KIT-125', + KIT.SYSTEM_NYU_2008: 'KIT-157', + KIT.SYSTEM_NYU_2009: 'KIT-157', + KIT.SYSTEM_NYU_2010: 'KIT-157', + KIT.SYSTEM_NYU_2019: None, + KIT.SYSTEM_NYUAD_2011: 'KIT-AD', + KIT.SYSTEM_NYUAD_2012: 'KIT-AD', + KIT.SYSTEM_NYUAD_2014: 'KIT-AD', + KIT.SYSTEM_UMD_2004: None, + KIT.SYSTEM_UMD_2014_07: None, + KIT.SYSTEM_UMD_2014_12: 'KIT-UMD-3', + KIT.SYSTEM_UMD_2019_09: None, + KIT.SYSTEM_YOKOGAWA_2017_01: None, + KIT.SYSTEM_YOKOGAWA_2018_01: None, + KIT.SYSTEM_YOKOGAWA_2020_08: None, + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None, +} +# Sensor neighbor definitions +KIT_NEIGHBORS = { + KIT.SYSTEM_AS: None, + KIT.SYSTEM_AS_2008: None, + KIT.SYSTEM_MQ_ADULT: None, + KIT.SYSTEM_MQ_CHILD: None, + KIT.SYSTEM_NYU_2008: 'KIT-157', + KIT.SYSTEM_NYU_2009: 'KIT-157', + KIT.SYSTEM_NYU_2010: 'KIT-157', + KIT.SYSTEM_NYU_2019: 'KIT-NYU-2019', + KIT.SYSTEM_NYUAD_2011: 'KIT-208', + KIT.SYSTEM_NYUAD_2012: 'KIT-208', + KIT.SYSTEM_NYUAD_2014: 'KIT-208', + KIT.SYSTEM_UMD_2004: 'KIT-UMD-1', + KIT.SYSTEM_UMD_2014_07: 'KIT-UMD-2', + KIT.SYSTEM_UMD_2014_12: 'KIT-UMD-3', + KIT.SYSTEM_UMD_2019_09: 'KIT-UMD-4', + KIT.SYSTEM_YOKOGAWA_2017_01: None, + KIT.SYSTEM_YOKOGAWA_2018_01: None, + KIT.SYSTEM_YOKOGAWA_2020_08: None, + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None, +} +# Names displayed in the info dict description +KIT_SYSNAMES = { + KIT.SYSTEM_MQ_ADULT: 'Macquarie Dept of Cognitive Science (Adult), 2006-', + KIT.SYSTEM_MQ_CHILD: 'Macquarie Dept of Cognitive Science (Child), 2006-', + KIT.SYSTEM_AS: 'Academia Sinica, -2008', + KIT.SYSTEM_AS_2008: 'Academia Sinica, 2008-', + KIT.SYSTEM_NYU_2008: 'NYU New York, 2008-9', + 
KIT.SYSTEM_NYU_2009: 'NYU New York, 2009-10', + KIT.SYSTEM_NYU_2010: 'NYU New York, 2010-', + KIT.SYSTEM_NYUAD_2011: 'New York University Abu Dhabi, 2011-12', + KIT.SYSTEM_NYUAD_2012: 'New York University Abu Dhabi, 2012-14', + KIT.SYSTEM_NYUAD_2014: 'New York University Abu Dhabi, 2014-', + KIT.SYSTEM_UMD_2004: 'University of Maryland, 2004-14', + KIT.SYSTEM_UMD_2014_07: 'University of Maryland, 2014', + KIT.SYSTEM_UMD_2014_12: 'University of Maryland, 2014-', + KIT.SYSTEM_UMD_2019_09: 'University of Maryland, 2019-', + KIT.SYSTEM_YOKOGAWA_2017_01: 'Yokogawa of Kanazawa (until 2017)', + KIT.SYSTEM_YOKOGAWA_2018_01: 'Yokogawa of Kanazawa (since 2018)', + KIT.SYSTEM_YOKOGAWA_2020_08: 'Yokogawa of Kanazawa (since August 2020)', + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: 'Eagle Technology MEG (KIT/Yokogawa style) at PTB (since 2008, software upgrade in 2018)', # noqa: E501 +} + +LEGACY_AMP_PARAMS = { + KIT.SYSTEM_NYU_2008: (5., 11.), + KIT.SYSTEM_NYU_2009: (5., 11.), + KIT.SYSTEM_NYU_2010: (5., 11.), + KIT.SYSTEM_UMD_2004: (5., 11.), +} + +# Ones that we don't use are commented out +KIT.DIR_INDEX_DIR = 0 +KIT.DIR_INDEX_SYSTEM = 1 +KIT.DIR_INDEX_CHANNELS = 4 +KIT.DIR_INDEX_CALIBRATION = 5 +# FLL = 6 +KIT.DIR_INDEX_AMP_FILTER = 7 +KIT.DIR_INDEX_ACQ_COND = 8 +KIT.DIR_INDEX_RAW_DATA = 9 +# AVERAGED_DATA = 10 +# MRI = 11 +KIT.DIR_INDEX_COREG = 12 +# MAGNETIC_SOURCE = 13 +# TRIGGER = 14 +# BOOKMARKS = 15 +# DIGITIZER = 25 +KIT.DIR_INDEX_DIG_POINTS = 26 +KIT.DIR_INDEX_CHPI_DATA = 29 diff --git a/python/libs/mne/io/kit/coreg.py b/python/libs/mne/io/kit/coreg.py new file mode 100644 index 0000000..9b332e7 --- /dev/null +++ b/python/libs/mne/io/kit/coreg.py @@ -0,0 +1,212 @@ +"""Coordinate Point Extractor for KIT system.""" + +# Author: Teon Brooks +# +# License: BSD-3-Clause + +from collections import OrderedDict +from os import SEEK_CUR, path as op +import pickle +import re + +import numpy as np + +from .constants import KIT, FIFF +from .._digitization import _make_dig_points +from ...transforms import (Transform, apply_trans, get_ras_to_neuromag_trans, + als_ras_trans) +from ...utils import warn, _check_option + + +INT32 = ' KIT.DIG_POINTS: + hsp = _decimate_points(hsp, res=0.005) + n_new = len(hsp) + warn("The selected head shape contained {n_in} points, which is " + "more than recommended ({n_rec}), and was automatically " + "downsampled to {n_new} points. The preferred way to " + "downsample is using FastScan.".format( + n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)) + + if isinstance(elp, str): + elp_points = _read_dig_kit(elp) + if len(elp_points) != 8: + raise ValueError("File %r should contain 8 points; got shape " + "%s." % (elp, elp_points.shape)) + elp = elp_points + elif len(elp) not in (6, 7, 8): + raise ValueError("ELP should contain 6 ~ 8 points; got shape " + "%s." 
% (elp.shape,)) + if isinstance(mrk, str): + mrk = read_mrk(mrk) + + mrk = apply_trans(als_ras_trans, mrk) + + nasion, lpa, rpa = elp[:3] + nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa) + elp = apply_trans(nmtrans, elp) + hsp = apply_trans(nmtrans, hsp) + eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items()) + + # device head transform + trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans') + + nasion, lpa, rpa = elp[:3] + elp = elp[3:] + + dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg) + dev_head_t = Transform('meg', 'head', trans) + + hpi_results = [dict(dig_points=[ + dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + for ci, r in enumerate(mrk)], coord_trans=dev_head_t)] + + return dig_points, dev_head_t, hpi_results + + +def _read_dig_kit(fname, unit='auto'): + # Read dig points from a file and return ndarray, using FastSCAN for .txt + from ...channels.montage import ( + read_polhemus_fastscan, read_dig_polhemus_isotrak, read_custom_montage, + _check_dig_shape) + assert unit in ('auto', 'm', 'mm') + _, ext = op.splitext(fname) + _check_option('file extension', ext[1:], ('hsp', 'elp', 'mat', 'txt')) + if ext == '.txt': + unit = 'mm' if unit == 'auto' else unit + out = read_polhemus_fastscan(fname, unit=unit, + on_header_missing='ignore') + elif ext in ('.hsp', '.elp'): + unit = 'm' if unit == 'auto' else unit + mon = read_dig_polhemus_isotrak(fname, unit=unit) + if fname.endswith('.hsp'): + dig = [d['r'] for d in mon.dig + if d['kind'] != FIFF.FIFFV_POINT_CARDINAL] + else: + dig = [d['r'] for d in mon.dig] + if dig and \ + mon.dig[0]['kind'] == FIFF.FIFFV_POINT_CARDINAL and \ + mon.dig[0]['ident'] == FIFF.FIFFV_POINT_LPA: + # LPA, Nasion, RPA -> NLR + dig[:3] = [dig[1], dig[0], dig[2]] + out = np.array(dig, float) + else: + assert ext == '.mat' + out = np.array([d['r'] for d in read_custom_montage(fname).dig]) + _check_dig_shape(out) + return out diff --git a/python/libs/mne/io/kit/kit.py b/python/libs/mne/io/kit/kit.py new file mode 100644 index 0000000..c57bc25 --- /dev/null +++ b/python/libs/mne/io/kit/kit.py @@ -0,0 +1,966 @@ +"""Conversion tool from SQD to FIF. + +RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py. +""" + +# Authors: Teon Brooks +# Joan Massich +# Christian Brodbeck +# +# License: BSD-3-Clause + +from collections import defaultdict, OrderedDict +from math import sin, cos +from os import SEEK_CUR, path as op + +import numpy as np + +from ..pick import pick_types +from ...utils import (verbose, logger, warn, fill_doc, _check_option, + _stamp_to_dt, _check_fname) +from ...transforms import apply_trans, als_ras_trans +from ..base import BaseRaw +from ..utils import _mult_cal_one +from ...epochs import BaseEpochs +from ..constants import FIFF +from ..meas_info import _empty_info +from .constants import KIT, LEGACY_AMP_PARAMS +from .coreg import read_mrk, _set_dig_kit +from ...event import read_events + + +FLOAT64 = '' | None + Channel-value correspondence when converting KIT trigger channels to a + Neuromag-style stim channel. For '<', the largest values are assigned + to the first channel (default). For '>', the largest values are + assigned to the last channel. Can also be specified as a list of + trigger channel indexes. If None, no synthesized channel is generated. + slope : '+' | '-' + How to interpret values on KIT trigger channels when synthesizing a + Neuromag-style stim channel. 
With '+', a positive slope (low-to-high)
+        is interpreted as an event. With '-', a negative slope (high-to-low)
+        is interpreted as an event.
+    stimthresh : float
+        The threshold level for accepting voltage changes in KIT trigger
+        channels as a trigger event. If None, stim must also be set to None.
+    %(preload)s
+    stim_code : 'binary' | 'channel'
+        How to decode trigger values from stim channels. 'binary' reads stim
+        channel events as binary code, 'channel' encodes channel number.
+    allow_unknown_format : bool
+        Force reading old data that is not officially supported. Alternatively,
+        read and re-save the data with the KIT MEG Laboratory application.
+    %(standardize_names)s
+    %(verbose)s
+
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the headshape surface points. elp
+    refers to the points in head-space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
+
+    @verbose
+    def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
+                 slope='-', stimthresh=1, preload=False, stim_code='binary',
+                 allow_unknown_format=False, standardize_names=None,
+                 verbose=None):  # noqa: D102
+        logger.info('Extracting SQD Parameters from %s...' % input_fname)
+        input_fname = op.abspath(input_fname)
+        self.preload = False
+        logger.info('Creating Raw.info structure...')
+        info, kit_info = get_kit_info(
+            input_fname, allow_unknown_format, standardize_names)
+        kit_info['slope'] = slope
+        kit_info['stimthresh'] = stimthresh
+        if kit_info['acq_type'] != KIT.CONTINUOUS:
+            raise TypeError('SQD file contains epochs, not raw data. Wrong '
+                            'reader.')
+        logger.info('Creating Info structure...')
+
+        last_samps = [kit_info['n_samples'] - 1]
+        self._raw_extras = [kit_info]
+        self._set_stimchannels(info, stim, stim_code)
+        super(RawKIT, self).__init__(
+            info, preload, last_samps=last_samps, filenames=[input_fname],
+            raw_extras=self._raw_extras, verbose=verbose)
+        self.info = _call_digitization(
+            info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
+        logger.info('Ready.')
+
+    def read_stim_ch(self, buffer_size=1e5):
+        """Read events from data.
+
+        Parameters
+        ----------
+        buffer_size : int
+            The size of the chunks by which the data are scanned.
+
+        Returns
+        -------
+        events : array, [samples]
+            The event vector (1 x samples).
+        """
+        buffer_size = int(buffer_size)
+        start = int(self.first_samp)
+        stop = int(self.last_samp + 1)
+
+        pick = pick_types(self.info, meg=False, ref_meg=False,
+                          stim=True, exclude=[])
+        stim_ch = np.empty((1, stop), dtype=np.int64)
+        for b_start in range(start, stop, buffer_size):
+            b_stop = b_start + buffer_size
+            x = self[pick, b_start:b_stop][0]
+            stim_ch[:, b_start:b_start + x.shape[1]] = x
+
+        return stim_ch
+
+    @fill_doc
+    def _set_stimchannels(self, info, stim, stim_code):
+        """Specify how the trigger channel is synthesized from analog channels.
+
+        Has to be done before loading data. For a RawKIT instance that has been
+        created with preload=True, this method will raise a
+        NotImplementedError.
+
+        Parameters
+        ----------
+        %(info_not_none)s
+        stim : list of int | '<' | '>'
+            Can be submitted as list of trigger channels.
+            If a list is not specified, the default triggers extracted from
+            misc channels will be used with specified directionality.
+            '<' means the largest values are assigned to the first channel
+            in sequence. 
+ '>' means the largest trigger assigned to the last channel + in sequence. + stim_code : 'binary' | 'channel' + How to decode trigger values from stim channels. 'binary' read stim + channel events as binary code, 'channel' encodes channel number. + """ + if self.preload: + raise NotImplementedError("Can't change stim channel after " + "loading data") + _check_option('stim_code', stim_code, ['binary', 'channel']) + + if stim is not None: + if isinstance(stim, str): + picks = _default_stim_chs(info) + if stim == '<': + stim = picks[::-1] + elif stim == '>': + stim = picks + else: + raise ValueError("stim needs to be list of int, '>' or " + "'<', not %r" % str(stim)) + else: + stim = np.asarray(stim, int) + if stim.max() >= self._raw_extras[0]['nchan']: + raise ValueError( + 'Got stim=%s, but sqd file only has %i channels' % + (stim, self._raw_extras[0]['nchan'])) + + # modify info + nchan = self._raw_extras[0]['nchan'] + 1 + info['chs'].append(dict( + cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, + unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name='STI 014', + coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), + kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) + info._update_redundant() + + self._raw_extras[0]['stim'] = stim + self._raw_extras[0]['stim_code'] = stim_code + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + sqd = self._raw_extras[fi] + nchan = sqd['nchan'] + data_left = (stop - start) * nchan + conv_factor = sqd['conv_factor'] + + n_bytes = sqd['dtype'].itemsize + assert n_bytes in (2, 4) + # Read up to 100 MB of data at a time. + blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan) + with open(self._filenames[fi], 'rb', buffering=0) as fid: + # extract data + pointer = start * nchan * n_bytes + fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer) + stim = sqd['stim'] + for blk_start in np.arange(0, data_left, blk_size) // nchan: + blk_size = min(blk_size, data_left - blk_start * nchan) + block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) + block = block.reshape(nchan, -1, order='F').astype(float) + blk_stop = blk_start + block.shape[1] + data_view = data[:, blk_start:blk_stop] + block *= conv_factor + + # Create a synthetic stim channel + if stim is not None: + stim_ch = _make_stim_channel( + block[stim, :], sqd['slope'], sqd['stimthresh'], + sqd['stim_code'], stim) + block = np.vstack((block, stim_ch)) + + _mult_cal_one(data_view, block, idx, cals, mult) + # cals are all unity, so can be ignored + + +def _default_stim_chs(info): + """Return default stim channels for SQD files.""" + return pick_types(info, meg=False, ref_meg=False, misc=True, + exclude=[])[:8] + + +def _make_stim_channel(trigger_chs, slope, threshold, stim_code, + trigger_values): + """Create synthetic stim channel from multiple trigger channels.""" + if slope == '+': + trig_chs_bin = trigger_chs > threshold + elif slope == '-': + trig_chs_bin = trigger_chs < threshold + else: + raise ValueError("slope needs to be '+' or '-'") + # trigger value + if stim_code == 'binary': + trigger_values = 2 ** np.arange(len(trigger_chs)) + elif stim_code != 'channel': + raise ValueError("stim_code must be 'binary' or 'channel', got %s" % + repr(stim_code)) + trig_chs = trig_chs_bin * trigger_values[:, np.newaxis] + return np.array(trig_chs.sum(axis=0), ndmin=2) + + +class EpochsKIT(BaseEpochs): + """Epochs Array object from KIT SQD file. 
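+
+    Requires an epoch-mode SQD file; reading a continuous (raw) recording
+    with this class raises a ``TypeError``, and ``read_raw_kit`` should be
+    used instead (see the acquisition-type check in ``__init__`` below).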
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    events : str | array, shape (n_events, 3)
+        Path to events file. If array, it is the events typically returned
+        by the read_events function. If some events don't match the events
+        of interest as specified by event_id, they will be marked as
+        'IGNORED' in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a dict
+        is created with string integer names corresponding to the event
+        id integers.
+    tmin : float
+        Start time before event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13,  # T / m (gradiometers)
+                          mag=4e-12,  # T (magnetometers)
+                          eeg=40e-6,  # V (EEG channels)
+                          eog=250e-6  # V (EOG channels)
+                          )
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    allow_unknown_format : bool
+        Force reading old data that is not officially supported. Alternatively,
+        read and re-save the data with the KIT MEG Laboratory application.
+    %(standardize_names)s
+    %(verbose)s
+
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the headshape surface points. elp
+    refers to the points in head-space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
+    See Also
+    --------
+    mne.Epochs : Documentation of attribute and methods. 
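+
+    Examples
+    --------
+    A minimal sketch; the file names below are placeholders for a local
+    recording rather than data shipped with this package::
+
+        >>> from mne.io import read_epochs_kit  # doctest: +SKIP
+        >>> epochs = read_epochs_kit('data-epoch.raw',
+        ...                          events='data-eve.txt')  # doctest: +SKIP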
+ """ + + @verbose + def __init__(self, input_fname, events, event_id=None, tmin=0, + baseline=None, reject=None, flat=None, reject_tmin=None, + reject_tmax=None, mrk=None, elp=None, hsp=None, + allow_unknown_format=False, standardize_names=None, + verbose=None): # noqa: D102 + + if isinstance(events, str): + events = read_events(events) + + input_fname = _check_fname(fname=input_fname, must_exist=True, + overwrite='read') + logger.info('Extracting KIT Parameters from %s...' % input_fname) + self.info, kit_info = get_kit_info( + input_fname, allow_unknown_format, standardize_names) + kit_info.update(input_fname=input_fname) + self._raw_extras = [kit_info] + self._filenames = [] + if len(events) != self._raw_extras[0]['n_epochs']: + raise ValueError('Event list does not match number of epochs.') + + if self._raw_extras[0]['acq_type'] == KIT.EPOCHS: + self._raw_extras[0]['data_length'] = KIT.INT + else: + raise TypeError('SQD file contains raw data, not epochs or ' + 'average. Wrong reader.') + + if event_id is None: # convert to int to make typing-checks happy + event_id = {str(e): int(e) for e in np.unique(events[:, 2])} + + for key, val in event_id.items(): + if val not in events[:, 2]: + raise ValueError('No matching events found for %s ' + '(event id %i)' % (key, val)) + + data = self._read_kit_data() + assert data.shape == (self._raw_extras[0]['n_epochs'], + self.info['nchan'], + self._raw_extras[0]['frame_length']) + tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin + super(EpochsKIT, self).__init__( + self.info, data, events, event_id, tmin, tmax, baseline, + reject=reject, flat=flat, reject_tmin=reject_tmin, + reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) + self.info = _call_digitization( + info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) + logger.info('Ready.') + + def _read_kit_data(self): + """Read epochs data. + + Returns + ------- + data : array, [channels x samples] + the data matrix (channels x samples). + times : array, [samples] + returns the time values corresponding to the samples. + """ + info = self._raw_extras[0] + epoch_length = info['frame_length'] + n_epochs = info['n_epochs'] + n_samples = info['n_samples'] + input_fname = info['input_fname'] + dtype = info['dtype'] + nchan = info['nchan'] + + with open(input_fname, 'rb', buffering=0) as fid: + fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) + count = n_samples * nchan + data = np.fromfile(fid, dtype=dtype, count=count) + data = data.reshape((n_samples, nchan)).T + data = data * info['conv_factor'] + data = data.reshape((nchan, n_epochs, epoch_length)) + data = data.transpose((1, 0, 2)) + + return data + + +def _read_dir(fid): + return dict(offset=np.fromfile(fid, UINT32, 1)[0], + size=np.fromfile(fid, INT32, 1)[0], + max_count=np.fromfile(fid, INT32, 1)[0], + count=np.fromfile(fid, INT32, 1)[0]) + + +@verbose +def _read_dirs(fid, verbose=None): + dirs = list() + dirs.append(_read_dir(fid)) + for ii in range(dirs[0]['count'] - 1): + logger.debug(f' KIT dir entry {ii} @ {fid.tell()}') + dirs.append(_read_dir(fid)) + assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + return dirs + + +@verbose +def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, + verbose=None): + """Extract all the information from the sqd/con file. + + Parameters + ---------- + rawfile : str + KIT file to be read. + allow_unknown_format : bool + Force reading old data that is not officially supported. 
Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Returns + ------- + %(info_not_none)s + sqd : dict + A dict containing all the sqd parameter settings. + """ + sqd = dict() + sqd['rawfile'] = rawfile + unsupported_format = False + with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug + # + # directories (0) + # + sqd['dirs'] = dirs = _read_dirs(fid) + + # + # system (1) + # + fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) + # check file format version + version, revision = np.fromfile(fid, INT32, 2) + if version < 2 or (version == 2 and revision < 3): + version_string = "V%iR%03i" % (version, revision) + if allow_unknown_format: + unsupported_format = True + warn("Force loading KIT format %s" % version_string) + else: + raise UnsupportedKITFormat( + version_string, + "SQD file format %s is not officially supported. " + "Set allow_unknown_format=True to load it anyways." % + (version_string,)) + + sysid = np.fromfile(fid, INT32, 1)[0] + # basic info + system_name = _read_name(fid, n=128) + # model name + model_name = _read_name(fid, n=128) + # channels + sqd['nchan'] = channel_count = int(np.fromfile(fid, INT32, 1)[0]) + comment = _read_name(fid, n=256) + create_time, last_modified_time = np.fromfile(fid, INT32, 2) + fid.seek(KIT.INT * 3, SEEK_CUR) # reserved + dewar_style = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + fll_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + trigger_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + adboard_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 29, SEEK_CUR) # reserved + + if version < 2 or (version == 2 and revision <= 3): + adc_range = float(np.fromfile(fid, INT32, 1)[0]) + else: + adc_range = np.fromfile(fid, FLOAT64, 1)[0] + adc_polarity, adc_allocated, adc_stored = np.fromfile(fid, INT32, 3) + system_name = system_name.replace('\x00', '') + system_name = system_name.strip().replace('\n', '/') + model_name = model_name.replace('\x00', '') + model_name = model_name.strip().replace('\n', '/') + + full_version = f'V{version:d}R{revision:03d}' + logger.debug("SQD file basic information:") + logger.debug("Meg160 version = %s", full_version) + logger.debug("System ID = %i", sysid) + logger.debug("System name = %s", system_name) + logger.debug("Model name = %s", model_name) + logger.debug("Channel count = %i", channel_count) + logger.debug("Comment = %s", comment) + logger.debug("Dewar style = %i", dewar_style) + logger.debug("FLL type = %i", fll_type) + logger.debug("Trigger type = %i", trigger_type) + logger.debug("A/D board type = %i", adboard_type) + logger.debug("ADC range = +/-%s[V]", adc_range / 2.) + logger.debug("ADC allocate = %i[bit]", adc_allocated) + logger.debug("ADC bit = %i[bit]", adc_stored) + # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' + description = \ + f'{system_name} ({sysid}) {full_version} {model_name}' + assert adc_allocated % 8 == 0 + sqd['dtype'] = np.dtype(f'%d, check ' + 'your data for correctness, including channel scales and ' + 'filter settings!' 
+ % (system_name, model_name, sysid, fll_type, use_fll_type)) + fll_type = use_fll_type + + # + # channel information (4) + # + chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] + chan_offset, chan_size = chan_dir['offset'], chan_dir['size'] + sqd['channels'] = channels = [] + exg_gains = list() + for i in range(channel_count): + fid.seek(chan_offset + chan_size * i) + channel_type, = np.fromfile(fid, INT32, 1) + # System 52 mislabeled reference channels as NULL. This was fixed + # in system 53; not sure about 51... + if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL: + channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE + + if channel_type in KIT.CHANNELS_MEG: + if channel_type not in KIT.CH_TO_FIFF_COIL: + raise NotImplementedError( + "KIT channel type %i can not be read. Please contact " + "the mne-python developers." % channel_type) + channels.append({ + 'type': channel_type, + # (x, y, z, theta, phi) for all MEG channels. Some channel + # types have additional information which we're not using. + 'loc': np.fromfile(fid, dtype=FLOAT64, count=5), + }) + if channel_type in KIT.CHANNEL_NAME_NCHAR: + fid.seek(16, SEEK_CUR) # misc fields + channels[-1]['name'] = _read_name(fid, channel_type) + elif channel_type in KIT.CHANNELS_MISC: + channel_no, = np.fromfile(fid, INT32, 1) + fid.seek(4, SEEK_CUR) + name = _read_name(fid, channel_type) + channels.append({ + 'type': channel_type, + 'no': channel_no, + 'name': name, + }) + if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG): + offset = 6 if channel_type == KIT.CHANNEL_EEG else 8 + fid.seek(offset, SEEK_CUR) + exg_gains.append(np.fromfile(fid, FLOAT64, 1)[0]) + elif channel_type == KIT.CHANNEL_NULL: + channels.append({'type': channel_type}) + else: + raise IOError("Unknown KIT channel type: %i" % channel_type) + exg_gains = np.array(exg_gains) + + # + # Channel sensitivity information: (5) + # + + # only sensor channels requires gain. 
the additional misc channels + # (trigger channels, audio and voice channels) are passed + # through unaffected + fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) + # (offset [Volt], gain [Tesla/Volt]) for each channel + sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) + sensitivity.shape = (channel_count, 2) + channel_offset, channel_gain = sensitivity.T + assert (channel_offset == 0).all() # otherwise we have a problem + + # + # amplifier gain (7) + # + fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) + amp_data = np.fromfile(fid, INT32, 1)[0] + if fll_type >= 100: # Kapper Type + # gain: mask bit + gain1 = (amp_data & 0x00007000) >> 12 + gain2 = (amp_data & 0x70000000) >> 28 + gain3 = (amp_data & 0x07000000) >> 24 + amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3]) + # filter settings + hpf = (amp_data & 0x00000700) >> 8 + lpf = (amp_data & 0x00070000) >> 16 + bef = (amp_data & 0x00000003) >> 0 + else: # Hanger Type + # gain + input_gain = (amp_data & 0x1800) >> 11 + output_gain = (amp_data & 0x0007) >> 0 + amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain] + # filter settings + hpf = (amp_data & 0x007) >> 4 + lpf = (amp_data & 0x0700) >> 8 + bef = (amp_data & 0xc000) >> 14 + hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type] + sqd['highpass'] = KIT.HPFS[hpf_options][hpf] + sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] + sqd['notch'] = KIT.BEFS[bef_options][bef] + + # + # Acquisition Parameters (8) + # + fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) + sqd['acq_type'], = acq_type, = np.fromfile(fid, INT32, 1) + sqd['sfreq'], = np.fromfile(fid, FLOAT64, 1) + if acq_type == KIT.CONTINUOUS: + # samples_count, = np.fromfile(fid, INT32, 1) + fid.seek(KIT.INT, SEEK_CUR) + sqd['n_samples'], = np.fromfile(fid, INT32, 1) + elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS: + sqd['frame_length'], = np.fromfile(fid, INT32, 1) + sqd['pretrigger_length'], = np.fromfile(fid, INT32, 1) + sqd['average_count'], = np.fromfile(fid, INT32, 1) + sqd['n_epochs'], = np.fromfile(fid, INT32, 1) + if acq_type == KIT.EVOKED: + sqd['n_samples'] = sqd['frame_length'] + else: + sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs'] + else: + raise IOError("Invalid acquisition type: %i. Your file is neither " + "continuous nor epoched data." % (acq_type,)) + + # + # digitization information (12 and 26) + # + dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] + cor_dir = dirs[KIT.DIR_INDEX_COREG] + dig = dict() + hsp = list() + if dig_dir['count'] > 0 and cor_dir['count'] > 0: + # directories (0) + fid.seek(dig_dir['offset']) + for _ in range(dig_dir['count']): + name = _read_name(fid, n=8).strip() + # Sometimes there are mismatches (e.g., AFz vs AFZ) between + # the channel name and its digitized, name, so let's be case + # insensitive. 
It will also prevent collisions with HSP + name = name.lower() + rr = np.fromfile(fid, FLOAT64, 3) + if name: + assert name not in dig + dig[name] = rr + else: + hsp.append(rr) + + # nasion, lpa, rpa, HPI in native space + elp = [] + for key in ( + 'fidnz', 'fidt9', 'fidt10', + 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4', 'hpi_5'): + if key in dig and np.isfinite(dig[key]).all(): + elp.append(dig.pop(key)) + elp = np.array(elp) + hsp = np.array(hsp, float).reshape(-1, 3) + if elp.shape not in ((6, 3), (7, 3), (8, 3)): + raise RuntimeError( + f'Fewer than 3 HPI coils found, got {len(elp) - 3}') + # coregistration + fid.seek(cor_dir['offset']) + mrk = np.zeros((elp.shape[0] - 3, 3)) + meg_done = [True] * 5 + for _ in range(cor_dir['count']): + done = np.fromfile(fid, INT32, 1)[0] + fid.seek(16 * KIT.DOUBLE + # meg_to_mri + 16 * KIT.DOUBLE, # mri_to_meg + SEEK_CUR) + marker_count = np.fromfile(fid, INT32, 1)[0] + if not done: + continue + assert marker_count >= len(mrk) + for mi in range(len(mrk)): + mri_type, meg_type, mri_done, this_meg_done = \ + np.fromfile(fid, INT32, 4) + meg_done[mi] = bool(this_meg_done) + fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos + mrk[mi] = np.fromfile(fid, FLOAT64, 3) + fid.seek(256, SEEK_CUR) # marker_file (char) + if not all(meg_done): + logger.info(f'Keeping {sum(meg_done)}/{len(meg_done)} HPI ' + 'coils that were digitized') + elp = elp[[True] * 3 + meg_done] + mrk = mrk[meg_done] + sqd.update(hsp=hsp, elp=elp, mrk=mrk) + + # precompute conversion factor for reading data + if unsupported_format: + if sysid not in LEGACY_AMP_PARAMS: + raise IOError("Legacy parameters for system ID %i unavailable" % + (sysid,)) + adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid] + is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels]) + ad_to_volt = adc_range / (2. ** adc_stored) + ad_to_tesla = ad_to_volt / amp_gain * channel_gain + conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) + # XXX this is a bit of a hack. Should probably do this more cleanly at + # some point... the 2 ** (adc_stored - 14) was empirically determined using + # the test files with known amplitudes. The conv_factors need to be + # replaced by these values otherwise we're off by a factor off 5000.0 + # for the EEG data. + is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) + for ch in channels] + exg_gains /= 2. 
** (adc_stored - 14) + exg_gains[exg_gains == 0] = ad_to_volt + conv_factor[is_exg] = exg_gains + sqd['conv_factor'] = conv_factor[:, np.newaxis] + + # Create raw.info dict for raw fif object with SQD data + info = _empty_info(float(sqd['sfreq'])) + info.update(meas_date=_stamp_to_dt((create_time, 0)), + lowpass=sqd['lowpass'], + highpass=sqd['highpass'], kit_system_id=sysid, + description=description) + + # Creates a list of dicts of meg channels for raw.info + logger.info('Setting channel info structure...') + info['chs'] = fiff_channels = [] + channel_index = defaultdict(lambda: 0) + sqd['eeg_dig'] = OrderedDict() + for idx, ch in enumerate(channels, 1): + if ch['type'] in KIT.CHANNELS_MEG: + ch_name = ch.get('name', '') + if ch_name == '' or standardize_names: + ch_name = 'MEG %03d' % idx + # create three orthogonal vector + # ch_angles[0]: theta, ch_angles[1]: phi + theta, phi = np.radians(ch['loc'][3:]) + x = sin(theta) * cos(phi) + y = sin(theta) * sin(phi) + z = cos(theta) + vec_z = np.array([x, y, z]) + vec_z /= np.linalg.norm(vec_z) + vec_x = np.zeros(vec_z.size, dtype=np.float64) + if vec_z[1] < vec_z[2]: + if vec_z[0] < vec_z[1]: + vec_x[0] = 1.0 + else: + vec_x[1] = 1.0 + elif vec_z[0] < vec_z[2]: + vec_x[0] = 1.0 + else: + vec_x[2] = 1.0 + vec_x -= np.sum(vec_x * vec_z) * vec_z + vec_x /= np.linalg.norm(vec_x) + vec_y = np.cross(vec_z, vec_x) + # transform to Neuromag like coordinate space + vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) + vecs = apply_trans(als_ras_trans, vecs) + unit = FIFF.FIFF_UNIT_T + loc = vecs.ravel() + else: + ch_type_label = KIT.CH_LABEL[ch['type']] + channel_index[ch_type_label] += 1 + ch_type_index = channel_index[ch_type_label] + ch_name = ch.get('name', '') + eeg_name = ch_name.lower() + # some files have all EEG labeled as EEG + if ch_name in ('', 'EEG') or standardize_names: + ch_name = '%s %03i' % (ch_type_label, ch_type_index) + unit = FIFF.FIFF_UNIT_V + loc = np.zeros(12) + if eeg_name and eeg_name in dig: + loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] + fiff_channels.append(dict( + cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, + unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, + coord_frame=FIFF.FIFFV_COORD_DEVICE, + coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], + kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) + info._unlocked = False + info._update_redundant() + return info, sqd + + +def _read_name(fid, ch_type=None, n=None): + n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type] + return fid.read(n).split(b'\x00')[0].decode('utf-8') + + +@fill_doc +def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', + slope='-', stimthresh=1, preload=False, stim_code='binary', + allow_unknown_format=False, standardize_names=False, + verbose=None): + """Reader function for Ricoh/KIT conversion to FIF. + + Parameters + ---------- + input_fname : str + Path to the sqd file. + mrk : None | str | array_like, shape (5, 3) | list of str or array_like + Marker points representing the location of the marker coils with + respect to the MEG Sensors, or path to a marker file. + If list, all of the markers will be averaged together. + elp : None | str | array_like, shape (8, 3) + Digitizer points representing the location of the fiducials and the + marker coils with respect to the digitized head shape, or path to a + file containing these points. + hsp : None | str | array, shape (n_points, 3) + Digitizer head shape points, or path to head shape file. 
If more than + 10,000 points are in the head shape, they are automatically decimated. + stim : list of int | '<' | '>' + Channel-value correspondence when converting KIT trigger channels to a + Neuromag-style stim channel. For '<', the largest values are assigned + to the first channel (default). For '>', the largest values are + assigned to the last channel. Can also be specified as a list of + trigger channel indexes. + slope : '+' | '-' + How to interpret values on KIT trigger channels when synthesizing a + Neuromag-style stim channel. With '+', a positive slope (low-to-high) + is interpreted as an event. With '-', a negative slope (high-to-low) + is interpreted as an event. + stimthresh : float + The threshold level for accepting voltage changes in KIT trigger + channels as a trigger event. + %(preload)s + stim_code : 'binary' | 'channel' + How to decode trigger values from stim channels. 'binary' read stim + channel events as binary code, 'channel' encodes channel number. + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Returns + ------- + raw : instance of RawKIT + A Raw object containing KIT data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + If mrk, hsp or elp are array_like inputs, then the numbers in xyz + coordinates should be in units of meters. + """ + return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, + stim=stim, slope=slope, stimthresh=stimthresh, + preload=preload, stim_code=stim_code, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, verbose=verbose) + + +@fill_doc +def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, + hsp=None, allow_unknown_format=False, + standardize_names=False, verbose=None): + """Reader function for Ricoh/KIT epochs files. + + Parameters + ---------- + input_fname : str + Path to the sqd file. + %(events_epochs)s + %(event_id)s + mrk : None | str | array_like, shape (5, 3) | list of str or array_like + Marker points representing the location of the marker coils with + respect to the MEG Sensors, or path to a marker file. + If list, all of the markers will be averaged together. + elp : None | str | array_like, shape (8, 3) + Digitizer points representing the location of the fiducials and the + marker coils with respect to the digitized head shape, or path to a + file containing these points. + hsp : None | str | array, shape (n_points, 3) + Digitizer head shape points, or path to head shape file. If more than + 10,000 points are in the head shape, they are automatically decimated. + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs. + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + epochs = EpochsKIT(input_fname=input_fname, events=events, + event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, + verbose=verbose) + return epochs diff --git a/python/libs/mne/io/kit/tests/__init__.py b/python/libs/mne/io/kit/tests/__init__.py new file mode 100644 index 0000000..aba6507 --- /dev/null +++ b/python/libs/mne/io/kit/tests/__init__.py @@ -0,0 +1,3 @@ +import os.path as op + +data_dir = op.join(op.dirname(__file__), 'data') diff --git a/python/libs/mne/io/kit/tests/test_coreg.py b/python/libs/mne/io/kit/tests/test_coreg.py new file mode 100644 index 0000000..e654096 --- /dev/null +++ b/python/libs/mne/io/kit/tests/test_coreg.py @@ -0,0 +1,44 @@ +# Authors: Christian Brodbeck +# +# License: BSD-3-Clause + +import inspect +import os +import pickle + +import pytest +from numpy.testing import assert_array_equal + +from mne.io.kit import read_mrk +from mne.io._digitization import _write_dig_points + + +FILE = inspect.getfile(inspect.currentframe()) +parent_dir = os.path.dirname(os.path.abspath(FILE)) +data_dir = os.path.join(parent_dir, 'data') +mrk_fname = os.path.join(data_dir, 'test_mrk.sqd') + + +def test_io_mrk(tmp_path): + """Test IO for mrk files.""" + tempdir = str(tmp_path) + pts = read_mrk(mrk_fname) + + # txt + path = os.path.join(tempdir, 'mrk.txt') + _write_dig_points(path, pts) + pts_2 = read_mrk(path) + assert_array_equal(pts, pts_2, "read/write mrk to text") + + # pickle + fname = os.path.join(tempdir, 'mrk.pickled') + with open(fname, 'wb') as fid: + pickle.dump(dict(mrk=pts), fid) + pts_2 = read_mrk(fname) + assert_array_equal(pts_2, pts, "pickle mrk") + with open(fname, 'wb') as fid: + pickle.dump(dict(), fid) + pytest.raises(ValueError, read_mrk, fname) + + # unsupported extension + pytest.raises(ValueError, read_mrk, "file.ext") diff --git a/python/libs/mne/io/kit/tests/test_kit.py b/python/libs/mne/io/kit/tests/test_kit.py new file mode 100644 index 0000000..abae15b --- /dev/null +++ b/python/libs/mne/io/kit/tests/test_kit.py @@ -0,0 +1,386 @@ +# Author: Teon Brooks +# +# License: BSD-3-Clause + +import os.path as op + +import numpy as np +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_equal, assert_allclose) +import pytest +from scipy import linalg +import scipy.io + +import mne +from mne import pick_types, Epochs, find_events, read_events +from mne.datasets.testing import requires_testing_data +from mne.transforms import apply_trans +from mne.utils import assert_dig_allclose +from mne.io import read_raw_fif, read_raw_kit, read_epochs_kit +from mne.io.constants import FIFF +from mne.io.kit.kit import get_kit_info +from mne.io.kit.coreg import read_sns +from mne.io.kit.constants import KIT +from mne.io.tests.test_raw import _test_raw_reader +from mne.surface import _get_ico_surface +from mne.io.kit import __file__ as _KIT_INIT_FILE + +data_dir = op.join(op.dirname(_KIT_INIT_FILE), 'tests', 'data') +sqd_path = op.join(data_dir, 'test.sqd') +sqd_umd_path = op.join(data_dir, 'test_umd-raw.sqd') +epochs_path = op.join(data_dir, 'test-epoch.raw') +events_path = op.join(data_dir, 'test-eve.txt') +mrk_path = op.join(data_dir, 'test_mrk.sqd') +mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd') +mrk3_path = op.join(data_dir, 'test_mrk_post.sqd') +elp_txt_path = op.join(data_dir, 'test_elp.txt') +hsp_txt_path = op.join(data_dir, 'test_hsp.txt') +elp_path = op.join(data_dir, 'test.elp') +hsp_path = op.join(data_dir, 'test.hsp') + 
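+# A hedged usage sketch for the local fixtures above (the same arguments
+# that test_data passes to _test_raw_reader below; requires the files in
+# tests/data):
+#   raw = read_raw_kit(sqd_path, mrk=mrk_path, elp=elp_txt_path,
+#                      hsp=hsp_txt_path, stim=list(range(167, 159, -1)),
+#                      slope='+', stimthresh=1)
+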
+data_path = mne.datasets.testing.data_path(download=False) +sqd_as_path = op.join(data_path, 'KIT', 'test_as-raw.con') +yokogawa_path = op.join( + data_path, 'KIT', 'ArtificalSignalData_Yokogawa_1khz.con') +ricoh_path = op.join( + data_path, 'KIT', 'ArtificalSignalData_RICOH_1khz.con') +ricoh_systems_paths = [op.join( + data_path, 'KIT', 'Example_PQA160C_1001-export_anonymyze.con')] +ricoh_systems_paths += [op.join( + data_path, 'KIT', 'Example_RICOH160-1_10020-export_anonymyze.con')] +ricoh_systems_paths += [op.join( + data_path, 'KIT', 'Example_RICOH160-1_10021-export_anonymyze.con')] +ricoh_systems_paths += [op.join( + data_path, 'KIT', '010409_Motor_task_coregist-export_tiny_1s.con')] +berlin_path = op.join(data_path, 'KIT', 'data_berlin.con') + + +@requires_testing_data +def test_data(tmp_path): + """Test reading raw kit files.""" + pytest.raises(TypeError, read_raw_kit, epochs_path) + pytest.raises(TypeError, read_epochs_kit, sqd_path) + pytest.raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_txt_path) + pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None, + list(range(200, 190, -1))) + pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None, + list(range(167, 159, -1)), '*', 1, True) + # check functionality + raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_txt_path, + hsp_txt_path) + assert raw_mrk.info['description'] == \ + 'NYU 160ch System since Jan24 2009 (34) V2R004 EQ1160C' + raw_py = _test_raw_reader(read_raw_kit, input_fname=sqd_path, mrk=mrk_path, + elp=elp_txt_path, hsp=hsp_txt_path, + stim=list(range(167, 159, -1)), slope='+', + stimthresh=1) + assert 'RawKIT' in repr(raw_py) + assert_equal(raw_mrk.info['kit_system_id'], KIT.SYSTEM_NYU_2010) + + # check number/kind of channels + assert_equal(len(raw_py.info['chs']), 193) + kit_channels = (('kind', {FIFF.FIFFV_MEG_CH: 157, FIFF.FIFFV_REF_MEG_CH: 3, + FIFF.FIFFV_MISC_CH: 32, FIFF.FIFFV_STIM_CH: 1}), + ('coil_type', {FIFF.FIFFV_COIL_KIT_GRAD: 157, + FIFF.FIFFV_COIL_KIT_REF_MAG: 3, + FIFF.FIFFV_COIL_NONE: 33})) + for label, target in kit_channels: + actual = {id_: sum(ch[label] == id_ for ch in raw_py.info['chs']) for + id_ in target.keys()} + assert_equal(actual, target) + + # Test stim channel + raw_stim = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path, + stim='<', preload=False) + for raw in [raw_py, raw_stim, raw_mrk]: + stim_pick = pick_types(raw.info, meg=False, ref_meg=False, + stim=True, exclude='bads') + stim1, _ = raw[stim_pick] + stim2 = np.array(raw.read_stim_ch(), ndmin=2) + assert_array_equal(stim1, stim2) + + # Binary file only stores the sensor channels + py_picks = pick_types(raw_py.info, meg=True, exclude='bads') + raw_bin = op.join(data_dir, 'test_bin_raw.fif') + raw_bin = read_raw_fif(raw_bin, preload=True) + bin_picks = pick_types(raw_bin.info, meg=True, stim=True, exclude='bads') + data_bin, _ = raw_bin[bin_picks] + data_py, _ = raw_py[py_picks] + + # this .mat was generated using the Yokogawa MEG Reader + data_Ykgw = op.join(data_dir, 'test_Ykgw.mat') + data_Ykgw = scipy.io.loadmat(data_Ykgw)['data'] + data_Ykgw = data_Ykgw[py_picks] + + assert_array_almost_equal(data_py, data_Ykgw) + + py_picks = pick_types(raw_py.info, meg=True, stim=True, ref_meg=False, + exclude='bads') + data_py, _ = raw_py[py_picks] + assert_array_almost_equal(data_py, data_bin) + + # KIT-UMD data + _test_raw_reader(read_raw_kit, input_fname=sqd_umd_path, test_rank='less') + raw = read_raw_kit(sqd_umd_path) + assert raw.info['description'] == \ + 'University of 
Maryland/Kanazawa Institute of Technology/160-channel MEG System (53) V2R004 PQ1160R' # noqa: E501 + assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_UMD_2014_12) + # check number/kind of channels + assert_equal(len(raw.info['chs']), 193) + for label, target in kit_channels: + actual = {id_: sum(ch[label] == id_ for ch in raw.info['chs']) for + id_ in target.keys()} + assert_equal(actual, target) + + # KIT Academia Sinica + raw = read_raw_kit(sqd_as_path, slope='+') + assert raw.info['description'] == \ + 'Academia Sinica/Institute of Linguistics//Magnetoencephalograph System (261) V2R004 PQ1160R-N2' # noqa: E501 + assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_AS_2008) + assert_equal(raw.info['chs'][100]['ch_name'], 'MEG 101') + assert_equal(raw.info['chs'][100]['kind'], FIFF.FIFFV_MEG_CH) + assert_equal(raw.info['chs'][100]['coil_type'], FIFF.FIFFV_COIL_KIT_GRAD) + assert_equal(raw.info['chs'][157]['ch_name'], 'MEG 158') + assert_equal(raw.info['chs'][157]['kind'], FIFF.FIFFV_REF_MEG_CH) + assert_equal(raw.info['chs'][157]['coil_type'], + FIFF.FIFFV_COIL_KIT_REF_MAG) + assert_equal(raw.info['chs'][160]['ch_name'], 'EEG 001') + assert_equal(raw.info['chs'][160]['kind'], FIFF.FIFFV_EEG_CH) + assert_equal(raw.info['chs'][160]['coil_type'], FIFF.FIFFV_COIL_EEG) + assert_array_equal(find_events(raw), [[91, 0, 2]]) + + +@requires_testing_data +def test_unknown_format(tmp_path): + """Test our warning about an unknown format.""" + fname = tmp_path / op.basename(ricoh_path) + _, kit_info = get_kit_info(ricoh_path, allow_unknown_format=False) + n_before = kit_info['dirs'][KIT.DIR_INDEX_SYSTEM]['offset'] + with open(fname, 'wb') as fout: + with open(ricoh_path, 'rb') as fin: + fout.write(fin.read(n_before)) + version, revision = np.fromfile(fin, ' 2 # good + version = 1 # bad + np.array([version, revision], ' 5000 + + # should have similar size, distance from center + dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1)) + dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1)) + hsp_rad = np.mean(dist) + hsp_dec_rad = np.mean(dist_dec) + assert_array_almost_equal(hsp_rad, hsp_dec_rad, decimal=3) + + +@requires_testing_data +@pytest.mark.parametrize('fname, desc, system_id', [ + (ricoh_systems_paths[0], + 'Meg160/Analysis (1001) V2R004 PQA160C', 1001), + (ricoh_systems_paths[1], + 'RICOH MEG System (10020) V3R000 RICOH160-1', 10020), + (ricoh_systems_paths[2], + 'RICOH MEG System (10021) V3R000 RICOH160-1', 10021), + (ricoh_systems_paths[3], + 'Yokogawa Electric Corporation/MEG device for infants/151-channel MEG ' + 'System (903) V2R004 PQ1151R', 903), +]) +def test_raw_system_id(fname, desc, system_id): + """Test reading basics and system IDs.""" + raw = _test_raw_reader(read_raw_kit, input_fname=fname) + assert raw.info['description'] == desc + assert raw.info['kit_system_id'] == system_id + + +@requires_testing_data +def test_berlin(): + """Test data from Berlin.""" + # gh-8535 + raw = read_raw_kit(berlin_path) + assert raw.info['description'] == 'Physikalisch Technische Bundesanstalt, Berlin/128-channel MEG System (124) V2R004 PQ1128R-N2' # noqa: E501 + assert raw.info['kit_system_id'] == 124 + assert raw.info['highpass'] == 0. + assert raw.info['lowpass'] == 200. + assert raw.info['sfreq'] == 500. 
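+    # 28.77 s at 500 Hz -> sample index 14385; spot-check single MEG and
+    # EEG samples against reference amplitudes (cf. gh-8535 above).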
+ n = int(round(28.77 * raw.info['sfreq'])) + meg = raw.get_data('MEG 003', n, n + 1)[0, 0] + assert_allclose(meg, -8.89e-12, rtol=1e-3) + eeg = raw.get_data('E14', n, n + 1)[0, 0] + assert_allclose(eeg, -2.55, rtol=1e-3) diff --git a/python/libs/mne/io/matrix.py b/python/libs/mne/io/matrix.py new file mode 100644 index 0000000..4da12b8 --- /dev/null +++ b/python/libs/mne/io/matrix.py @@ -0,0 +1,128 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +from .constants import FIFF +from .tag import find_tag, has_tag +from .write import (write_int, start_block, end_block, write_float_matrix, + write_name_list) +from ..utils import logger + + +def _transpose_named_matrix(mat): + """Transpose mat inplace (no copy).""" + mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow'] + mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names'] + mat['data'] = mat['data'].T + + +def _read_named_matrix(fid, node, matkind, indent=' ', transpose=False): + """Read named matrix from the given node. + + Parameters + ---------- + fid : file + The opened file descriptor. + node : dict + The node in the tree. + matkind : int + The type of matrix. + transpose : bool + If True, transpose the matrix. Default is False. + %(verbose)s + + Returns + ------- + mat: dict + The matrix data + """ + # Descend one level if necessary + if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX: + for k in range(node['nchild']): + if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX: + if has_tag(node['children'][k], matkind): + node = node['children'][k] + break + else: + logger.info(indent + 'Desired named matrix (kind = %d) not ' + 'available' % matkind) + return None + else: + if not has_tag(node, matkind): + logger.info(indent + 'Desired named matrix (kind = %d) not ' + 'available' % matkind) + return None + + # Read everything we need + tag = find_tag(fid, node, matkind) + if tag is None: + raise ValueError('Matrix data missing') + else: + data = tag.data + + nrow, ncol = data.shape + tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW) + if tag is not None and tag.data != nrow: + raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW ' + 'tag do not match') + + tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL) + if tag is not None and tag.data != ncol: + raise ValueError('Number of columns in matrix data and ' + 'FIFF_MNE_NCOL tag do not match') + + tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES) + row_names = tag.data.split(':') if tag is not None else [] + + tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES) + col_names = tag.data.split(':') if tag is not None else [] + + mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, + data=data) + if transpose: + _transpose_named_matrix(mat) + return mat + + +def write_named_matrix(fid, kind, mat): + """Write named matrix from the given node. + + Parameters + ---------- + fid : file + The opened file descriptor. + kind : int + The kind of the matrix. + matkind : int + The type of matrix. 
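+
+    Notes
+    -----
+    A minimal call sketch (``fid`` is an open FIF file descriptor and
+    ``kind`` a FIFF matrix tag; both names are illustrative)::
+
+        mat = dict(nrow=2, ncol=3, row_names=[], col_names=[],
+                   data=np.zeros((2, 3)))
+        write_named_matrix(fid, kind, mat)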
+ """ + # let's save ourselves from disaster + n_tot = mat['nrow'] * mat['ncol'] + if mat['data'].size != n_tot: + ratio = n_tot / float(mat['data'].size) + if n_tot < mat['data'].size and ratio > 0: + ratio = 1 / ratio + raise ValueError('Cannot write matrix: row (%i) and column (%i) ' + 'total element (%i) mismatch with data size (%i), ' + 'appears to be off by a factor of %gx' + % (mat['nrow'], mat['ncol'], n_tot, + mat['data'].size, ratio)) + start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) + write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow']) + write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol']) + + if len(mat['row_names']) > 0: + # let's prevent unintentional stupidity + if len(mat['row_names']) != mat['nrow']: + raise ValueError('len(mat["row_names"]) != mat["nrow"]') + write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names']) + + if len(mat['col_names']) > 0: + # let's prevent unintentional stupidity + if len(mat['col_names']) != mat['ncol']: + raise ValueError('len(mat["col_names"]) != mat["ncol"]') + write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names']) + + write_float_matrix(fid, kind, mat['data']) + end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) diff --git a/python/libs/mne/io/meas_info.py b/python/libs/mne/io/meas_info.py new file mode 100644 index 0000000..f6cebf4 --- /dev/null +++ b/python/libs/mne/io/meas_info.py @@ -0,0 +1,2932 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Teon Brooks +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from collections import Counter, OrderedDict +from collections.abc import Mapping +import contextlib +from copy import deepcopy +import datetime +from io import BytesIO +import operator +from textwrap import shorten +import string + +import numpy as np + +from .pick import (channel_type, pick_channels, pick_info, _get_channel_types, + get_channel_type_constants, pick_types, _contains_ch_type) +from .constants import FIFF, _coord_frame_named +from .open import fiff_open +from .tree import dir_tree_find +from .tag import (read_tag, find_tag, _ch_coord_dict, _update_ch_info_named, + _rename_list) +from .proj import (_read_proj, _write_proj, _uniquify_projs, _normalize_proj, + _proj_equal, Projection) +from .ctf_comp import _read_ctf_comp, write_ctf_comp +from .write import (start_and_end_file, start_block, end_block, + write_string, write_dig_points, write_float, write_int, + write_coord_trans, write_ch_info, write_name_list, + write_julian, write_float_matrix, write_id, DATE_NONE) +from .proc_history import _read_proc_history, _write_proc_history +from ..transforms import (invert_transform, Transform, _coord_frame_name, + _ensure_trans, _frame_to_str) +from ..utils import (logger, verbose, warn, object_diff, _validate_type, + _stamp_to_dt, _dt_to_stamp, _pl, _is_numeric, deprecated, + _check_option, _on_missing, _check_on_missing, fill_doc, + _check_fname) +from ._digitization import (_format_dig_points, _dig_kind_proper, DigPoint, + _dig_kind_rev, _dig_kind_ints, _read_dig_fif) +from ._digitization import write_dig, _get_data_as_dict_from_dig +from .compensator import get_current_comp +from ..defaults import _handle_default + + +b = bytes # alias + +_SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type', + 'unit', 'unit_mul', 'coord_frame') +_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name')) +# XXX we need to require these except when doing simplify_info +_MIN_CH_KEYS_SET = set(('kind', 'cal', 'unit', 'loc', 'ch_name')) + + +def _get_valid_units(): + """Get valid units 
according to the International System of Units (SI). + + The International System of Units (SI, :footcite:`WikipediaSI`) is the + default system for describing units in the Brain Imaging Data Structure + (BIDS). For more information, see the BIDS specification + :footcite:`BIDSdocs` and the appendix "Units" therein. + + References + ---------- + .. footbibliography:: + """ + valid_prefix_names = ['yocto', 'zepto', 'atto', 'femto', 'pico', 'nano', + 'micro', 'milli', 'centi', 'deci', 'deca', 'hecto', + 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa', + 'zetta', 'yotta'] + valid_prefix_symbols = ['y', 'z', 'a', 'f', 'p', 'n', u'µ', 'm', 'c', 'd', + 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] + valid_unit_names = ['metre', 'kilogram', 'second', 'ampere', 'kelvin', + 'mole', 'candela', 'radian', 'steradian', 'hertz', + 'newton', 'pascal', 'joule', 'watt', 'coulomb', 'volt', + 'farad', 'ohm', 'siemens', 'weber', 'tesla', 'henry', + 'degree Celsius', 'lumen', 'lux', 'becquerel', 'gray', + 'sievert', 'katal'] + valid_unit_symbols = ['m', 'kg', 's', 'A', 'K', 'mol', 'cd', 'rad', 'sr', + 'Hz', 'N', 'Pa', 'J', 'W', 'C', 'V', 'F', u'Ω', 'S', + 'Wb', 'T', 'H', u'°C', 'lm', 'lx', 'Bq', 'Gy', 'Sv', + 'kat'] + + # Valid units are all possible combinations of either prefix name or prefix + # symbol together with either unit name or unit symbol. E.g., nV for + # nanovolt + valid_units = [] + valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names + for unit in valid_unit_names]) + valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names + for unit in valid_unit_symbols]) + valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols + for unit in valid_unit_names]) + valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols + for unit in valid_unit_symbols]) + + # units are also valid without a prefix + valid_units += valid_unit_names + valid_units += valid_unit_symbols + + # we also accept "n/a" as a unit, which is the default missing value in + # BIDS + valid_units += ["n/a"] + + return tuple(valid_units) + + +@verbose +def _unique_channel_names(ch_names, max_length=None, verbose=None): + """Ensure unique channel names.""" + suffixes = tuple(string.ascii_lowercase) + if max_length is not None: + ch_names[:] = [name[:max_length] for name in ch_names] + unique_ids = np.unique(ch_names, return_index=True)[1] + if len(unique_ids) != len(ch_names): + dups = {ch_names[x] + for x in np.setdiff1d(range(len(ch_names)), unique_ids)} + warn('Channel names are not unique, found duplicates for: ' + '%s. Applying running numbers for duplicates.' % dups) + for ch_stem in dups: + overlaps = np.where(np.array(ch_names) == ch_stem)[0] + # We need an extra character since we append '-'. + # np.ceil(...) is the maximum number of appended digits. 
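The cross-product rule implemented by _get_valid_units above (every prefix combined with every unit, names and symbols alike, plus bare units and the BIDS "n/a" marker) can be illustrated in isolation. This is a self-contained sketch of the rule, not a call into the module:

    prefix_symbols = ['n', 'µ', 'm']     # nano, micro, milli
    unit_symbols = ['V', 'T']            # volt, tesla
    valid = {p + u for p in prefix_symbols for u in unit_symbols}
    valid |= set(unit_symbols)           # units are valid without a prefix
    valid.add('n/a')                     # BIDS missing-value marker
    assert 'nV' in valid and 'T' in valid and 'n/a' in valid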
+ if max_length is not None: + n_keep = ( + max_length - 1 - int(np.ceil(np.log10(len(overlaps))))) + else: + n_keep = np.inf + n_keep = min(len(ch_stem), n_keep) + ch_stem = ch_stem[:n_keep] + for idx, ch_idx in enumerate(overlaps): + # try idx first, then loop through lower case chars + for suffix in (idx,) + suffixes: + ch_name = ch_stem + '-%s' % suffix + if ch_name not in ch_names: + break + if ch_name not in ch_names: + ch_names[ch_idx] = ch_name + else: + raise ValueError('Adding a single alphanumeric for a ' + 'duplicate resulted in another ' + 'duplicate name %s' % ch_name) + return ch_names + + +class MontageMixin(object): + """Mixin for Montage getting and setting.""" + + @fill_doc + def get_montage(self): + """Get a DigMontage from instance. + + Returns + ------- + %(montage)s + """ + from ..channels.montage import make_dig_montage + info = self if isinstance(self, Info) else self.info + if info['dig'] is None: + return None + # obtain coord_frame, and landmark coords + # (nasion, lpa, rpa, hsp, hpi) from DigPoints + montage_bunch = _get_data_as_dict_from_dig(info['dig']) + coord_frame = _frame_to_str.get(montage_bunch.coord_frame) + + # get the channel names and chs data structure + ch_names, chs = info['ch_names'], info['chs'] + picks = pick_types(info, meg=False, eeg=True, seeg=True, + ecog=True, dbs=True, fnirs=True, exclude=[]) + + # channel positions from dig do not match ch_names one to one, + # so use loc[:3] instead + ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks} + + # create montage + montage = make_dig_montage( + ch_pos=ch_pos, + coord_frame=coord_frame, + nasion=montage_bunch.nasion, + lpa=montage_bunch.lpa, + rpa=montage_bunch.rpa, + hsp=montage_bunch.hsp, + hpi=montage_bunch.hpi, + ) + return montage + + @verbose + def set_montage(self, montage, match_case=True, match_alias=False, + on_missing='raise', verbose=None): + """Set %(montage_types)s channel positions and digitization points. + + Parameters + ---------- + %(montage)s + %(match_case)s + %(match_alias)s + %(on_missing_montage)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance. + + Notes + ----- + Operates in place. + + .. warning:: + Only %(montage_types)s channels can have their positions set using + a montage. Other channel types (e.g., MEG channels) should have + their positions defined properly using their data reading + functions. + """ + # How to set up a montage to old named fif file (walk through example) + # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df + + from ..channels.montage import _set_montage + info = self if isinstance(self, Info) else self.info + _set_montage(info, montage, match_case, match_alias, on_missing) + return self + + +class ContainsMixin(object): + """Mixin class for Raw, Evoked, Epochs and Info.""" + + def __contains__(self, ch_type): + """Check channel type membership. + + Parameters + ---------- + ch_type : str + Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc. + + Returns + ------- + in : bool + Whether or not the instance contains the given channel type. 
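The MontageMixin above gives every Info-carrying object a get/set cycle for digitized positions. A short sketch of that cycle, assuming only public MNE API ('standard_1020' ships with MNE; the channel names are illustrative):

    import mne

    info = mne.create_info(['Fp1', 'Cz', 'Oz'], sfreq=1000., ch_types='eeg')
    info.set_montage(mne.channels.make_standard_montage('standard_1020'))
    montage = info.get_montage()   # DigMontage rebuilt from info['dig'] + chs
    print(montage.get_positions()['ch_pos']['Cz'])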
+ + Examples + -------- + Channel type membership can be tested as:: + + >>> 'meg' in inst # doctest: +SKIP + True + >>> 'seeg' in inst # doctest: +SKIP + False + + """ + info = self if isinstance(self, Info) else self.info + if ch_type == 'meg': + has_ch_type = (_contains_ch_type(info, 'mag') or + _contains_ch_type(info, 'grad')) + else: + has_ch_type = _contains_ch_type(info, ch_type) + return has_ch_type + + @property + def compensation_grade(self): + """The current gradient compensation grade.""" + info = self if isinstance(self, Info) else self.info + return get_current_comp(info) + + @fill_doc + def get_channel_types(self, picks=None, unique=False, only_data_chs=False): + """Get a list of channel type for each channel. + + Parameters + ---------- + %(picks_all)s + unique : bool + Whether to return only unique channel types. Default is ``False``. + only_data_chs : bool + Whether to ignore non-data channels. Default is ``False``. + + Returns + ------- + channel_types : list + The channel types. + """ + info = self if isinstance(self, Info) else self.info + return _get_channel_types(info, picks=picks, unique=unique, + only_data_chs=only_data_chs) + + +def _format_trans(obj, key): + try: + t = obj[key] + except KeyError: + pass + else: + if t is not None: + obj[key] = Transform(t['from'], t['to'], t['trans']) + + +def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True): + ch_keys = set(ch) + bad = sorted(ch_keys.difference(_ALL_CH_KEYS_SET)) + if bad: + raise KeyError( + f'key{_pl(bad)} errantly present for {name}[{ci}]: {bad}') + if check_min: + bad = sorted(_MIN_CH_KEYS_SET.difference(ch_keys)) + if bad: + raise KeyError( + f'key{_pl(bad)} missing for {name}[{ci}]: {bad}',) + + +# As options are added here, test_meas_info.py:test_info_bad should be updated +def _check_bads(bads): + _validate_type(bads, list, 'bads') + return bads + + +def _check_description(description): + _validate_type(description, (None, str), "info['description']") + return description + + +def _check_dev_head_t(dev_head_t): + _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']") + if dev_head_t is not None: + dev_head_t = _ensure_trans(dev_head_t, 'meg', 'head') + return dev_head_t + + +def _check_experimenter(experimenter): + _validate_type(experimenter, (None, str), 'experimenter') + return experimenter + + +def _check_line_freq(line_freq): + _validate_type(line_freq, (None, 'numeric'), 'line_freq') + line_freq = float(line_freq) if line_freq is not None else line_freq + return line_freq + + +def _check_subject_info(subject_info): + _validate_type(subject_info, (None, dict), 'subject_info') + return subject_info + + +def _check_device_info(device_info): + _validate_type(device_info, (None, dict, ), 'device_info') + return device_info + + +def _check_helium_info(helium_info): + _validate_type(helium_info, (None, dict, ), 'helium_info') + return helium_info + + +class Info(dict, MontageMixin, ContainsMixin): + """Measurement information. + + This data structure behaves like a dictionary. It contains all metadata + that is available for a recording. However, its keys are restricted to + those provided by the + `FIF format specification `__, + so new entries should not be manually added. + + .. warning:: The only entries that should be manually changed by the user + are ``info['bads']``, ``info['description']``, + ``info['device_info']``, ``info['dev_head_t']``, + ``info['experimenter']``, info['helium_info'], + ``info['line_freq']``, ``info['temp']`` and + ``info['subject_info']``. 
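In code, that mutability contract reads roughly as follows (a sketch; the exact error text comes from the _attributes table defined further down):

    import mne

    info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=500., ch_types='eeg')
    info['bads'] = ['EEG 002']     # user-editable entry: validated, then stored
    info['description'] = 'demo'   # likewise fine
    try:
        info['sfreq'] = 250.       # guarded key
    except RuntimeError as err:
        print(err)                 # points to inst.resample() instead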
All other entries should be + considered read-only, though they can be modified by various + MNE-Python functions or methods (which have safeguards to + ensure all fields remain in sync). + + .. warning:: This class should not be instantiated directly. To create a + measurement information structure, use + :func:`mne.create_info`. + + Parameters + ---------- + *args : list + Arguments. + **kwargs : dict + Keyword arguments. + + Attributes + ---------- + acq_pars : str | None + MEG system acquisition parameters. + See :class:`mne.AcqParserFIF` for details. + acq_stim : str | None + MEG system stimulus parameters. + bads : list of str + List of bad (noisy/broken) channels, by name. These channels will by + default be ignored by many processing steps. + ch_names : list of str + The names of the channels. + chs : list of dict + A list of channel information dictionaries, one per channel. + See Notes for more information. + command_line : str + Contains the command and arguments used to create the source space + (used for source estimation). + comps : list of dict + CTF software gradient compensation data. + See Notes for more information. + ctf_head_t : dict | None + The transformation from 4D/CTF head coordinates to Neuromag head + coordinates. This is only present in 4D/CTF data. + custom_ref_applied : int + Whether a custom (=other than average) reference has been applied to + the EEG data. This flag is checked by some algorithms that require an + average reference to be set. + description : str | None + String description of the recording. + dev_ctf_t : dict | None + The transformation from device coordinates to 4D/CTF head coordinates. + This is only present in 4D/CTF data. + dev_head_t : dict | None + The device to head transformation. + device_info : dict | None + Information about the acquisition device. See Notes for details. + + .. versionadded:: 0.19 + dig : list of dict | None + The Polhemus digitization data in head coordinates. + See Notes for more information. + events : list of dict + Event list, sometimes extracted from the stim channels by Neuromag + systems. In general this should not be used and + :func:`mne.find_events` should be used for event processing. + See Notes for more information. + experimenter : str | None + Name of the person that ran the experiment. + file_id : dict | None + The FIF globally unique ID. See Notes for more information. + gantry_angle : float | None + Tilt angle of the gantry in degrees. + helium_info : dict | None + Information about the device helium. See Notes for details. + + .. versionadded:: 0.19 + highpass : float + Highpass corner frequency in Hertz. Zero indicates a DC recording. + hpi_meas : list of dict + HPI measurements that were taken at the start of the recording + (e.g. coil frequencies). + See Notes for details. + hpi_results : list of dict + Head position indicator (HPI) digitization points and fit information + (e.g., the resulting transform). + See Notes for details. + hpi_subsystem : dict | None + Information about the HPI subsystem that was used (e.g., event + channel used for cHPI measurements). + See Notes for details. + kit_system_id : int + Identifies the KIT system. + line_freq : float | None + Frequency of the power line in Hertz. + lowpass : float + Lowpass corner frequency in Hertz. + It is automatically set to half the sampling rate if there is + otherwise no low-pass applied to the data. + maxshield : bool + True if active shielding (IAS) was active during recording. 
+ meas_date : datetime + The time (UTC) of the recording. + + .. versionchanged:: 0.20 + This is stored as a :class:`~python:datetime.datetime` object + instead of a tuple of seconds/microseconds. + meas_file : str | None + Raw measurement file (used for source estimation). + meas_id : dict | None + The ID assigned to this measurement by the acquisition system or + during file conversion. Follows the same format as ``file_id``. + mri_file : str | None + File containing the MRI to head transformation (used for source + estimation). + mri_head_t : dict | None + Transformation from MRI to head coordinates (used for source + estimation). + mri_id : dict | None + MRI unique ID (used for source estimation). + nchan : int + Number of channels. + proc_history : list of dict + The MaxFilter processing history. + See Notes for details. + proj_id : int | None + ID number of the project the experiment belongs to. + proj_name : str | None + Name of the project the experiment belongs to. + projs : list of Projection + List of SSP operators that operate on the data. + See :class:`mne.Projection` for details. + sfreq : float + Sampling frequency in Hertz. + subject_info : dict | None + Information about the subject. + See Notes for details. + temp : object | None + Can be used to store temporary objects in an Info instance. It will not + survive an I/O roundtrip. + + .. versionadded:: 0.24 + utc_offset : str + UTC offset of related meas_date (sHH:MM). + + .. versionadded:: 0.19 + working_dir : str + Working directory used when the source space was created (used for + source estimation). + xplotter_layout : str + Layout of the Xplotter (Neuromag system only). + + See Also + -------- + mne.create_info + + Notes + ----- + The following parameters have a nested structure. + + * ``chs`` list of dict: + + cal : float + The calibration factor to bring the channels to physical + units. Used in product with ``range`` to scale the data read + from disk. + ch_name : str + The channel name. + coil_type : int + Coil type, e.g. ``FIFFV_COIL_MEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. + kind : int + The kind of channel, e.g. ``FIFFV_EEG_CH``. + loc : array, shape (12,) + Channel location. For MEG this is the position plus the + normal given by a 3x3 rotation matrix. For EEG this is the + position followed by reference position (with 6 unused). + The values are specified in device coordinates for MEG and in + head coordinates for EEG channels, respectively. + logno : int + Logical channel number, conventions in the usage of this + number vary. + range : float + The hardware-oriented part of the calibration factor. + This should be only applied to the continuous raw data. + Used in product with ``cal`` to scale data read from disk. + scanno : int + Scanning order number, starting from 1. + unit : int + The unit to use, e.g. ``FIFF_UNIT_T_M``. + unit_mul : int + Unit multipliers, most commonly ``FIFF_UNITM_NONE``. + + * ``comps`` list of dict: + + ctfkind : int + CTF compensation grade. + colcals : ndarray + Column calibrations. + mat : dict + A named matrix dictionary (with entries "data", "col_names", etc.) + containing the compensation matrix. + rowcals : ndarray + Row calibrations. + save_calibrated : bool + Were the compensation data saved in calibrated form. + + * ``device_info`` dict: + + type : str + Device type. + model : str + Device model. + serial : str + Device serial. + site : str + Device site. + + * ``dig`` list of dict: + + kind : int + The kind of channel, + e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. + r : array, shape (3,) + 3D position in m, in the frame given by ``coord_frame``. + ident : int + Number specifying the identity of the point, + e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or + 42 if kind is ``FIFFV_POINT_EEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. + + * ``events`` list of dict: + + channels : list of int + Channel indices for the events. + list : ndarray, shape (n_events * 3,) + Events in triplets as number of samples, before, after. + + * ``file_id`` dict: + + version : int + FIF format version, i.e. ``FIFFC_VERSION``. + machid : ndarray, shape (2,) + Unique machine ID, usually derived from the MAC address. + secs : int + Time in seconds. + usecs : int + Time in microseconds. + + * ``helium_info`` dict: + + he_level_raw : float + Helium level (%) before position correction. + helium_level : float + Helium level (%) after position correction. + orig_file_guid : str + Original file GUID. + meas_date : tuple of int + The helium level meas date. + + * ``hpi_meas`` list of dict: + + creator : str + Program that did the measurement. + sfreq : float + Sample rate. + nchan : int + Number of channels used. + nave : int + Number of averages used. + ncoil : int + Number of coils used. + first_samp : int + First sample used. + last_samp : int + Last sample used. + hpi_coils : list of dict + Coils, containing: + + number : int + Coil number. + epoch : ndarray + Buffer containing one epoch and channel. + slopes : ndarray, shape (n_channels,) + HPI data. + corr_coeff : ndarray, shape (n_channels,) + HPI curve fit correlations. + coil_freq : float + HPI coil excitation frequency. + + * ``hpi_results`` list of dict: + + dig_points : list + Digitization points (see ``dig`` definition) for the HPI coils. + order : ndarray, shape (ncoil,) + The determined digitization order. + used : ndarray, shape (nused,) + The indices of the used coils. + moments : ndarray, shape (ncoil, 3) + The coil moments. + goodness : ndarray, shape (ncoil,) + The goodness of fits. + good_limit : float + The goodness of fit limit. + dist_limit : float + The distance limit. + accept : int + Whether or not the fit was accepted. + coord_trans : instance of Transform + The resulting MEG<->head transformation. + + * ``hpi_subsystem`` dict: + + ncoil : int + The number of coils. + event_channel : str + The event channel used to encode cHPI status (e.g., STI201). + hpi_coils : list of ndarray + List of length ``ncoil``, each 4-element ndarray contains the + event bits used on the event channel to indicate cHPI status + (using the first element of these arrays is typically + sufficient). + + * ``mri_id`` dict: + + version : int + FIF format version, i.e. ``FIFFC_VERSION``. + machid : ndarray, shape (2,) + Unique machine ID, usually derived from the MAC address. + secs : int + Time in seconds. + usecs : int + Time in microseconds. + + * ``proc_history`` list of dict: + + block_id : dict + See ``id`` above. + date : ndarray, shape (2,) + 2-element tuple of seconds and microseconds. + experimenter : str + Name of the person who ran the program. + creator : str + Program that did the processing. + max_info : dict + Maxwell filtering info, can contain: + + sss_info : dict + SSS processing information. + max_st : dict + tSSS processing information. + sss_ctc : dict + Cross-talk processing information. + sss_cal : dict + Fine-calibration information. + smartshield : dict + MaxShield information. This dictionary is (always?)
empty, + but its presence implies that MaxShield was used during + acquisition. + + * ``subject_info`` dict: + + id : int + Integer subject identifier. + his_id : str + String subject identifier. + last_name : str + Last name. + first_name : str + First name. + middle_name : str + Middle name. + birthday : tuple of int + Birthday in (year, month, day) format. + sex : int + Subject sex (0=unknown, 1=male, 2=female). + hand : int + Handedness (1=right, 2=left, 3=ambidextrous). + """ + + _attributes = { + 'acq_pars': 'acq_pars cannot be set directly. ' + 'See mne.AcqParserFIF() for details.', + 'acq_stim': 'acq_stim cannot be set directly.', + 'bads': _check_bads, + 'ch_names': 'ch_names cannot be set directly. ' + 'Please use methods inst.add_channels(), ' + 'inst.drop_channels(), inst.pick_channels(), ' + 'inst.rename_channels(), inst.reorder_channels() ' + 'and inst.set_channel_types() instead.', + 'chs': 'chs cannot be set directly. ' + 'Please use methods inst.add_channels(), ' + 'inst.drop_channels(), inst.pick_channels(), ' + 'inst.rename_channels(), inst.reorder_channels() ' + 'and inst.set_channel_types() instead.', + 'command_line': 'command_line cannot be set directly.', + 'comps': 'comps cannot be set directly. ' + 'Please use method Raw.apply_gradient_compensation() ' + 'instead.', + 'ctf_head_t': 'ctf_head_t cannot be set directly.', + 'custom_ref_applied': 'custom_ref_applied cannot be set directly. ' + 'Please use method inst.set_eeg_reference() ' + 'instead.', + 'description': _check_description, + 'dev_ctf_t': 'dev_ctf_t cannot be set directly.', + 'dev_head_t': _check_dev_head_t, + 'device_info': _check_device_info, + 'dig': 'dig cannot be set directly. ' + 'Please use method inst.set_montage() instead.', + 'events': 'events cannot be set directly.', + 'experimenter': _check_experimenter, + 'file_id': 'file_id cannot be set directly.', + 'gantry_angle': 'gantry_angle cannot be set directly.', + 'helium_info': _check_helium_info, + 'highpass': 'highpass cannot be set directly. ' + 'Please use method inst.filter() instead.', + 'hpi_meas': 'hpi_meas can not be set directly.', + 'hpi_results': 'hpi_results cannot be set directly.', + 'hpi_subsystem': 'hpi_subsystem cannot be set directly.', + 'kit_system_id': 'kit_system_id cannot be set directly.', + 'line_freq': _check_line_freq, + 'lowpass': 'lowpass cannot be set directly. ' + 'Please use method inst.filter() instead.', + 'maxshield': 'maxshield cannot be set directly.', + 'meas_date': 'meas_date cannot be set directly. ' + 'Please use method inst.set_meas_date() instead.', + 'meas_file': 'meas_file cannot be set directly.', + 'meas_id': 'meas_id cannot be set directly.', + 'mri_file': 'mri_file cannot be set directly.', + 'mri_head_t': 'mri_head_t cannot be set directly.', + 'mri_id': 'mri_id cannot be set directly.', + 'nchan': 'nchan cannot be set directly. ' + 'Please use methods inst.add_channels(), ' + 'inst.drop_channels(), and inst.pick_channels() instead.', + 'proc_history': 'proc_history cannot be set directly.', + 'proj_id': 'proj_id cannot be set directly.', + 'proj_name': 'proj_name cannot be set directly.', + 'projs': 'projs cannot be set directly. ' + 'Please use methods inst.add_proj() and inst.del_proj() ' + 'instead.', + 'sfreq': 'sfreq cannot be set directly. 
' + 'Please use method inst.resample() instead.', + 'subject_info': _check_subject_info, + 'temp': lambda x: x, + 'utc_offset': 'utc_offset cannot be set directly.', + 'working_dir': 'working_dir cannot be set directly.', + 'xplotter_layout': 'xplotter_layout cannot be set directly.' + } + + def __init__(self, *args, **kwargs): + self._unlocked = True + super().__init__(*args, **kwargs) + # Deal with h5io writing things as dict + for key in ('dev_head_t', 'ctf_head_t', 'dev_ctf_t'): + _format_trans(self, key) + for res in self.get('hpi_results', []): + _format_trans(res, 'coord_trans') + if self.get('dig', None) is not None and len(self['dig']): + if isinstance(self['dig'], dict): # needs to be unpacked + self['dig'] = _dict_unpack(self['dig'], _DIG_CAST) + if not isinstance(self['dig'][0], DigPoint): + self['dig'] = _format_dig_points(self['dig']) + if isinstance(self.get('chs', None), dict): + self['chs']['ch_name'] = [str(x) for x in np.char.decode( + self['chs']['ch_name'], encoding='utf8')] + self['chs'] = _dict_unpack(self['chs'], _CH_CAST) + for pi, proj in enumerate(self.get('projs', [])): + if not isinstance(proj, Projection): + self['projs'][pi] = Projection(**proj) + # Old files could have meas_date as tuple instead of datetime + try: + meas_date = self['meas_date'] + except KeyError: + pass + else: + self['meas_date'] = _ensure_meas_date_none_or_dt(meas_date) + self._unlocked = False + + def __getstate__(self): + """Get state (for pickling).""" + return {'_unlocked': self._unlocked} + + def __setstate__(self, state): + """Set state (for pickling).""" + self._unlocked = state['_unlocked'] + + def __setitem__(self, key, val): + """Attribute setter.""" + # During unpickling, the _unlocked attribute has not been set, so + # let __setstate__ do it later and act unlocked now + unlocked = getattr(self, '_unlocked', True) + if key in self._attributes: + if isinstance(self._attributes[key], str): + if not unlocked: + raise RuntimeError(self._attributes[key]) + else: + val = self._attributes[key](val) # attribute checker function + else: + raise RuntimeError( + f"Info does not support directly setting the key {repr(key)}. " + "You can set info['temp'] to store temporary objects in an " + "Info instance, but these will not survive an I/O round-trip.") + super().__setitem__(key, val) + + def update(self, other=None, **kwargs): + """Update method using __setitem__().""" + iterable = other.items() if isinstance(other, Mapping) else other + if other is not None: + for key, val in iterable: + self[key] = val + for key, val in kwargs.items(): + self[key] = val + + @contextlib.contextmanager + def _unlock(self, *, update_redundant=False, check_after=False): + """Context manager unlocking access to attributes.""" + # needed for nested _unlock() + state = self._unlocked if hasattr(self, '_unlocked') else False + + self._unlocked = True + try: + yield + except Exception: + raise + else: + if update_redundant: + self._update_redundant() + if check_after: + self._check_consistency() + finally: + self._unlocked = state + + def copy(self): + """Copy the instance. + + Returns + ------- + info : instance of Info + The copied info. + """ + return deepcopy(self) + + def normalize_proj(self): + """(Re-)Normalize projection vectors after subselection. + + Applying projection after sub-selecting a set of channels that + were originally used to compute the original projection vectors + can be dangerous (e.g., if few channels remain, most power was + in channels that are no longer picked, etc.). 
By default, mne + will emit a warning when this is done. + + This function will re-normalize projectors to use only the + remaining channels, thus avoiding that warning. Only use this + function if you're confident that the projection vectors still + adequately capture the original signal of interest. + """ + _normalize_proj(self) + + def __repr__(self): + """Summarize info instead of printing all.""" + MAX_WIDTH = 68 + strs = ['<Info | %s non-empty values'] + non_empty = 0 + for k, v in self.items(): + this_len = len(v) if hasattr(v, '__len__') else None + if this_len is not None and this_len > 0: + entr = ('%d item%s (%s)' % (this_len, _pl(this_len), + type(v).__name__)) + else: + entr = '' + if entr != '': + non_empty += 1 + strs.append('%s: %s' % (k, entr)) + st = '\n '.join(sorted(strs)) + st += '\n>' + st %= non_empty + return st + + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + result = Info.__new__(Info) + result._unlocked = True + for k, v in self.items(): + # chs is roughly half the time but most are immutable + if k == 'chs': + # dict shallow copy is fast, so use it then overwrite + result[k] = list() + for ch in v: + ch = ch.copy() # shallow + ch['loc'] = ch['loc'].copy() + result[k].append(ch) + elif k == 'ch_names': + # we know it's list of str, shallow okay and saves ~100 µs + result[k] = v.copy() + elif k == 'hpi_meas': + hms = list() + for hm in v: + hm = hm.copy() + # the only mutable thing here is some entries in coils + hm['hpi_coils'] = [coil.copy() for coil in hm['hpi_coils']] + # There is a *tiny* risk here that someone could write + # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ... + # and assume that info.copy() will make an actual copy, + # but copying these entries has a 2x slowdown penalty so + # probably not worth it for such a deep corner case: + # for coil in hpi_coils: + # for key in ('epoch', 'slopes', 'corr_coeff'): + # coil[key] = coil[key].copy() + hms.append(hm) + result[k] = hms + else: + result[k] = deepcopy(v, memodict) + result._unlocked = False + return result + + def _check_consistency(self, prepend_error=''): + """Do some self-consistency checks and datatype tweaks.""" + missing = [bad for bad in self['bads'] if bad not in self['ch_names']] + if len(missing) > 0: + msg = '%sbad channel(s) %s marked do not exist in info' + raise RuntimeError(msg % (prepend_error, missing,)) + meas_date = self.get('meas_date') + if meas_date is not None: + if (not isinstance(self['meas_date'], datetime.datetime) or + self['meas_date'].tzinfo is None or + self['meas_date'].tzinfo is not datetime.timezone.utc): + raise RuntimeError('%sinfo["meas_date"] must be a datetime ' + 'object in UTC or None, got %r' + % (prepend_error, repr(self['meas_date']),)) + + chs = [ch['ch_name'] for ch in self['chs']] + if len(self['ch_names']) != len(chs) or any( + ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \ + self['nchan'] != len(chs): + raise RuntimeError('%sinfo channel name inconsistency detected, ' + 'please notify mne-python developers' + % (prepend_error,)) + + # make sure we have the proper datatypes + with self._unlock(): + for key in ('sfreq', 'highpass', 'lowpass'): + if self.get(key) is not None: + self[key] = float(self[key]) + + for pi, proj in enumerate(self.get('projs', [])): + _validate_type(proj, Projection, f'info["projs"][{pi}]') + for key in ('kind', 'active', 'desc', 'data', 'explained_var'): + if key not in proj: + raise RuntimeError(f'Projection incomplete, missing {key}') + + # Ensure info['chs'] has immutable entries (copies much faster) + for ci, ch in enumerate(self['chs']): + _check_ch_keys(ch, ci) + ch_name = ch['ch_name'] + if not isinstance(ch_name, str): + raise TypeError(
'Bad info: info["chs"][%d]["ch_name"] is not a string, ' + 'got type %s' % (ci, type(ch_name))) + for key in _SCALAR_CH_KEYS: + val = ch.get(key, 1) + if not _is_numeric(val): + raise TypeError( + 'Bad info: info["chs"][%d][%r] = %s is type %s, must ' + 'be float or int' % (ci, key, val, type(val))) + loc = ch['loc'] + if not (isinstance(loc, np.ndarray) and loc.shape == (12,)): + raise TypeError( + 'Bad info: info["chs"][%d]["loc"] must be ndarray with ' + '12 elements, got %r' % (ci, loc)) + + # make sure channel names are unique + with self._unlock(): + self['ch_names'] = _unique_channel_names(self['ch_names']) + for idx, ch_name in enumerate(self['ch_names']): + self['chs'][idx]['ch_name'] = ch_name + + def _update_redundant(self): + """Update the redundant entries.""" + with self._unlock(): + self['ch_names'] = [ch['ch_name'] for ch in self['chs']] + self['nchan'] = len(self['chs']) + + @deprecated('use inst.pick_channels instead.') + def pick_channels(self, ch_names, ordered=False): + """Pick channels from this Info object. + + Parameters + ---------- + ch_names : list of str + List of channels to keep. All other channels are dropped. + ordered : bool + If True (default False), ensure that the order of the channels + matches the order of ``ch_names``. + + Returns + ------- + info : instance of Info. + The modified Info object. + + Notes + ----- + Operates in-place. + + .. versionadded:: 0.20.0 + """ + sel = pick_channels(self.ch_names, ch_names, exclude=[], + ordered=ordered) + return pick_info(self, sel, copy=False, verbose=False) + + @property + def ch_names(self): + return self['ch_names'] + + def _get_chs_for_repr(self): + titles = _handle_default('titles') + + # good channels + channels = {} + ch_types = [channel_type(self, idx) for idx in range(len(self['chs']))] + ch_counts = Counter(ch_types) + for ch_type, count in ch_counts.items(): + if ch_type == 'meg': + channels['mag'] = len(pick_types(self, meg='mag')) + channels['grad'] = len(pick_types(self, meg='grad')) + elif ch_type == 'eog': + pick_eog = pick_types(self, eog=True) + eog = ', '.join( + np.array(self['ch_names'])[pick_eog]) + elif ch_type == 'ecg': + pick_ecg = pick_types(self, ecg=True) + ecg = ', '.join( + np.array(self['ch_names'])[pick_ecg]) + channels[ch_type] = count + + good_channels = ', '.join( + [f'{v} {titles.get(k, k.upper())}' for k, v in channels.items()]) + + if 'ecg' not in channels.keys(): + ecg = 'Not available' + if 'eog' not in channels.keys(): + eog = 'Not available' + + # bad channels + if len(self['bads']) > 0: + bad_channels = ', '.join(self['bads']) + else: + bad_channels = 'None' + + return good_channels, bad_channels, ecg, eog + + def _repr_html_(self, caption=None): + """Summarize info for HTML representation.""" + from ..html_templates import repr_templates_env + if isinstance(caption, str): + html = f'
<h4>{caption}</h4>
' + else: + html = '' + + good_channels, bad_channels, ecg, eog = self._get_chs_for_repr() + + # TODO + # Most of the following checks are to ensure that we get a proper repr + # for Forward['info'] (and probably others like + # InverseOperator['info']??), which doesn't seem to follow our standard + # Info structure used elsewhere. + # Proposed solution for a future refactoring: + # Forward['info'] should get its own Info subclass (with respective + # repr). + + # meas date + if 'meas_date' in self and self['meas_date'] is not None: + meas_date = self['meas_date'].strftime( + "%B %d, %Y %H:%M:%S" + ) + ' GMT' + else: + meas_date = None + + if 'projs' in self and self['projs']: + projs = [ + f'{p["desc"]} : {"on" if p["active"] else "off"}' + for p in self['projs'] + ] + else: + projs = None + + if 'subject_info' in self: + subject_info = self['subject_info'] + else: + subject_info = None + + if 'lowpass' in self: + lowpass = self['lowpass'] + else: + lowpass = None + + if 'highpass' in self: + highpass = self['highpass'] + else: + highpass = None + + if 'sfreq' in self: + sfreq = self['sfreq'] + else: + sfreq = None + + if 'experimenter' in self: + experimenter = self['experimenter'] + else: + experimenter = None + + info_template = repr_templates_env.get_template('info.html.jinja') + html += info_template.render( + caption=caption, meas_date=meas_date, ecg=ecg, + eog=eog, good_channels=good_channels, bad_channels=bad_channels, + projs=projs, subject_info=subject_info, lowpass=lowpass, + highpass=highpass, sfreq=sfreq, experimenter=experimenter + ) + return html + + +def _simplify_info(info): + """Return a simplified info structure to speed up picking.""" + chs = [{key: ch[key] + for key in ('ch_name', 'kind', 'unit', 'coil_type', 'loc', 'cal')} + for ch in info['chs']] + sub_info = Info(chs=chs, bads=info['bads'], comps=info['comps'], + projs=info['projs'], + custom_ref_applied=info['custom_ref_applied']) + sub_info._update_redundant() + return sub_info + + +@verbose +def read_fiducials(fname, verbose=None): + """Read fiducials from a fiff file. + + Parameters + ---------- + fname : path-like + The filename to read. + %(verbose)s + + Returns + ------- + pts : list of dict + List of digitizer points (each point in a dict). + coord_frame : int + The coordinate frame of the points (one of + ``mne.io.constants.FIFF.FIFFV_COORD_...``). + """ + fname = _check_fname( + fname=fname, + overwrite='read', + must_exist=True + ) + fid, tree, _ = fiff_open(fname) + with fid: + isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK) + isotrak = isotrak[0] + pts = [] + coord_frame = FIFF.FIFFV_COORD_HEAD + for k in range(isotrak['nent']): + kind = isotrak['directory'][k].kind + pos = isotrak['directory'][k].pos + if kind == FIFF.FIFF_DIG_POINT: + tag = read_tag(fid, pos) + pts.append(DigPoint(tag.data)) + elif kind == FIFF.FIFF_MNE_COORD_FRAME: + tag = read_tag(fid, pos) + coord_frame = tag.data[0] + coord_frame = _coord_frame_named.get(coord_frame, coord_frame) + + # coord_frame is not stored in the tag + for pt in pts: + pt['coord_frame'] = coord_frame + + return pts, coord_frame + + +@verbose +def write_fiducials(fname, pts, coord_frame='unknown', *, overwrite=False, + verbose=None): + """Write fiducials to a fiff file. + + Parameters + ---------- + fname : path-like + Destination file name. + pts : iterator of dict + Iterator through digitizer points. Each point is a dictionary with + the keys 'kind', 'ident' and 'r'. + coord_frame : str | int + The coordinate frame of the points. 
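read_fiducials above returns the digitizer points together with their coordinate frame, and write_fiducials (continuing below) is its inverse. A small round-trip sketch, importing from the module added in this diff; the .fif paths are illustrative only:

    from mne.io.meas_info import read_fiducials, write_fiducials

    pts, coord_frame = read_fiducials('fiducials.fif')
    print(len(pts), coord_frame)   # e.g. 3 points in FIFFV_COORD_HEAD
    write_fiducials('fiducials-copy.fif', pts, coord_frame, overwrite=True)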
If a string, must be one of + ``'meg'``, ``'mri'``, ``'mri_voxel'``, ``'head'``, + ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, ``'ctf_head'``, + ``'ctf_meg'``, and ``'unknown'``. + If an integer, must be one of the constants defined as + ``mne.io.constants.FIFF.FIFFV_COORD_...``. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + """ + write_dig(fname, pts, coord_frame, overwrite=overwrite) + + +@verbose +def read_info(fname, verbose=None): + """Read measurement info from a file. + + Parameters + ---------- + fname : str + File name. + %(verbose)s + + Returns + ------- + %(info_not_none)s + """ + f, tree, _ = fiff_open(fname) + with f as fid: + info = read_meas_info(fid, tree)[0] + return info + + +def read_bad_channels(fid, node): + """Read bad channels. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node of the FIF tree that contains info on the bad channels. + + Returns + ------- + bads : list + A list of bad channel names. + """ + return _read_bad_channels(fid, node, ch_names_mapping=None) + + +def _read_bad_channels(fid, node, ch_names_mapping): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS) + + bads = [] + if len(nodes) > 0: + for node in nodes: + tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST) + if tag is not None and tag.data is not None: + bads = tag.data.split(':') + bads[:] = _rename_list(bads, ch_names_mapping) + return bads + + +@verbose +def read_meas_info(fid, tree, clean_bads=False, verbose=None): + """Read the measurement info. + + Parameters + ---------- + fid : file + Open file descriptor. + tree : tree + FIF tree structure. + clean_bads : bool + If True, clean info['bads'] before running consistency check. + Should only be needed for old files where we did not check bads + before saving. + %(verbose)s + + Returns + ------- + %(info_not_none)s + meas : dict + Node in tree that contains the info.
+ """ + # Find the desired blocks + meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) + if len(meas) == 0: + raise ValueError('Could not find measurement data') + if len(meas) > 1: + raise ValueError('Cannot read more than 1 measurement data') + meas = meas[0] + + meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO) + if len(meas_info) == 0: + raise ValueError('Could not find measurement info') + if len(meas_info) > 1: + raise ValueError('Cannot read more than 1 measurement info') + meas_info = meas_info[0] + + # Read measurement info + dev_head_t = None + ctf_head_t = None + dev_ctf_t = None + meas_date = None + utc_offset = None + highpass = None + lowpass = None + nchan = None + sfreq = None + chs = [] + experimenter = None + description = None + proj_id = None + proj_name = None + line_freq = None + gantry_angle = None + custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + xplotter_layout = None + kit_system_id = None + for k in range(meas_info['nent']): + kind = meas_info['directory'][k].kind + pos = meas_info['directory'][k].pos + if kind == FIFF.FIFF_NCHAN: + tag = read_tag(fid, pos) + nchan = int(tag.data) + elif kind == FIFF.FIFF_SFREQ: + tag = read_tag(fid, pos) + sfreq = float(tag.data) + elif kind == FIFF.FIFF_CH_INFO: + tag = read_tag(fid, pos) + chs.append(tag.data) + elif kind == FIFF.FIFF_LOWPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data): + lowpass = float(tag.data) + elif kind == FIFF.FIFF_HIGHPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data): + highpass = float(tag.data) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + meas_date = tuple(tag.data) + if len(meas_date) == 1: # can happen from old C conversions + meas_date = (meas_date[0], 0) + elif kind == FIFF.FIFF_UTC_OFFSET: + tag = read_tag(fid, pos) + utc_offset = str(tag.data) + elif kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + + if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \ + cand['to'] == FIFF.FIFFV_COORD_HEAD: + dev_head_t = cand + elif cand['from'] == FIFF.FIFFV_COORD_HEAD and \ + cand['to'] == FIFF.FIFFV_COORD_DEVICE: + # this reversal can happen with BabyMEG data + dev_head_t = invert_transform(cand) + elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \ + cand['to'] == FIFF.FIFFV_COORD_HEAD: + ctf_head_t = cand + elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \ + cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + dev_ctf_t = cand + elif kind == FIFF.FIFF_EXPERIMENTER: + tag = read_tag(fid, pos) + experimenter = tag.data + elif kind == FIFF.FIFF_DESCRIPTION: + tag = read_tag(fid, pos) + description = tag.data + elif kind == FIFF.FIFF_PROJ_ID: + tag = read_tag(fid, pos) + proj_id = tag.data + elif kind == FIFF.FIFF_PROJ_NAME: + tag = read_tag(fid, pos) + proj_name = tag.data + elif kind == FIFF.FIFF_LINE_FREQ: + tag = read_tag(fid, pos) + line_freq = float(tag.data) + elif kind == FIFF.FIFF_GANTRY_ANGLE: + tag = read_tag(fid, pos) + gantry_angle = float(tag.data) + elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11 + tag = read_tag(fid, pos) + custom_ref_applied = int(tag.data) + elif kind == FIFF.FIFF_XPLOTTER_LAYOUT: + tag = read_tag(fid, pos) + xplotter_layout = str(tag.data) + elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID: + tag = read_tag(fid, pos) + kit_system_id = int(tag.data) + ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid) + + # Check that we have everything we need + if nchan is None: + raise ValueError('Number of channels is not defined') + + if sfreq is None: + raise ValueError('Sampling 
frequency is not defined') + + if len(chs) == 0: + raise ValueError('Channel information not defined') + + if len(chs) != nchan: + raise ValueError('Incorrect number of channel definitions found') + + if dev_head_t is None or ctf_head_t is None: + hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + if len(hpi_result) == 1: + hpi_result = hpi_result[0] + for k in range(hpi_result['nent']): + kind = hpi_result['directory'][k].kind + pos = hpi_result['directory'][k].pos + if kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and + cand['to'] == FIFF.FIFFV_COORD_HEAD and + dev_head_t is None): + dev_head_t = cand + elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and + cand['to'] == FIFF.FIFFV_COORD_HEAD and + ctf_head_t is None): + ctf_head_t = cand + + # Locate the Polhemus data + dig = _read_dig_fif(fid, meas_info) + + # Locate the acquisition information + acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS) + acq_pars = None + acq_stim = None + if len(acqpars) == 1: + acqpars = acqpars[0] + for k in range(acqpars['nent']): + kind = acqpars['directory'][k].kind + pos = acqpars['directory'][k].pos + if kind == FIFF.FIFF_DACQ_PARS: + tag = read_tag(fid, pos) + acq_pars = tag.data + elif kind == FIFF.FIFF_DACQ_STIM: + tag = read_tag(fid, pos) + acq_stim = tag.data + + # Load the SSP data + projs = _read_proj( + fid, meas_info, ch_names_mapping=ch_names_mapping) + + # Load the CTF compensation data + comps = _read_ctf_comp( + fid, meas_info, chs, ch_names_mapping=ch_names_mapping) + + # Load the bad channel list + bads = _read_bad_channels( + fid, meas_info, ch_names_mapping=ch_names_mapping) + + # + # Put the data together + # + info = Info(file_id=tree['id']) + info._unlocked = True + + # Locate events list + events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS) + evs = list() + for event in events: + ev = dict() + for k in range(event['nent']): + kind = event['directory'][k].kind + pos = event['directory'][k].pos + if kind == FIFF.FIFF_EVENT_CHANNELS: + ev['channels'] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_EVENT_LIST: + ev['list'] = read_tag(fid, pos).data + evs.append(ev) + info['events'] = evs + + # Locate HPI result + hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + hrs = list() + for hpi_result in hpi_results: + hr = dict() + hr['dig_points'] = [] + for k in range(hpi_result['nent']): + kind = hpi_result['directory'][k].kind + pos = hpi_result['directory'][k].pos + if kind == FIFF.FIFF_DIG_POINT: + hr['dig_points'].append(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER: + hr['order'] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COILS_USED: + hr['used'] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COIL_MOMENTS: + hr['moments'] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOODNESS: + hr['goodness'] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT: + hr['good_limit'] = float(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT: + hr['dist_limit'] = float(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_FIT_ACCEPT: + hr['accept'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_COORD_TRANS: + hr['coord_trans'] = read_tag(fid, pos).data + hrs.append(hr) + info['hpi_results'] = hrs + + # Locate HPI Measurement + hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS) + hms = list() + for hpi_meas in hpi_meass: + hm = dict() + for k in range(hpi_meas['nent']): + kind = 
hpi_meas['directory'][k].kind + pos = hpi_meas['directory'][k].pos + if kind == FIFF.FIFF_CREATOR: + hm['creator'] = str(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_SFREQ: + hm['sfreq'] = float(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_NCHAN: + hm['nchan'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_NAVE: + hm['nave'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_NCOIL: + hm['ncoil'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_FIRST_SAMPLE: + hm['first_samp'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_LAST_SAMPLE: + hm['last_samp'] = int(read_tag(fid, pos).data) + hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL) + hcs = [] + for hpi_coil in hpi_coils: + hc = dict() + for k in range(hpi_coil['nent']): + kind = hpi_coil['directory'][k].kind + pos = hpi_coil['directory'][k].pos + if kind == FIFF.FIFF_HPI_COIL_NO: + hc['number'] = int(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_EPOCH: + hc['epoch'] = read_tag(fid, pos).data + hc['epoch'].flags.writeable = False + elif kind == FIFF.FIFF_HPI_SLOPES: + hc['slopes'] = read_tag(fid, pos).data + hc['slopes'].flags.writeable = False + elif kind == FIFF.FIFF_HPI_CORR_COEFF: + hc['corr_coeff'] = read_tag(fid, pos).data + hc['corr_coeff'].flags.writeable = False + elif kind == FIFF.FIFF_HPI_COIL_FREQ: + hc['coil_freq'] = float(read_tag(fid, pos).data) + hcs.append(hc) + hm['hpi_coils'] = hcs + hms.append(hm) + info['hpi_meas'] = hms + del hms + + subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT) + si = None + if len(subject_info) == 1: + subject_info = subject_info[0] + si = dict() + for k in range(subject_info['nent']): + kind = subject_info['directory'][k].kind + pos = subject_info['directory'][k].pos + if kind == FIFF.FIFF_SUBJ_ID: + tag = read_tag(fid, pos) + si['id'] = int(tag.data) + elif kind == FIFF.FIFF_SUBJ_HIS_ID: + tag = read_tag(fid, pos) + si['his_id'] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_LAST_NAME: + tag = read_tag(fid, pos) + si['last_name'] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_FIRST_NAME: + tag = read_tag(fid, pos) + si['first_name'] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME: + tag = read_tag(fid, pos) + si['middle_name'] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY: + try: + tag = read_tag(fid, pos) + except OverflowError: + warn('Encountered an error while trying to read the ' + 'birthday from the input data. No birthday will be ' + 'set. 
Please check the integrity of the birthday ' + 'information in the input data.') + continue + si['birthday'] = tag.data + elif kind == FIFF.FIFF_SUBJ_SEX: + tag = read_tag(fid, pos) + si['sex'] = int(tag.data) + elif kind == FIFF.FIFF_SUBJ_HAND: + tag = read_tag(fid, pos) + si['hand'] = int(tag.data) + elif kind == FIFF.FIFF_SUBJ_WEIGHT: + tag = read_tag(fid, pos) + si['weight'] = tag.data + elif kind == FIFF.FIFF_SUBJ_HEIGHT: + tag = read_tag(fid, pos) + si['height'] = tag.data + info['subject_info'] = si + del si + + device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE) + di = None + if len(device_info) == 1: + device_info = device_info[0] + di = dict() + for k in range(device_info['nent']): + kind = device_info['directory'][k].kind + pos = device_info['directory'][k].pos + if kind == FIFF.FIFF_DEVICE_TYPE: + tag = read_tag(fid, pos) + di['type'] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_MODEL: + tag = read_tag(fid, pos) + di['model'] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SERIAL: + tag = read_tag(fid, pos) + di['serial'] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SITE: + tag = read_tag(fid, pos) + di['site'] = str(tag.data) + info['device_info'] = di + del di + + helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM) + hi = None + if len(helium_info) == 1: + helium_info = helium_info[0] + hi = dict() + for k in range(helium_info['nent']): + kind = helium_info['directory'][k].kind + pos = helium_info['directory'][k].pos + if kind == FIFF.FIFF_HE_LEVEL_RAW: + tag = read_tag(fid, pos) + hi['he_level_raw'] = float(tag.data) + elif kind == FIFF.FIFF_HELIUM_LEVEL: + tag = read_tag(fid, pos) + hi['helium_level'] = float(tag.data) + elif kind == FIFF.FIFF_ORIG_FILE_GUID: + tag = read_tag(fid, pos) + hi['orig_file_guid'] = str(tag.data) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + hi['meas_date'] = tuple(int(t) for t in tag.data) + info['helium_info'] = hi + del hi + + hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM) + hs = None + if len(hpi_subsystem) == 1: + hpi_subsystem = hpi_subsystem[0] + hs = dict() + for k in range(hpi_subsystem['nent']): + kind = hpi_subsystem['directory'][k].kind + pos = hpi_subsystem['directory'][k].pos + if kind == FIFF.FIFF_HPI_NCOIL: + tag = read_tag(fid, pos) + hs['ncoil'] = int(tag.data) + elif kind == FIFF.FIFF_EVENT_CHANNEL: + tag = read_tag(fid, pos) + hs['event_channel'] = str(tag.data) + hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL) + hc = [] + for coil in hpi_coils: + this_coil = dict() + for j in range(coil['nent']): + kind = coil['directory'][j].kind + pos = coil['directory'][j].pos + if kind == FIFF.FIFF_EVENT_BITS: + tag = read_tag(fid, pos) + this_coil['event_bits'] = np.array(tag.data) + hc.append(this_coil) + hs['hpi_coils'] = hc + info['hpi_subsystem'] = hs + + # Read processing history + info['proc_history'] = _read_proc_history(fid, tree) + + # Make the most appropriate selection for the measurement id + if meas_info['parent_id'] is None: + if meas_info['id'] is None: + if meas['id'] is None: + if meas['parent_id'] is None: + info['meas_id'] = info['file_id'] + else: + info['meas_id'] = meas['parent_id'] + else: + info['meas_id'] = meas['id'] + else: + info['meas_id'] = meas_info['id'] + else: + info['meas_id'] = meas_info['parent_id'] + info['experimenter'] = experimenter + info['description'] = description + info['proj_id'] = proj_id + info['proj_name'] = proj_name + if meas_date is None: + meas_date = (info['meas_id']['secs'], info['meas_id']['usecs']) + 
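The nested ifs above implement a first-non-None preference order for the measurement ID. Flattened out, the logic is equivalent to this sketch, where plain dicts stand in for the FIF tree nodes and file_id is assumed non-None as the last resort:

    def pick_meas_id(meas_info, meas, file_id):
        # Preference order: meas_info parent id, meas_info id,
        # meas id, meas parent id, then the file id.
        candidates = (meas_info['parent_id'], meas_info['id'],
                      meas['id'], meas['parent_id'], file_id)
        return next(c for c in candidates if c is not None)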
info['meas_date'] = _ensure_meas_date_none_or_dt(meas_date) + info['utc_offset'] = utc_offset + + info['sfreq'] = sfreq + info['highpass'] = highpass if highpass is not None else 0. + info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0 + info['line_freq'] = line_freq + info['gantry_angle'] = gantry_angle + + # Add the channel information and make a list of channel names + # for convenience + info['chs'] = chs + + # + # Add the coordinate transformations + # + info['dev_head_t'] = dev_head_t + info['ctf_head_t'] = ctf_head_t + info['dev_ctf_t'] = dev_ctf_t + if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: + from ..transforms import Transform + head_ctf_trans = np.linalg.inv(ctf_head_t['trans']) + dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans']) + info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans) + + # All kinds of auxiliary stuff + info['dig'] = _format_dig_points(dig) + info['bads'] = bads + info._update_redundant() + if clean_bads: + info['bads'] = [b for b in bads if b in info['ch_names']] + info['projs'] = projs + info['comps'] = comps + info['acq_pars'] = acq_pars + info['acq_stim'] = acq_stim + info['custom_ref_applied'] = custom_ref_applied + info['xplotter_layout'] = xplotter_layout + info['kit_system_id'] = kit_system_id + info._check_consistency() + info._unlocked = False + return info, meas + + +def _read_extended_ch_info(chs, parent, fid): + ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO) + if len(ch_infos) == 0: + return + _check_option('length of channel infos', len(ch_infos), [len(chs)]) + logger.info(' Reading extended channel information') + + # Here we assume that ``ch_infos`` is in the same order as the channels + # themselves, which is hopefully safe enough. + ch_names_mapping = dict() + for new, ch in zip(ch_infos, chs): + for k in range(new['nent']): + kind = new['directory'][k].kind + try: + key, cast = _CH_READ_MAP[kind] + except KeyError: + # This shouldn't happen if we're up to date with the FIFF + # spec + warn(f'Discarding extra channel information kind {kind}') + continue + assert key in ch + data = read_tag(fid, new['directory'][k].pos).data + if data is not None: + data = cast(data) + if key == 'ch_name': + ch_names_mapping[ch[key]] = data + ch[key] = data + _update_ch_info_named(ch) + # we need to return ch_names_mapping so that we can also rename the + # bad channels + return ch_names_mapping + + +def _rename_comps(comps, ch_names_mapping): + if not (comps and ch_names_mapping): + return + for comp in comps: + data = comp['data'] + for key in ('row_names', 'col_names'): + data[key][:] = _rename_list(data[key], ch_names_mapping) + + +def _ensure_meas_date_none_or_dt(meas_date): + if meas_date is None or np.array_equal(meas_date, DATE_NONE): + meas_date = None + elif not isinstance(meas_date, datetime.datetime): + meas_date = _stamp_to_dt(meas_date) + return meas_date + + +def _check_dates(info, prepend_error=''): + """Check dates before writing as fif files. + + It's needed because of the limited integer precision + of the FIF standard. 
+ """ + for key in ('file_id', 'meas_id'): + value = info.get(key) + if value is not None: + assert 'msecs' not in value + for key_2 in ('secs', 'usecs'): + if (value[key_2] < np.iinfo('>i4').min or + value[key_2] > np.iinfo('>i4').max): + raise RuntimeError('%sinfo[%s][%s] must be between ' + '"%r" and "%r", got "%r"' + % (prepend_error, key, key_2, + np.iinfo('>i4').min, + np.iinfo('>i4').max, + value[key_2]),) + + meas_date = info.get('meas_date') + if meas_date is None: + return + + meas_date_stamp = _dt_to_stamp(meas_date) + if (meas_date_stamp[0] < np.iinfo('>i4').min or + meas_date_stamp[0] > np.iinfo('>i4').max): + raise RuntimeError( + '%sinfo["meas_date"] seconds must be between "%r" ' + 'and "%r", got "%r"' + % (prepend_error, (np.iinfo('>i4').min, 0), + (np.iinfo('>i4').max, 0), meas_date_stamp[0],)) + + +@fill_doc +def write_meas_info(fid, info, data_type=None, reset_range=True): + """Write measurement info into a file id (from a fif file). + + Parameters + ---------- + fid : file + Open file descriptor. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. + + Notes + ----- + Tags are written in a particular order for compatibility with maxfilter. + """ + info._check_consistency() + _check_dates(info) + + # Measurement info + start_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Add measurement id + if info['meas_id'] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + + for event in info['events']: + start_block(fid, FIFF.FIFFB_EVENTS) + if event.get('channels') is not None: + write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels']) + if event.get('list') is not None: + write_int(fid, FIFF.FIFF_EVENT_LIST, event['list']) + end_block(fid, FIFF.FIFFB_EVENTS) + + # HPI Result + for hpi_result in info['hpi_results']: + start_block(fid, FIFF.FIFFB_HPI_RESULT) + write_dig_points(fid, hpi_result['dig_points']) + if 'order' in hpi_result: + write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER, + hpi_result['order']) + if 'used' in hpi_result: + write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used']) + if 'moments' in hpi_result: + write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS, + hpi_result['moments']) + if 'goodness' in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS, + hpi_result['goodness']) + if 'good_limit' in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT, + hpi_result['good_limit']) + if 'dist_limit' in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT, + hpi_result['dist_limit']) + if 'accept' in hpi_result: + write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept']) + if 'coord_trans' in hpi_result: + write_coord_trans(fid, hpi_result['coord_trans']) + end_block(fid, FIFF.FIFFB_HPI_RESULT) + + # HPI Measurement + for hpi_meas in info['hpi_meas']: + start_block(fid, FIFF.FIFFB_HPI_MEAS) + if hpi_meas.get('creator') is not None: + write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator']) + if hpi_meas.get('sfreq') is not None: + write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq']) + if hpi_meas.get('nchan') is not None: + write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan']) + if hpi_meas.get('nave') is not None: + write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave']) + if hpi_meas.get('ncoil') is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil']) + if hpi_meas.get('first_samp') is not None: + write_int(fid, 
FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp']) + if hpi_meas.get('last_samp') is not None: + write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp']) + for hpi_coil in hpi_meas['hpi_coils']: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if hpi_coil.get('number') is not None: + write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number']) + if hpi_coil.get('epoch') is not None: + write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch']) + if hpi_coil.get('slopes') is not None: + write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes']) + if hpi_coil.get('corr_coeff') is not None: + write_float(fid, FIFF.FIFF_HPI_CORR_COEFF, + hpi_coil['corr_coeff']) + if hpi_coil.get('coil_freq') is not None: + write_float(fid, FIFF.FIFF_HPI_COIL_FREQ, + hpi_coil['coil_freq']) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_MEAS) + + # Polhemus data + write_dig_points(fid, info['dig'], block=True) + + # megacq parameters + if info['acq_pars'] is not None or info['acq_stim'] is not None: + start_block(fid, FIFF.FIFFB_DACQ_PARS) + if info['acq_pars'] is not None: + write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars']) + + if info['acq_stim'] is not None: + write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim']) + + end_block(fid, FIFF.FIFFB_DACQ_PARS) + + # Coordinate transformations if the HPI result block was not there + if info['dev_head_t'] is not None: + write_coord_trans(fid, info['dev_head_t']) + + if info['ctf_head_t'] is not None: + write_coord_trans(fid, info['ctf_head_t']) + + if info['dev_ctf_t'] is not None: + write_coord_trans(fid, info['dev_ctf_t']) + + # Projectors + ch_names_mapping = _make_ch_names_mapping(info['chs']) + _write_proj(fid, info['projs'], ch_names_mapping=ch_names_mapping) + + # Bad channels + if len(info['bads']) > 0: + bads = _rename_list(info['bads'], ch_names_mapping) + start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads) + end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + + # General + if info.get('experimenter') is not None: + write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter']) + if info.get('description') is not None: + write_string(fid, FIFF.FIFF_DESCRIPTION, info['description']) + if info.get('proj_id') is not None: + write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id']) + if info.get('proj_name') is not None: + write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name']) + if info.get('meas_date') is not None: + write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date'])) + if info.get('utc_offset') is not None: + write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset']) + write_int(fid, FIFF.FIFF_NCHAN, info['nchan']) + write_float(fid, FIFF.FIFF_SFREQ, info['sfreq']) + if info['lowpass'] is not None: + write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass']) + if info['highpass'] is not None: + write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass']) + if info.get('line_freq') is not None: + write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq']) + if info.get('gantry_angle') is not None: + write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info['gantry_angle']) + if data_type is not None: + write_int(fid, FIFF.FIFF_DATA_PACK, data_type) + if info.get('custom_ref_applied'): + write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied']) + if info.get('xplotter_layout'): + write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout']) + + # Channel information + _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping) + + # Subject information + if info.get('subject_info') 
is not None: + start_block(fid, FIFF.FIFFB_SUBJECT) + si = info['subject_info'] + if si.get('id') is not None: + write_int(fid, FIFF.FIFF_SUBJ_ID, si['id']) + if si.get('his_id') is not None: + write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id']) + if si.get('last_name') is not None: + write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name']) + if si.get('first_name') is not None: + write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name']) + if si.get('middle_name') is not None: + write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name']) + if si.get('birthday') is not None: + write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday']) + if si.get('sex') is not None: + write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex']) + if si.get('hand') is not None: + write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand']) + if si.get('weight') is not None: + write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight']) + if si.get('height') is not None: + write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height']) + end_block(fid, FIFF.FIFFB_SUBJECT) + del si + + if info.get('device_info') is not None: + start_block(fid, FIFF.FIFFB_DEVICE) + di = info['device_info'] + write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type']) + for key in ('model', 'serial', 'site'): + if di.get(key) is not None: + write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()), + di[key]) + end_block(fid, FIFF.FIFFB_DEVICE) + del di + + if info.get('helium_info') is not None: + start_block(fid, FIFF.FIFFB_HELIUM) + hi = info['helium_info'] + if hi.get('he_level_raw') is not None: + write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw']) + if hi.get('helium_level') is not None: + write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level']) + if hi.get('orig_file_guid') is not None: + write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid']) + write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date']) + end_block(fid, FIFF.FIFFB_HELIUM) + del hi + + if info.get('hpi_subsystem') is not None: + hs = info['hpi_subsystem'] + start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + if hs.get('ncoil') is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil']) + if hs.get('event_channel') is not None: + write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel']) + if hs.get('hpi_coils') is not None: + for coil in hs['hpi_coils']: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if coil.get('event_bits') is not None: + write_int(fid, FIFF.FIFF_EVENT_BITS, + coil['event_bits']) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + del hs + + # CTF compensation info + comps = info['comps'] + if ch_names_mapping: + comps = deepcopy(comps) + _rename_comps(comps, ch_names_mapping) + write_ctf_comp(fid, comps) + + # KIT system ID + if info.get('kit_system_id') is not None: + write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id']) + + end_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Processing history + _write_proc_history(fid, info) + + +@fill_doc +def write_info(fname, info, data_type=None, reset_range=True): + """Write measurement info in fif file. + + Parameters + ---------- + fname : str + The name of the file. Should end by -info.fif. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. 
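A usage sketch for the public `write_info`, assuming the vendored `mne` package is importable; the file name is invented but follows the `-info.fif` convention the docstring asks for:

```python
import mne

# Round-trip a minimal Info object through disk.
info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=250., ch_types='eeg')
mne.io.write_info('sample-info.fif', info)
info_read = mne.io.read_info('sample-info.fif')
assert info_read['sfreq'] == 250.
assert info_read['ch_names'] == ['EEG 001', 'EEG 002']
```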
+ """ + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_MEAS) + write_meas_info(fid, info, data_type, reset_range) + end_block(fid, FIFF.FIFFB_MEAS) + + +@verbose +def _merge_info_values(infos, key, verbose=None): + """Merge things together. + + Fork for {'dict', 'list', 'array', 'other'} + and consider cases where one or all are of the same type. + + Does special things for "projs", "bads", and "meas_date". + """ + values = [d[key] for d in infos] + msg = ("Don't know how to merge '%s'. Make sure values are " + "compatible, got types:\n %s" + % (key, [type(v) for v in values])) + + def _flatten(lists): + return [item for sublist in lists for item in sublist] + + def _check_isinstance(values, kind, func): + return func([isinstance(v, kind) for v in values]) + + def _where_isinstance(values, kind): + """Get indices of instances.""" + return np.where([isinstance(v, type) for v in values])[0] + + # list + if _check_isinstance(values, list, all): + lists = (d[key] for d in infos) + if key == 'projs': + return _uniquify_projs(_flatten(lists)) + elif key == 'bads': + return sorted(set(_flatten(lists))) + else: + return _flatten(lists) + elif _check_isinstance(values, list, any): + idx = _where_isinstance(values, list) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + lists = (d[key] for d in infos if isinstance(d[key], list)) + return _flatten(lists) + # dict + elif _check_isinstance(values, dict, all): + is_qual = all(object_diff(values[0], v) == '' for v in values[1:]) + if is_qual: + return values[0] + else: + RuntimeError(msg) + elif _check_isinstance(values, dict, any): + idx = _where_isinstance(values, dict) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + raise RuntimeError(msg) + # ndarray + elif _check_isinstance(values, np.ndarray, all) or \ + _check_isinstance(values, tuple, all): + is_qual = all(np.array_equal(values[0], x) for x in values[1:]) + if is_qual: + return values[0] + elif key == 'meas_date': + logger.info('Found multiple entries for %s. ' + 'Setting value to `None`' % key) + return None + else: + raise RuntimeError(msg) + elif _check_isinstance(values, (np.ndarray, tuple), any): + idx = _where_isinstance(values, np.ndarray) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + raise RuntimeError(msg) + # other + else: + unique_values = set(values) + if len(unique_values) == 1: + return list(values)[0] + elif isinstance(list(unique_values)[0], BytesIO): + logger.info('Found multiple StringIO instances. ' + 'Setting value to `None`') + return None + elif isinstance(list(unique_values)[0], str): + logger.info('Found multiple filenames. ' + 'Setting value to `None`') + return None + else: + raise RuntimeError(msg) + + +@verbose +def _merge_info(infos, force_update_to_first=False, verbose=None): + """Merge multiple measurement info dictionaries. + + - Fields that are present in only one info object will be used in the + merged info. + - Fields that are present in multiple info objects and are the same + will be used in the merged info. + - Fields that are present in multiple info objects and are different + will result in a None value in the merged info. + - Channels will be concatenated. If multiple info objects contain + channels with the same name, an exception is raised. + + Parameters + ---------- + infos | list of instance of Info + Info objects to merge into one info object. 
+ force_update_to_first : bool + If True, force the fields for objects in `info` will be updated + to match those in the first item. Use at your own risk, as this + may overwrite important metadata. + %(verbose)s + + Returns + ------- + info : instance of Info + The merged info object. + """ + for info in infos: + info._check_consistency() + if force_update_to_first is True: + infos = deepcopy(infos) + _force_update_info(infos[0], infos[1:]) + info = Info() + info._unlocked = True + info['chs'] = [] + for this_info in infos: + info['chs'].extend(this_info['chs']) + info._update_redundant() + duplicates = {ch for ch in info['ch_names'] + if info['ch_names'].count(ch) > 1} + if len(duplicates) > 0: + msg = ("The following channels are present in more than one input " + "measurement info objects: %s" % list(duplicates)) + raise ValueError(msg) + + transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t'] + for trans_name in transforms: + trans = [i[trans_name] for i in infos if i[trans_name]] + if len(trans) == 0: + info[trans_name] = None + elif len(trans) == 1: + info[trans_name] = trans[0] + elif all(np.all(trans[0]['trans'] == x['trans']) and + trans[0]['from'] == x['from'] and + trans[0]['to'] == x['to'] + for x in trans[1:]): + info[trans_name] = trans[0] + else: + msg = ("Measurement infos provide mutually inconsistent %s" % + trans_name) + raise ValueError(msg) + + # KIT system-IDs + kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']] + if len(kit_sys_ids) == 0: + info['kit_system_id'] = None + elif len(set(kit_sys_ids)) == 1: + info['kit_system_id'] = kit_sys_ids[0] + else: + raise ValueError("Trying to merge channels from different KIT systems") + + # hpi infos and digitization data: + fields = ['hpi_results', 'hpi_meas', 'dig'] + for k in fields: + values = [i[k] for i in infos if i[k]] + if len(values) == 0: + info[k] = [] + elif len(values) == 1: + info[k] = values[0] + elif all(object_diff(values[0], v) == '' for v in values[1:]): + info[k] = values[0] + else: + msg = ("Measurement infos are inconsistent for %s" % k) + raise ValueError(msg) + + # other fields + other_fields = ['acq_pars', 'acq_stim', 'bads', + 'comps', 'custom_ref_applied', 'description', + 'experimenter', 'file_id', 'highpass', 'utc_offset', + 'hpi_subsystem', 'events', 'device_info', 'helium_info', + 'line_freq', 'lowpass', 'meas_id', + 'proj_id', 'proj_name', 'projs', 'sfreq', 'gantry_angle', + 'subject_info', 'sfreq', 'xplotter_layout', 'proc_history'] + + for k in other_fields: + info[k] = _merge_info_values(infos, k) + + info['meas_date'] = infos[0]['meas_date'] + info._unlocked = False + + return info + + +@verbose +def create_info(ch_names, sfreq, ch_types='misc', verbose=None): + """Create a basic Info instance suitable for use with create_raw. + + Parameters + ---------- + ch_names : list of str | int + Channel names. If an int, a list of channel names will be created + from ``range(ch_names)``. + sfreq : float + Sample rate of the data. + ch_types : list of str | str + Channel types, default is ``'misc'`` which is not a + :term:`data channel `. + Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', + 'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' + or 'hbo'. If str, then all channels are assumed to be of the same type. + %(verbose)s + + Returns + ------- + %(info_not_none)s + + Notes + ----- + The info dictionary will be sparsely populated to enable functionality + within the rest of the package. 
Advanced functionality such as source + localization can only be obtained through substantial, proper + modifications of the info structure (not recommended). + + Note that the MEG device-to-head transform ``info['dev_head_t']`` will + be initialized to the identity transform. + + Proper units of measure: + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + try: + ch_names = operator.index(ch_names) # int-like + except TypeError: + pass + else: + ch_names = list(np.arange(ch_names).astype(str)) + _validate_type(ch_names, (list, tuple), "ch_names", + ("list, tuple, or int")) + sfreq = float(sfreq) + if sfreq <= 0: + raise ValueError('sfreq must be positive') + nchan = len(ch_names) + if isinstance(ch_types, str): + ch_types = [ch_types] * nchan + ch_types = np.atleast_1d(np.array(ch_types, np.str_)) + if ch_types.ndim != 1 or len(ch_types) != nchan: + raise ValueError('ch_types and ch_names must be the same length ' + '(%s != %s) for ch_types=%s' + % (len(ch_types), nchan, ch_types)) + info = _empty_info(sfreq) + ch_types_dict = get_channel_type_constants(include_defaults=True) + for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): + _validate_type(ch_name, 'str', "each entry in ch_names") + _validate_type(ch_type, 'str', "each entry in ch_types") + if ch_type not in ch_types_dict: + raise KeyError(f'kind must be one of {list(ch_types_dict)}, ' + f'not {ch_type}') + this_ch_dict = ch_types_dict[ch_type] + kind = this_ch_dict['kind'] + # handle chpi, where kind is a *list* of FIFF constants: + kind = kind[0] if isinstance(kind, (list, tuple)) else kind + # mirror what tag.py does here + coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) + coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE) + unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE) + chan_info = dict(loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1., + kind=kind, coil_type=coil_type, unit=unit, + coord_frame=coord_frame, ch_name=str(ch_name), + scanno=ci + 1, logno=ci + 1) + info['chs'].append(chan_info) + + info._update_redundant() + info._check_consistency() + info._unlocked = False + return info + + +RAW_INFO_FIELDS = ( + 'acq_pars', 'acq_stim', 'bads', 'ch_names', 'chs', + 'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t', + 'dev_head_t', 'dig', 'experimenter', 'events', 'utc_offset', 'device_info', + 'file_id', 'highpass', 'hpi_meas', 'hpi_results', 'helium_info', + 'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date', + 'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq', + 'subject_info', 'xplotter_layout', 'proc_history', 'gantry_angle', +) + + +def _empty_info(sfreq): + """Create an empty info dictionary.""" + _none_keys = ( + 'acq_pars', 'acq_stim', 'ctf_head_t', 'description', + 'dev_ctf_t', 'dig', 'experimenter', 'utc_offset', 'device_info', + 'file_id', 'highpass', 'hpi_subsystem', 'kit_system_id', 'helium_info', + 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name', + 'subject_info', 'xplotter_layout', 'gantry_angle', + ) + _list_keys = ('bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results', + 'projs', 'proc_history') + info = Info() + info._unlocked = True + for k in _none_keys: + info[k] = None + for k in _list_keys: + info[k] = list() + info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + info['highpass'] = 0. + info['sfreq'] = float(sfreq) + info['lowpass'] = info['sfreq'] / 2. 
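The defaults set up by `create_info` and `_empty_info` above can be checked quickly; channel names are invented and the vendored `mne` package is assumed importable:

```python
import mne

info = mne.create_info(['Fz', 'Cz', 'ECG 01'], sfreq=500.,
                       ch_types=['eeg', 'eeg', 'ecg'])
assert info['nchan'] == 3
assert info['highpass'] == 0.          # _empty_info default
assert info['lowpass'] == 250.         # sfreq / 2
```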
+ info['dev_head_t'] = Transform('meg', 'head') + info._update_redundant() + info._check_consistency() + return info + + +def _force_update_info(info_base, info_target): + """Update target info objects with values from info base. + + Note that values in info_target will be overwritten by those in info_base. + This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'. + + Parameters + ---------- + info_base : mne.Info + The Info object you want to use for overwriting values + in target Info objects. + info_target : mne.Info | list of mne.Info + The Info object(s) you wish to overwrite using info_base. These objects + will be modified in-place. + """ + exclude_keys = ['chs', 'ch_names', 'nchan'] + info_target = np.atleast_1d(info_target).ravel() + all_infos = np.hstack([info_base, info_target]) + for ii in all_infos: + if not isinstance(ii, Info): + raise ValueError('Inputs must be of type Info. ' + 'Found type %s' % type(ii)) + for key, val in info_base.items(): + if key in exclude_keys: + continue + for i_targ in info_target: + with i_targ._unlock(): + i_targ[key] = val + + +def _add_timedelta_to_stamp(meas_date_stamp, delta_t): + """Add a timedelta to a meas_date tuple.""" + if meas_date_stamp is not None: + meas_date_stamp = _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t) + return meas_date_stamp + + +@verbose +def anonymize_info(info, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + .. warning:: If ``info`` is part of an object like + :class:`raw.info `, you should directly use + the method :meth:`raw.anonymize() ` + to ensure that all parts of the data are anonymized and + stay synchronized (e.g., + :class:`raw.annotations `). + + Parameters + ---------- + %(info_not_none)s + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + info : instance of Info + The anonymized measurement information. + + Notes + ----- + %(anonymize_info_notes)s + """ + _validate_type(info, 'info', "self") + + default_anon_dos = datetime.datetime(2000, 1, 1, 0, 0, 0, + tzinfo=datetime.timezone.utc) + default_str = "mne_anonymize" + default_subject_id = 0 + default_sex = 0 + default_desc = ("Anonymized using a time shift" + " to preserve age at acquisition") + + none_meas_date = info['meas_date'] is None + + if none_meas_date: + if daysback is not None: + warn('Input info has "meas_date" set to None. 
' + 'Removing all information from time/date structures, ' + '*NOT* performing any time shifts!') + else: + # compute timeshift delta + if daysback is None: + delta_t = info['meas_date'] - default_anon_dos + else: + delta_t = datetime.timedelta(days=daysback) + with info._unlock(): + info['meas_date'] = info['meas_date'] - delta_t + + # file_id and meas_id + for key in ('file_id', 'meas_id'): + value = info.get(key) + if value is not None: + assert 'msecs' not in value + if (none_meas_date or + ((value['secs'], value['usecs']) == DATE_NONE)): + # Don't try to shift backwards in time when no measurement + # date is available or when file_id is already a place holder + tmp = DATE_NONE + else: + tmp = _add_timedelta_to_stamp( + (value['secs'], value['usecs']), -delta_t) + value['secs'] = tmp[0] + value['usecs'] = tmp[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value['machid'].copy() + _tmp[:] = 0 + value['machid'] = _tmp + + # subject info + subject_info = info.get('subject_info') + if subject_info is not None: + if subject_info.get('id') is not None: + subject_info['id'] = default_subject_id + if keep_his: + logger.info('Not fully anonymizing info - keeping ' + 'his_id, sex, and hand info') + else: + if subject_info.get('his_id') is not None: + subject_info['his_id'] = str(default_subject_id) + if subject_info.get('sex') is not None: + subject_info['sex'] = default_sex + if subject_info.get('hand') is not None: + del subject_info['hand'] # there's no "unknown" setting + + for key in ('last_name', 'first_name', 'middle_name'): + if subject_info.get(key) is not None: + subject_info[key] = default_str + + # anonymize the subject birthday + if none_meas_date: + subject_info.pop('birthday', None) + elif subject_info.get('birthday') is not None: + dob = datetime.datetime(subject_info['birthday'][0], + subject_info['birthday'][1], + subject_info['birthday'][2]) + dob -= delta_t + subject_info['birthday'] = dob.year, dob.month, dob.day + + for key in ('weight', 'height'): + if subject_info.get(key) is not None: + subject_info[key] = 0 + + info['experimenter'] = default_str + info['description'] = default_desc + with info._unlock(): + if info['proj_id'] is not None: + info['proj_id'] = np.zeros_like(info['proj_id']) + if info['proj_name'] is not None: + info['proj_name'] = default_str + if info['utc_offset'] is not None: + info['utc_offset'] = None + + proc_hist = info.get('proc_history') + if proc_hist is not None: + for record in proc_hist: + record['block_id']['machid'][:] = 0 + record['experimenter'] = default_str + if none_meas_date: + record['block_id']['secs'] = DATE_NONE[0] + record['block_id']['usecs'] = DATE_NONE[1] + record['date'] = DATE_NONE + else: + this_t0 = (record['block_id']['secs'], + record['block_id']['usecs']) + this_t1 = _add_timedelta_to_stamp( + this_t0, -delta_t) + record['block_id']['secs'] = this_t1[0] + record['block_id']['usecs'] = this_t1[1] + record['date'] = _add_timedelta_to_stamp( + record['date'], -delta_t) + + hi = info.get('helium_info') + if hi is not None: + if hi.get('orig_file_guid') is not None: + hi['orig_file_guid'] = default_str + if none_meas_date and hi.get('meas_date') is not None: + hi['meas_date'] = DATE_NONE + elif hi.get('meas_date') is not None: + hi['meas_date'] = _add_timedelta_to_stamp( + hi['meas_date'], -delta_t) + + di = info.get('device_info') + if di is not None: + for k in ('serial', 'site'): + if di.get(k) is not None: + di[k] = default_str + + err_mesg = 
('anonymize_info generated an inconsistent info object. ' + 'Underlying Error:\n') + info._check_consistency(prepend_error=err_mesg) + err_mesg = ('anonymize_info generated an inconsistent info object. ' + 'daysback parameter was too large. ' + 'Underlying Error:\n') + _check_dates(info, prepend_error=err_mesg) + + return info + + +@fill_doc +def _bad_chans_comp(info, ch_names): + """Check if channel names are consistent with current compensation status. + + Parameters + ---------- + %(info_not_none)s + + ch_names : list of str + The channel names to check. + + Returns + ------- + status : bool + True if compensation is *currently* in use but some compensation + channels are not included in picks + + False if compensation is *currently* not being used + or if compensation is being used and all compensation channels + in info and included in picks. + + missing_ch_names: array-like of str, shape (n_missing,) + The names of compensation channels not included in picks. + Returns [] if no channels are missing. + + """ + if 'comps' not in info: + # should this be thought of as a bug? + return False, [] + + # only include compensation channels that would affect selected channels + ch_names_s = set(ch_names) + comp_names = [] + for comp in info['comps']: + if len(ch_names_s.intersection(comp['data']['row_names'])) > 0: + comp_names.extend(comp['data']['col_names']) + comp_names = sorted(set(comp_names)) + + missing_ch_names = sorted(set(comp_names).difference(ch_names)) + + if get_current_comp(info) != 0 and len(missing_ch_names) > 0: + return True, missing_ch_names + + return False, missing_ch_names + + +_DIG_CAST = dict( + kind=int, ident=int, r=lambda x: x, coord_frame=int) +# key -> const, cast, write +_CH_INFO_MAP = OrderedDict( + scanno=(FIFF.FIFF_CH_SCAN_NO, int, write_int), + logno=(FIFF.FIFF_CH_LOGICAL_NO, int, write_int), + kind=(FIFF.FIFF_CH_KIND, int, write_int), + range=(FIFF.FIFF_CH_RANGE, float, write_float), + cal=(FIFF.FIFF_CH_CAL, float, write_float), + coil_type=(FIFF.FIFF_CH_COIL_TYPE, int, write_int), + loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float), + unit=(FIFF.FIFF_CH_UNIT, int, write_int), + unit_mul=(FIFF.FIFF_CH_UNIT_MUL, int, write_int), + ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string), + coord_frame=(FIFF.FIFF_CH_COORD_FRAME, int, write_int), +) +# key -> cast +_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items()) +# const -> key, cast +_CH_READ_MAP = OrderedDict((val[0], (key, val[1])) + for key, val in _CH_INFO_MAP.items()) + + +@contextlib.contextmanager +def _writing_info_hdf5(info): + # Make info writing faster by packing chs and dig into numpy arrays + orig_dig = info.get('dig', None) + orig_chs = info['chs'] + with info._unlock(): + try: + if orig_dig is not None and len(orig_dig) > 0: + info['dig'] = _dict_pack(info['dig'], _DIG_CAST) + info['chs'] = _dict_pack(info['chs'], _CH_CAST) + info['chs']['ch_name'] = np.char.encode( + info['chs']['ch_name'], encoding='utf8') + yield + finally: + if orig_dig is not None: + info['dig'] = orig_dig + info['chs'] = orig_chs + + +def _dict_pack(obj, casts): + # pack a list of dict into dict of array + return {key: np.array([o[key] for o in obj]) for key in casts} + + +def _dict_unpack(obj, casts): + # unpack a dict of array into a list of dict + n = len(obj[list(casts)[0]]) + return [{key: cast(obj[key][ii]) for key, cast in casts.items()} + for ii in range(n)] + + +def _make_ch_names_mapping(chs): + orig_ch_names = [c['ch_name'] for c in chs] + ch_names = orig_ch_names.copy() + 
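`_dict_pack` and `_dict_unpack` above convert between a list of per-channel dicts and a dict of arrays; a self-contained round-trip with a reduced cast table standing in for `_CH_CAST`:

```python
import numpy as np

casts = dict(kind=int, cal=float)                  # reduced _CH_CAST stand-in
chs = [dict(kind=2, cal=1.0), dict(kind=3, cal=0.5)]

packed = {key: np.array([c[key] for c in chs]) for key in casts}
unpacked = [{key: cast(packed[key][ii]) for key, cast in casts.items()}
            for ii in range(len(packed['kind']))]
assert unpacked == chs
```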
_unique_channel_names(ch_names, max_length=15, verbose='error') + ch_names_mapping = dict() + if orig_ch_names != ch_names: + ch_names_mapping.update(zip(orig_ch_names, ch_names)) + return ch_names_mapping + + +def _write_ch_infos(fid, chs, reset_range, ch_names_mapping): + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + for k, c in enumerate(chs): + # Scan numbers may have been messed up + c = c.copy() + c['ch_name'] = ch_names_mapping.get(c['ch_name'], c['ch_name']) + assert len(c['ch_name']) <= 15 + c['scanno'] = k + 1 + # for float/double, the "range" param is unnecessary + if reset_range: + c['range'] = 1.0 + write_ch_info(fid, c) + # only write new-style channel information if necessary + if len(ch_names_mapping): + logger.info( + ' Writing channel names to FIF truncated to 15 characters ' + 'with remapping') + for ch in chs: + start_block(fid, FIFF.FIFFB_CH_INFO) + assert set(ch) == set(_CH_INFO_MAP) + for (key, (const, _, write)) in _CH_INFO_MAP.items(): + write(fid, const, ch[key]) + end_block(fid, FIFF.FIFFB_CH_INFO) + + +def _ensure_infos_match(info1, info2, name, *, on_mismatch='raise'): + """Check if infos match. + + Parameters + ---------- + info1, info2 : instance of Info + The infos to compare. + name : str + The name of the object appearing in the error message of the comparison + fails. + on_mismatch : 'raise' | 'warn' | 'ignore' + What to do in case of a mismatch of ``dev_head_t`` between ``info1`` + and ``info2``. + """ + _check_on_missing(on_missing=on_mismatch, name='on_mismatch') + + info1._check_consistency() + info2._check_consistency() + + if info1['nchan'] != info2['nchan']: + raise ValueError(f'{name}.info[\'nchan\'] must match') + if set(info1['bads']) != set(info2['bads']): + raise ValueError(f'{name}.info[\'bads\'] must match') + if info1['sfreq'] != info2['sfreq']: + raise ValueError(f'{name}.info[\'sfreq\'] must match') + if set(info1['ch_names']) != set(info2['ch_names']): + raise ValueError(f'{name}.info[\'ch_names\'] must match') + if len(info2['projs']) != len(info1['projs']): + raise ValueError(f'SSP projectors in {name} must be the same') + if any(not _proj_equal(p1, p2) for p1, p2 in + zip(info2['projs'], info1['projs'])): + raise ValueError(f'SSP projectors in {name} must be the same') + if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \ + (info1['dev_head_t'] is not None and not + np.allclose(info1['dev_head_t']['trans'], + info2['dev_head_t']['trans'], rtol=1e-6)): + msg = (f"{name}.info['dev_head_t'] differs. The " + f"instances probably come from different runs, and " + f"are therefore associated with different head " + f"positions. Manually change info['dev_head_t'] to " + f"avoid this message but beware that this means the " + f"MEG sensors will not be properly spatially aligned. 
" + f"See mne.preprocessing.maxwell_filter to realign the " + f"runs to a common head position.") + _on_missing(on_missing=on_mismatch, msg=msg, + name='on_mismatch') diff --git a/python/libs/mne/io/nedf/__init__.py b/python/libs/mne/io/nedf/__init__.py new file mode 100644 index 0000000..7176694 --- /dev/null +++ b/python/libs/mne/io/nedf/__init__.py @@ -0,0 +1,7 @@ +"""NEDF file import module.""" + +# Author: Tristan Stenner +# +# License: BSD-3-Clause + +from .nedf import read_raw_nedf, _parse_nedf_header diff --git a/python/libs/mne/io/nedf/nedf.py b/python/libs/mne/io/nedf/nedf.py new file mode 100644 index 0000000..118d3bc --- /dev/null +++ b/python/libs/mne/io/nedf/nedf.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +"""Import NeuroElectrics DataFormat (NEDF) files.""" + +from copy import deepcopy +from datetime import datetime, timezone +from xml.etree import ElementTree + +import numpy as np + +from ..base import BaseRaw +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...utils import warn, verbose, _check_fname + + +def _getsubnodetext(node, name): + """Get an element from an XML node, raise an error otherwise. + + Parameters + ---------- + node: Element + XML Element + name: str + Child element name + + Returns + ------- + test: str + Text contents of the child nodes + """ + subnode = node.findtext(name) + if not subnode: + raise RuntimeError('NEDF header ' + name + ' not found') + return subnode + + +def _parse_nedf_header(header): + """Read header information from the first 10kB of an .nedf file. + + Parameters + ---------- + header : bytes + Null-terminated header data, mostly the file's first 10240 bytes. + + Returns + ------- + info : dict + A dictionary with header information. + dt : numpy.dtype + Structure of the binary EEG/accelerometer/trigger data in the file. + n_samples : int + The number of data samples. 
+ """ + info = {} + # nedf files have three accelerometer channels sampled at 100Hz followed + # by five EEG samples + TTL trigger sampled at 500Hz + # For 32 EEG channels and no stim channels, the data layout may look like + # [ ('acc', '>u2', (3,)), + # ('data', dtype([ + # ('eeg', 'u1', (32, 3)), + # ('trig', '>i4', (1,)) + # ]), (5,)) + # ] + + dt = [] # dtype for the binary data block + datadt = [] # dtype for a single EEG sample + + headerend = header.find(b'\0') + if headerend == -1: + raise RuntimeError('End of header null not found') + headerxml = ElementTree.fromstring(header[:headerend]) + nedfversion = headerxml.findtext('NEDFversion', '') + if nedfversion not in ['1.3', '1.4']: + warn('NEDFversion unsupported, use with caution') + + if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM': + warn('Found Starstim, this hasn\'t been tested extensively!') + + if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF': + raise RuntimeError('Unknown additional channel, aborting.') + + n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0)) + if n_acc: + # expect one sample of u16 accelerometer data per block + dt.append(('acc', '>u2', (n_acc,))) + + eegset = headerxml.find('EEGSettings') + if eegset is None: + raise RuntimeError('No EEG channels found') + nchantotal = int(_getsubnodetext(eegset, 'TotalNumberOfChannels')) + info['nchan'] = nchantotal + + info['sfreq'] = int(_getsubnodetext(eegset, 'EEGSamplingRate')) + info['ch_names'] = [e.text for e in eegset.find('EEGMontage')] + if nchantotal != len(info['ch_names']): + raise RuntimeError( + f"TotalNumberOfChannels ({nchantotal}) != " + f"channel count ({len(info['ch_names'])})") + # expect nchantotal uint24s + datadt.append(('eeg', 'B', (nchantotal, 3))) + + if headerxml.find('STIMSettings') is not None: + # 2* -> two stim samples per eeg sample + datadt.append(('stim', 'B', (2, nchantotal, 3))) + warn('stim channels are currently ignored') + + # Trigger data: 4 bytes in newer versions, 1 byte in older versions + trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B' + datadt.append(('trig', trigger_type)) + # 5 data samples per block + dt.append(('data', np.dtype(datadt), (5,))) + + date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', 0) + info['meas_date'] = datetime.fromtimestamp(int(date) / 1000, timezone.utc) + + n_samples = int(_getsubnodetext(eegset, 'NumberOfRecordsOfEEG')) + n_full, n_last = divmod(n_samples, 5) + dt_last = deepcopy(dt) + assert dt_last[-1][-1] == (5,) + dt_last[-1] = list(dt_last[-1]) + dt_last[-1][-1] = (n_last,) + dt_last[-1] = tuple(dt_last[-1]) + return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full + + +# the first 10240 bytes are header in XML format, padded with NULL bytes +_HDRLEN = 10240 + + +class RawNedf(BaseRaw): + """Raw object from NeuroElectrics nedf file.""" + + def __init__(self, filename, preload=False, verbose=None): + filename = _check_fname(filename, 'read', True, 'filename') + with open(filename, mode='rb') as fid: + header = fid.read(_HDRLEN) + header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header) + ch_names = header['ch_names'] + ['STI 014'] + ch_types = ['eeg'] * len(ch_names) + ch_types[-1] = 'stim' + info = create_info(ch_names, header['sfreq'], ch_types) + # scaling factor ADC-values -> volts + # taken from the NEDF EEGLAB plugin + # (https://www.neuroelectrics.com/resources/software/): + for ch in info['chs'][:-1]: + ch['cal'] = 2.4 / (6.0 * 8388607) + with info._unlock(): + info['meas_date'] = 
header['meas_date'] + raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full) + super().__init__( + info, preload=preload, filenames=[filename], verbose=verbose, + raw_extras=[raw_extra], last_samps=[n_samp - 1]) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + dt = self._raw_extras[fi]['dt'] + dt_last = self._raw_extras[fi]['dt_last'] + n_full = self._raw_extras[fi]['n_full'] + n_eeg = dt[1].subdtype[0][0].shape[0] + # data is stored in 5-sample chunks (except maybe the last one!) + # so we have to do some gymnastics to pick the correct parts to + # read + offset = start // 5 * dt.itemsize + _HDRLEN + start_sl = start % 5 + n_samples = stop - start + n_samples_full = min(stop, n_full * 5) - start + last = None + n_chunks = (n_samples_full - 1) // 5 + 1 + n_tot = n_chunks * 5 + with open(self._filenames[fi], 'rb') as fid: + fid.seek(offset, 0) + chunks = np.fromfile(fid, dtype=dt, count=n_chunks) + assert len(chunks) == n_chunks + if n_samples != n_samples_full: + last = np.fromfile(fid, dtype=dt_last, count=1) + eeg = _convert_eeg(chunks, n_eeg, n_tot) + trig = chunks['data']['trig'].reshape(1, n_tot) + if last is not None: + n_last = dt_last['data'].shape[0] + eeg = np.concatenate( + (eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) + trig = np.concatenate( + (trig, last['data']['trig'].reshape(1, n_last)), axis=-1) + one_ = np.concatenate((eeg, trig)) + one = one_[:, start_sl:n_samples + start_sl] + _mult_cal_one(data, one, idx, cals, mult) + + +def _convert_eeg(chunks, n_eeg, n_tot): + # convert uint8-triplet -> int32 + eeg = chunks['data']['eeg'] @ np.array([1 << 16, 1 << 8, 1]) + # convert sign if necessary + eeg[eeg > (1 << 23)] -= 1 << 24 + eeg = eeg.reshape((n_tot, n_eeg)).T + return eeg + + +@verbose +def read_raw_nedf(filename, preload=False, verbose=None): + """Read NeuroElectrics .nedf files. + + NEDF file versions starting from 1.3 are supported. + + Parameters + ---------- + filename : str + Path to the .nedf file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawNedf + A Raw object containing NEDF data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
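The uint8-triplet decoding in `_convert_eeg` is worth seeing in isolation; two hand-made samples show the big-endian weighting and the two's-complement sign fix:

```python
import numpy as np

triplets = np.array([[0x00, 0x00, 0x01],    # +1
                     [0xFF, 0xFF, 0xFF]])   # -1 in 24-bit two's complement
vals = triplets @ np.array([1 << 16, 1 << 8, 1])
vals[vals > (1 << 23)] -= 1 << 24           # same sign correction as above
assert list(vals) == [1, -1]
```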
+ """ + return RawNedf(filename, preload, verbose) diff --git a/python/libs/mne/io/nedf/tests/__init__.py b/python/libs/mne/io/nedf/tests/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/python/libs/mne/io/nedf/tests/__init__.py @@ -0,0 +1 @@ + diff --git a/python/libs/mne/io/nedf/tests/test_nedf.py b/python/libs/mne/io/nedf/tests/test_nedf.py new file mode 100644 index 0000000..1c4bdc4 --- /dev/null +++ b/python/libs/mne/io/nedf/tests/test_nedf.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +"""Test reading of NEDF format.""" +# Author: Tristan Stenner +# +# License: BSD-3-Clause + +import os.path as op + +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from mne import find_events +from mne.io.constants import FIFF +from mne.io.nedf import read_raw_nedf, _parse_nedf_header +from mne.datasets import testing +from mne.io.tests.test_raw import _test_raw_reader + +eeg_path = testing.data_path(download=False, verbose=True) +eegfile = op.join(eeg_path, 'nedf', 'testdata.nedf') + +stimhdr = b""" + + 1.3 + %d + + 4 + 500 + ABCD + 11 + + +\x00""" + + +@pytest.mark.parametrize('nacc', (0, 3)) +def test_nedf_header_parser(nacc): + """Test NEDF header parsing and dtype extraction.""" + with pytest.warns(RuntimeWarning, match='stim channels.*ignored'): + info, dt, dt_last, n_samples, n_full = _parse_nedf_header( + stimhdr % nacc) + assert n_samples == 11 + assert n_full == 2 + nchan = 4 + assert info['nchan'] == nchan + assert dt.itemsize == 200 + nacc * 2 + if nacc: + assert dt.names[0] == 'acc' + assert dt['acc'].shape == (nacc,) + + assert dt['data'].shape == (5,) # blocks of 5 EEG samples each + assert dt_last['data'].shape == (1,) # plus one last extra one + + eegsampledt = dt['data'].subdtype[0] + assert eegsampledt.names == ('eeg', 'stim', 'trig') + assert eegsampledt['eeg'].shape == (nchan, 3) + assert eegsampledt['stim'].shape == (2, nchan, 3) + + +def test_invalid_headers(): + """Test that invalid headers raise exceptions.""" + tpl = b""" + 1.3 + + %s + ABCD + + \x00""" + nchan = b'4' + sr = b'500' + hdr = { + 'null': + b'No null terminator', + 'Unknown additional': + (b'1.3' + + b'???\x00'), # noqa: E501 + 'No EEG channels found': + b'1.3\x00', + 'TotalNumberOfChannels not found': + tpl % b'No nchan.', + '!= channel count': + tpl % (sr + b'52'), + 'EEGSamplingRate not found': + tpl % nchan, + 'NumberOfRecordsOfEEG not found': + tpl % (sr + nchan), + } + for match, invalid_hdr in hdr.items(): + with pytest.raises(RuntimeError, match=match): + _parse_nedf_header(invalid_hdr) + + sus_hdrs = { + 'unsupported': b'25\x00', + 'tested': ( + b'1.3' + + b'STARSTIM\x00'), + } + for match, sus_hdr in sus_hdrs.items(): + with pytest.warns(RuntimeWarning, match=match): + with pytest.raises(RuntimeError, match='No EEG channels found'): + _parse_nedf_header(sus_hdr) + + +@testing.requires_testing_data +def test_nedf_data(): + """Test reading raw NEDF files.""" + raw = read_raw_nedf(eegfile) + nsamples = len(raw) + assert nsamples == 32538 + + events = find_events(raw, shortest_event=1) + assert len(events) == 4 + assert_array_equal(events[:, 2], [1, 1, 1, 1]) + onsets = events[:, 0] / raw.info['sfreq'] + assert raw.info['sfreq'] == 500 + + data_end = raw.get_data('Fp1', nsamples - 100, nsamples).mean() + assert_allclose(data_end, .0176, atol=.01) + assert_allclose(raw.get_data('Fpz', 0, 100).mean(), .0185, atol=.01) + + assert_allclose(onsets, [22.384, 38.238, 49.496, 63.15]) + assert raw.info['meas_date'].year == 2019 + assert raw.ch_names[2] == 
'AF7' + + for ch in raw.info['chs'][:-1]: + assert ch['kind'] == FIFF.FIFFV_EEG_CH + assert ch['unit'] == FIFF.FIFF_UNIT_V + assert raw.info['chs'][-1]['kind'] == FIFF.FIFFV_STIM_CH + assert raw.info['chs'][-1]['unit'] == FIFF.FIFF_UNIT_V + + # full tests + _test_raw_reader(read_raw_nedf, filename=eegfile) diff --git a/python/libs/mne/io/nicolet/__init__.py b/python/libs/mne/io/nicolet/__init__.py new file mode 100644 index 0000000..c3253d3 --- /dev/null +++ b/python/libs/mne/io/nicolet/__init__.py @@ -0,0 +1,7 @@ +"""Nicolet module for conversion to FIF.""" + +# Author: Jaakko Leppakangas + +# +# License: BSD-3-Clause + +from .nicolet import read_raw_nicolet diff --git a/python/libs/mne/io/nicolet/nicolet.py b/python/libs/mne/io/nicolet/nicolet.py new file mode 100644 index 0000000..cbecdd7 --- /dev/null +++ b/python/libs/mne/io/nicolet/nicolet.py @@ -0,0 +1,176 @@ +# Author: Jaakko Leppakangas + +# +# License: BSD-3-Clause + +import numpy as np +from os import path +import datetime +import calendar + +from ...utils import logger, fill_doc +from ..utils import _read_segments_file, _find_channels, _create_chs +from ..base import BaseRaw +from ..meas_info import _empty_info +from ..constants import FIFF + + +@fill_doc +def read_raw_nicolet(input_fname, ch_type, eog=(), + ecg=(), emg=(), misc=(), preload=False, verbose=None): + """Read Nicolet data as raw object. + + .. note:: This reader takes data files with the extension ``.data`` as an + input. The header file with the same file name stem and an + extension ``.head`` is expected to be found in the same + directory. + + Parameters + ---------- + input_fname : str + Path to the data file (ending with ``.data`` not ``.head``). + ch_type : str + Channel type to designate to the data channels. Supported data types + include 'eeg', 'seeg'. + eog : list | tuple | 'auto' + Names of channels or list of indices that should be designated + EOG channels. If 'auto', the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + ecg : list or tuple | 'auto' + Names of channels or list of indices that should be designated + ECG channels. If 'auto', the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list or tuple | 'auto' + Names of channels or list of indices that should be designated + EMG channels. If 'auto', the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + misc : list or tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + A Raw object containing the data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawNicolet(input_fname, ch_type, eog=eog, ecg=ecg, + emg=emg, misc=misc, preload=preload, verbose=verbose) + + +def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc): + """Extract info from Nicolet header files.""" + fname, extension = path.splitext(fname) + + if extension != '.data': + raise ValueError( + f'File name should end with .data not "{extension}".'
+ ) + + header = fname + '.head' + + logger.info('Reading header...') + header_info = dict() + with open(header, 'r') as fid: + for line in fid: + var, value = line.split('=') + if var == 'elec_names': + value = value[1:-2].split(',') # strip brackets + elif var == 'conversion_factor': + value = float(value) + elif var in ['num_channels', 'rec_id', 'adm_id', 'pat_id', + 'num_samples']: + value = int(value) + elif var != 'start_ts': + value = float(value) + header_info[var] = value + + ch_names = header_info['elec_names'] + if eog == 'auto': + eog = _find_channels(ch_names, 'EOG') + if ecg == 'auto': + ecg = _find_channels(ch_names, 'ECG') + if emg == 'auto': + emg = _find_channels(ch_names, 'EMG') + + date, time = header_info['start_ts'].split() + date = date.split('-') + time = time.split(':') + sec, msec = time[2].split('.') + date = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), + int(time[0]), int(time[1]), int(sec), int(msec)) + info = _empty_info(header_info['sample_freq']) + info['meas_date'] = (calendar.timegm(date.utctimetuple()), 0) + + if ch_type == 'eeg': + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + elif ch_type == 'seeg': + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_SEEG_CH + else: + raise TypeError("Channel type not recognized. Available types are " + "'eeg' and 'seeg'.") + cals = np.repeat(header_info['conversion_factor'] * 1e-6, len(ch_names)) + info['chs'] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, + misc) + info['highpass'] = 0. + info['lowpass'] = info['sfreq'] / 2.0 + info._unlocked = False + info._update_redundant() + return info, header_info + + +class RawNicolet(BaseRaw): + """Raw object from Nicolet file. + + Parameters + ---------- + input_fname : str + Path to the Nicolet file. + ch_type : str + Channel type to designate to the data channels. Supported data types + include 'eeg', 'seeg'. + eog : list | tuple | 'auto' + Names of channels or list of indices that should be designated + EOG channels. If 'auto', the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + ecg : list or tuple | 'auto' + Names of channels or list of indices that should be designated + ECG channels. If 'auto', the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list or tuple | 'auto' + Names of channels or list of indices that should be designated + EMG channels. If 'auto', the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + misc : list or tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
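The `.head` parsing loop in `_get_nicolet_info` is plain key=value splitting with per-field casts; a toy run on an in-memory header, with invented field values:

```python
lines = ['start_ts=2001-01-01 12:00:00.000\n',
         'num_samples=1000\n',
         'sample_freq=512.0\n',
         'elec_names=[Fp1,Fp2,ECG]\n',
         'conversion_factor=0.179\n']
header_info = {}
for line in lines:
    var, value = line.split('=')
    if var == 'elec_names':
        value = value[1:-2].split(',')       # strip brackets and newline
    elif var in ('num_channels', 'rec_id', 'adm_id', 'pat_id', 'num_samples'):
        value = int(value)
    elif var != 'start_ts':
        value = float(value)
    header_info[var] = value
assert header_info['elec_names'] == ['Fp1', 'Fp2', 'ECG']
assert header_info['num_samples'] == 1000
```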
+ """ + + def __init__(self, input_fname, ch_type, eog=(), + ecg=(), emg=(), misc=(), preload=False, + verbose=None): # noqa: D102 + input_fname = path.abspath(input_fname) + info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg, + emg, misc) + last_samps = [header_info['num_samples'] - 1] + super(RawNicolet, self).__init__( + info, preload, filenames=[input_fname], raw_extras=[header_info], + last_samps=last_samps, orig_format='int', + verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=' +# +# License: BSD-3-Clause + +import os.path as op +import inspect + +from mne.io import read_raw_nicolet +from mne.io.tests.test_raw import _test_raw_reader + +import pytest + +FILE = inspect.getfile(inspect.currentframe()) +base_dir = op.join(op.dirname(op.abspath(FILE)), 'data') +fname_data = op.join(base_dir, 'test_nicolet_raw.data') +fname_head = op.join(base_dir, 'test_nicolet_raw.head') + + +def test_data(): + """Test reading raw nicolet files.""" + _test_raw_reader(read_raw_nicolet, input_fname=fname_data, ch_type='eeg', + ecg='auto', eog='auto', emg='auto', misc=['PHO']) + + with pytest.raises(ValueError, + match='File name should end with .data not ".head".'): + read_raw_nicolet(fname_head, 'eeg') diff --git a/python/libs/mne/io/nihon/__init__.py b/python/libs/mne/io/nihon/__init__.py new file mode 100644 index 0000000..09a735e --- /dev/null +++ b/python/libs/mne/io/nihon/__init__.py @@ -0,0 +1,7 @@ +"""Nihon Kohden module for conversion to FIF.""" + +# Author: Fede Raimondo +# +# License: BSD-3-Clause + +from .nihon import read_raw_nihon diff --git a/python/libs/mne/io/nihon/nihon.py b/python/libs/mne/io/nihon/nihon.py new file mode 100644 index 0000000..00318a1 --- /dev/null +++ b/python/libs/mne/io/nihon/nihon.py @@ -0,0 +1,476 @@ +# Authors: Federico Raimondo +# +# License: BSD-3-Clause + +from collections import OrderedDict +from datetime import datetime, timezone +from pathlib import Path + +import numpy as np + +from ...utils import fill_doc, logger, verbose, warn, _check_fname +from ..base import BaseRaw +from ..meas_info import create_info +from ...annotations import Annotations +from ..utils import _mult_cal_one + + +def _ensure_path(fname): + out = fname + if not isinstance(out, Path): + out = Path(out) + return out + + +@fill_doc +def read_raw_nihon(fname, preload=False, verbose=None): + """Reader for an Nihon Kohden EEG file. + + Parameters + ---------- + fname : str + Path to the Nihon Kohden data file (``.EEG``). + preload : bool + If True, all data are loaded at initialization. + %(verbose)s + + Returns + ------- + raw : instance of RawNihon + A Raw object containing Nihon Kohden data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawNihon(fname, preload, verbose) + + +_valid_headers = [ + 'EEG-1100A V01.00', + 'EEG-1100B V01.00', + 'EEG-1100C V01.00', + 'QI-403A V01.00', + 'QI-403A V02.00', + 'EEG-2100 V01.00', + 'EEG-2100 V02.00', + 'DAE-2100D V01.30', + 'DAE-2100D V02.00', + # 'EEG-1200A V01.00', # Not working for the moment. +] + + +def _read_nihon_metadata(fname): + metadata = {} + fname = _ensure_path(fname) + pnt_fname = fname.with_suffix('.PNT') + if not pnt_fname.exists(): + warn('No PNT file exists. 
Metadata will be blank') + return metadata + logger.info('Found PNT file, reading metadata.') + with open(pnt_fname, 'r') as fid: + version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + if version not in _valid_headers: + raise ValueError(f'Not a valid Nihon Kohden PNT file ({version})') + metadata['version'] = version + + # Read timestamp + fid.seek(0x40) + meas_str = np.fromfile(fid, '|S14', 1).astype('U14')[0] + meas_date = datetime.strptime(meas_str, '%Y%m%d%H%M%S') + meas_date = meas_date.replace(tzinfo=timezone.utc) + metadata['meas_date'] = meas_date + + return metadata + + +_default_chan_labels = [ + 'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', 'F8', + 'T3', 'T4', 'T5', 'T6', 'FZ', 'CZ', 'PZ', 'E', 'PG1', 'PG2', 'A1', 'A2', + 'T1', 'T2' +] +_default_chan_labels += [f'X{i}' for i in range(1, 12)] +_default_chan_labels += [f'NA{i}' for i in range(1, 6)] +_default_chan_labels += [f'DC{i:02}' for i in range(1, 33)] +_default_chan_labels += ['BN1', 'BN2', 'Mark1', 'Mark2'] +_default_chan_labels += [f'NA{i}' for i in range(6, 28)] +_default_chan_labels += ['X12/BP1', 'X13/BP2', 'X14/BP3', 'X15/BP4'] +_default_chan_labels += [f'X{i}' for i in range(16, 166)] +_default_chan_labels += ['NA28', 'Z'] + +_encodings = ('utf-8', 'latin1') + + +def _read_21e_file(fname): + fname = _ensure_path(fname) + e_fname = fname.with_suffix('.21E') + _chan_labels = [x for x in _default_chan_labels] + if e_fname.exists(): + # Read the 21E file and update the labels accordingly. + logger.info('Found 21E file, reading channel names.') + for enc in _encodings: + try: + with open(e_fname, 'r', encoding=enc) as fid: + keep_parsing = False + for line in fid: + if line.startswith('['): + if 'ELECTRODE' in line or 'REFERENCE' in line: + keep_parsing = True + else: + keep_parsing = False + elif keep_parsing is True: + idx, name = line.split('=') + idx = int(idx) + if idx >= len(_chan_labels): + n = idx - len(_chan_labels) + 1 + _chan_labels.extend(['UNK'] * n) + _chan_labels[idx] = name.strip() + except UnicodeDecodeError: + pass + else: + break + else: + warn(f'Could not decode 21E file as one of {_encodings}; ' + f'Default channel names are chosen.') + + return _chan_labels + + +def _read_nihon_header(fname): + # Read the Nihon Kohden EEG file header + fname = _ensure_path(fname) + _chan_labels = _read_21e_file(fname) + header = {} + logger.info(f'Reading header from {fname}') + with open(fname, 'r') as fid: + version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + if version not in _valid_headers: + raise ValueError( + 'Not a valid Nihon Kohden EEG file ({})'.format(version)) + + fid.seek(0x0081) + control_block = np.fromfile(fid, '|S16', 1).astype('U16')[0] + if control_block not in _valid_headers: + raise ValueError('Not a valid Nihon Kohden EEG file ' + '(control block {})'.format(version)) + + fid.seek(0x17fe) + waveform_sign = np.fromfile(fid, np.uint8, 1)[0] + if waveform_sign != 1: + raise ValueError('Not a valid Nihon Kohden EEG file ' + '(waveform block)') + header['version'] = version + + fid.seek(0x0091) + n_ctlblocks = np.fromfile(fid, np.uint8, 1)[0] + header['n_ctlblocks'] = n_ctlblocks + controlblocks = [] + for i_ctl_block in range(n_ctlblocks): + t_controlblock = {} + fid.seek(0x0092 + i_ctl_block * 20) + t_ctl_address = np.fromfile(fid, np.uint32, 1)[0] + t_controlblock['address'] = t_ctl_address + fid.seek(t_ctl_address + 17) + n_datablocks = np.fromfile(fid, np.uint8, 1)[0] + t_controlblock['n_datablocks'] = n_datablocks + t_controlblock['datablocks'] = [] + for 
i_data_block in range(n_datablocks): + t_datablock = {} + fid.seek(t_ctl_address + i_data_block * 20 + 18) + t_data_address = np.fromfile(fid, np.uint32, 1)[0] + t_datablock['address'] = t_data_address + + fid.seek(t_data_address + 0x26) + t_n_channels = np.fromfile(fid, np.uint8, 1)[0] + t_datablock['n_channels'] = t_n_channels + + t_channels = [] + for i_ch in range(t_n_channels): + fid.seek(t_data_address + 0x27 + (i_ch * 10)) + t_idx = np.fromfile(fid, np.uint8, 1)[0] + t_channels.append(_chan_labels[t_idx]) + + t_datablock['channels'] = t_channels + + fid.seek(t_data_address + 0x1C) + t_record_duration = np.fromfile(fid, np.uint32, 1)[0] + t_datablock['duration'] = t_record_duration + + fid.seek(t_data_address + 0x1a) + sfreq = np.fromfile(fid, np.uint16, 1)[0] & 0x3FFF + t_datablock['sfreq'] = sfreq + + t_datablock['n_samples'] = int(t_record_duration * sfreq / 10) + t_controlblock['datablocks'].append(t_datablock) + controlblocks.append(t_controlblock) + header['controlblocks'] = controlblocks + + # Now check that every data block has the same channels and sfreq + chans = [] + sfreqs = [] + nsamples = [] + for t_ctl in header['controlblocks']: + for t_dtb in t_ctl['datablocks']: + chans.append(t_dtb['channels']) + sfreqs.append(t_dtb['sfreq']) + nsamples.append(t_dtb['n_samples']) + for i_elem in range(1, len(chans)): + if chans[0] != chans[i_elem]: + raise ValueError('Channel names in datablocks do not match') + if sfreqs[0] != sfreqs[i_elem]: + raise ValueError('Sample frequency in datablocks do not match') + header['ch_names'] = chans[0] + header['sfreq'] = sfreqs[0] + header['n_samples'] = np.sum(nsamples) + + # TODO: Support more than one controlblock and more than one datablock + if header['n_ctlblocks'] != 1: + raise NotImplementedError('I dont know how to read more than one ' + 'control block for this type of file :(') + if header['controlblocks'][0]['n_datablocks'] > 1: + # Multiple blocks, check that they all have the same kind of data + datablocks = header['controlblocks'][0]['datablocks'] + block_0 = datablocks[0] + for t_block in datablocks[1:]: + if block_0['n_channels'] != t_block['n_channels']: + raise ValueError( + 'Cannot read NK file with different number of channels ' + 'in each datablock') + if block_0['channels'] != t_block['channels']: + raise ValueError( + 'Cannot read NK file with different channels in each ' + 'datablock') + if block_0['sfreq'] != t_block['sfreq']: + raise ValueError( + 'Cannot read NK file with different sfreq in each ' + 'datablock') + + return header + + +def _read_nihon_annotations(fname): + fname = _ensure_path(fname) + log_fname = fname.with_suffix('.LOG') + if not log_fname.exists(): + warn('No LOG file exists. 
Annotations will not be read') + return dict(onset=[], duration=[], description=[]) + logger.info('Found LOG file, reading events.') + with open(log_fname, 'r') as fid: + version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + if version not in _valid_headers: + raise ValueError( + 'Not a valid Nihon Kohden LOG file ({})'.format(version)) + + fid.seek(0x91) + n_logblocks = np.fromfile(fid, np.uint8, 1)[0] + all_onsets = [] + all_descriptions = [] + for t_block in range(n_logblocks): + fid.seek(0x92 + t_block * 20) + t_blk_address = np.fromfile(fid, np.uint32, 1)[0] + fid.seek(t_blk_address + 0x12) + n_logs = np.fromfile(fid, np.uint8, 1)[0] + fid.seek(t_blk_address + 0x14) + t_logs = np.fromfile(fid, '|S45', n_logs) + for t_log in t_logs: + for enc in _encodings: + try: + t_log = t_log.decode(enc) + except UnicodeDecodeError: + pass + else: + break + else: + warn(f'Could not decode log as one of {_encodings}') + continue + t_desc = t_log[:20].strip('\x00') + t_onset = datetime.strptime(t_log[20:26], '%H%M%S') + t_onset = (t_onset.hour * 3600 + t_onset.minute * 60 + + t_onset.second) + all_onsets.append(t_onset) + all_descriptions.append(t_desc) + + annots = dict( + onset=all_onsets, + duration=[0] * len(all_onsets), + description=all_descriptions) + return annots + + +def _map_ch_to_type(ch_name): + ch_type_pattern = OrderedDict([ + ('stim', ('Mark',)), ('misc', ('DC', 'NA', 'Z', '$')), + ('bio', ('X',))]) + for key, kinds in ch_type_pattern.items(): + if any(kind in ch_name for kind in kinds): + return key + return 'eeg' + + +def _map_ch_to_specs(ch_name): + unit_mult = 1e-3 + phys_min = -12002.9 + phys_max = 12002.56 + dig_min = -32768 + if ch_name.upper() in _default_chan_labels: + idx = _default_chan_labels.index(ch_name.upper()) + if (idx < 42 or idx > 73) and idx not in [76, 77]: + unit_mult = 1e-6 + phys_min = -3200 + phys_max = 3199.902 + t_range = phys_max - phys_min + cal = t_range / 65535 + offset = phys_min - (dig_min * cal) + + out = dict(unit=unit_mult, phys_min=phys_min, phys_max=phys_max, + dig_min=dig_min, cal=cal, offset=offset) + return out + + +@fill_doc +class RawNihon(BaseRaw): + """Raw object from a Nihon Kohden EEG file. + + Parameters + ---------- + fname : str + Path to the Nihon Kohden data file (.eeg). + preload : bool + If True, all data are loaded at initialization. + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
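+
+    Examples
+    --------
+    A minimal usage sketch (the file path here is hypothetical):
+
+    >>> from mne.io import read_raw_nihon  # doctest: +SKIP
+    >>> raw = read_raw_nihon('recording.EEG', preload=True)  # doctest: +SKIP
+    >>> raw.annotations  # events parsed from the .LOG file  # doctest: +SKIP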
+ """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') + fname = _ensure_path(fname) + data_name = fname.name + logger.info('Loading %s' % data_name) + + header = _read_nihon_header(fname) + metadata = _read_nihon_metadata(fname) + + # n_chan = len(header['ch_names']) + 1 + sfreq = header['sfreq'] + # data are multiplexed int16 + ch_names = header['ch_names'] + ch_types = [_map_ch_to_type(x) for x in ch_names] + + info = create_info(ch_names, sfreq, ch_types) + n_samples = header['n_samples'] + + if 'meas_date' in metadata: + with info._unlock(): + info['meas_date'] = metadata['meas_date'] + chs = {x: _map_ch_to_specs(x) for x in info['ch_names']} + + cal = np.array( + [chs[x]['cal'] for x in info['ch_names']], float)[:, np.newaxis] + offsets = np.array( + [chs[x]['offset'] for x in info['ch_names']], float)[:, np.newaxis] + gains = np.array( + [chs[x]['unit'] for x in info['ch_names']], float)[:, np.newaxis] + + raw_extras = dict( + cal=cal, offsets=offsets, gains=gains, header=header) + self._header = header + + for i_ch, ch_name in enumerate(info['ch_names']): + t_range = (chs[ch_name]['phys_max'] - chs[ch_name]['phys_min']) + info['chs'][i_ch]['range'] = t_range + info['chs'][i_ch]['cal'] = 1 / t_range + + super(RawNihon, self).__init__( + info, preload=preload, last_samps=(n_samples - 1,), + filenames=[fname.as_posix()], orig_format='short', + raw_extras=[raw_extras]) + + # Get annotations from LOG file + annots = _read_nihon_annotations(fname) + + # Annotate acqusition skips + controlblock = self._header['controlblocks'][0] + cur_sample = 0 + if controlblock['n_datablocks'] > 1: + for i_block in range(controlblock['n_datablocks'] - 1): + t_block = controlblock['datablocks'][i_block] + cur_sample = cur_sample + t_block['n_samples'] + cur_tpoint = (cur_sample - 0.5) / t_block['sfreq'] + # Add annotations as in append raw + annots['onset'].append(cur_tpoint) + annots['duration'].append(0.0) + annots['description'].append('BAD boundary') + annots['onset'].append(cur_tpoint) + annots['duration'].append(0.0) + annots['description'].append('EDGE boundary') + + annotations = Annotations(**annots, orig_time=info['meas_date']) + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + # For now we assume one control block + header = self._raw_extras[fi]['header'] + + # Get the original cal, offsets and gains + cal = self._raw_extras[fi]['cal'] + offsets = self._raw_extras[fi]['offsets'] + gains = self._raw_extras[fi]['gains'] + + # get the right datablock + datablocks = header['controlblocks'][0]['datablocks'] + ends = np.cumsum([t['n_samples'] for t in datablocks]) + + start_block = np.where(start < ends)[0][0] + stop_block = np.where(stop <= ends)[0][0] + + if start_block != stop_block: + # Recursive call for each block independently + new_start = start + sample_start = 0 + for t_block_idx in range(start_block, stop_block + 1): + t_block = datablocks[t_block_idx] + if t_block == stop_block: + # If its the last block, we stop on the last sample to read + new_stop = stop + else: + # Otherwise, stop on the last sample of the block + new_stop = t_block['n_samples'] + new_start + samples_to_read = new_stop - new_start + sample_stop = sample_start + samples_to_read + + self._read_segment_file( + data[:, sample_start:sample_stop], idx, fi, + new_start, new_stop, cals, mult + ) + + # Update variables for next loop + sample_start = 
+        else:
+            datablock = datablocks[start_block]
+
+            n_channels = datablock['n_channels'] + 1
+            datastart = (datablock['address'] + 0x27 +
+                         (datablock['n_channels'] * 10))
+
+            # Compute start offset based on the beginning of the block
+            rel_start = start
+            if start_block != 0:
+                rel_start = start - ends[start_block - 1]
+            start_offset = datastart + rel_start * n_channels * 2
+
+            with open(self._filenames[fi], 'rb') as fid:
+                to_read = (stop - start) * n_channels
+                fid.seek(start_offset)
+                block_data = np.fromfile(fid, '<u2', to_read) + 0x8000
+                block_data = block_data.astype(np.int16)
+                block_data = block_data.reshape(n_channels, -1, order='F')
+                block_data = block_data[:-1] * cal  # cast to float64
+                block_data += offsets
+                block_data *= gains
+                _mult_cal_one(data, block_data, idx, cals, mult)
diff --git a/python/libs/mne/io/nihon/tests/__init__.py b/python/libs/mne/io/nihon/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/python/libs/mne/io/nihon/tests/test_nihon.py b/python/libs/mne/io/nihon/tests/test_nihon.py
new file mode 100644
--- /dev/null
+++ b/python/libs/mne/io/nihon/tests/test_nihon.py
+# -*- coding: utf-8 -*-
+# Authors: Fede Raimondo <federaimondo@gmail.com>
+# simplified BSD-3 license
+
+import pytest
+from numpy.testing import assert_array_almost_equal
+
+from mne.io import read_raw_nihon, read_raw_edf
+from mne.io.tests.test_raw import _test_raw_reader
+from mne.datasets import testing
+from mne.io.nihon.nihon import (_read_nihon_header, _read_nihon_metadata,
+                                _read_nihon_annotations)
+from mne.io.nihon import nihon
+
+data_path = testing.data_path(download=False)
+
+
+@testing.requires_testing_data
+def test_nihon_eeg():
+    """Test reading Nihon Kohden EEG files."""
+    fname = data_path / 'NihonKohden' / 'MB0400FU.EEG'
+    raw = read_raw_nihon(fname.as_posix(), preload=True)
+    assert 'RawNihon' in repr(raw)
+    _test_raw_reader(read_raw_nihon, fname=fname, test_scaling=False)
+    fname_edf = data_path / 'NihonKohden' / 'MB0400FU.EDF'
+    raw_edf = read_raw_edf(fname_edf, preload=True)
+
+    assert raw._data.shape == raw_edf._data.shape
+    assert raw.info['sfreq'] == raw_edf.info['sfreq']
+    # ch names and order are switched in the EDF
+    edf_ch_names = {x: x.split(' ')[1].replace('-Ref', '')
+                    for x in raw_edf.ch_names}
+    raw_edf.rename_channels(edf_ch_names)
+    assert raw.ch_names == raw_edf.ch_names
+
+    for i, an1 in enumerate(raw.annotations):
+        # EDF has some weird annotations, which are not in the LOG file
+        an2 = raw_edf.annotations[i * 2 + 1]
+        assert an1['onset'] == an2['onset']
+        assert an1['duration'] == an2['duration']
+        # Also, it prepends 'Segment: ' to some annotations
+        t_desc = an2['description'].replace('Segment: ', '')
+        assert an1['description'] == t_desc
+
+    assert_array_almost_equal(raw._data, raw_edf._data)
+
+    with pytest.raises(ValueError, match='Not a valid Nihon Kohden EEG file'):
+        raw = read_raw_nihon(fname_edf, preload=True)
+
+    with pytest.raises(ValueError, match='Not a valid Nihon Kohden EEG file'):
+        raw = _read_nihon_header(fname_edf)
+
+    bad_fname = data_path / 'eximia' / 'text_eximia.nxe'
+
+    msg = 'No PNT file exists. Metadata will be blank'
+    with pytest.warns(RuntimeWarning, match=msg):
+        meta = _read_nihon_metadata(bad_fname)
+    assert len(meta) == 0
+
+    msg = 'No LOG file exists. 
Annotations will not be read' + with pytest.warns(RuntimeWarning, match=msg): + annot = _read_nihon_annotations(bad_fname) + assert all(len(x) == 0 for x in annot.values()) + + # the nihon test file has $A1 and $A2 in it, which are not EEG + assert '$A1' in raw.ch_names + + # assert that channels with $ are 'misc' + picks = [ch for ch in raw.ch_names if ch.startswith('$')] + ch_types = raw.get_channel_types(picks=picks) + assert all(ch == 'misc' for ch in ch_types) + + +@testing.requires_testing_data +def test_nihon_duplicate_channels(monkeypatch): + """Test deduplication of channel names.""" + fname = data_path / 'NihonKohden' / 'MB0400FU.EEG' + + def return_channel_duplicates(fname): + ch_names = nihon._default_chan_labels + ch_names[1] = ch_names[0] + return ch_names + + monkeypatch.setattr(nihon, '_read_21e_file', return_channel_duplicates) + + assert len(nihon._read_21e_file(fname)) > \ + len(set(nihon._read_21e_file(fname))) + msg = 'Channel names are not unique, found duplicates for: ' \ + '{\'FP1\'}. Applying running numbers for duplicates.' + with pytest.warns(RuntimeWarning, match=msg): + read_raw_nihon(fname) diff --git a/python/libs/mne/io/nirx/__init__.py b/python/libs/mne/io/nirx/__init__.py new file mode 100644 index 0000000..0a8ee5e --- /dev/null +++ b/python/libs/mne/io/nirx/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Author: Robert Luke +# +# License: BSD-3-Clause + +from .nirx import read_raw_nirx diff --git a/python/libs/mne/io/nirx/_localized_abbr.py b/python/libs/mne/io/nirx/_localized_abbr.py new file mode 100644 index 0000000..4e42f7d --- /dev/null +++ b/python/libs/mne/io/nirx/_localized_abbr.py @@ -0,0 +1,60 @@ +"""Localizations for meas_date extraction.""" +# Authors: Eric Larson +# +# License: BSD-3-Clause + +# This file was generated on 2021/01/31 on an Ubuntu system. +# When getting "unsupported locale setting" on Ubuntu (e.g., with localepurge), +# use "sudo locale-gen de_DE" etc. then "sudo update-locale". + +""" +import datetime +import locale +print('_localized_abbr = {') +for loc in ('en_US.utf8', 'de_DE', 'fr_FR', 'it_IT'): + print(f' {repr(loc)}: {{') + print(' "month": {', end='') + month_abbr = set() + for month in range(1, 13): # Month as locale’s abbreviated name + locale.setlocale(locale.LC_TIME, "en_US.utf8") + dt = datetime.datetime(year=2000, month=month, day=1) + val = dt.strftime("%b").lower() + locale.setlocale(locale.LC_TIME, loc) + key = dt.strftime("%b").lower() + month_abbr.add(key) + print(f'{repr(key)}: {repr(val)}, ', end='') + print('}, # noqa') + print(' "weekday": {', end='') + weekday_abbr = set() + for day in range(1, 8): # Weekday as locale’s abbreviated name. 
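+        # (note: each generated entry maps the locale's abbreviation,
+        # read under 'loc', to the en_US one, e.g. 'di' -> 'tue' for de_DE)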
+ locale.setlocale(locale.LC_TIME, "en_US.utf8") + dt = datetime.datetime(year=2000, month=1, day=day) + val = dt.strftime("%a").lower() + locale.setlocale(locale.LC_TIME, loc) + key = dt.strftime("%a").lower() + assert key not in weekday_abbr, key + weekday_abbr.add(key) + print(f'{repr(key)}: {repr(val)}, ', end='') + print('}, # noqa') + print(' },') +print('}\n') +""" + +_localized_abbr = { + 'en_US.utf8': { + "month": {'jan': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'may': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'oct': 'oct', 'nov': 'nov', 'dec': 'dec', }, # noqa + "weekday": {'sat': 'sat', 'sun': 'sun', 'mon': 'mon', 'tue': 'tue', 'wed': 'wed', 'thu': 'thu', 'fri': 'fri', }, # noqa + }, + 'de_DE': { + "month": {'jan': 'jan', 'feb': 'feb', 'mär': 'mar', 'apr': 'apr', 'mai': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'okt': 'oct', 'nov': 'nov', 'dez': 'dec', }, # noqa + "weekday": {'sa': 'sat', 'so': 'sun', 'mo': 'mon', 'di': 'tue', 'mi': 'wed', 'do': 'thu', 'fr': 'fri', }, # noqa + }, + 'fr_FR': { + "month": {'janv.': 'jan', 'févr.': 'feb', 'mars': 'mar', 'avril': 'apr', 'mai': 'may', 'juin': 'jun', 'juil.': 'jul', 'août': 'aug', 'sept.': 'sep', 'oct.': 'oct', 'nov.': 'nov', 'déc.': 'dec', }, # noqa + "weekday": {'sam.': 'sat', 'dim.': 'sun', 'lun.': 'mon', 'mar.': 'tue', 'mer.': 'wed', 'jeu.': 'thu', 'ven.': 'fri', }, # noqa + }, + 'it_IT': { + "month": {'gen': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'mag': 'may', 'giu': 'jun', 'lug': 'jul', 'ago': 'aug', 'set': 'sep', 'ott': 'oct', 'nov': 'nov', 'dic': 'dec', }, # noqa + "weekday": {'sab': 'sat', 'dom': 'sun', 'lun': 'mon', 'mar': 'tue', 'mer': 'wed', 'gio': 'thu', 'ven': 'fri', }, # noqa + }, +} diff --git a/python/libs/mne/io/nirx/nirx.py b/python/libs/mne/io/nirx/nirx.py new file mode 100644 index 0000000..8b1a45f --- /dev/null +++ b/python/libs/mne/io/nirx/nirx.py @@ -0,0 +1,512 @@ +# Authors: Robert Luke +# +# License: BSD-3-Clause + +from configparser import ConfigParser, RawConfigParser +import glob as glob +import re as re +import os.path as op +import datetime as dt +import json + +import numpy as np + +from ._localized_abbr import _localized_abbr +from ..base import BaseRaw +from ..utils import _mult_cal_one +from ..constants import FIFF +from ..meas_info import create_info, _format_dig_points +from ...annotations import Annotations +from ..._freesurfer import get_mni_fiducials +from ...transforms import apply_trans, _get_trans +from ...utils import (logger, verbose, fill_doc, warn, _check_fname, + _validate_type, _check_option, _mask_to_onsets_offsets) + + +@fill_doc +def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None): + """Reader for a NIRX fNIRS recording. + + Parameters + ---------- + fname : str + Path to the NIRX data folder or header file. + %(saturated)s + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawNIRX + A Raw object containing NIRX data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + %(nirx_notes)s + """ + return RawNIRX(fname, saturated, preload, verbose) + + +def _open(fname): + return open(fname, 'r', encoding='latin-1') + + +@fill_doc +class RawNIRX(BaseRaw): + """Raw object from a NIRX fNIRS file. + + Parameters + ---------- + fname : str + Path to the NIRX data folder or header file. + %(saturated)s + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
+ + Notes + ----- + %(nirx_notes)s + """ + + @verbose + def __init__(self, fname, saturated, preload=False, verbose=None): + from scipy.io import loadmat + logger.info('Loading %s' % fname) + _validate_type(fname, 'path-like', 'fname') + _validate_type(saturated, str, 'saturated') + _check_option('saturated', saturated, ('annotate', 'nan', 'ignore')) + fname = str(fname) + if fname.endswith('.hdr'): + fname = op.dirname(op.abspath(fname)) + + fname = _check_fname(fname, 'read', True, 'fname', need_dir=True) + + json_config = glob.glob('%s/*%s' % (fname, "config.json")) + if len(json_config): + is_aurora = True + else: + is_aurora = False + + if is_aurora: + # NIRSport2 devices using Aurora software + keys = ('hdr', 'config.json', 'description.json', + 'wl1', 'wl2', 'probeInfo.mat', 'tri') + else: + # NIRScout devices and NIRSport1 devices + keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2', + 'config.txt', 'probeInfo.mat') + n_dat = len(glob.glob('%s/*%s' % (fname, 'dat'))) + if n_dat != 1: + warn("A single dat file was expected in the specified path, " + f"but got {n_dat}. This may indicate that the file " + "structure has been modified since the measurement " + "was saved.") + + # Check if required files exist and store names for later use + files = dict() + nan_mask = dict() + for key in keys: + files[key] = glob.glob('%s/*%s' % (fname, key)) + fidx = 0 + if len(files[key]) != 1: + if key not in ('wl1', 'wl2'): + raise RuntimeError( + f'Need one {key} file, got {len(files[key])}') + noidx = np.where(['nosatflags_' in op.basename(x) + for x in files[key]])[0] + if len(noidx) != 1 or len(files[key]) != 2: + raise RuntimeError( + f'Need one nosatflags and one standard {key} file, ' + f'got {len(files[key])}') + # Here two files have been found, one that is called + # no sat flags. The nosatflag file has no NaNs in it. + noidx = noidx[0] + if saturated == 'ignore': + # Ignore NaN and return values + fidx = noidx + elif saturated == 'nan': + # Return NaN + fidx = 0 if noidx == 1 else 1 + else: + assert saturated == 'annotate' # guaranteed above + fidx = noidx + nan_mask[key] = files[key][0 if noidx == 1 else 1] + files[key] = files[key][fidx] + + # Read number of rows/samples of wavelength data + with _open(files['wl1']) as fid: + last_sample = fid.read().count('\n') - 1 + + # Read header file + # The header file isn't compliant with the configparser. So all the + # text between comments must be removed before passing to parser + with _open(files['hdr']) as f: + hdr_str_all = f.read() + hdr_str = re.sub('#.*?#', '', hdr_str_all, flags=re.DOTALL) + if is_aurora: + hdr_str = re.sub('(\\[DataStructure].*)', '', + hdr_str, flags=re.DOTALL) + hdr = RawConfigParser() + hdr.read_string(hdr_str) + + # Check that the file format version is supported + if is_aurora: + # We may need to ease this requirement back + if hdr['GeneralInfo']['Version'] not in ['2021.4.0-34-ge9fdbbc8', + '2021.9.0-5-g3eb32851']: + warn("MNE has not been tested with Aurora version " + f"{hdr['GeneralInfo']['Version']}") + else: + if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"', + '"15.3"']: + raise RuntimeError('MNE does not support this NIRStar version' + ' (%s)' % (hdr['GeneralInfo']['NIRStar'],)) + if "NIRScout" not in hdr['GeneralInfo']['Device'] \ + and "NIRSport" not in hdr['GeneralInfo']['Device']: + warn("Only import of data from NIRScout devices have been " + "thoroughly tested. You are using a %s device. 
" % + hdr['GeneralInfo']['Device']) + + # Parse required header fields + + # Extract measurement date and time + if is_aurora: + datetime_str = hdr['GeneralInfo']['Date'] + else: + datetime_str = hdr['GeneralInfo']['Date'] + \ + hdr['GeneralInfo']['Time'] + + meas_date = None + # Several formats have been observed so we try each in turn + for loc, translations in _localized_abbr.items(): + do_break = False + # So far we are lucky in that all the formats below, if they + # include %a (weekday abbr), always come first. Thus we can use + # a .split(), replace, and rejoin. + loc_datetime_str = datetime_str.split(' ') + for key, val in translations['weekday'].items(): + loc_datetime_str[0] = loc_datetime_str[0].replace(key, val) + for ii in range(1, len(loc_datetime_str)): + for key, val in translations['month'].items(): + loc_datetime_str[ii] = \ + loc_datetime_str[ii].replace(key, val) + loc_datetime_str = ' '.join(loc_datetime_str) + logger.debug(f'Trying {loc} datetime: {loc_datetime_str}') + for dt_code in ['"%a, %b %d, %Y""%H:%M:%S.%f"', + '"%a %d %b %Y""%H:%M:%S.%f"', + '"%a, %d %b %Y""%H:%M:%S.%f"', + '%Y-%m-%d %H:%M:%S.%f']: + try: + meas_date = dt.datetime.strptime(loc_datetime_str, dt_code) + except ValueError: + pass + else: + meas_date = meas_date.replace(tzinfo=dt.timezone.utc) + do_break = True + logger.debug( + f'Measurement date language {loc} detected: {dt_code}') + break + if do_break: + break + if meas_date is None: + warn("Extraction of measurement date from NIRX file failed. " + "This can be caused by files saved in certain locales " + f"(currently only {list(_localized_abbr)} supported). " + "Please report this as a github issue. " + "The date is being set to January 1st, 2000, " + f"instead of {repr(datetime_str)}.") + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0, + tzinfo=dt.timezone.utc) + + # Extract frequencies of light used by machine + if is_aurora: + fnirs_wavelengths = [760, 850] + else: + fnirs_wavelengths = [int(s) for s in + re.findall(r'(\d+)', + hdr['ImagingParameters'][ + 'Wavelengths'])] + + # Extract source-detectors + if is_aurora: + sources = re.findall(r'(\d+)-\d+', hdr_str_all.split("\n")[-2]) + detectors = re.findall(r'\d+-(\d+)', hdr_str_all.split("\n")[-2]) + sources = [int(s) + 1 for s in sources] + detectors = [int(d) + 1 for d in detectors] + + else: + sources = np.asarray([int(s) for s in + re.findall(r'(\d+)-\d+:\d+', + hdr['DataStructure'] + ['S-D-Key'])], int) + detectors = np.asarray([int(s) for s in + re.findall(r'\d+-(\d+):\d+', + hdr['DataStructure'] + ['S-D-Key'])], int) + + # Extract sampling rate + if is_aurora: + samplingrate = float(hdr['GeneralInfo']['Sampling rate']) + else: + samplingrate = float(hdr['ImagingParameters']['SamplingRate']) + + # Read participant information file + if is_aurora: + with open(files['description.json']) as f: + inf = json.load(f) + else: + inf = ConfigParser(allow_no_value=True) + inf.read(files['inf']) + inf = inf._sections['Subject Demographics'] + + # Store subject information from inf file in mne format + # Note: NIRX also records "Study Type", "Experiment History", + # "Additional Notes", "Contact Information" and this information + # is currently discarded + # NIRStar does not record an id, or handedness by default + # The name field is used to populate the his_id variable. 
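+        # For example, a (hypothetical) inf entry name="Jane Q Doe"
+        # yields his_id 'Jane_Q_Doe', first_name 'Jane',
+        # last_name 'Doe' and middle_name 'Q' via the splits below.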
+ subject_info = {} + if is_aurora: + names = inf["subject"].split() + else: + names = inf['name'].replace('"', "").split() + subject_info['his_id'] = "_".join(names) + if len(names) > 0: + subject_info['first_name'] = \ + names[0].replace("\"", "") + if len(names) > 1: + subject_info['last_name'] = \ + names[-1].replace("\"", "") + if len(names) > 2: + subject_info['middle_name'] = \ + names[-2].replace("\"", "") + subject_info['sex'] = inf['gender'].replace("\"", "") + # Recode values + if subject_info['sex'] in {'M', 'Male', '1'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE + elif subject_info['sex'] in {'F', 'Female', '2'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE + else: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + if inf['age'] != '': + subject_info['birthday'] = (meas_date.year - int(inf['age']), + meas_date.month, + meas_date.day) + + # Read information about probe/montage/optodes + # A word on terminology used here: + # Sources produce light + # Detectors measure light + # Sources and detectors are both called optodes + # Each source - detector pair produces a channel + # Channels are defined as the midpoint between source and detector + mat_data = loadmat(files['probeInfo.mat']) + probes = mat_data['probeInfo']['probes'][0, 0] + requested_channels = probes['index_c'][0, 0] + src_locs = probes['coords_s3'][0, 0] / 100. + det_locs = probes['coords_d3'][0, 0] / 100. + ch_locs = probes['coords_c3'][0, 0] / 100. + + # These are all in MNI coordinates, so let's transform them to + # the Neuromag head coordinate frame + src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head( + 'fsaverage', 'mri', 'head', src_locs, det_locs, ch_locs) + + # Set up digitization + dig = get_mni_fiducials('fsaverage', verbose=False) + for fid in dig: + fid['r'] = apply_trans(mri_head_t, fid['r']) + fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD + for ii, ch_loc in enumerate(ch_locs, 1): + dig.append(dict( + kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + )) + dig = _format_dig_points(dig) + del mri_head_t + + # Determine requested channel indices + # The wl1 and wl2 files include all possible source - detector pairs. + # But most of these are not relevant. We want to extract only the + # subset requested in the probe file + req_ind = np.array([], int) + for req_idx in range(requested_channels.shape[0]): + sd_idx = np.where((sources == requested_channels[req_idx][0]) & + (detectors == requested_channels[req_idx][1])) + req_ind = np.concatenate((req_ind, sd_idx[0])) + req_ind = req_ind.astype(int) + + snames = [f"S{sources[idx]}" for idx in req_ind] + dnames = [f"_D{detectors[idx]}" for idx in req_ind] + sdnames = [m + str(n) for m, n in zip(snames, dnames)] + sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames] + sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames] + chnames = [val for pair in zip(sd1, sd2) for val in pair] + + # Create mne structure + info = create_info(chnames, + samplingrate, + ch_types='fnirs_cw_amplitude') + with info._unlock(): + info.update(subject_info=subject_info, dig=dig) + info['meas_date'] = meas_date + + # Store channel, source, and detector locations + # The channel location is stored in the first 3 entries of loc. + # The source location is stored in the second 3 entries of loc. + # The detector location is stored in the third 3 entries of loc. + # NIRx NIRSite uses MNI coordinates. + # Also encode the light frequency in the structure. 
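+        # For a channel named 'S1_D2 760' (hypothetical), loc[:3] gets
+        # the S1-D2 midpoint, loc[3:6] the S1 position, loc[6:9] the D2
+        # position, and loc[9] the wavelength 760.0.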
+ for ch_idx2 in range(requested_channels.shape[0]): + # Find source and store location + src = int(requested_channels[ch_idx2, 0]) - 1 + # Find detector and store location + det = int(requested_channels[ch_idx2, 1]) - 1 + # Store channel location as midpoint between source and detector. + midpoint = (src_locs[src, :] + det_locs[det, :]) / 2 + for ii in range(2): + ch_idx3 = ch_idx2 * 2 + ii + info['chs'][ch_idx3]['loc'][3:6] = src_locs[src, :] + info['chs'][ch_idx3]['loc'][6:9] = det_locs[det, :] + info['chs'][ch_idx3]['loc'][:3] = midpoint + info['chs'][ch_idx3]['loc'][9] = fnirs_wavelengths[ii] + info['chs'][ch_idx3]['coord_frame'] = FIFF.FIFFV_COORD_HEAD + + # Extract the start/stop numbers for samples in the CSV. In theory the + # sample bounds should just be 10 * the number of channels, but some + # files have mixed \n and \n\r endings (!) so we can't rely on it, and + # instead make a single pass over the entire file at the beginning so + # that we know how to seek and read later. + bounds = dict() + for key in ('wl1', 'wl2'): + offset = 0 + bounds[key] = [offset] + with open(files[key], 'rb') as fid: + for line in fid: + offset += len(line) + bounds[key].append(offset) + assert offset == fid.tell() + + # Extras required for reading data + raw_extras = { + 'sd_index': req_ind, + 'files': files, + 'bounds': bounds, + 'nan_mask': nan_mask, + } + # Get our saturated mask + annot_mask = None + for ki, key in enumerate(('wl1', 'wl2')): + if nan_mask.get(key, None) is None: + continue + mask = np.isnan(_read_csv_rows_cols( + nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None}).T) + if saturated == 'nan': + nan_mask[key] = mask + else: + assert saturated == 'annotate' + if annot_mask is None: + annot_mask = np.zeros( + (len(info['ch_names']) // 2, last_sample + 1), bool) + annot_mask |= mask + nan_mask[key] = None # shouldn't need again + + super(RawNIRX, self).__init__( + info, preload, filenames=[fname], last_samps=[last_sample], + raw_extras=[raw_extras], verbose=verbose) + + # make onset/duration/description + onset, duration, description, ch_names = list(), list(), list(), list() + if annot_mask is not None: + for ci, mask in enumerate(annot_mask): + on, dur = _mask_to_onsets_offsets(mask) + on = on / info['sfreq'] + dur = dur / info['sfreq'] + dur -= on + onset.extend(on) + duration.extend(dur) + description.extend(['BAD_SATURATED'] * len(on)) + ch_names.extend([self.ch_names[2 * ci:2 * ci + 2]] * len(on)) + + # Read triggers from event file + if not is_aurora: + files['tri'] = files['hdr'][:-3] + 'evt' + if op.isfile(files['tri']): + with _open(files['tri']) as fid: + t = [re.findall(r'(\d+)', line) for line in fid] + for t_ in t: + if is_aurora: + trigger_frame = float(t_[7]) + desc = float(t_[8]) + else: + binary_value = ''.join(t_[1:])[::-1] + desc = float(int(binary_value, 2)) + trigger_frame = float(t_[0]) + onset.append(trigger_frame / samplingrate) + duration.append(1.) # No duration info stored in files + description.append(desc) + ch_names.append(list()) + annot = Annotations(onset, duration, description, ch_names=ch_names) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + The NIRX machine records raw data as two different wavelengths. + The returned data interleaves the wavelengths. 
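+
+        For example, with wl1 rows (a1, a2) and wl2 rows (b1, b2) for
+        two source-detector pairs (hypothetical), the rows returned are
+        ordered (a1, b1, a2, b2), matching the channel name order.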
+ """ + sd_index = self._raw_extras[fi]['sd_index'] + + wls = list() + for key in ('wl1', 'wl2'): + d = _read_csv_rows_cols( + self._raw_extras[fi]['files'][key], + start, stop, sd_index, + self._raw_extras[fi]['bounds'][key]).T + nan_mask = self._raw_extras[fi]['nan_mask'].get(key, None) + if nan_mask is not None: + d[nan_mask[:, start:stop]] = np.nan + wls.append(d) + + # TODO: Make this more efficient by only indexing above what we need. + # For now let's just construct the full data matrix and index. + # Interleave wavelength 1 and 2 to match channel names: + this_data = np.zeros((len(wls[0]) * 2, stop - start)) + this_data[0::2, :] = wls[0] + this_data[1::2, :] = wls[1] + _mult_cal_one(data, this_data, idx, cals, mult) + return data + + +def _read_csv_rows_cols(fname, start, stop, cols, bounds, + sep=' ', replace=None): + with open(fname, 'rb') as fid: + fid.seek(bounds[start]) + args = list() + if bounds[1] is not None: + args.append(bounds[stop] - bounds[start]) + data = fid.read(*args).decode('latin-1') + if replace is not None: + data = replace(data) + x = np.fromstring(data, float, sep=sep) + x.shape = (stop - start, -1) + x = x[:, cols] + return x + + +def _convert_fnirs_to_head(trans, fro, to, src_locs, det_locs, ch_locs): + mri_head_t, _ = _get_trans(trans, fro, to) + src_locs = apply_trans(mri_head_t, src_locs) + det_locs = apply_trans(mri_head_t, det_locs) + ch_locs = apply_trans(mri_head_t, ch_locs) + return src_locs, det_locs, ch_locs, mri_head_t diff --git a/python/libs/mne/io/nirx/tests/__init__.py b/python/libs/mne/io/nirx/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/nirx/tests/test_nirx.py b/python/libs/mne/io/nirx/tests/test_nirx.py new file mode 100644 index 0000000..3e86979 --- /dev/null +++ b/python/libs/mne/io/nirx/tests/test_nirx.py @@ -0,0 +1,601 @@ +# -*- coding: utf-8 -*- +# Authors: Robert Luke +# Eric Larson +# simplified BSD-3 license + +import os.path as op +import shutil +import os +import datetime as dt +import numpy as np + +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from mne import pick_types +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_nirx, read_raw_snirf +from mne.utils import requires_h5py +from mne.io.tests.test_raw import _test_raw_reader +from mne.preprocessing import annotate_nan +from mne.transforms import apply_trans, _get_trans +from mne.preprocessing.nirs import source_detector_distances,\ + short_channels +from mne.io.constants import FIFF + +testing_path = data_path(download=False) +fname_nirx_15_0 = op.join( + testing_path, 'NIRx', 'nirscout', 'nirx_15_0_recording') +fname_nirx_15_2 = op.join( + testing_path, 'NIRx', 'nirscout', 'nirx_15_2_recording') +fname_nirx_15_2_short = op.join( + testing_path, 'NIRx', 'nirscout', 'nirx_15_2_recording_w_short') +fname_nirx_15_3_short = op.join( + testing_path, 'NIRx', 'nirscout', 'nirx_15_3_recording') + + +# This file has no saturated sections +nirsport1_wo_sat = op.join(testing_path, 'NIRx', 'nirsport_v1', + 'nirx_15_3_recording_wo_saturation') +# This file has saturation, but not on the optode pairing in montage +nirsport1_w_sat = op.join(testing_path, 'NIRx', 'nirsport_v1', + 'nirx_15_3_recording_w_saturation_' + 'not_on_montage_channels') +# This file has saturation in channels of interest +nirsport1_w_fullsat = op.join( + testing_path, 'NIRx', 'nirsport_v1', 'nirx_15_3_recording_w_' + 'saturation_on_montage_channels') + +# NIRSport2 device using Aurora 
software and matching snirf file +nirsport2 = op.join( + testing_path, 'NIRx', 'nirsport_v2', 'aurora_recording _w_short_and_acc') +nirsport2_snirf = op.join( + testing_path, 'SNIRF', 'NIRx', 'NIRSport2', '1.0.3', + '2021-05-05_001.snirf') + +nirsport2_2021_9 = op.join( + testing_path, 'NIRx', 'nirsport_v2', 'aurora_2021_9') +snirf_nirsport2_20219 = op.join( + testing_path, 'SNIRF', 'NIRx', 'NIRSport2', '2021.9', + '2021-10-01_002.snirf') + + +@requires_h5py +@requires_testing_data +@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@pytest.mark.parametrize('fname_nirx, fname_snirf', ( + [nirsport2, nirsport2_snirf], + [nirsport2_2021_9, snirf_nirsport2_20219], +)) +def test_nirsport_v2_matches_snirf(fname_nirx, fname_snirf): + """Test NIRSport2 raw files return same data as snirf.""" + raw = read_raw_nirx(fname_nirx, preload=True) + raw_snirf = read_raw_snirf(fname_snirf, preload=True) + + assert_allclose(raw._data, raw_snirf._data) + + # Check the timing of annotations match (naming is different) + assert_allclose(raw.annotations.onset, raw_snirf.annotations.onset) + + assert_array_equal(raw.ch_names, raw_snirf.ch_names) + + # This test fails as snirf encodes name incorrectly. + # assert raw.info["subject_info"]["first_name"] == + # raw_snirf.info["subject_info"]["first_name"] + + +@requires_testing_data +@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +def test_nirsport_v2(): + """Test NIRSport2 file.""" + raw = read_raw_nirx(nirsport2, preload=True) + assert raw._data.shape == (40, 128) + + # Test distance between optodes matches values from + # nirsite https://github.com/mne-tools/mne-testing-data/pull/86 + # figure 3 + allowed_distance_error = 0.005 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2][:14], + [0.0304, 0.0411, 0.008, 0.0400, 0.008, 0.0310, 0.0411, + 0.008, 0.0299, 0.008, 0.0370, 0.008, 0.0404, 0.008], + atol=allowed_distance_error) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on this page... + # https://github.com/mne-tools/mne-testing-data/pull/86 + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][2][3:5] == 'D6' + assert_allclose( + mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) + + assert raw.info['ch_names'][34][3:5] == 'D5' + assert_allclose( + mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) + + # Test location of sensors + # The locations of sensors can be seen in the second + # figure on this page... 
+ # https://github.com/mne-tools/mne-testing-data/pull/86 + locs = [ch['loc'][3:6] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][:2] == 'S1' + assert_allclose( + mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) + + assert raw.info['ch_names'][9][:2] == 'S2' + assert_allclose( + mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) + + assert raw.info['ch_names'][34][:2] == 'S8' + assert_allclose( + mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) + + assert len(raw.annotations) == 3 + assert raw.annotations.description[0] == '1.0' + assert raw.annotations.description[2] == '6.0' + # Lose tolerance as I am eyeballing the time differences on screen + assert_allclose( + np.diff(raw.annotations.onset), [2.3, 3.1], atol=0.1) + + mon = raw.get_montage() + assert len(mon.dig) == 43 + + +@requires_testing_data +@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +def test_nirsport_v1_wo_sat(): + """Test NIRSport1 file with no saturation.""" + raw = read_raw_nirx(nirsport1_wo_sat, preload=True) + + # Test data import + assert raw._data.shape == (26, 164) + assert raw.info['sfreq'] == 10.416667 + + # By default real data is returned + assert np.sum(np.isnan(raw.get_data())) == 0 + + raw = read_raw_nirx(nirsport1_wo_sat, preload=True, saturated='nan') + data = raw.get_data() + assert data.shape == (26, 164) + assert np.sum(np.isnan(data)) == 0 + + raw = read_raw_nirx(nirsport1_wo_sat, saturated='annotate') + data = raw.get_data() + assert data.shape == (26, 164) + assert np.sum(np.isnan(data)) == 0 + + +@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@requires_testing_data +def test_nirsport_v1_w_sat(): + """Test NIRSport1 file with NaNs but not in channel of interest.""" + raw = read_raw_nirx(nirsport1_w_sat) + + # Test data import + data = raw.get_data() + assert data.shape == (26, 176) + assert raw.info['sfreq'] == 10.416667 + assert np.sum(np.isnan(data)) == 0 + + raw = read_raw_nirx(nirsport1_w_sat, saturated='nan') + data = raw.get_data() + assert data.shape == (26, 176) + assert np.sum(np.isnan(data)) == 0 + + raw = read_raw_nirx(nirsport1_w_sat, saturated='annotate') + data = raw.get_data() + assert data.shape == (26, 176) + assert np.sum(np.isnan(data)) == 0 + + +@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@requires_testing_data +@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize('meas_date', (None, "orig")) +def test_nirsport_v1_w_bad_sat(preload, meas_date): + """Test NIRSport1 file with NaNs.""" + fname = nirsport1_w_fullsat + raw = read_raw_nirx(fname, preload=preload) + data = raw.get_data() + assert not np.isnan(data).any() + assert len(raw.annotations) == 5 + # annotated version and ignore should have same data but different annot + raw_ignore = read_raw_nirx(fname, saturated='ignore', preload=preload) + assert_allclose(raw_ignore.get_data(), data) + assert len(raw_ignore.annotations) == 2 + assert not any('NAN' in d for d in raw_ignore.annotations.description) + # nan version should not have same data, but we can give it the same annot + raw_nan = read_raw_nirx(fname, saturated='nan', preload=preload) + data_nan = raw_nan.get_data() + assert np.isnan(data_nan).any() + assert not np.allclose(raw_nan.get_data(), data) + raw_nan_annot = raw_ignore.copy() + if meas_date is None: + raw.set_meas_date(None) + raw_nan.set_meas_date(None) + 
raw_nan_annot.set_meas_date(None) + nan_annots = annotate_nan(raw_nan) + assert nan_annots.orig_time == raw_nan.info["meas_date"] + raw_nan_annot.set_annotations(nan_annots) + use_mask = np.where(raw.annotations.description == 'BAD_SATURATED') + for key in ('onset', 'duration'): + a = getattr(raw_nan_annot.annotations, key)[::2] # one ch in each + b = getattr(raw.annotations, key)[use_mask] # two chs in each + assert_allclose(a, b) + + +@requires_testing_data +def test_nirx_hdr_load(): + """Test reading NIRX files using path to header file.""" + fname = fname_nirx_15_2_short + "/NIRS-2019-08-23_001.hdr" + raw = read_raw_nirx(fname, preload=True) + + # Test data import + assert raw._data.shape == (26, 145) + assert raw.info['sfreq'] == 12.5 + + +@requires_testing_data +def test_nirx_missing_warn(): + """Test reading NIRX files when missing data.""" + with pytest.raises(FileNotFoundError, match='does not exist'): + read_raw_nirx(fname_nirx_15_2_short + "1", preload=True) + + +@requires_testing_data +def test_nirx_missing_evt(tmp_path): + """Test reading NIRX files when missing data.""" + shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") + os.rename(tmp_path / "data" / "NIRS-2019-08-23_001.evt", + tmp_path / "data" / "NIRS-2019-08-23_001.xxx") + fname = tmp_path / "data" / "NIRS-2019-08-23_001.hdr" + raw = read_raw_nirx(fname, preload=True) + assert raw.annotations.onset.shape == (0, ) + + +@requires_testing_data +def test_nirx_dat_warn(tmp_path): + """Test reading NIRX files when missing data.""" + shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") + os.rename(tmp_path / "data" / "NIRS-2019-08-23_001.dat", + tmp_path / "data" / "NIRS-2019-08-23_001.tmp") + fname = tmp_path / "data" / "NIRS-2019-08-23_001.hdr" + with pytest.raises(RuntimeWarning, match='A single dat'): + read_raw_nirx(fname, preload=True) + + +@requires_testing_data +def test_nirx_15_2_short(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2_short, preload=True) + + # Test data import + assert raw._data.shape == (26, 145) + assert raw.info['sfreq'] == 12.5 + assert raw.info['meas_date'] == dt.datetime(2019, 8, 23, 7, 37, 4, 540000, + tzinfo=dt.timezone.utc) + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", + "S1_D9 760", "S1_D9 850"] + assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="MNE", + middle_name="Test", + last_name="Recording", + birthday=(2014, 8, 23), + his_id="MNE_Test_Recording") + + # Test distance between optodes matches values from + # nirsite https://github.com/mne-tools/mne-testing-data/pull/51 + # step 4 figure 2 + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0304, 0.0078, 0.0310, 0.0086, 0.0416, + 0.0072, 0.0389, 0.0075, 0.0558, 0.0562, + 0.0561, 0.0565, 0.0077], atol=allowed_distance_error) + + # Test which channels are short + # These are the ones marked as red at + # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2 + is_short = short_channels(raw.info) + assert_array_equal(is_short[:9:2], [False, True, False, True, False]) + is_short = short_channels(raw.info, threshold=0.003) + assert_array_equal(is_short[:3:2], [False, False]) + is_short = short_channels(raw.info, threshold=50) + 
assert_array_equal(is_short[:3:2], [True, True]) + + # Test trigger events + assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0']) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on this page... + # https://github.com/mne-tools/mne-testing-data/pull/51 + # And have been manually copied below + # These values were reported in mm, but according to this page... + # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html + # 3d locations should be specified in meters, so that's what's tested below + # Detector locations are stored in the third three loc values + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][4][3:5] == 'D3' + assert_allclose( + mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + + assert raw.info['ch_names'][8][3:5] == 'D2' + assert_allclose( + mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + + assert raw.info['ch_names'][12][3:5] == 'D4' + assert_allclose( + mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + + assert raw.info['ch_names'][16][3:5] == 'D5' + assert_allclose( + mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + + assert raw.info['ch_names'][19][3:5] == 'D6' + assert_allclose( + mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) + + assert raw.info['ch_names'][21][3:5] == 'D7' + assert_allclose( + mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) + + +@requires_testing_data +def test_nirx_15_3_short(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_3_short, preload=True) + + # Test data import + assert raw._data.shape == (26, 220) + assert raw.info['sfreq'] == 12.5 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D2 760", "S1_D2 850", + "S1_D9 760", "S1_D9 850"] + assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + # Test info import + assert raw.info['subject_info'] == dict(birthday=(2020, 8, 18), + sex=0, + first_name="testMontage\\0A" + "TestMontage", + his_id="testMontage\\0A" + "TestMontage") + + # Test distance between optodes matches values from + # https://github.com/mne-tools/mne-testing-data/pull/72 + allowed_distance_error = 0.001 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0304, 0.0078, 0.0310, 0.0086, 0.0416, + 0.0072, 0.0389, 0.0075, 0.0558, 0.0562, + 0.0561, 0.0565, 0.0077], atol=allowed_distance_error) + + # Test which channels are short + # These are the ones marked as red at + # https://github.com/mne-tools/mne-testing-data/pull/72 + is_short = short_channels(raw.info) + assert_array_equal(is_short[:9:2], [False, True, False, True, False]) + is_short = short_channels(raw.info, threshold=0.003) + assert_array_equal(is_short[:3:2], [False, False]) + is_short = short_channels(raw.info, threshold=50) + assert_array_equal(is_short[:3:2], [True, True]) + + # Test trigger events + assert_array_equal(raw.annotations.description, ['4.0', '2.0', '1.0']) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on 
this page... + # https://github.com/mne-tools/mne-testing-data/pull/72 + # And have been manually copied below + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D2' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][4][3:5] == 'D1' + assert_allclose( + mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + + assert raw.info['ch_names'][8][3:5] == 'D3' + assert_allclose( + mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + + assert raw.info['ch_names'][12][3:5] == 'D4' + assert_allclose( + mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + + assert raw.info['ch_names'][16][3:5] == 'D5' + assert_allclose( + mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + + assert raw.info['ch_names'][19][3:5] == 'D6' + assert_allclose( + mni_locs[19], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) + + assert raw.info['ch_names'][21][3:5] == 'D7' + assert_allclose( + mni_locs[21], [-0.0394, -0.0483, 0.0928], atol=allowed_dist_error) + + +@requires_testing_data +def test_locale_encoding(tmp_path): + """Test NIRx encoding.""" + fname = tmp_path / 'latin' + shutil.copytree(fname_nirx_15_2, fname) + hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') + hdr = list() + with open(hdr_fname, 'rb') as fid: + hdr.extend(line for line in fid) + # French + hdr[2] = b'Date="jeu. 13 f\xe9vr. 2020"\r\n' + with open(hdr_fname, 'wb') as fid: + for line in hdr: + fid.write(line) + read_raw_nirx(fname, verbose='debug') + # German + hdr[2] = b'Date="mi 13 dez 2020"\r\n' + with open(hdr_fname, 'wb') as fid: + for line in hdr: + fid.write(line) + read_raw_nirx(fname, verbose='debug') + # Italian + hdr[2] = b'Date="ven 24 gen 2020"\r\n' + hdr[3] = b'Time="10:57:41.454"\r\n' + with open(hdr_fname, 'wb') as fid: + for line in hdr: + fid.write(line) + raw = read_raw_nirx(fname, verbose='debug') + want_dt = dt.datetime( + 2020, 1, 24, 10, 57, 41, 454000, tzinfo=dt.timezone.utc) + assert raw.info['meas_date'] == want_dt + + +@requires_testing_data +def test_nirx_15_2(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2, preload=True) + + # Test data import + assert raw._data.shape == (64, 67) + assert raw.info['sfreq'] == 3.90625 + assert raw.info['meas_date'] == dt.datetime(2019, 10, 2, 9, 8, 47, 511000, + tzinfo=dt.timezone.utc) + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", + "S1_D10 760", "S1_D10 850"] + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="TestRecording", + birthday=(1989, 10, 2), + his_id="TestRecording") + + # Test trigger events + assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0']) + print(raw.annotations.onset) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D4' + assert_allclose( + mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) + + # Old name aliases for backward compat + assert 'fnirs_cw_amplitude' in raw + with 
pytest.raises(ValueError, match='Invalid value'): + 'fnirs_raw' in raw + assert 'fnirs_od' not in raw + picks = pick_types(raw.info, fnirs='fnirs_cw_amplitude') + assert len(picks) > 0 + + +@requires_testing_data +def test_nirx_15_0(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_0, preload=True) + + # Test data import + assert raw._data.shape == (20, 92) + assert raw.info['sfreq'] == 6.25 + assert raw.info['meas_date'] == dt.datetime(2019, 10, 27, 13, 53, 34, + 209000, + tzinfo=dt.timezone.utc) + + # Test channel naming + assert raw.info['ch_names'][:12] == ["S1_D1 760", "S1_D1 850", + "S2_D2 760", "S2_D2 850", + "S3_D3 760", "S3_D3 850", + "S4_D4 760", "S4_D4 850", + "S5_D5 760", "S5_D5 850", + "S6_D6 760", "S6_D6 850"] + + # Test info import + assert raw.info['subject_info'] == {'birthday': (2004, 10, 27), + 'first_name': 'NIRX', + 'last_name': 'Test', + 'sex': FIFF.FIFFV_SUBJ_SEX_UNKNOWN, + 'his_id': "NIRX_Test"} + + # Test trigger events + assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0']) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D8' + assert_allclose( + mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) + + # Test distance between optodes matches values from + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0301, 0.0315, 0.0343, 0.0368, 0.0408, + 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error) + + +@requires_testing_data +@pytest.mark.parametrize('fname, boundary_decimal', ( + [fname_nirx_15_2_short, 1], + [fname_nirx_15_2, 0], + [fname_nirx_15_2, 0], + [nirsport2_2021_9, 0], +)) +def test_nirx_standard(fname, boundary_decimal): + """Test standard operations.""" + _test_raw_reader(read_raw_nirx, fname=fname, + boundary_decimal=boundary_decimal) # low fs diff --git a/python/libs/mne/io/open.py b/python/libs/mne/io/open.py new file mode 100644 index 0000000..11f00a2 --- /dev/null +++ b/python/libs/mne/io/open.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +import os.path as op +from io import BytesIO, SEEK_SET +from gzip import GzipFile + +import numpy as np + +from .tag import read_tag_info, read_tag, Tag, _call_dict_names +from .tree import make_dir_tree, dir_tree_find +from .constants import FIFF +from ..utils import logger, verbose, _file_like, warn + + +class _NoCloseRead(object): + """Create a wrapper that will not close when used as a context manager.""" + + def __init__(self, fid): + self.fid = fid + + def __enter__(self): + return self.fid + + def __exit__(self, type_, value, traceback): + return + + def close(self): + return + + def seek(self, offset, whence=SEEK_SET): + return self.fid.seek(offset, whence) + + def read(self, size=-1): + return self.fid.read(size) + + +def _fiff_get_fid(fname): + """Open a FIF file with no additional parsing.""" + if _file_like(fname): + fid = _NoCloseRead(fname) + fid.seek(0) + else: + fname = str(fname) + if op.splitext(fname)[1].lower() == '.gz': + logger.debug('Using gzip') + fid = GzipFile(fname, "rb") # Open in binary mode + else: + logger.debug('Using 
normal I/O') + fid = open(fname, "rb") # Open in binary mode + return fid + + +def _get_next_fname(fid, fname, tree): + """Get the next filename in split files.""" + nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF) + next_fname = None + for nodes in nodes_list: + next_fname = None + for ent in nodes['directory']: + if ent.kind == FIFF.FIFF_REF_ROLE: + tag = read_tag(fid, ent.pos) + role = int(tag.data) + if role != FIFF.FIFFV_ROLE_NEXT_FILE: + next_fname = None + break + if ent.kind == FIFF.FIFF_REF_FILE_NAME: + tag = read_tag(fid, ent.pos) + next_fname = op.join(op.dirname(fname), tag.data) + if ent.kind == FIFF.FIFF_REF_FILE_NUM: + # Some files don't have the name, just the number. So + # we construct the name from the current name. + if next_fname is not None: + continue + next_num = read_tag(fid, ent.pos).data + path, base = op.split(fname) + idx = base.find('.') + idx2 = base.rfind('-') + num_str = base[idx2 + 1:idx] + if not num_str.isdigit(): + idx2 = -1 + + if idx2 < 0 and next_num == 1: + # this is the first file, which may not be numbered + next_fname = op.join( + path, '%s-%d.%s' % (base[:idx], next_num, + base[idx + 1:])) + continue + + next_fname = op.join(path, '%s-%d.%s' + % (base[:idx2], next_num, base[idx + 1:])) + if next_fname is not None: + break + return next_fname + + +@verbose +def fiff_open(fname, preload=False, verbose=None): + """Open a FIF file. + + Parameters + ---------- + fname : str | fid + Name of the fif file, or an opened file (will seek back to 0). + preload : bool + If True, all data from the file is read into a memory buffer. This + requires more memory, but can be faster for I/O operations that require + frequent seeks. + %(verbose)s + + Returns + ------- + fid : file + The file descriptor of the open file. + tree : fif tree + The tree is a complex structure filled with dictionaries, + lists and tags. + directory : list + A list of tags. + """ + fid = _fiff_get_fid(fname) + try: + return _fiff_open(fname, fid, preload) + except Exception: + fid.close() + raise + + +def _fiff_open(fname, fid, preload): + # do preloading of entire file + if preload: + # note that StringIO objects instantiated this way are read-only, + # but that's okay here since we are using mode "rb" anyway + with fid as fid_old: + fid = BytesIO(fid_old.read()) + + tag = read_tag_info(fid) + + # Check that this looks like a fif file + if tag.kind != FIFF.FIFF_FILE_ID: + raise ValueError('file does not start with a file id tag') + + if tag.type != FIFF.FIFFT_ID_STRUCT: + raise ValueError('file does not start with a file id tag') + + if tag.size != 20: + raise ValueError('file does not start with a file id tag') + + tag = read_tag(fid) + + if tag.kind != FIFF.FIFF_DIR_POINTER: + raise ValueError('file does not have a directory pointer') + + # Read or create the directory tree + logger.debug(' Creating tag directory for %s...' % fname) + + dirpos = int(tag.data) + read_slow = True + if dirpos > 0: + dir_tag = read_tag(fid, dirpos) + if dir_tag is None: + warn(f'FIF tag directory missing at the end of the file, possibly ' + f'corrupted file: {fname}') + else: + directory = dir_tag.data + read_slow = False + if read_slow: + fid.seek(0, 0) + directory = list() + while tag.next >= 0: + pos = fid.tell() + tag = read_tag_info(fid) + if tag is None: + break # HACK : to fix file ending with empty tag... 
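+            # (tag.pos records the offset where each tag starts; the
+            # scan ends once a tag reports a negative 'next' pointer)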
+ else: + tag.pos = pos + directory.append(tag) + + tree, _ = make_dir_tree(fid, directory) + + logger.debug('[done]') + + # Back to the beginning + fid.seek(0) + + return fid, tree, directory + + +@verbose +def show_fiff(fname, indent=' ', read_limit=np.inf, max_str=30, + output=str, tag=None, verbose=None): + """Show FIFF information. + + This function is similar to mne_show_fiff. + + Parameters + ---------- + fname : str + Filename to evaluate. + indent : str + How to indent the lines. + read_limit : int + Max number of bytes of data to read from a tag. Can be np.inf + to always read all data (helps test read completion). + max_str : int + Max number of characters of string representation to print for + each tag's data. + output : type + Either str or list. str is a convenience output for printing. + tag : int | None + Provide information about this tag. If None (default), all information + is shown. + %(verbose)s + + Returns + ------- + contents : str + The contents of the file. + """ + if output not in [list, str]: + raise ValueError('output must be list or str') + if isinstance(tag, str): # command mne show_fiff passes string + tag = int(tag) + f, tree, directory = fiff_open(fname) + # This gets set to 0 (unknown) by fiff_open, but FIFFB_ROOT probably + # makes more sense for display + tree['block'] = FIFF.FIFFB_ROOT + with f as fid: + out = _show_tree(fid, tree, indent=indent, level=0, + read_limit=read_limit, max_str=max_str, tag_id=tag) + if output == str: + out = '\n'.join(out) + return out + + +def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']): + """Find matching values.""" + value = int(value) + vals = [k for k, v in FIFF.items() + if v == value and any(fmt in k for fmt in fmts) and + not any(exc in k for exc in exclude)] + if len(vals) == 0: + vals = ['???'] + return vals + + +def _show_tree(fid, tree, indent, level, read_limit, max_str, tag_id): + """Show FIFF tree.""" + from scipy import sparse + this_idt = indent * level + next_idt = indent * (level + 1) + # print block-level information + out = [this_idt + str(int(tree['block'])) + ' = ' + + '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))] + tag_found = False + if tag_id is None or out[0].strip().startswith(str(tag_id)): + tag_found = True + + if tree['directory'] is not None: + kinds = [ent.kind for ent in tree['directory']] + [-1] + types = [ent.type for ent in tree['directory']] + sizes = [ent.size for ent in tree['directory']] + poss = [ent.pos for ent in tree['directory']] + counter = 0 + good = True + for k, kn, size, pos, type_ in zip(kinds[:-1], kinds[1:], sizes, poss, + types): + if not tag_found and k != tag_id: + continue + tag = Tag(k, size, 0, pos) + if read_limit is None or size <= read_limit: + try: + tag = read_tag(fid, pos) + except Exception: + good = False + + if kn == k: + # don't print if the next item is the same type (count 'em) + counter += 1 + else: + # find the tag type + this_type = _find_type(k, fmts=['FIFF_']) + # prepend a count if necessary + prepend = 'x' + str(counter + 1) + ': ' if counter > 0 else '' + postpend = '' + # print tag data nicely + if tag.data is not None: + postpend = ' = ' + str(tag.data)[:max_str] + if isinstance(tag.data, np.ndarray): + if tag.data.size > 1: + postpend += ' ... array size=' + str(tag.data.size) + elif isinstance(tag.data, dict): + postpend += ' ... dict len=' + str(len(tag.data)) + elif isinstance(tag.data, str): + postpend += ' ... str len=' + str(len(tag.data)) + elif isinstance(tag.data, (list, tuple)): + postpend += ' ... 
list len=' + str(len(tag.data)) + elif sparse.issparse(tag.data): + postpend += (' ... sparse (%s) shape=%s' + % (tag.data.getformat(), tag.data.shape)) + else: + postpend += ' ... type=' + str(type(tag.data)) + postpend = '>' * 20 + 'BAD' if not good else postpend + type_ = _call_dict_names.get(type_, '?%s?' % (type_,)) + out += [next_idt + prepend + str(k) + ' = ' + + '/'.join(this_type) + + ' (' + str(size) + 'b %s)' % type_ + + postpend] + out[-1] = out[-1].replace('\n', u'¶') + counter = 0 + good = True + if tag_id in kinds: + tag_found = True + if not tag_found: + out = [''] + level = -1 # removes extra indent + # deal with children + for branch in tree['children']: + out += _show_tree(fid, branch, indent, level + 1, read_limit, max_str, + tag_id) + return out diff --git a/python/libs/mne/io/persyst/__init__.py b/python/libs/mne/io/persyst/__init__.py new file mode 100644 index 0000000..cef562f --- /dev/null +++ b/python/libs/mne/io/persyst/__init__.py @@ -0,0 +1,7 @@ +"""Persyst module for conversion to FIF.""" + +# Author: Adam Li +# +# License: BSD-3-Clause + +from .persyst import read_raw_persyst diff --git a/python/libs/mne/io/persyst/persyst.py b/python/libs/mne/io/persyst/persyst.py new file mode 100644 index 0000000..440e4bb --- /dev/null +++ b/python/libs/mne/io/persyst/persyst.py @@ -0,0 +1,463 @@ +# Authors: Adam Li +# +# License: BSD-3-Clause +import os +import os.path as op +from collections import OrderedDict +from datetime import datetime, timezone + +import numpy as np + +from ..base import BaseRaw +from ..constants import FIFF +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...annotations import Annotations +from ...utils import logger, verbose, fill_doc, warn, _check_fname + + +@fill_doc +def read_raw_persyst(fname, preload=False, verbose=None): + """Reader for a Persyst (.lay/.dat) recording. + + Parameters + ---------- + fname : str + Path to the Persyst header (.lay) file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawPersyst + A Raw object containing Persyst data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + It is assumed that the ``.lay`` and ``.dat`` file + are in the same directory. To get the correct file path to the + ``.dat`` file, ``read_raw_persyst`` will get the corresponding dat + filename from the lay file, and look for that file inside the same + directory as the lay file. + """ + return RawPersyst(fname, preload, verbose) + + +@fill_doc +class RawPersyst(BaseRaw): + """Raw object from a Persyst file. + + Parameters + ---------- + fname : str + Path to the Persyst header (.lay) file. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
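For illustration, a minimal usage sketch of the Persyst reader vendored here (the path is hypothetical; per the Notes above, the ``.dat`` file must sit in the same directory as the ``.lay`` file):

    import mne

    raw = mne.io.read_raw_persyst('recording.lay', preload=True)  # hypothetical path
    print(raw.info['sfreq'], len(raw.ch_names))
    print(raw.annotations)  # Persyst comments surface as mne.Annotations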
+ """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') + logger.info('Loading %s' % fname) + + # make sure filename is the Lay file + if not fname.endswith('.lay'): + fname = fname + '.lay' + # get the current directory and Lay filename + curr_path, lay_fname = op.dirname(fname), op.basename(fname) + if not op.exists(fname): + raise FileNotFoundError(f'The path you specified, ' + f'"{lay_fname}",does not exist.') + + # sections and subsections currently unused + keys, data, sections = _read_lay_contents(fname) + + # these are the section headers in the Persyst file layout + # Note: We do not make use of "SampleTimes" yet + fileinfo_dict = OrderedDict() + channelmap_dict = OrderedDict() + patient_dict = OrderedDict() + comments_dict = OrderedDict() + + # keep track of total number of comments + num_comments = 0 + + # loop through each line in the lay file + for key, val, section in zip(keys, data, sections): + if key == '': + continue + + # Make sure key are lowercase for everything, but electrodes. + # We also do not want to lower-case comments because those + # are free-form text where casing may matter. + if key is not None and section not in ['channelmap', + 'comments']: + key = key.lower() + + # FileInfo + if section == 'fileinfo': + # extract the .dat file name + if key == 'file': + dat_fname = op.basename(val) + dat_fpath = op.join(curr_path, op.basename(dat_fname)) + + # determine if .dat file exists where it should + error_msg = f'The data path you specified ' \ + f'does not exist for the lay path, ' \ + f'{lay_fname}. Make sure the dat file ' \ + f'is in the same directory as the lay ' \ + f'file, and the specified dat filename ' \ + f'matches.' + if not op.exists(dat_fpath): + raise FileNotFoundError(error_msg) + fileinfo_dict[key] = val + # ChannelMap + elif section == 'channelmap': + # channel map has = for = + channelmap_dict[key] = val + # Patient (All optional) + elif section == 'patient': + patient_dict[key] = val + # Comments (turned into mne.Annotations) + elif section == 'comments': + comments_dict[key] = comments_dict.get(key, list()) + [val] + num_comments += 1 + + # get numerical metadata + # datatype is either 7 for 32 bit, or 0 for 16 bit + datatype = fileinfo_dict.get('datatype') + cal = float(fileinfo_dict.get('calibration')) + n_chs = int(fileinfo_dict.get('waveformcount')) + + # Store subject information from lay file in mne format + # Note: Persyst also records "Physician", "Technician", + # "Medications", "History", and "Comments1" and "Comments2" + # and this information is currently discarded + subject_info = _get_subjectinfo(patient_dict) + + # set measurement date + testdate = patient_dict.get('testdate') + if testdate is not None: + # TODO: Persyst may change its internal date schemas + # without notice + # These are the 3 "so far" possible datatime storage + # formats in Persyst .lay + if '/' in testdate: + testdate = datetime.strptime(testdate, '%m/%d/%Y') + elif '-' in testdate: + testdate = datetime.strptime(testdate, '%d-%m-%Y') + elif '.' in testdate: + testdate = datetime.strptime(testdate, '%Y.%m.%d') + + if not isinstance(testdate, datetime): + warn('Cannot read in the measurement date due ' + 'to incompatible format. 
Please set manually ' + 'for %s ' % lay_fname) + meas_date = None + else: + testtime = datetime.strptime(patient_dict.get('testtime'), + '%H:%M:%S') + meas_date = datetime( + year=testdate.year, month=testdate.month, + day=testdate.day, hour=testtime.hour, + minute=testtime.minute, second=testtime.second, + tzinfo=timezone.utc) + + # Create mne structure + ch_names = list(channelmap_dict.keys()) + if n_chs != len(ch_names): + raise RuntimeError('Channels in lay file do not ' + 'match the number of channels ' + 'in the .dat file.') # noqa + # get rid of the "-Ref" in channel names + ch_names = [ch.upper().split('-REF')[0] for ch in ch_names] + + # get the sampling rate and default channel types to EEG + sfreq = fileinfo_dict.get('samplingrate') + ch_types = 'eeg' + info = create_info(ch_names, sfreq, ch_types=ch_types) + info.update(subject_info=subject_info) + with info._unlock(): + for idx in range(n_chs): + # calibration brings to uV then 1e-6 brings to V + info['chs'][idx]['cal'] = cal * 1.0e-6 + info['meas_date'] = meas_date + + # determine number of samples in file + # Note: We do not use the lay file to do this + # because clips in time may be generated by Persyst that + # DO NOT modify the "SampleTimes" section + with open(dat_fpath, 'rb') as f: + # determine the precision + if int(datatype) == 7: + # 32 bit + dtype = np.dtype('i4') + elif int(datatype) == 0: + # 16 bit + dtype = np.dtype('i2') + else: + raise RuntimeError(f'Unknown format: {datatype}') + + # allow offset to occur + f.seek(0, os.SEEK_END) + n_samples = f.tell() + n_samples = n_samples // (dtype.itemsize * n_chs) + + logger.debug(f'Loaded {n_samples} samples ' + f'for {n_chs} channels.') + + raw_extras = { + 'dtype': dtype, + 'n_chs': n_chs, + 'n_samples': n_samples + } + # create Raw object + super(RawPersyst, self).__init__( + info, preload, filenames=[dat_fpath], + last_samps=[n_samples - 1], + raw_extras=[raw_extras], verbose=verbose) + + # set annotations based on the comments read in + onset = np.zeros(num_comments, float) + duration = np.zeros(num_comments, float) + description = [''] * num_comments + + # loop through comments dictionary, which may contain + # multiple events for the same "text" annotation + t_idx = 0 + for _description, event_tuples in comments_dict.items(): + for (_onset, _duration) in event_tuples: + # extract the onset, duration, description to + # create an Annotations object + onset[t_idx] = _onset + duration[t_idx] = _duration + description[t_idx] = _description + t_idx += 1 + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + The Persyst software records raw data in either 16 or 32 bit + binary files. In addition, it stores the calibration to convert + data to uV in the lay file. 
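The scaling described above in one line: raw 16- or 32-bit integer counts are multiplied by the lay-file calibration (µV per count) and by 1e-6 to land in volts. A self-contained sketch with made-up numbers:

    import numpy as np

    counts = np.array([100, -250, 4000], dtype=np.int16)  # made-up samples
    calibration = 0.088  # hypothetical uV-per-count value from a .lay file
    volts = counts.astype(np.float32) * calibration * 1e-6
    print(volts)  # what ends up in the Raw object, in volts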
+ """ + dtype = self._raw_extras[fi]['dtype'] + n_chs = self._raw_extras[fi]['n_chs'] + dat_fname = self._filenames[fi] + + # compute samples count based on start and stop + time_length_samps = stop - start + + # read data from .dat file into array of correct size, then calibrate + # records = recnum rows x inf columns + count = time_length_samps * n_chs + + # seek the dat file + with open(dat_fname, 'rb') as dat_file_ID: + # allow offset to occur + dat_file_ID.seek(n_chs * dtype.itemsize * start, 1) + + # read in the actual record starting at possibly offset + record = np.fromfile(dat_file_ID, dtype=dtype, + count=count) + + # chs * rows + # cast as float32; more than enough precision + record = np.reshape(record, (n_chs, -1), 'F').astype(np.float32) + + # calibrate to convert to V and handle mult + _mult_cal_one(data, record, idx, cals, mult) + + +def _get_subjectinfo(patient_dict): + # attempt to parse out the birthdate, but if it doesn't + # meet spec, then it will set to None + birthdate = patient_dict.get('birthdate') + if '/' in birthdate: + try: + birthdate = datetime.strptime(birthdate, '%m/%d/%y') + except ValueError: + birthdate = None + print('Unable to process birthdate of %s ' % birthdate) + elif '-' in birthdate: + try: + birthdate = datetime.strptime(birthdate, '%d-%m-%y') + except ValueError: + birthdate = None + print('Unable to process birthdate of %s ' % birthdate) + + subject_info = { + 'first_name': patient_dict.get('first'), + 'middle_name': patient_dict.get('middle'), + 'last_name': patient_dict.get('last'), + 'sex': patient_dict.get('sex'), + 'hand': patient_dict.get('hand'), + 'his_id': patient_dict.get('id'), + 'birthday': birthdate, + } + + # Recode sex values + sex_dict = dict( + m=FIFF.FIFFV_SUBJ_SEX_MALE, + male=FIFF.FIFFV_SUBJ_SEX_MALE, + f=FIFF.FIFFV_SUBJ_SEX_FEMALE, + female=FIFF.FIFFV_SUBJ_SEX_FEMALE, + ) + subject_info['sex'] = sex_dict.get(subject_info['sex'], + FIFF.FIFFV_SUBJ_SEX_UNKNOWN) + + # Recode hand values + hand_dict = dict( + r=FIFF.FIFFV_SUBJ_HAND_RIGHT, + right=FIFF.FIFFV_SUBJ_HAND_RIGHT, + l=FIFF.FIFFV_SUBJ_HAND_LEFT, + left=FIFF.FIFFV_SUBJ_HAND_LEFT, + a=FIFF.FIFFV_SUBJ_HAND_AMBI, + ambidextrous=FIFF.FIFFV_SUBJ_HAND_AMBI, + ambi=FIFF.FIFFV_SUBJ_HAND_AMBI, + ) + # no handedness is set when unknown + try: + subject_info['hand'] = hand_dict[subject_info['hand']] + except KeyError: + subject_info.pop('hand') + + return subject_info + + +def _read_lay_contents(fname): + """Lay file are laid out like a INI file.""" + # keep track of sections, keys and data + sections = [] + keys, data = [], [] + + # initialize all section to empty str + section = '' + with open(fname, 'r') as fin: + for line in fin: + # break a line into a status, key and value + status, key, val = _process_lay_line(line, section) + + # handle keys and values if they are + # Section, Subsections, or Line items + if status == 1: # Section was found + section = val.lower() + continue + + # keep track of all sections, subsections, + # keys and the data of the file + sections.append(section) + data.append(val) + keys.append(key) + + return keys, data, sections + + +def _process_lay_line(line, section): + """Process a line read from the Lay (INI) file. + + Each line in the .lay file will be processed + into a structured ``status``, ``key`` and ``value``. + + Parameters + ---------- + line : str + The actual line in the Lay file. + section : str + The section in the Lay file. + + Returns + ------- + status : int + Returns the following integers based on status. 
+        -1 => unknown string found
+         0 => empty line found
+         1 => section found
+         2 => key-value pair found
+    key : str
+        The string before the ``'='`` character. If section is "Comments",
+        then returns the text comment description.
+    value : str
+        The string from the line after the ``'='`` character. If section is
+        "Comments", then returns the onset and duration as a tuple.
+
+    Notes
+    -----
+    The lay file consists of multiple "sections" that are delimited by
+    bracket ``[]`` characters. For example, ``[FileInfo]`` and the lines
+    after it hold metadata about the data file itself. Within
+    each section, there are multiple lines in the format of
+    ``<key>=<value>``.
+
+    For ``FileInfo``, ``Patient`` and ``ChannelMap``,
+    each line is denoted by a ``key`` and a ``value`` that
+    can be represented as a dictionary. The keys describe what sort
+    of data the line holds, while the values contain the corresponding
+    value.
+
+    For ``SampleTimes``, the ``key`` and ``value`` pair indicate the
+    start and end time in seconds of the original data file.
+
+    The ``Comments`` section holds the annotations users made on the
+    data in Persyst. Each annotation is represented as 5 ``,``-delimited
+    data points, ordered as:
+
+    1. time (in seconds) of the annotation
+    2. duration (in seconds) of the annotation
+    3. state (unused)
+    4. variable type (unused)
+    5. free-form text describing the annotation
+    """
+    key = ''  # default; ``key`` is the only return value that may go unset
+    line = line.strip()  # remove leading and trailing spaces
+    end_idx = len(line) - 1  # get the last index of the line
+
+    # an empty sequence evaluates to false
+    if not line:
+        status = 0
+        key = ''
+        value = ''
+        return status, key, value
+    # section found
+    elif (line[0] == '[') and (line[end_idx] == ']') \
+            and (end_idx + 1 >= 3):
+        status = 1
+        value = line[1:end_idx].lower()
+    # key found
+    else:
+        # handle the Comments section differently from all other sections
+        # TODO: utilize state and var_type in code; currently not used
+        if section == 'comments':
+            # Persyst comments output 5 variables separated by ","
+            time_sec, duration, state, var_type, text = line.split(',', 4)
+            status = 2
+            key = text
+            value = (time_sec, duration)
+        # all other sections
+        else:
+            if '=' not in line:
+                raise RuntimeError('The line %s does not conform '
+                                   'to the standards. Please check the '
+                                   '.lay file.'
+                                   % line)  # noqa
+            pos = line.index('=')
+            status = 2
+
+            # the line is now composed of a
+            # <key>=<value> pair
+            key = line[0:pos].strip()
+            value = line[pos + 1:end_idx + 1].strip()
+    return status, key, value
diff --git a/python/libs/mne/io/persyst/tests/__init__.py b/python/libs/mne/io/persyst/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/python/libs/mne/io/persyst/tests/test_persyst.py b/python/libs/mne/io/persyst/tests/test_persyst.py
new file mode 100644
index 0000000..5fa2187
--- /dev/null
+++ b/python/libs/mne/io/persyst/tests/test_persyst.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+# Authors: Adam Li
+#
+# License: BSD-3-Clause
+
+import os
+import os.path as op
+import shutil
+
+import pytest
+from numpy.testing import assert_array_equal
+import numpy as np
+
+from mne.datasets.testing import data_path, requires_testing_data
+from mne.io import read_raw_persyst
+from mne.io.tests.test_raw import _test_raw_reader
+
+testing_path = data_path(download=False)
+fname_lay = op.join(
+    testing_path, 'Persyst',
+    'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay')
+fname_dat = op.join(
+    testing_path, 'Persyst',
+    'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.dat')
+
+
+@requires_testing_data
+def test_persyst_lay_load():
+    """Test reading Persyst files using path to header file."""
+    raw = read_raw_persyst(fname_lay, preload=False)
+
+    # Test data import
+    assert raw.info['sfreq'] == 200
+    assert raw.preload is False
+
+    # load raw data
+    raw.load_data()
+    assert raw._data.shape == (83, 847)
+    assert raw.preload is True
+
+    # defaults channels to EEG
+    raw = raw.pick_types(eeg=True)
+    assert len(raw.ch_names) == 83
+
+    # no "-Ref" in channel names
+    assert all(['-ref' not in ch.lower()
+                for ch in raw.ch_names])
+
+    # test with preload True
+    raw = read_raw_persyst(fname_lay, preload=True)
+
+
+@requires_testing_data
+def test_persyst_raw():
+    """Test reading Persyst files using path to header file."""
+    raw = read_raw_persyst(fname_lay, preload=False)
+
+    # defaults channels to EEG
+    raw = raw.pick_types(eeg=True)
+
+    # get data
+    data, times = raw.get_data(start=200, return_times=True)
+    assert data.shape == (83, 647)
+
+    # seconds should match up to what is in the file
+    assert times.min() == 1.0
+    assert times.max() == 4.23
+
+    # get data
+    data = raw.get_data(start=200, stop=400)
+    assert data.shape == (83, 200)
+
+    # data should have been set correctly
+    assert data.min() != 0 and data.max() != 0
+
+    first_ch_data = raw.get_data(picks=[0], start=200, stop=400)
+    assert_array_equal(first_ch_data.squeeze(), data[0, :])
+
+
+@requires_testing_data
+def test_persyst_dates(tmp_path):
+    """Test different Persyst date formats for meas date."""
+    # now test what if you change contents of the lay file
+    out_dir = str(tmp_path)
+    new_fname_lay = op.join(out_dir, op.basename(fname_lay))
+    new_fname_dat = op.join(out_dir, op.basename(fname_dat))
+    shutil.copy(fname_dat, new_fname_dat)
+
+    # reformat the lay file to have testdate with
+    # "/" character
+    with open(fname_lay, "r") as fin:
+        with open(new_fname_lay, 'w') as fout:
+            # for each line in the input file
+            for idx, line in enumerate(fin):
+                if line.startswith('TestDate'):
+                    line = 'TestDate=01/23/2000\n'
+                fout.write(line)
+    # file should update correctly with datetime
+    raw = read_raw_persyst(new_fname_lay)
+    assert raw.info['meas_date'].month == 1
+    assert raw.info['meas_date'].day == 23
+    assert raw.info['meas_date'].year == 2000
+
+    # reformat the lay file to have testdate with
+    #
"-" character + os.remove(new_fname_lay) + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if line.startswith('TestDate'): + line = 'TestDate=24-01-2000\n' + fout.write(line) + # file should update correctly with datetime + raw = read_raw_persyst(new_fname_lay) + assert raw.info['meas_date'].month == 1 + assert raw.info['meas_date'].day == 24 + assert raw.info['meas_date'].year == 2000 + + +@requires_testing_data +def test_persyst_wrong_file(tmp_path): + """Test reading Persyst files when passed in wrong file path.""" + with pytest.raises(FileNotFoundError, match='The path you'): + read_raw_persyst(fname_dat, preload=True) + + out_dir = str(tmp_path) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_lay, new_fname_lay) + + # without a .dat file, reader should break + desired_err_msg = \ + 'The data path you specified does ' \ + 'not exist for the lay path, ' \ + 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay' + with pytest.raises(FileNotFoundError, match=desired_err_msg): + read_raw_persyst(new_fname_lay, preload=True) + + # once you copy over the .dat file things should work + shutil.copy(fname_dat, new_fname_dat) + read_raw_persyst(new_fname_lay, preload=True) + + +@requires_testing_data +def test_persyst_moved_file(tmp_path): + """Test reader - Persyst files need to be in same directory.""" + out_dir = str(tmp_path) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_lay, new_fname_lay) + + # original file read should work + read_raw_persyst(fname_lay) + + # without a .dat file, reader should break + # when the lay file was moved + desired_err_msg = \ + 'The data path you specified does ' \ + 'not exist for the lay path, ' \ + 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay' + with pytest.raises(FileNotFoundError, match=desired_err_msg): + read_raw_persyst(new_fname_lay, preload=True) + + # now change the file contents to point + # to the full path, but it should still not work + # as reader requires lay and dat file to be in + # same directory + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if line.startswith('File='): + # give it the full path to the old data + test_fpath = op.join(op.dirname(fname_dat), + line.split('=')[1]) + line = f'File={test_fpath}\n' + fout.write(line) + with pytest.raises(FileNotFoundError, match=desired_err_msg): + read_raw_persyst(new_fname_lay, preload=True) + + # once we copy the dat file to the same directory, reader + # should work + shutil.copy(fname_dat, new_fname_dat) + read_raw_persyst(new_fname_lay, preload=True) + + +@requires_testing_data +def test_persyst_standard(): + """Test standard operations.""" + _test_raw_reader(read_raw_persyst, fname=fname_lay) + + +@requires_testing_data +def test_persyst_annotations(tmp_path): + """Test annotations reading in Persyst.""" + new_fname_lay = tmp_path / op.basename(fname_lay) + new_fname_dat = tmp_path / op.basename(fname_dat) + shutil.copy(fname_dat, new_fname_dat) + shutil.copy(fname_lay, new_fname_lay) + + raw = read_raw_persyst(new_fname_lay) + raw.crop(tmin=0, tmax=4) + + # get the annotations and make sure that repeated annotations + # are in the dataset + annotations = raw.annotations + assert 
np.count_nonzero(annotations.description == 'seizure') == 2 + + # make sure annotation with a "," character is in there + assert 'seizure1,2' in annotations.description + assert 'CLip2' in annotations.description + + +@requires_testing_data +def test_persyst_errors(tmp_path): + """Test reading Persyst files when passed in wrong file path.""" + out_dir = str(tmp_path) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_dat, new_fname_dat) + + # reformat the lay file + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if idx == 1: + line = line.replace('=', ',') + fout.write(line) + # file should break + with pytest.raises(RuntimeError, match='The line'): + read_raw_persyst(new_fname_lay) + + # reformat the lay file + os.remove(new_fname_lay) + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if line.startswith('WaveformCount'): + line = 'WaveformCount=1\n' + fout.write(line) + # file should break + with pytest.raises(RuntimeError, match='Channels in lay ' + 'file do not'): + read_raw_persyst(new_fname_lay) + + # reformat the lay file to have testdate + # improperly specified + os.remove(new_fname_lay) + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if line.startswith('TestDate'): + line = 'TestDate=Jan 23rd 2000\n' + fout.write(line) + # file should not read in meas date + with pytest.warns(RuntimeWarning, + match='Cannot read in the measurement date'): + raw = read_raw_persyst(new_fname_lay) + assert raw.info['meas_date'] is None diff --git a/python/libs/mne/io/pick.py b/python/libs/mne/io/pick.py new file mode 100644 index 0000000..845b092 --- /dev/null +++ b/python/libs/mne/io/pick.py @@ -0,0 +1,1207 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Martin Luessi +# +# License: BSD-3-Clause + +from copy import deepcopy +import re + +import numpy as np + +from .constants import FIFF +from ..utils import (logger, verbose, _validate_type, fill_doc, _ensure_int, + _check_option, warn) + + +def get_channel_type_constants(include_defaults=False): + """Return all known channel types, and associated FIFF constants. + + Parameters + ---------- + include_defaults : bool + Whether to include default values for "unit" and "coil_type" for all + entries (see Notes). Defaults are generally based on values normally + present for a VectorView MEG system. Defaults to ``False``. + + Returns + ------- + channel_types : dict + The keys are channel type strings, and the values are dictionaries of + FIFF constants for "kind", and possibly "unit" and "coil_type". + + Notes + ----- + Values which might vary within a channel type across real data + recordings are excluded unless ``include_defaults=True``. For example, + "ref_meg" channels may have coil type + ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc + (depending on the recording system), so no "coil_type" entry is given + for "ref_meg" unless ``include_defaults`` is requested. 
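For illustration, a quick sketch of the mapping this function returns, using only what the code below defines (the exact constant values are elided in the comments):

    from mne.io.pick import get_channel_type_constants

    types = get_channel_type_constants(include_defaults=True)
    print(types['eeg'])  # {'kind': ..., 'unit': ..., 'coil_type': ...}
    print(types['mag'])  # defaults filled in per a VectorView system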
+ """ + base = dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), + mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), + ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), + eeg=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + seeg=dict(kind=FIFF.FIFFV_SEEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + dbs=dict(kind=FIFF.FIFFV_DBS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + ecog=dict(kind=FIFF.FIFFV_ECOG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), + emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), + ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V), + bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), + misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), + stim=dict(kind=FIFF.FIFFV_STIM_CH), + exci=dict(kind=FIFF.FIFFV_EXCI_CH), + syst=dict(kind=FIFF.FIFFV_SYST_CH), + ias=dict(kind=FIFF.FIFFV_IAS_CH), + gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), + dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), + chpi=dict(kind=[FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, + FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3, + FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, + FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, + FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]), + fnirs_cw_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), + fnirs_fd_ac_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), + fnirs_fd_phase=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_RAD, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), + fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, + coil_type=FIFF.FIFFV_COIL_FNIRS_OD), + hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), + hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBR), + csd=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V_M2, + coil_type=FIFF.FIFFV_COIL_EEG_CSD)) + if include_defaults: + coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) + unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) + defaults = dict( + grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), + mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), + ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, + unit=FIFF.FIFF_UNIT_T), + misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V + stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), + eog=coil_none, + ecg=coil_none, + emg=coil_none, + bio=coil_none, + fnirs_od=unit_none, + ) + for key, value in defaults.items(): + base[key].update(value) + return base + + +_first_rule = { + FIFF.FIFFV_MEG_CH: 'meg', + FIFF.FIFFV_REF_MEG_CH: 'ref_meg', + FIFF.FIFFV_EEG_CH: 'eeg', + FIFF.FIFFV_STIM_CH: 'stim', + FIFF.FIFFV_EOG_CH: 'eog', + FIFF.FIFFV_EMG_CH: 'emg', + FIFF.FIFFV_ECG_CH: 'ecg', + FIFF.FIFFV_RESP_CH: 'resp', + FIFF.FIFFV_MISC_CH: 'misc', + FIFF.FIFFV_EXCI_CH: 'exci', + FIFF.FIFFV_IAS_CH: 'ias', + FIFF.FIFFV_SYST_CH: 'syst', + FIFF.FIFFV_SEEG_CH: 'seeg', + FIFF.FIFFV_DBS_CH: 'dbs', + FIFF.FIFFV_BIO_CH: 'bio', + FIFF.FIFFV_QUAT_0: 'chpi', + FIFF.FIFFV_QUAT_1: 'chpi', + FIFF.FIFFV_QUAT_2: 'chpi', + FIFF.FIFFV_QUAT_3: 'chpi', + FIFF.FIFFV_QUAT_4: 'chpi', + FIFF.FIFFV_QUAT_5: 'chpi', + FIFF.FIFFV_QUAT_6: 'chpi', + FIFF.FIFFV_HPI_G: 'chpi', + FIFF.FIFFV_HPI_ERR: 'chpi', + FIFF.FIFFV_HPI_MOV: 'chpi', + FIFF.FIFFV_DIPOLE_WAVE: 'dipole', + 
FIFF.FIFFV_GOODNESS_FIT: 'gof', + FIFF.FIFFV_ECOG_CH: 'ecog', + FIFF.FIFFV_FNIRS_CH: 'fnirs', +} +# How to reduce our categories in channel_type (originally) +_second_rules = { + 'meg': ('unit', {FIFF.FIFF_UNIT_T_M: 'grad', + FIFF.FIFF_UNIT_T: 'mag'}), + 'fnirs': ('coil_type', {FIFF.FIFFV_COIL_FNIRS_HBO: 'hbo', + FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', + FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: + 'fnirs_cw_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: + 'fnirs_fd_ac_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_PHASE: + 'fnirs_fd_phase', + FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', + }), + 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', + FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg', + FIFF.FIFFV_COIL_NONE: 'eeg', # MNE-C backward compat + FIFF.FIFFV_COIL_EEG_CSD: 'csd', + }) +} + + +@fill_doc +def channel_type(info, idx): + """Get channel type. + + Parameters + ---------- + %(info_not_none)s + idx : int + Index of channel. + + Returns + ------- + type : str + Type of channel. Will be one of:: + + {'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg', + 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs', + 'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'} + """ + # This is faster than the original _channel_type_old now in test_pick.py + # because it uses (at most!) two dict lookups plus one conditional + # to get the channel type string. + ch = info['chs'][idx] + try: + first_kind = _first_rule[ch['kind']] + except KeyError: + raise ValueError('Unknown channel type (%s) for channel "%s"' + % (ch['kind'], ch["ch_name"])) + if first_kind in _second_rules: + key, second_rule = _second_rules[first_kind] + first_kind = second_rule[ch[key]] + return first_kind + + +def pick_channels(ch_names, include, exclude=[], ordered=False): + """Pick channels by names. + + Returns the indices of ``ch_names`` in ``include`` but not in ``exclude``. + + Parameters + ---------- + ch_names : list of str + List of channels. + include : list of str + List of channels to include (if empty include all available). + + .. note:: This is to be treated as a set. The order of this list + is not used or maintained in ``sel``. + + exclude : list of str + List of channels to exclude (if empty do not exclude any channel). + Defaults to []. + ordered : bool + If true (default False), treat ``include`` as an ordered list + rather than a set, and any channels from ``include`` are missing + in ``ch_names`` an error will be raised. + + .. versionadded:: 0.18 + + Returns + ------- + sel : array of int + Indices of good channels. 
+ + See Also + -------- + pick_channels_regexp, pick_types + """ + if len(np.unique(ch_names)) != len(ch_names): + raise RuntimeError('ch_names is not a unique list, picking is unsafe') + _check_excludes_includes(include) + _check_excludes_includes(exclude) + if not ordered: + if not isinstance(include, set): + include = set(include) + if not isinstance(exclude, set): + exclude = set(exclude) + sel = [] + for k, name in enumerate(ch_names): + if (len(include) == 0 or name in include) and name not in exclude: + sel.append(k) + else: + if not isinstance(include, list): + include = list(include) + if len(include) == 0: + include = list(ch_names) + if not isinstance(exclude, list): + exclude = list(exclude) + sel, missing = list(), list() + for name in include: + if name in ch_names: + if name not in exclude: + sel.append(ch_names.index(name)) + else: + missing.append(name) + if len(missing): + raise ValueError('Missing channels from ch_names required by ' + 'include:\n%s' % (missing,)) + return np.array(sel, int) + + +def pick_channels_regexp(ch_names, regexp): + """Pick channels using regular expression. + + Returns the indices of the good channels in ch_names. + + Parameters + ---------- + ch_names : list of str + List of channels. + + regexp : str + The regular expression. See python standard module for regular + expressions. + + Returns + ------- + sel : array of int + Indices of good channels. + + See Also + -------- + pick_channels + + Examples + -------- + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1') + [0] + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *') + [0, 1, 2] + """ + r = re.compile(regexp) + return [k for k, name in enumerate(ch_names) if r.match(name)] + + +def _triage_meg_pick(ch, meg): + """Triage an MEG pick type.""" + if meg is True: + return True + elif ch['unit'] == FIFF.FIFF_UNIT_T_M: + if meg == 'grad': + return True + elif meg == 'planar1' and ch['ch_name'].endswith('2'): + return True + elif meg == 'planar2' and ch['ch_name'].endswith('3'): + return True + elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T): + return True + return False + + +def _triage_fnirs_pick(ch, fnirs, warned): + """Triage an fNIRS pick type.""" + if fnirs is True: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and 'hbo' in fnirs: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and 'hbr' in fnirs: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ + 'fnirs_cw_amplitude' in fnirs: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ + 'fnirs_fd_ac_amplitude' in fnirs: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ + 'fnirs_fd_phase' in fnirs: + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and 'fnirs_od' in fnirs: + return True + return False + + +def _check_meg_type(meg, allow_auto=False): + """Ensure a valid meg type.""" + if isinstance(meg, str): + allowed_types = ['grad', 'mag', 'planar1', 'planar2'] + allowed_types += ['auto'] if allow_auto else [] + if meg not in allowed_types: + raise ValueError('meg value must be one of %s or bool, not %s' + % (allowed_types, meg)) + + +def _check_info_exclude(info, exclude): + _validate_type(info, "info") + info._check_consistency() + if exclude is None: + raise ValueError('exclude must be a list of strings or "bads"') + elif exclude == 'bads': + exclude = info.get('bads', []) + elif not isinstance(exclude, (list, tuple)): + raise ValueError('exclude must 
either be "bads" or a list of strings.' + ' If only one channel is to be excluded, use ' + '[ch_name] instead of passing ch_name.') + return exclude + + +@fill_doc +def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, + emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, + exci=False, ias=False, syst=False, seeg=False, dipole=False, + gof=False, bio=False, ecog=False, fnirs=False, csd=False, + dbs=False, include=(), exclude='bads', selection=None): + """Pick channels by type and names. + + Parameters + ---------- + %(info_not_none)s + meg : bool | str + If True include MEG channels. If string it can be 'mag', 'grad', + 'planar1' or 'planar2' to select only magnetometers, all gradiometers, + or a specific type of gradiometer. + eeg : bool + If True include EEG channels. + stim : bool + If True include stimulus channels. + eog : bool + If True include EOG channels. + ecg : bool + If True include ECG channels. + emg : bool + If True include EMG channels. + ref_meg : bool | str + If True include CTF / 4D reference channels. If 'auto', reference + channels are included if compensations are present and ``meg`` is not + False. Can also be the string options for the ``meg`` parameter. + misc : bool + If True include miscellaneous analog channels. + resp : bool + If True include response-trigger channel. For some MEG systems this + is separate from the stim channel. + chpi : bool + If True include continuous HPI coil channels. + exci : bool + Flux excitation channel used to be a stimulus channel. + ias : bool + Internal Active Shielding data (maybe on Triux only). + syst : bool + System status channel information (on Triux systems only). + seeg : bool + Stereotactic EEG channels. + dipole : bool + Dipole time course channels. + gof : bool + Dipole goodness of fit channels. + bio : bool + Bio channels. + ecog : bool + Electrocorticography channels. + fnirs : bool | str + Functional near-infrared spectroscopy channels. If True include all + fNIRS channels. If False (default) include none. If string it can be + 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to + include channels measuring deoxyhemoglobin). + csd : bool + Current source density channels. + dbs : bool + Deep brain stimulation channels. + include : list of str + List of additional channels to include. If empty do not include any. + exclude : list of str | str + List of channels to exclude. If 'bads' (default), exclude channels + in ``info['bads']``. + selection : list of str + Restrict sensor channels (MEG, EEG) to this list of channel names. + + Returns + ------- + sel : array of int + Indices of good channels. 
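For illustration, `pick_types` on a tiny synthetic Info (channel names invented for the sketch):

    import numpy as np
    from mne import create_info
    from mne.io.pick import pick_types

    info = create_info(['EEG 001', 'EOG 061', 'STI 014'], 1000.,
                       ch_types=['eeg', 'eog', 'stim'])
    picks = pick_types(info, eeg=True)           # -> array([0])
    print(np.array(info['ch_names'])[picks])     # ['EEG 001']
    print(pick_types(info, eeg=True, eog=True))  # -> array([0, 1])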
+ """ + # NOTE: Changes to this function's signature should also be changed in + # PickChannelsMixin + _validate_type(meg, (bool, str), 'meg') + + exclude = _check_info_exclude(info, exclude) + nchan = info['nchan'] + pick = np.zeros(nchan, dtype=bool) + + _check_meg_type(ref_meg, allow_auto=True) + _check_meg_type(meg) + if isinstance(ref_meg, str) and ref_meg == 'auto': + ref_meg = ('comps' in info and info['comps'] is not None and + len(info['comps']) > 0 and meg is not False) + + for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci, + ias, syst, seeg, dipole, gof, bio, ecog, csd, dbs): + if not isinstance(param, bool): + w = ('Parameters for all channel types (with the exception of ' + '"meg", "ref_meg" and "fnirs") must be of type bool, not {}.') + raise ValueError(w.format(type(param))) + + param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, + misc=misc, resp=resp, chpi=chpi, exci=exci, + ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole, + gof=gof, bio=bio, ecog=ecog, csd=csd) + # avoid triage if possible + if isinstance(meg, bool): + for key in ('grad', 'mag'): + param_dict[key] = meg + if isinstance(fnirs, bool): + for key in _FNIRS_CH_TYPES_SPLIT: + param_dict[key] = fnirs + warned = [False] + for k in range(nchan): + ch_type = channel_type(info, k) + try: + pick[k] = param_dict[ch_type] + except KeyError: # not so simple + assert ch_type in ( + 'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT + if ch_type in ('grad', 'mag'): + pick[k] = _triage_meg_pick(info['chs'][k], meg) + elif ch_type == 'ref_meg': + pick[k] = _triage_meg_pick(info['chs'][k], ref_meg) + else: # ch_type in ('hbo', 'hbr') + pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs, warned) + + # restrict channels to selection if provided + if selection is not None: + # the selection only restricts these types of channels + sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH, + FIFF.FIFFV_EEG_CH] + for k in np.where(pick)[0]: + if (info['chs'][k]['kind'] in sel_kind and + info['ch_names'][k] not in selection): + pick[k] = False + + myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]] + myinclude += include + + if len(myinclude) == 0: + sel = np.array([], int) + else: + sel = pick_channels(info['ch_names'], myinclude, exclude) + + return sel + + +@verbose +def pick_info(info, sel=(), copy=True, verbose=None): + """Restrict an info structure to a selection of channels. + + Parameters + ---------- + %(info_not_none)s + sel : list of int | None + Indices of channels to include. If None, all channels + are included. + copy : bool + If copy is False, info is modified inplace. + %(verbose)s + + Returns + ------- + res : dict + Info structure restricted to a selection of channels. + """ + # avoid circular imports + from .meas_info import _bad_chans_comp + + info._check_consistency() + info = info.copy() if copy else info + if sel is None: + return info + elif len(sel) == 0: + raise ValueError('No channels match the selection.') + n_unique = len(np.unique(np.arange(len(info['ch_names']))[sel])) + if n_unique != len(sel): + raise ValueError('Found %d / %d unique names, sel is not unique' + % (n_unique, len(sel))) + + # make sure required the compensation channels are present + if len(info.get('comps', [])) > 0: + ch_names = [info['ch_names'][idx] for idx in sel] + _, comps_missing = _bad_chans_comp(info, ch_names) + if len(comps_missing) > 0: + logger.info('Removing %d compensators from info because ' + 'not all compensation channels were picked.' 
+ % (len(info['comps']),)) + with info._unlock(): + info['comps'] = [] + with info._unlock(): + info['chs'] = [info['chs'][k] for k in sel] + info._update_redundant() + info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']] + if 'comps' in info: + comps = deepcopy(info['comps']) + for c in comps: + row_idx = [k for k, n in enumerate(c['data']['row_names']) + if n in info['ch_names']] + row_names = [c['data']['row_names'][i] for i in row_idx] + rowcals = c['rowcals'][row_idx] + c['rowcals'] = rowcals + c['data']['nrow'] = len(row_names) + c['data']['row_names'] = row_names + c['data']['data'] = c['data']['data'][row_idx] + with info._unlock(): + info['comps'] = comps + info._check_consistency() + + return info + + +def _has_kit_refs(info, picks): + """Determine if KIT ref channels are chosen. + + This is currently only used by make_forward_solution, which cannot + run when KIT reference channels are included. + """ + for p in picks: + if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG: + return True + return False + + +def pick_channels_evoked(orig, include=[], exclude='bads'): + """Pick channels from evoked data. + + Parameters + ---------- + orig : Evoked object + One evoked dataset. + include : list of str, (optional) + List of channels to include (if empty, include all available). + exclude : list of str | str + List of channels to exclude. If empty do not exclude any (default). + If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'. + + Returns + ------- + res : instance of Evoked + Evoked data restricted to selected channels. If include and + exclude are empty it returns orig without copy. + """ + if len(include) == 0 and len(exclude) == 0: + return orig + + exclude = _check_excludes_includes(exclude, info=orig.info, + allow_bads=True) + sel = pick_channels(orig.info['ch_names'], include=include, + exclude=exclude) + + if len(sel) == 0: + raise ValueError('Warning : No channels match the selection.') + + res = deepcopy(orig) + # + # Modify the measurement info + # + res.info = pick_info(res.info, sel) + # + # Create the reduced data set + # + res.data = res.data[sel, :] + + return res + + +@verbose +def pick_channels_forward(orig, include=[], exclude=[], ordered=False, + copy=True, verbose=None): + """Pick channels from forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + include : list of str + List of channels to include (if empty, include all available). + Defaults to []. + exclude : list of str | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to []. + If 'bads', then exclude bad channels in orig. + ordered : bool + If true (default False), treat ``include`` as an ordered list + rather than a set. + + .. versionadded:: 0.18 + copy : bool + If True (default), make a copy. + + .. versionadded:: 0.19 + %(verbose)s + + Returns + ------- + res : dict + Forward solution restricted to selected channels. If include and + exclude are empty it returns orig without copy. + """ + orig['info']._check_consistency() + if len(include) == 0 and len(exclude) == 0: + return orig.copy() if copy else orig + exclude = _check_excludes_includes(exclude, + info=orig['info'], allow_bads=True) + + # Allow for possibility of channel ordering in forward solution being + # different from that of the M/EEG file it is based on. 
+ sel_sol = pick_channels(orig['sol']['row_names'], include=include, + exclude=exclude, ordered=ordered) + sel_info = pick_channels(orig['info']['ch_names'], include=include, + exclude=exclude, ordered=ordered) + + fwd = deepcopy(orig) if copy else orig + + # Check that forward solution and original data file agree on #channels + if len(sel_sol) != len(sel_info): + raise ValueError('Forward solution and functional data appear to ' + 'have different channel names, please check.') + + # Do we have something? + nuse = len(sel_sol) + if nuse == 0: + raise ValueError('Nothing remains after picking') + + logger.info(' %d out of %d channels remain after picking' + % (nuse, fwd['nchan'])) + + # Pick the correct rows of the forward operator using sel_sol + fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :] + fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :] + fwd['sol']['nrow'] = nuse + + ch_names = [fwd['sol']['row_names'][k] for k in sel_sol] + fwd['nchan'] = nuse + fwd['sol']['row_names'] = ch_names + + # Pick the appropriate channel names from the info-dict using sel_info + with fwd['info']._unlock(): + fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info] + fwd['info']._update_redundant() + fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names] + + if fwd['sol_grad'] is not None: + fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :] + fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :] + fwd['sol_grad']['nrow'] = nuse + fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k] + for k in sel_sol] + + return fwd + + +def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, + ecog=False, dbs=False, include=[], exclude=[]): + """Pick by channel type and names from a forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + meg : bool | str + If True include MEG channels. If string it can be 'mag', 'grad', + 'planar1' or 'planar2' to select only magnetometers, all gradiometers, + or a specific type of gradiometer. + eeg : bool + If True include EEG channels. + ref_meg : bool + If True include CTF / 4D reference channels. + seeg : bool + If True include stereotactic EEG channels. + ecog : bool + If True include electrocorticography channels. + dbs : bool + If True include deep brain stimulation channels. + include : list of str + List of additional channels to include. If empty do not include any. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any (default). + If 'bads', exclude channels in orig['info']['bads']. + + Returns + ------- + res : dict + Forward solution restricted to selected channel types. + """ + info = orig['info'] + sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, + ecog=ecog, dbs=dbs, include=include, exclude=exclude) + if len(sel) == 0: + raise ValueError('No valid channels found') + include_ch_names = [info['ch_names'][k] for k in sel] + + return pick_channels_forward(orig, include_ch_names) + + +@fill_doc +def channel_indices_by_type(info, picks=None): + """Get indices of channels by type. + + Parameters + ---------- + %(info_not_none)s + %(picks_all)s + + Returns + ------- + idx_by_type : dict + A dictionary that maps each channel type to a (possibly empty) list of + channel indices. 
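For illustration, `channel_indices_by_type` on a small synthetic Info (names invented for the sketch):

    from mne import create_info
    from mne.io.pick import channel_indices_by_type

    info = create_info(['Fz', 'ECG 01', 'hbo 1'], 100.,
                       ch_types=['eeg', 'ecg', 'hbo'])
    idx = channel_indices_by_type(info)
    print(idx['eeg'], idx['ecg'], idx['hbo'])  # [0] [1] [2]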
+ """ + idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if + key not in ('meg', 'fnirs')} + idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), + fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(), + fnirs_fd_phase=list(), fnirs_od=list()) + picks = _picks_to_idx(info, picks, + none='all', exclude=(), allow_empty=True) + for k in picks: + ch_type = channel_type(info, k) + for key in idx_by_type.keys(): + if ch_type == key: + idx_by_type[key].append(k) + return idx_by_type + + +def pick_channels_cov(orig, include=[], exclude='bads', ordered=False, + copy=True): + """Pick channels from covariance matrix. + + Parameters + ---------- + orig : Covariance + A covariance. + include : list of str, (optional) + List of channels to include (if empty, include all available). + exclude : list of str, (optional) | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to 'bads'. + ordered : bool + If True (default False), ensure that the order of the channels in the + modified instance matches the order of ``include``. + + .. versionadded:: 0.20.0 + copy : bool + If True (the default), return a copy of the covariance matrix with the + modified channels. If False, channels are modified in-place. + + .. versionadded:: 0.20.0 + + Returns + ------- + res : dict + Covariance solution restricted to selected channels. + """ + if copy: + orig = orig.copy() + # A little peculiarity of the cov objects is that these two fields + # should not be copied over when None. + if 'method' in orig and orig['method'] is None: + del orig['method'] + if 'loglik' in orig and orig['loglik'] is None: + del orig['loglik'] + + exclude = orig['bads'] if exclude == 'bads' else exclude + sel = pick_channels(orig['names'], include=include, exclude=exclude, + ordered=ordered) + data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel] + names = [orig['names'][k] for k in sel] + bads = [name for name in orig['bads'] if name in orig['names']] + + orig['data'] = data + orig['names'] = names + orig['bads'] = bads + orig['dim'] = len(data) + + return orig + + +def _mag_grad_dependent(info): + """Determine of mag and grad should be dealt with jointly.""" + # right now just uses SSS, could be computed / checked from cov + # but probably overkill + return any(ph.get('max_info', {}).get('sss_info', {}).get('in_order', 0) + for ph in info.get('proc_history', [])) + + +@fill_doc +def _contains_ch_type(info, ch_type): + """Check whether a certain channel type is in an info object. + + Parameters + ---------- + %(info_not_none)s + ch_type : str + the channel type to be checked for + + Returns + ------- + has_ch_type : bool + Whether the channel type is present or not. + """ + _validate_type(ch_type, 'str', "ch_type") + + meg_extras = list(_MEG_CH_TYPES_SPLIT) + fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) + valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS + if key != 'meg'] + meg_extras + fnirs_extras) + _check_option('ch_type', ch_type, valid_channel_types) + if info is None: + raise ValueError('Cannot check for channels of type "%s" because info ' + 'is None' % (ch_type,)) + return any(ch_type == channel_type(info, ii) + for ii in range(info['nchan'])) + + +@fill_doc +def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'): + """Get data channel indices as separate list of tuples. + + Parameters + ---------- + %(info_not_none)s + meg_combined : bool | 'auto' + Whether to return combined picks for grad and mag. 
+ Can be 'auto' to choose based on Maxwell filtering status. + ref_meg : bool + If True include CTF / 4D reference channels + exclude : list of str | str + List of channels to exclude. If 'bads' (default), exclude channels + in info['bads']. + + Returns + ------- + picks_list : list of tuples + The list of tuples of picks and the type string. + """ + _validate_type(ref_meg, bool, 'ref_meg') + exclude = _check_info_exclude(info, exclude) + if meg_combined == 'auto': + meg_combined = _mag_grad_dependent(info) + picks_list = [] + picks_list = {ch_type: list() for ch_type in _DATA_CH_TYPES_SPLIT} + for k in range(info['nchan']): + if info['chs'][k]['ch_name'] not in exclude: + this_type = channel_type(info, k) + try: + picks_list[this_type].append(k) + except KeyError: + # This annoyance is due to differences in pick_types + # and channel_type behavior + if this_type == 'ref_meg': + ch = info['chs'][k] + if _triage_meg_pick(ch, ref_meg): + if ch['unit'] == FIFF.FIFF_UNIT_T: + picks_list['mag'].append(k) + elif ch['unit'] == FIFF.FIFF_UNIT_T_M: + picks_list['grad'].append(k) + else: + pass # not a data channel type + picks_list = [(ch_type, np.array(picks_list[ch_type], int)) + for ch_type in _DATA_CH_TYPES_SPLIT] + assert _DATA_CH_TYPES_SPLIT[:2] == ('mag', 'grad') + if meg_combined and len(picks_list[0][1]) and len(picks_list[1][1]): + picks_list.insert( + 0, ('meg', np.unique(np.concatenate([picks_list.pop(0)[1], + picks_list.pop(0)[1]]))) + ) + picks_list = [p for p in picks_list if len(p[1])] + return picks_list + + +def _check_excludes_includes(chs, info=None, allow_bads=False): + """Ensure that inputs to exclude/include are list-like or "bads". + + Parameters + ---------- + chs : any input, should be list, tuple, set, str + The channels passed to include or exclude. + allow_bads : bool + Allow the user to supply "bads" as a string for auto exclusion. + + Returns + ------- + chs : list + Channels to be excluded/excluded. If allow_bads, and chs=="bads", + this will be the bad channels found in 'info'. + """ + from .meas_info import Info + if not isinstance(chs, (list, tuple, set, np.ndarray)): + if allow_bads is True: + if not isinstance(info, Info): + raise ValueError('Supply an info object if allow_bads is true') + elif chs != 'bads': + raise ValueError('If chs is a string, it must be "bads"') + else: + chs = info['bads'] + else: + raise ValueError( + 'include/exclude must be list, tuple, ndarray, or "bads". ' + + 'You provided type {}'.format(type(chs))) + return chs + + +_PICK_TYPES_DATA_DICT = dict( + meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False, + misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, + seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True, + dbs=True) +_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) +_MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') +_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') +_DATA_CH_TYPES_ORDER_DEFAULT = ( + 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'resp', 'emg', 'ref_meg', + 'misc', 'stim', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', + 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) +# Valid data types, ordered for consistency, used in viz/evoked. 
+_VALID_CHANNEL_TYPES = ( + 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'resp', 'emg', 'dipole', 'gof', + 'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') +_DATA_CH_TYPES_SPLIT = ( + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + + +def _pick_data_channels(info, exclude='bads', with_ref_meg=True, + with_aux=False): + """Pick only data channels.""" + kwargs = _PICK_TYPES_DATA_DICT + if with_aux: + kwargs = kwargs.copy() + kwargs.update(eog=True, ecg=True, emg=True, bio=True) + return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) + + +def _pick_data_or_ica(info, exclude=()): + """Pick only data or ICA channels.""" + if any(ch_name.startswith('ICA') for ch_name in info['ch_names']): + picks = pick_types(info, exclude=exclude, misc=True) + else: + picks = _pick_data_channels(info, exclude=exclude, with_ref_meg=True) + return picks + + +def _picks_to_idx(info, picks, none='data', exclude='bads', allow_empty=False, + with_ref_meg=True, return_kind=False): + """Convert and check pick validity.""" + from .meas_info import Info + picked_ch_type_or_generic = False + # + # None -> all, data, or data_or_ica (ndarray of int) + # + if isinstance(info, Info): + n_chan = info['nchan'] + else: + info = _ensure_int(info, 'info', 'an int or Info') + n_chan = info + assert n_chan >= 0 + + orig_picks = picks + # We do some extra_repr gymnastics to avoid calling repr(orig_picks) too + # soon as it can be a performance bottleneck (repr on ndarray is slow) + extra_repr = '' + if picks is None: + if isinstance(info, int): # special wrapper for no real info + picks = np.arange(n_chan) + extra_repr = ', treated as range(%d)' % (n_chan,) + else: + picks = none # let _picks_str_to_idx handle it + extra_repr = 'None, treated as "%s"' % (none,) + + # + # slice + # + if isinstance(picks, slice): + picks = np.arange(n_chan)[picks] + + # + # -> ndarray of int (and make a copy) + # + picks = np.atleast_1d(picks) # this works even for picks == 'something' + picks = np.array([], dtype=int) if len(picks) == 0 else picks + if picks.ndim != 1: + raise ValueError('picks must be 1D, got %sD' % (picks.ndim,)) + if picks.dtype.char in ('S', 'U'): + picks = _picks_str_to_idx(info, picks, exclude, with_ref_meg, + return_kind, extra_repr, allow_empty, + orig_picks) + if return_kind: + picked_ch_type_or_generic = picks[1] + picks = picks[0] + if picks.dtype.kind not in ['i', 'u']: + raise TypeError('picks must be a list of int or list of str, got ' + 'a data type of %s' % (picks.dtype,)) + del extra_repr + picks = picks.astype(int) + + # + # ensure we have (optionally non-empty) ndarray of valid int + # + if len(picks) == 0 and not allow_empty: + raise ValueError('No appropriate channels found for the given picks ' + '(%r)' % (orig_picks,)) + if (picks < -n_chan).any(): + raise ValueError('All picks must be >= %d, got %r' + % (-n_chan, orig_picks)) + if (picks >= n_chan).any(): + raise ValueError('All picks must be < n_channels (%d), got %r' + % (n_chan, orig_picks)) + picks %= n_chan # ensure positive + if return_kind: + return picks, picked_ch_type_or_generic + return picks + + +def _picks_str_to_idx(info, picks, exclude, with_ref_meg, return_kind, + extra_repr, allow_empty, orig_picks): + """Turn a list of str into ndarray of int.""" + # special case for _picks_to_idx w/no info: shouldn't really happen + if isinstance(info, int): + raise ValueError('picks as str can only be used when measurement ' + 'info is available') + + # + # first: check our special cases + # + + 
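    # Editor's note (hedged sketch; `info` is any measurement Info): string
    # picks are resolved in three passes below -- generic aliases first, then
    # literal channel names, then channel types -- and exactly one pass may
    # match, otherwise the ambiguity error at the end is raised:
    #
    #     _picks_to_idx(info, 'data')        # generic alias
    #     _picks_to_idx(info, ['EEG 001'])   # channel name
    #     _picks_to_idx(info, 'eeg')         # channel type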
picks_generic = list() + if len(picks) == 1: + if picks[0] in ('all', 'data', 'data_or_ica'): + if picks[0] == 'all': + use_exclude = info['bads'] if exclude == 'bads' else exclude + picks_generic = pick_channels( + info['ch_names'], info['ch_names'], exclude=use_exclude) + elif picks[0] == 'data': + picks_generic = _pick_data_channels(info, exclude=exclude, + with_ref_meg=with_ref_meg) + elif picks[0] == 'data_or_ica': + picks_generic = _pick_data_or_ica(info, exclude=exclude) + if len(picks_generic) == 0 and orig_picks is None and \ + not allow_empty: + raise ValueError('picks (%s) yielded no channels, consider ' + 'passing picks explicitly' + % (repr(orig_picks) + extra_repr,)) + + # + # second: match all to channel names + # + + bad_names = [] + picks_name = list() + for pick in picks: + try: + picks_name.append(info['ch_names'].index(pick)) + except ValueError: + bad_names.append(pick) + + # + # third: match all to types + # + bad_type = None + picks_type = list() + kwargs = dict(meg=False) + meg, fnirs = set(), set() + for pick in picks: + if pick in _PICK_TYPES_KEYS: + kwargs[pick] = True + elif pick in _MEG_CH_TYPES_SPLIT: + meg |= {pick} + elif pick in _FNIRS_CH_TYPES_SPLIT: + fnirs |= {pick} + else: + bad_type = pick + break + else: + # triage MEG and FNIRS, which are complicated due to non-bool entries + extra_picks = set() + if len(meg) > 0 and not kwargs.get('meg', False): + # easiest just to iterate + for use_meg in meg: + extra_picks |= set(pick_types( + info, meg=use_meg, ref_meg=False, exclude=exclude)) + if len(fnirs) > 0 and not kwargs.get('fnirs', False): + if len(fnirs) == 1: + kwargs['fnirs'] = list(fnirs)[0] + else: + kwargs['fnirs'] = list(fnirs) + picks_type = pick_types(info, exclude=exclude, **kwargs) + if len(extra_picks) > 0: + picks_type = sorted(set(picks_type) | set(extra_picks)) + + # + # finally: ensure we have exactly one usable list + # + all_picks = (picks_generic, picks_name, picks_type) + any_found = [len(p) > 0 for p in all_picks] + if sum(any_found) == 0: + if not allow_empty: + raise ValueError( + 'picks (%s) could not be interpreted as ' + 'channel names (no channel "%s"), channel types (no ' + 'type "%s"), or a generic type (just "all" or "data")' + % (repr(orig_picks) + extra_repr, str(bad_names), bad_type)) + picks = np.array([], int) + elif sum(any_found) > 1: + raise RuntimeError('Some channel names are ambiguously equivalent to ' + 'channel types, cannot use string-based ' + 'picks for these') + else: + picks = np.array(all_picks[np.where(any_found)[0][0]]) + + picked_ch_type_or_generic = not len(picks_name) + if len(bad_names) > 0 and not picked_ch_type_or_generic: + warn(f'Channel(s) {bad_names} could not be picked, because ' + 'they are not present in the info instance.') + + if return_kind: + return picks, picked_ch_type_or_generic + return picks + + +def _pick_inst(inst, picks, exclude, copy=True): + """Return an instance with picked and excluded channels.""" + if copy is True: + inst = inst.copy() + picks = _picks_to_idx(inst.info, picks, exclude=[]) + pick_names = [inst.info['ch_names'][pick] for pick in picks] + inst.pick_channels(pick_names) + + if exclude == 'bads': + exclude = [ch for ch in inst.info['bads'] + if ch in inst.info['ch_names']] + if exclude is not None: + inst.drop_channels(exclude) + return inst + + +def _get_channel_types(info, picks=None, unique=False, only_data_chs=False): + """Get the data channel types in an info instance.""" + none = 'data' if only_data_chs else 'all' + picks = _picks_to_idx(info, picks, none, 
+                          (), allow_empty=False)
+    ch_types = [channel_type(info, pick) for pick in picks]
+    if only_data_chs:
+        ch_types = [ch_type for ch_type in ch_types
+                    if ch_type in _DATA_CH_TYPES_SPLIT]
+    return set(ch_types) if unique is True else ch_types
diff --git a/python/libs/mne/io/proc_history.py b/python/libs/mne/io/proc_history.py
new file mode 100644
index 0000000..f8395d1
--- /dev/null
+++ b/python/libs/mne/io/proc_history.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# Authors: Denis A. Engemann
+#          Eric Larson
+# License: Simplified BSD
+
+import numpy as np
+
+from .open import read_tag, fiff_open
+from .tree import dir_tree_find
+from .write import (start_block, end_block, write_int, write_float,
+                    write_string, write_float_matrix, write_int_matrix,
+                    write_float_sparse, write_id)
+from .tag import find_tag
+from .constants import FIFF
+from ..fixes import _csc_matrix_cast
+from ..utils import warn, _check_fname
+
+_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id',
+              'date', 'experimenter', 'creator']
+_proc_ids = [FIFF.FIFF_PARENT_FILE_ID,
+             FIFF.FIFF_BLOCK_ID,
+             FIFF.FIFF_PARENT_BLOCK_ID,
+             FIFF.FIFF_MEAS_DATE,
+             FIFF.FIFF_EXPERIMENTER,
+             FIFF.FIFF_CREATOR]
+_proc_writers = [write_id, write_id, write_id,
+                 write_int, write_string, write_string]
+_proc_casters = [dict, dict, dict, np.array, str, str]
+
+
+def _read_proc_history(fid, tree):
+    """Read processing history from fiff file.
+
+    This function reads the SSS info, the CTC correction and the
+    calibrations from the SSS processing logs inside of a raw file
+    (cf. MaxFilter v2.2 manual (October 2010), page 21)::
+
+        104 = {                 900 = proc. history
+          104 = {               901 = proc. record
+            103 = block ID
+            204 = date
+            212 = scientist
+            113 = creator program
+            104 = {             502 = SSS info
+              264 = SSS task
+              263 = SSS coord frame
+              265 = SSS origin
+              266 = SSS ins.order
+              267 = SSS outs.order
+              268 = SSS nr chnls
+              269 = SSS components
+              278 = SSS nfree
+              243 = HPI g limit    0.98
+              244 = HPI dist limit 0.005
+            105 = }             502 = SSS info
+            104 = {             504 = MaxST info
+              264 = SSS task
+              272 = SSST subspace correlation
+              279 = SSST buffer length
+            105 = }
+            104 = {             501 = CTC correction
+              103 = block ID
+              204 = date
+              113 = creator program
+              800 = CTC matrix
+              3417 = proj item chs
+            105 = }             501 = CTC correction
+            104 = {             503 = SSS finecalib.
+              270 = SSS cal chnls
+              271 = SSS cal coeff
+            105 = }             503 = SSS finecalib.
+          105 = }               901 = proc. record
+        105 = }                 900 = proc.
history + """ + proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY) + out = list() + if len(proc_history) > 0: + proc_history = proc_history[0] + proc_records = dir_tree_find(proc_history, + FIFF.FIFFB_PROCESSING_RECORD) + for proc_record in proc_records: + record = dict() + for i_ent in range(proc_record['nent']): + kind = proc_record['directory'][i_ent].kind + pos = proc_record['directory'][i_ent].pos + for key, id_, cast in zip(_proc_keys, _proc_ids, + _proc_casters): + if kind == id_: + tag = read_tag(fid, pos) + record[key] = cast(tag.data) + break + else: + warn('Unknown processing history item %s' % kind) + record['max_info'] = _read_maxfilter_record(fid, proc_record) + iass = dir_tree_find(proc_record, FIFF.FIFFB_IAS) + if len(iass) > 0: + # XXX should eventually populate this + ss = [dict() for _ in range(len(iass))] + record['ias'] = ss + if len(record['max_info']) > 0: + out.append(record) + return out + + +def _write_proc_history(fid, info): + """Write processing history to file.""" + if len(info['proc_history']) > 0: + start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + for record in info['proc_history']: + start_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers): + if key in record: + writer(fid, id_, record[key]) + _write_maxfilter_record(fid, record['max_info']) + if 'ias' in record: + for _ in record['ias']: + start_block(fid, FIFF.FIFFB_IAS) + # XXX should eventually populate this + end_block(fid, FIFF.FIFFB_IAS) + end_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + + +_sss_info_keys = ('job', 'frame', 'origin', 'in_order', + 'out_order', 'nchan', 'components', 'nfree', + 'hpi_g_limit', 'hpi_dist_limit') +_sss_info_ids = (FIFF.FIFF_SSS_JOB, + FIFF.FIFF_SSS_FRAME, + FIFF.FIFF_SSS_ORIGIN, + FIFF.FIFF_SSS_ORD_IN, + FIFF.FIFF_SSS_ORD_OUT, + FIFF.FIFF_SSS_NMAG, + FIFF.FIFF_SSS_COMPONENTS, + FIFF.FIFF_SSS_NFREE, + FIFF.FIFF_HPI_FIT_GOOD_LIMIT, + FIFF.FIFF_HPI_FIT_DIST_LIMIT) +_sss_info_writers = (write_int, write_int, write_float, write_int, + write_int, write_int, write_int, write_int, + write_float, write_float) +_sss_info_casters = (int, int, np.array, int, + int, int, np.array, int, + float, float) + +_max_st_keys = ('job', 'subspcorr', 'buflen') +_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR, + FIFF.FIFF_SSS_ST_LENGTH) +_max_st_writers = (write_int, write_float, write_float) +_max_st_casters = (int, float, float) + +_sss_ctc_keys = ('block_id', 'date', 'creator', 'decoupler') +_sss_ctc_ids = (FIFF.FIFF_BLOCK_ID, + FIFF.FIFF_MEAS_DATE, + FIFF.FIFF_CREATOR, + FIFF.FIFF_DECOUPLER_MATRIX) +_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) +_sss_ctc_casters = (dict, np.array, str, _csc_matrix_cast) + +_sss_cal_keys = ('cal_chans', 'cal_corrs') +_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) +_sss_cal_writers = (write_int_matrix, write_float_matrix) +_sss_cal_casters = (np.array, np.array) + + +def _read_ctc(fname): + """Read cross-talk correction matrix.""" + fname = _check_fname(fname, overwrite='read', must_exist=True) + f, tree, _ = fiff_open(fname) + with f as fid: + sss_ctc = _read_maxfilter_record(fid, tree)['sss_ctc'] + bad_str = 'Invalid cross-talk FIF: %s' % fname + if len(sss_ctc) == 0: + raise ValueError(bad_str) + node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0] + comment = find_tag(fid, node, FIFF.FIFF_COMMENT).data + if comment != 'cross-talk compensation matrix': + raise ValueError(bad_str) 
+ sss_ctc['creator'] = find_tag(fid, node, FIFF.FIFF_CREATOR).data + sss_ctc['date'] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data + return sss_ctc + + +def _read_maxfilter_record(fid, tree): + """Read maxfilter processing record from file.""" + sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502 + sss_info = dict() + if len(sss_info_block) > 0: + sss_info_block = sss_info_block[0] + for i_ent in range(sss_info_block['nent']): + kind = sss_info_block['directory'][i_ent].kind + pos = sss_info_block['directory'][i_ent].pos + for key, id_, cast in zip(_sss_info_keys, _sss_info_ids, + _sss_info_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_info[key] = cast(tag.data) + break + + max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504 + max_st = dict() + if len(max_st_block) > 0: + max_st_block = max_st_block[0] + for i_ent in range(max_st_block['nent']): + kind = max_st_block['directory'][i_ent].kind + pos = max_st_block['directory'][i_ent].pos + for key, id_, cast in zip(_max_st_keys, _max_st_ids, + _max_st_casters): + if kind == id_: + tag = read_tag(fid, pos) + max_st[key] = cast(tag.data) + break + + sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501 + sss_ctc = dict() + if len(sss_ctc_block) > 0: + sss_ctc_block = sss_ctc_block[0] + for i_ent in range(sss_ctc_block['nent']): + kind = sss_ctc_block['directory'][i_ent].kind + pos = sss_ctc_block['directory'][i_ent].pos + for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids, + _sss_ctc_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_ctc[key] = cast(tag.data) + break + else: + if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST: + tag = read_tag(fid, pos) + chs = tag.data.split(':') + # This list can null chars in the last entry, e.g.: + # [..., u'MEG2642', u'MEG2643', u'MEG2641\x00 ... 
\x00'] + chs[-1] = chs[-1].split('\x00')[0] + sss_ctc['proj_items_chs'] = chs + + sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503 + sss_cal = dict() + if len(sss_cal_block) > 0: + sss_cal_block = sss_cal_block[0] + for i_ent in range(sss_cal_block['nent']): + kind = sss_cal_block['directory'][i_ent].kind + pos = sss_cal_block['directory'][i_ent].pos + for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids, + _sss_cal_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_cal[key] = cast(tag.data) + break + + max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc, + sss_cal=sss_cal, max_st=max_st) + return max_info + + +def _write_maxfilter_record(fid, record): + """Write maxfilter processing record to file.""" + sss_info = record['sss_info'] + if len(sss_info) > 0: + start_block(fid, FIFF.FIFFB_SSS_INFO) + for key, id_, writer in zip(_sss_info_keys, _sss_info_ids, + _sss_info_writers): + if key in sss_info: + writer(fid, id_, sss_info[key]) + end_block(fid, FIFF.FIFFB_SSS_INFO) + + max_st = record['max_st'] + if len(max_st) > 0: + start_block(fid, FIFF.FIFFB_SSS_ST_INFO) + for key, id_, writer in zip(_max_st_keys, _max_st_ids, + _max_st_writers): + if key in max_st: + writer(fid, id_, max_st[key]) + end_block(fid, FIFF.FIFFB_SSS_ST_INFO) + + sss_ctc = record['sss_ctc'] + if len(sss_ctc) > 0: # dict has entries + start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids, + _sss_ctc_writers): + if key in sss_ctc: + writer(fid, id_, sss_ctc[key]) + if 'proj_items_chs' in sss_ctc: + write_string(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, + ':'.join(sss_ctc['proj_items_chs'])) + end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + + sss_cal = record['sss_cal'] + if len(sss_cal) > 0: + start_block(fid, FIFF.FIFFB_SSS_CAL) + for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids, + _sss_cal_writers): + if key in sss_cal: + writer(fid, id_, sss_cal[key]) + end_block(fid, FIFF.FIFFB_SSS_CAL) diff --git a/python/libs/mne/io/proj.py b/python/libs/mne/io/proj.py new file mode 100644 index 0000000..b2865a7 --- /dev/null +++ b/python/libs/mne/io/proj.py @@ -0,0 +1,910 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Denis Engemann +# Teon Brooks +# +# License: BSD-3-Clause + +from copy import deepcopy +from itertools import count + +import numpy as np + +from .constants import FIFF +from .pick import pick_types, pick_info +from .tag import find_tag, _rename_list +from .tree import dir_tree_find +from .write import (write_int, write_float, write_string, write_name_list, + write_float_matrix, end_block, start_block) +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT +from ..utils import logger, verbose, warn, fill_doc, _validate_type + + +class Projection(dict): + """Projection vector. + + A basic class to proj a meaningful print for projection vectors. + + .. warning:: This class is generally not meant to be instantiated + directly, use ``compute_proj_*`` functions instead. + + Parameters + ---------- + data : dict + The data dictionary. + desc : str + The projector description. + kind : int + The projector kind. + active : bool + Whether or not the projector has been applied. + explained_var : float | None + The explained variance (proportion). 
+ """ + + def __init__(self, *, data, desc='', kind=FIFF.FIFFV_PROJ_ITEM_FIELD, + active=False, explained_var=None): + super().__init__(desc=desc, kind=kind, active=active, data=data, + explained_var=explained_var) + + def __repr__(self): # noqa: D105 + s = "%s" % self['desc'] + s += ", active : %s" % self['active'] + s += f", n_channels : {len(self['data']['col_names'])}" + if self['explained_var'] is not None: + s += f', exp. var : {self["explained_var"] * 100:0.2f}%' + return "" % s + + # speed up info copy by taking advantage of mutability + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + cls = self.__class__ + result = cls.__new__(cls) + for k, v in self.items(): + if k == 'data': + v = v.copy() + v['data'] = v['data'].copy() + result[k] = v + else: + result[k] = v # kind, active, desc, explained_var immutable + return result + + @fill_doc + def plot_topomap(self, info, cmap=None, sensors=True, + colorbar=False, res=64, size=1, show=True, + outlines='head', contours=6, image_interp='bilinear', + axes=None, vlim=(None, None), sphere=None, + border=_BORDER_DEFAULT): + """Plot topographic maps of SSP projections. + + Parameters + ---------- + %(info_not_none)s Used to determine the layout. + %(proj_topomap_kwargs)s + %(sphere_topomap_auto)s + %(border_topomap)s + + Returns + ------- + fig : instance of Figure + Figure distributing one image per channel across sensor topography. + + Notes + ----- + .. versionadded:: 0.15.0 + """ # noqa: E501 + from ..viz.topomap import plot_projs_topomap + return plot_projs_topomap(self, info, cmap, sensors, colorbar, + res, size, show, outlines, + contours, image_interp, axes, vlim, + sphere=sphere, border=border) + + +class ProjMixin(object): + """Mixin class for Raw, Evoked, Epochs. + + Notes + ----- + This mixin adds a proj attribute as a property to data containers. + It is True if at least one proj is present and all of them are active. + The projs might not be applied yet if data are not preloaded. In + this case it's the _projector attribute that does the job. + If a private _data attribute is present then the projs applied + to it are the ones marked as active. + + A proj parameter passed in constructor of raw or epochs calls + apply_proj and hence after the .proj attribute is True. + + As soon as you've applied the projs it will stay active in the + remaining pipeline. + + The suggested pipeline is proj=True in epochs (it's cheaper than for raw). + + When you use delayed SSP in Epochs, projs are applied when you call + get_data() method. They are not applied to the evoked._data unless you call + apply_proj(). The reason is that you want to reject with projs although + it's not stored in proj mode. + """ + + @property + def proj(self): + """Whether or not projections are active.""" + return (len(self.info['projs']) > 0 and + all(p['active'] for p in self.info['projs'])) + + @verbose + def add_proj(self, projs, remove_existing=False, verbose=None): + """Add SSP projection vectors. + + Parameters + ---------- + projs : list + List with projection vectors. + remove_existing : bool + Remove the projection vectors currently in the file. + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The data container. + """ + if isinstance(projs, Projection): + projs = [projs] + + if (not isinstance(projs, list) and + not all(isinstance(p, Projection) for p in projs)): + raise ValueError('Only projs can be added. 
You supplied ' + 'something else.') + + # mark proj as inactive, as they have not been applied + projs = deactivate_proj(projs, copy=True) + if remove_existing: + # we cannot remove the proj if they are active + if any(p['active'] for p in self.info['projs']): + raise ValueError('Cannot remove projectors that have ' + 'already been applied') + with self.info._unlock(): + self.info['projs'] = projs + else: + self.info['projs'].extend(projs) + # We don't want to add projectors that are activated again. + with self.info._unlock(): + self.info['projs'] = _uniquify_projs(self.info['projs'], + check_active=False, + sort=False) + return self + + @verbose + def apply_proj(self, verbose=None): + """Apply the signal space projection (SSP) operators to the data. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. + + Notes + ----- + Once the projectors have been applied, they can no longer be + removed. It is usually not recommended to apply the projectors at + too early stages, as they are applied automatically later on + (e.g. when computing inverse solutions). + Hint: using the copy method individual projection vectors + can be tested without affecting the original data. + With evoked data, consider the following example:: + + projs_a = mne.read_proj('proj_a.fif') + projs_b = mne.read_proj('proj_b.fif') + # add the first, copy, apply and see ... + evoked.add_proj(a).copy().apply_proj().plot() + # add the second, copy, apply and see ... + evoked.add_proj(b).copy().apply_proj().plot() + # drop the first and see again + evoked.copy().del_proj(0).apply_proj().plot() + evoked.apply_proj() # finally keep both + """ + from ..epochs import BaseEpochs + from ..evoked import Evoked + from .base import BaseRaw + if self.info['projs'] is None or len(self.info['projs']) == 0: + logger.info('No projector specified for this dataset. ' + 'Please consider the method self.add_proj.') + return self + + # Exit delayed mode if you apply proj + if isinstance(self, BaseEpochs) and self._do_delayed_proj: + logger.info('Leaving delayed SSP mode.') + self._do_delayed_proj = False + + if all(p['active'] for p in self.info['projs']): + logger.info('Projections have already been applied. ' + 'Setting proj attribute to True.') + return self + + _projector, info = setup_proj(deepcopy(self.info), add_eeg_ref=False, + activate=True) + # let's not raise a RuntimeError here, otherwise interactive plotting + if _projector is None: # won't be fun. + logger.info('The projections don\'t apply to these data.' + ' Doing nothing.') + return self + self._projector, self.info = _projector, info + if isinstance(self, (BaseRaw, Evoked)): + if self.preload: + self._data = np.dot(self._projector, self._data) + else: # BaseEpochs + if self.preload: + for ii, e in enumerate(self._data): + self._data[ii] = self._project_epoch(e) + else: + self.load_data() # will automatically apply + logger.info('SSP projectors applied...') + return self + + def del_proj(self, idx='all'): + """Remove SSP projection vector. + + .. note:: The projection vector can only be removed if it is inactive + (has not been applied to the data). + + Parameters + ---------- + idx : int | list of int | str + Index of the projector to remove. Can also be "all" (default) + to remove all projectors. + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. 
+ """ + if isinstance(idx, str) and idx == 'all': + idx = list(range(len(self.info['projs']))) + idx = np.atleast_1d(np.array(idx, int)).ravel() + + for ii in idx: + proj = self.info['projs'][ii] + if (proj['active'] and + set(self.info['ch_names']) & + set(proj['data']['col_names'])): + msg = (f'Cannot remove projector that has already been ' + f'applied, unless you first remove all channels it ' + f'applies to. The problematic projector is: {proj}') + raise ValueError(msg) + + keep = np.ones(len(self.info['projs'])) + keep[idx] = False # works with negative indexing and does checks + with self.info._unlock(): + self.info['projs'] = [p for p, k in zip(self.info['projs'], keep) + if k] + return self + + @fill_doc + def plot_projs_topomap(self, ch_type=None, cmap=None, + sensors=True, colorbar=False, res=64, size=1, + show=True, outlines='head', contours=6, + image_interp='bilinear', axes=None, + vlim=(None, None), sphere=None, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT): + """Plot SSP vector. + + Parameters + ---------- + ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | list + The channel type to plot. For 'grad', the gradiometers are collec- + ted in pairs and the RMS for each pair is plotted. If None + (default), it will return all channel types present. If a list of + ch_types is provided, it will return multiple figures. + %(proj_topomap_kwargs)s + %(sphere_topomap_auto)s + %(extrapolate_topomap)s + + .. versionadded:: 0.20 + %(border_topomap)s + + Returns + ------- + fig : instance of Figure + Figure distributing one image per channel across sensor topography. + """ + if self.info['projs'] is not None or len(self.info['projs']) != 0: + from ..viz.topomap import plot_projs_topomap + fig = plot_projs_topomap(self.info['projs'], self.info, cmap=cmap, + sensors=sensors, colorbar=colorbar, + res=res, size=size, show=show, + outlines=outlines, contours=contours, + image_interp=image_interp, axes=axes, + vlim=vlim, sphere=sphere, + extrapolate=extrapolate, border=border) + else: + raise ValueError("Info is missing projs. 
Nothing to plot.") + return fig + + def _reconstruct_proj(self, mode='accurate', origin='auto'): + from ..forward import _map_meg_or_eeg_channels + if len(self.info['projs']) == 0: + return self + self.apply_proj() + for kind in ('meg', 'eeg'): + kwargs = dict(meg=False) + kwargs[kind] = True + picks = pick_types(self.info, **kwargs) + if len(picks) == 0: + continue + info_from = pick_info(self.info, picks) + info_to = info_from.copy() + with info_to._unlock(): + info_to['projs'] = [] + if kind == 'eeg' and _has_eeg_average_ref_proj( + info_from['projs']): + info_to['projs'] = [ + make_eeg_average_ref_proj(info_to, verbose=False)] + mapping = _map_meg_or_eeg_channels( + info_from, info_to, mode=mode, origin=origin) + self.data[..., picks, :] = np.matmul( + mapping, self.data[..., picks, :]) + return self + + +def _proj_equal(a, b, check_active=True): + """Test if two projectors are equal.""" + equal = ((a['active'] == b['active'] or not check_active) and + a['kind'] == b['kind'] and + a['desc'] == b['desc'] and + a['data']['col_names'] == b['data']['col_names'] and + a['data']['row_names'] == b['data']['row_names'] and + a['data']['ncol'] == b['data']['ncol'] and + a['data']['nrow'] == b['data']['nrow'] and + np.all(a['data']['data'] == b['data']['data'])) + return equal + + +@verbose +def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + projs = list() + + # Locate the projection data + nodes = dir_tree_find(node, FIFF.FIFFB_PROJ) + if len(nodes) == 0: + return projs + + # This might exist but we won't use it: + # global_nchan = None + # tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN) + # if tag is not None: + # global_nchan = int(tag.data) + + items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM) + for item in items: + # Find all desired tags in one item + + # This probably also exists but used to be written incorrectly + # sometimes + # tag = find_tag(fid, item, FIFF.FIFF_NCHAN) + # if tag is not None: + # nchan = int(tag.data) + # else: + # nchan = global_nchan + + tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION) + if tag is not None: + desc = tag.data + else: + tag = find_tag(fid, item, FIFF.FIFF_NAME) + if tag is not None: + desc = tag.data + else: + raise ValueError('Projection item description missing') + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND) + if tag is not None: + kind = int(tag.data) + else: + raise ValueError('Projection item kind missing') + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC) + if tag is not None: + nvec = int(tag.data) + else: + raise ValueError('Number of projection vectors not specified') + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST) + if tag is not None: + names = tag.data.split(':') + else: + raise ValueError('Projection item channel list missing') + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS) + if tag is not None: + data = tag.data + else: + raise ValueError('Projection item data missing') + + tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE) + if tag is not None: + active = bool(tag.data) + else: + active = False + + tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR) + if tag is not None: + explained_var = float(tag.data) + else: + explained_var = None + + # handle the case when data is transposed for some reason + if data.shape[0] == len(names) and data.shape[1] == nvec: + data = data.T + + if data.shape[1] != len(names): + raise ValueError('Number of channel names does not match the ' + 'size of 
data matrix') + + # just always use this, we used to have bugs with writing the + # number correctly... + nchan = len(names) + names[:] = _rename_list(names, ch_names_mapping) + # Use exactly the same fields in data as in a named matrix + one = Projection(kind=kind, active=active, desc=desc, + data=dict(nrow=nvec, ncol=nchan, row_names=None, + col_names=names, data=data), + explained_var=explained_var) + + projs.append(one) + + if len(projs) > 0: + logger.info(' Read a total of %d projection items:' % len(projs)) + for proj in projs: + misc = 'active' if proj['active'] else ' idle' + logger.info(f' {proj["desc"]} ' + f'({proj["data"]["nrow"]} x ' + f'{len(proj["data"]["col_names"])}) {misc}') + + return projs + + +############################################################################### +# Write + +def _write_proj(fid, projs, *, ch_names_mapping=None): + """Write a projection operator to a file. + + Parameters + ---------- + fid : file + The file descriptor of the open file. + projs : dict + The projection operator. + """ + if len(projs) == 0: + return + + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + # validation + _validate_type(projs, (list, tuple), 'projs') + for pi, proj in enumerate(projs): + _validate_type(proj, Projection, f'projs[{pi}]') + + start_block(fid, FIFF.FIFFB_PROJ) + + for proj in projs: + start_block(fid, FIFF.FIFFB_PROJ_ITEM) + write_int(fid, FIFF.FIFF_NCHAN, len(proj['data']['col_names'])) + names = _rename_list(proj['data']['col_names'], ch_names_mapping) + write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, names) + write_string(fid, FIFF.FIFF_NAME, proj['desc']) + write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind']) + if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD: + write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0) + + write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow']) + write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active']) + write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS, + proj['data']['data']) + if proj['explained_var'] is not None: + write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR, + proj['explained_var']) + end_block(fid, FIFF.FIFFB_PROJ_ITEM) + + end_block(fid, FIFF.FIFFB_PROJ) + + +############################################################################### +# Utils + +def _check_projs(projs, copy=True): + """Check that projs is a list of Projection.""" + if not isinstance(projs, (list, tuple)): + raise TypeError('projs must be a list or tuple, got %s' + % (type(projs),)) + for pi, p in enumerate(projs): + if not isinstance(p, Projection): + raise TypeError('All entries in projs list must be Projection ' + 'instances, but projs[%d] is type %s' + % (pi, type(p))) + return deepcopy(projs) if copy else projs + + +def make_projector(projs, ch_names, bads=(), include_active=True): + """Create an SSP operator from SSP projection vectors. + + Parameters + ---------- + projs : list + List of projection vectors. + ch_names : list of str + List of channels to include in the projection matrix. + bads : list of str + Some bad channels to exclude. If bad channels were marked + in the raw file when projs were calculated using mne-python, + they should not need to be included here as they will + have been automatically omitted from the projectors. + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. 
+ U : array + The orthogonal basis of the projection vectors. + """ + return _make_projector(projs, ch_names, bads, include_active) + + +def _make_projector(projs, ch_names, bads=(), include_active=True, + inplace=False): + """Subselect projs based on ch_names and bads. + + Use inplace=True mode to modify ``projs`` inplace so that no + warning will be raised next time projectors are constructed with + the given inputs. If inplace=True, no meaningful data are returned. + """ + from scipy import linalg + nchan = len(ch_names) + if nchan == 0: + raise ValueError('No channel names specified') + + default_return = (np.eye(nchan, nchan), 0, np.empty((nchan, 0))) + + # Check trivial cases first + if projs is None: + return default_return + + nvec = 0 + nproj = 0 + for p in projs: + if not p['active'] or include_active: + nproj += 1 + nvec += p['data']['nrow'] + + if nproj == 0: + return default_return + + # Pick the appropriate entries + vecs = np.zeros((nchan, nvec)) + nvec = 0 + nonzero = 0 + bads = set(bads) + for k, p in enumerate(projs): + if not p['active'] or include_active: + if (len(p['data']['col_names']) != + len(np.unique(p['data']['col_names']))): + raise ValueError('Channel name list in projection item %d' + ' contains duplicate items' % k) + + # Get the two selection vectors to pick correct elements from + # the projection vectors omitting bad channels + sel = [] + vecsel = [] + p_set = set(p['data']['col_names']) # faster membership access + for c, name in enumerate(ch_names): + if name not in bads and name in p_set: + sel.append(c) + vecsel.append(p['data']['col_names'].index(name)) + + # If there is something to pick, pickit + nrow = p['data']['nrow'] + this_vecs = vecs[:, nvec:nvec + nrow] + if len(sel) > 0: + this_vecs[sel] = p['data']['data'][:, vecsel].T + + # Rescale for better detection of small singular values + for v in range(p['data']['nrow']): + psize = np.linalg.norm(this_vecs[:, v]) + if psize > 0: + orig_n = p['data']['data'].any(axis=0).sum() + # Average ref still works if channels are removed + # Use relative power to determine if we're in trouble. + # 10% loss is hopefully a reasonable threshold. + if psize < 0.9 and not inplace and \ + (p['kind'] != FIFF.FIFFV_PROJ_ITEM_EEG_AVREF or + len(vecsel) == 1): + warn( + f'Projection vector {repr(p["desc"])} has been ' + f'reduced to {100 * psize:0.2f}% of its ' + 'original magnitude by subselecting ' + f'{len(vecsel)}/{orig_n} of the original ' + 'channels. If the ignored channels were bad ' + 'during SSP computation, we recommend ' + 'recomputing proj (via compute_proj_raw ' + 'or related functions) with the bad channels ' + 'properly marked, because computing SSP with bad ' + 'channels present in the data but unmarked is ' + 'dangerous (it can bias the PCA used by SSP). ' + 'On the other hand, if you know that all channels ' + 'were good during SSP computation, you can safely ' + 'use info.normalize_proj() to suppress this ' + 'warning during projection.') + this_vecs[:, v] /= psize + nonzero += 1 + # If doing "inplace" mode, "fix" the projectors to only operate + # on this subset of channels. 
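            # Editor's note: in inplace mode the projector itself is pruned to
            # the surviving channel subset, so a later call with the same
            # sub-selected channels no longer triggers the magnitude warning
            # above; _normalize_proj() below relies on exactly this behavior.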
+ if inplace: + p['data']['data'] = this_vecs[sel].T + p['data']['col_names'] = [p['data']['col_names'][ii] + for ii in vecsel] + p['data']['ncol'] = len(p['data']['col_names']) + nvec += p['data']['nrow'] + + # Check whether all of the vectors are exactly zero + if nonzero == 0 or inplace: + return default_return + + # Reorthogonalize the vectors + U, S, _ = linalg.svd(vecs[:, :nvec], full_matrices=False) + + # Throw away the linearly dependent guys + nproj = np.sum((S / S[0]) > 1e-2) + U = U[:, :nproj] + + # Here is the celebrated result + proj = np.eye(nchan, nchan) - np.dot(U, U.T) + if nproj >= nchan: # e.g., 3 channels and 3 projectors + raise RuntimeError('Application of %d projectors for %d channels ' + 'will yield no components.' % (nproj, nchan)) + + return proj, nproj, U + + +def _normalize_proj(info): + """Normalize proj after subselection to avoid warnings. + + This is really only useful for tests, and might not be needed + eventually if we change or improve our handling of projectors + with picks. + """ + # Here we do info.get b/c info can actually be a noise cov + _make_projector(info['projs'], info.get('ch_names', info.get('names')), + info['bads'], include_active=True, inplace=True) + + +@fill_doc +def make_projector_info(info, include_active=True): + """Make an SSP operator using the measurement info. + + Calls make_projector on good channels. + + Parameters + ---------- + %(info_not_none)s + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. + """ + proj, nproj, _ = make_projector(info['projs'], info['ch_names'], + info['bads'], include_active) + return proj, nproj + + +@verbose +def activate_proj(projs, copy=True, verbose=None): + """Set all projections to active. + + Useful before passing them to make_projector. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Activate the projection items + for proj in projs: + proj['active'] = True + + logger.info('%d projection items activated' % len(projs)) + + return projs + + +@verbose +def deactivate_proj(projs, copy=True, verbose=None): + """Set all projections to inactive. + + Useful before saving raw data without projectors applied. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Deactivate the projection items + for proj in projs: + proj['active'] = False + + logger.info('%d projection items deactivated' % len(projs)) + + return projs + + +@verbose +def make_eeg_average_ref_proj(info, activate=True, verbose=None): + """Create an EEG average reference SSP projection vector. + + Parameters + ---------- + %(info_not_none)s + activate : bool + If True projections are activated. + %(verbose)s + + Returns + ------- + eeg_proj: instance of Projection + The SSP/PCA projector. + """ + if info.get('custom_ref_applied', False): + raise RuntimeError('A custom reference has been applied to the ' + 'data earlier. 
Please use the '
+                           'mne.io.set_eeg_reference function to move from '
+                           'one EEG reference to another.')
+
+    logger.info("Adding average EEG reference projection.")
+    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+    ch_names = info['ch_names']
+    eeg_names = [ch_names[k] for k in eeg_sel]
+    n_eeg = len(eeg_sel)
+    if n_eeg == 0:
+        raise ValueError('Cannot create EEG average reference projector '
+                         '(no EEG data found)')
+    vec = np.ones((1, n_eeg))
+    vec /= np.sqrt(n_eeg)
+    explained_var = None
+    eeg_proj_data = dict(col_names=eeg_names, row_names=None,
+                         data=vec, nrow=1, ncol=n_eeg)
+    eeg_proj = Projection(active=activate, data=eeg_proj_data,
+                          desc='Average EEG reference',
+                          kind=FIFF.FIFFV_PROJ_ITEM_EEG_AVREF,
+                          explained_var=explained_var)
+    return eeg_proj
+
+
+def _has_eeg_average_ref_proj(projs, check_active=False):
+    """Determine if a list of projectors has an average EEG ref.
+
+    Optionally, set check_active=True to additionally check if the CAR
+    has already been applied.
+    """
+    for proj in projs:
+        if (proj['desc'] == 'Average EEG reference' or
+                proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF):
+            if not check_active or proj['active']:
+                return True
+    return False
+
+
+def _needs_eeg_average_ref_proj(info):
+    """Determine if the EEG needs an average EEG reference.
+
+    This returns True if no custom reference has been applied and no average
+    reference projection is present in the list of projections.
+    """
+    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+    return (len(eeg_sel) > 0 and
+            not info['custom_ref_applied'] and
+            not _has_eeg_average_ref_proj(info['projs']))
+
+
+@verbose
+def setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):
+    """Set up projection for Raw and Epochs.
+
+    Parameters
+    ----------
+    %(info_not_none)s Warning: will be modified in-place.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    activate : bool
+        If True, projections are activated.
+    %(verbose)s
+
+    Returns
+    -------
+    projector : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    info : mne.Info
+        The modified measurement info.
+ """ + # Add EEG ref reference proj if necessary + if add_eeg_ref and _needs_eeg_average_ref_proj(info): + eeg_proj = make_eeg_average_ref_proj(info, activate=activate) + info['projs'].append(eeg_proj) + + # Create the projector + projector, nproj = make_projector_info(info) + if nproj == 0: + if verbose: + logger.info('The projection vectors do not apply to these ' + 'channels') + projector = None + else: + logger.info('Created an SSP operator (subspace dimension = %d)' + % nproj) + + # The projection items have been activated + if activate: + with info._unlock(): + info['projs'] = activate_proj(info['projs'], copy=False) + + return projector, info + + +def _uniquify_projs(projs, check_active=True, sort=True): + """Make unique projs.""" + final_projs = [] + for proj in projs: # flatten + if not any(_proj_equal(p, proj, check_active) for p in final_projs): + final_projs.append(proj) + + my_count = count(len(final_projs)) + + def sorter(x): + """Sort in a nice way.""" + digits = [s for s in x['desc'] if s.isdigit()] + if digits: + sort_idx = int(digits[-1]) + else: + sort_idx = next(my_count) + return (sort_idx, x['desc']) + + return sorted(final_projs, key=sorter) if sort else final_projs diff --git a/python/libs/mne/io/reference.py b/python/libs/mne/io/reference.py new file mode 100644 index 0000000..fd92ad2 --- /dev/null +++ b/python/libs/mne/io/reference.py @@ -0,0 +1,563 @@ +# Authors: Marijn van Vliet +# Alexandre Gramfort +# Teon Brooks +# +# License: BSD-3-Clause + +import numpy as np + +from .constants import FIFF +from .meas_info import _check_ch_keys +from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj +from .proj import setup_proj +from .pick import pick_types, pick_channels, pick_channels_forward +from .base import BaseRaw +from ..evoked import Evoked +from ..epochs import BaseEpochs +from ..fixes import pinv +from ..utils import (logger, warn, verbose, _validate_type, _check_preload, + _check_option, fill_doc) +from ..defaults import DEFAULTS + + +def _copy_channel(inst, ch_name, new_ch_name): + """Add a copy of a channel specified by ch_name. + + Input data can be in the form of Raw, Epochs or Evoked. + + The instance object is modified inplace. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Data containing the EEG channels + ch_name : str + Name of the channel to copy. + new_ch_name : str + Name given to the copy of the channel. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The data with a copy of a given channel. + """ + new_inst = inst.copy().pick_channels([ch_name]) + new_inst.rename_channels({ch_name: new_ch_name}) + inst.add_channels([new_inst], force_update_info=True) + return inst + + +def _check_before_reference(inst, ref_from, ref_to, ch_type): + """Prepare instance for referencing.""" + # Check to see that data is preloaded + _check_preload(inst, "Applying a reference") + + ch_type = _get_ch_type(inst, ch_type) + ch_dict = {**{type_: True for type_ in ch_type}, + 'meg': False, 'ref_meg': False} + eeg_idx = pick_types(inst.info, **ch_dict) + + if ref_to is None: + ref_to = [inst.ch_names[i] for i in eeg_idx] + extra = 'EEG channels found' + else: + extra = 'channels supplied' + if len(ref_to) == 0: + raise ValueError('No %s to apply the reference to' % (extra,)) + + # After referencing, existing SSPs might not be valid anymore. 
+ projs_to_remove = [] + for i, proj in enumerate(inst.info['projs']): + # Remove any average reference projections + if proj['desc'] == 'Average EEG reference' or \ + proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF: + logger.info('Removing existing average EEG reference ' + 'projection.') + # Don't remove the projection right away, but do this at the end of + # this loop. + projs_to_remove.append(i) + + # Inactive SSPs may block re-referencing + elif (not proj['active'] and + len([ch for ch in (ref_from + ref_to) + if ch in proj['data']['col_names']]) > 0): + + raise RuntimeError( + 'Inactive signal space projection (SSP) operators are ' + 'present that operate on sensors involved in the desired ' + 'referencing scheme. These projectors need to be applied ' + 'using the apply_proj() method function before the desired ' + 'reference can be set.' + ) + + for i in projs_to_remove: + del inst.info['projs'][i] + + # Need to call setup_proj after changing the projs: + inst._projector, _ = \ + setup_proj(inst.info, add_eeg_ref=False, activate=False) + + # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the + # info that a non-CAR has been applied. + ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True) + if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0: + with inst.info._unlock(): + inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + + return ref_to + + +def _apply_reference(inst, ref_from, ref_to=None, forward=None, + ch_type='auto'): + """Apply a custom EEG referencing scheme.""" + ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type) + + # Compute reference + if len(ref_from) > 0: + # this is guaranteed below, but we should avoid the crazy pick_channels + # behavior that [] gives all. Also use ordered=True just to make sure + # that all supplied channels actually exist. + assert len(ref_to) > 0 + ref_names = ref_from + ref_from = pick_channels(inst.ch_names, ref_from, ordered=True) + ref_to = pick_channels(inst.ch_names, ref_to, ordered=True) + + data = inst._data + ref_data = data[..., ref_from, :].mean(-2, keepdims=True) + data[..., ref_to, :] -= ref_data + ref_data = ref_data[..., 0, :] + + # REST + if forward is not None: + # use ch_sel and the given forward + forward = pick_channels_forward(forward, ref_names, ordered=True) + # 1-3. Compute a forward (G) and avg-ref'ed data (done above) + G = forward['sol']['data'] + assert G.shape[0] == len(ref_names) + # 4. Compute the forward (G) and average-reference it (Ga): + Ga = G - np.mean(G, axis=0, keepdims=True) + # 5. Compute the Ga_inv by SVD + Ga_inv = pinv(Ga, rtol=1e-6) + # 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv + Ra = G @ Ga_inv + # 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp) + Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True) + data[..., ref_to, :] += Vpa + else: + ref_data = None + + return inst, ref_data + + +@fill_doc +def add_reference_channels(inst, ref_channels, copy=True): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during recording. + This is useful when you need to re-reference your data to different + channels. These added channels will consist of all zeros. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. 
+ + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with added EEG reference channels. + """ + # Check to see that data is preloaded + _check_preload(inst, 'add_reference_channels') + _validate_type(ref_channels, (list, tuple, str), 'ref_channels') + if isinstance(ref_channels, str): + ref_channels = [ref_channels] + for ch in ref_channels: + if ch in inst.info['ch_names']: + raise ValueError("Channel %s already specified in inst." % ch) + + # Once CAR is applied (active), don't allow adding channels + if _has_eeg_average_ref_proj(inst.info['projs'], check_active=True): + raise RuntimeError('Average reference already applied to data.') + + if copy: + inst = inst.copy() + + if isinstance(inst, (BaseRaw, Evoked)): + data = inst._data + refs = np.zeros((len(ref_channels), data.shape[1])) + data = np.vstack((data, refs)) + inst._data = data + elif isinstance(inst, BaseEpochs): + data = inst._data + x, y, z = data.shape + refs = np.zeros((x * len(ref_channels), z)) + data = np.vstack((data.reshape((x * y, z), order='F'), refs)) + data = data.reshape(x, y + len(ref_channels), z, order='F') + inst._data = data + else: + raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s." + % type(inst)) + nchan = len(inst.info['ch_names']) + + # only do this if we actually have digitisation points + if inst.info.get('dig', None) is not None: + # "zeroth" EEG electrode dig points is reference + ref_dig_loc = [dl for dl in inst.info['dig'] if ( + dl['kind'] == FIFF.FIFFV_POINT_EEG and + dl['ident'] == 0)] + if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels): + ref_dig_array = np.full(12, np.nan) + warn('The locations of multiple reference channels are ignored.') + else: # n_ref_channels == 1 and a single ref digitization exists + ref_dig_array = np.concatenate((ref_dig_loc[0]['r'], + ref_dig_loc[0]['r'], np.zeros(6))) + # Replace the (possibly new) Ref location for each channel + for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]): + inst.info['chs'][idx]['loc'][3:6] = ref_dig_loc[0]['r'] + else: + # Ideally we'd fall back on getting the location from a montage, but + # locations for non-present channels aren't stored, so location is + # unknown. Users can call set_montage() again if needed. 
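        # Editor's note -- typical recovery path (sketch; the montage name is
        # only an example):
        #
        #     raw = mne.add_reference_channels(raw, ['Cz'])
        #     raw.set_montage('standard_1005')  # restores the new location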
+ ref_dig_array = np.full(12, np.nan) + logger.info('Location for this channel is unknown; consider calling ' + 'set_montage() again if needed.') + + for ch in ref_channels: + chan_info = {'ch_name': ch, + 'coil_type': FIFF.FIFFV_COIL_EEG, + 'kind': FIFF.FIFFV_EEG_CH, + 'logno': nchan + 1, + 'scanno': nchan + 1, + 'cal': 1, + 'range': 1., + 'unit_mul': 0., + 'unit': FIFF.FIFF_UNIT_V, + 'coord_frame': FIFF.FIFFV_COORD_HEAD, + 'loc': ref_dig_array} + inst.info['chs'].append(chan_info) + inst.info._update_redundant() + if isinstance(inst, BaseRaw): + inst._cals = np.hstack((inst._cals, [1] * len(ref_channels))) + range_ = np.arange(1, len(ref_channels) + 1) + for pi, picks in enumerate(inst._read_picks): + inst._read_picks[pi] = np.concatenate( + [picks, np.max(picks) + range_]) + inst.info._check_consistency() + set_eeg_reference(inst, ref_channels=ref_channels, copy=False, + verbose=False) + return inst + + +_ref_dict = { + FIFF.FIFFV_MNE_CUSTOM_REF_ON: 'on', + FIFF.FIFFV_MNE_CUSTOM_REF_OFF: 'off', + FIFF.FIFFV_MNE_CUSTOM_REF_CSD: 'CSD', +} + + +def _check_can_reref(inst): + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance") + current_custom = inst.info['custom_ref_applied'] + if current_custom not in (FIFF.FIFFV_MNE_CUSTOM_REF_ON, + FIFF.FIFFV_MNE_CUSTOM_REF_OFF): + raise RuntimeError('Cannot set new reference on data with custom ' + 'reference type %r' % (_ref_dict[current_custom],)) + + +@verbose +def set_eeg_reference(inst, ref_channels='average', copy=True, + projection=False, ch_type='auto', forward=None, + verbose=None): + """Specify which reference to use for EEG data. + + Use this function to explicitly specify the desired reference for EEG. + This can be either an existing electrode or a new virtual channel. + This function will re-reference the data according to the desired + reference. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels_set_eeg_reference)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. + %(projection_set_eeg_reference)s + %(ch_type_set_eeg_reference)s + %(forward_set_eeg_reference)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with EEG channels re-referenced. If ``ref_channels='average'`` and + ``projection=True`` a projection will be added instead of directly + re-referencing the data. + ref_data : array + Array of reference data subtracted from EEG channels. This will be + ``None`` if ``projection=True`` or ``ref_channels='REST'``. + %(set_eeg_reference_see_also_notes)s + """ + from ..forward import Forward + _check_can_reref(inst) + + if projection: # average reference projector + if ref_channels != 'average': + raise ValueError('Setting projection=True is only supported for ' + 'ref_channels="average", got %r.' + % (ref_channels,)) + if _has_eeg_average_ref_proj(inst.info['projs']): + warn('An average reference projection was already added. The data ' + 'has been left untouched.') + else: + # Creating an average reference may fail. In this case, make + # sure that the custom_ref_applied flag is left untouched. 
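            # Editor's note: the block below is a save/try/restore pattern --
            # custom_ref_applied is only left at REF_OFF when
            # make_eeg_average_ref_proj() succeeds (it raises, e.g., when no
            # EEG channels are present).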
+ custom_ref_applied = inst.info['custom_ref_applied'] + try: + with inst.info._unlock(): + inst.info['custom_ref_applied'] = \ + FIFF.FIFFV_MNE_CUSTOM_REF_OFF + inst.add_proj(make_eeg_average_ref_proj(inst.info, + activate=False)) + except Exception: + with inst.info._unlock(): + inst.info['custom_ref_applied'] = custom_ref_applied + raise + # If the data has been preloaded, projections will no + # longer be automatically applied. + if inst.preload: + logger.info('Average reference projection was added, ' + 'but has not been applied yet. Use the ' + 'apply_proj method to apply it.') + return inst, None + del projection # not used anymore + + inst = inst.copy() if copy else inst + ch_type = _get_ch_type(inst, ch_type) + ch_dict = {**{type_: True for type_ in ch_type}, + 'meg': False, 'ref_meg': False} + ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)] + + if ref_channels == 'REST': + _validate_type(forward, Forward, 'forward when ref_channels="REST"') + else: + forward = None # signal to _apply_reference not to do REST + + if ref_channels in ('average', 'REST'): + logger.info(f'Applying {ref_channels} reference.') + ref_channels = ch_sel + + if ref_channels == []: + logger.info('EEG data marked as already having the desired reference.') + else: + logger.info( + 'Applying a custom ' + f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} " + 'reference.') + + return _apply_reference(inst, ref_channels, ch_sel, forward, + ch_type=ch_type) + + +def _get_ch_type(inst, ch_type): + _validate_type(ch_type, (str, list, tuple), 'ch_type') + valid_ch_types = ('auto', 'eeg', 'ecog', 'seeg', 'dbs') + if isinstance(ch_type, str): + _check_option('ch_type', ch_type, valid_ch_types) + if ch_type != 'auto': + ch_type = [ch_type] + elif isinstance(ch_type, (list, tuple)): + for type_ in ch_type: + _validate_type(type_, str, 'ch_type') + _check_option('ch_type', type_, valid_ch_types[1:]) + ch_type = list(ch_type) + + # if ch_type is 'auto', search through list to find first reasonable + # reference-able channel type. + if ch_type == 'auto': + for type_ in ['eeg', 'ecog', 'seeg', 'dbs']: + if type_ in inst: + ch_type = [type_] + logger.info('%s channel type selected for ' + 're-referencing' % DEFAULTS['titles'][type_]) + break + # if auto comes up empty, or the user specifies a bad ch_type. + else: + raise ValueError('No EEG, ECoG, sEEG or DBS channels found ' + 'to rereference.') + return ch_type + + +@verbose +def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, + drop_refs=True, copy=True, verbose=None): + """Re-reference selected channels using a bipolar referencing scheme. + + A bipolar reference takes the difference between two channels (the anode + minus the cathode) and adds it as a new virtual channel. The original + channels will be dropped by default. + + Multiple anodes and cathodes can be specified, in which case multiple + virtual channels will be created. The 1st cathode will be subtracted + from the 1st anode, the 2nd cathode from the 2nd anode, etc. + + By default, the virtual channels will be annotated with channel-info and + -location of the anodes and coil types will be set to EEG_BIPOLAR. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Data containing the unreferenced channels. + anode : str | list of str + The name(s) of the channel(s) to use as anode in the bipolar reference. + cathode : str | list of str + The name(s) of the channel(s) to use as cathode in the bipolar + reference. 
+ ch_name : str | list of str | None + The channel name(s) for the virtual channel(s) containing the resulting + signal. By default, bipolar channels are named after the anode and + cathode, but it is recommended to supply a more meaningful name. + ch_info : dict | list of dict | None + This parameter can be used to supply a dictionary (or a dictionary for + each bipolar channel) containing channel information to merge in, + overwriting the default values. Defaults to None. + drop_refs : bool + Whether to drop the anode/cathode channels from the instance. + copy : bool + Whether to operate on a copy of the data (True) or modify it in-place + (False). Defaults to True. + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with the specified channels re-referenced. + + See Also + -------- + set_eeg_reference : Convenience function for creating an EEG reference. + + Notes + ----- + 1. If the anodes contain any EEG channels, this function removes + any pre-existing average reference projections. + + 2. During source localization, the EEG signal should have an average + reference. + + 3. The data must be preloaded. + + .. versionadded:: 0.9.0 + """ + from .meas_info import create_info + from ..io import RawArray + from ..epochs import EpochsArray + from ..evoked import EvokedArray + + _check_can_reref(inst) + if not isinstance(anode, list): + anode = [anode] + + if not isinstance(cathode, list): + cathode = [cathode] + + if len(anode) != len(cathode): + raise ValueError('Number of anodes (got %d) must equal the number ' + 'of cathodes (got %d).' % (len(anode), len(cathode))) + + if ch_name is None: + ch_name = [f'{a}-{c}' for (a, c) in zip(anode, cathode)] + elif not isinstance(ch_name, list): + ch_name = [ch_name] + if len(ch_name) != len(anode): + raise ValueError('Number of channel names must equal the number of ' + 'anodes/cathodes (got %d).' % len(ch_name)) + + # Check for duplicate channel names (it is allowed to give the name of the + # anode or cathode channel, as they will be replaced). + for ch, a, c in zip(ch_name, anode, cathode): + if ch not in [a, c] and ch in inst.ch_names: + raise ValueError('There is already a channel named "%s", please ' + 'specify a different name for the bipolar ' + 'channel using the ch_name parameter.' % ch) + + if ch_info is None: + ch_info = [{} for _ in anode] + elif not isinstance(ch_info, list): + ch_info = [ch_info] + if len(ch_info) != len(anode): + raise ValueError('Number of channel info dictionaries must equal the ' + 'number of anodes/cathodes.') + + if copy: + inst = inst.copy() + + anode = _check_before_reference(inst, ref_from=cathode, + ref_to=anode, ch_type='auto') + + # Create bipolar reference channels by multiplying the data + # (channels x time) with a matrix (n_virtual_channels x channels) + # and add them to the instance. + multiplier = np.zeros((len(anode), len(inst.ch_names))) + for idx, (a, c) in enumerate(zip(anode, cathode)): + multiplier[idx, inst.ch_names.index(a)] = 1 + multiplier[idx, inst.ch_names.index(c)] = -1 + + ref_info = create_info(ch_names=ch_name, sfreq=inst.info['sfreq'], + ch_types=inst.get_channel_types(picks=anode)) + + # Update "chs" in Reference-Info. + for ch_idx, (an, info) in enumerate(zip(anode, ch_info)): + _check_ch_keys(info, ch_idx, name='ch_info', check_min=False) + an_idx = inst.ch_names.index(an) + # Copy everything from anode (except ch_name). 
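The `multiplier` constructed above is just a signed selection matrix, so all virtual channels fall out of one matrix product. The same idea as a self-contained numpy sketch (hypothetical channel names):

import numpy as np

ch_names = ['A1', 'A2', 'C1', 'C2']
anode, cathode = ['A1', 'A2'], ['C1', 'C2']
data = np.random.randn(len(ch_names), 1000)      # channels x samples
mult = np.zeros((len(anode), len(ch_names)))
for i, (a, c) in enumerate(zip(anode, cathode)):
    mult[i, ch_names.index(a)] = 1               # +1 at the anode column
    mult[i, ch_names.index(c)] = -1              # -1 at the cathode column
bipolar = mult @ data                            # row i is anode i minus cathode i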
+        an_chs = {k: v for k, v in inst.info['chs'][an_idx].items()
+                  if k != 'ch_name'}
+        ref_info['chs'][ch_idx].update(an_chs)
+        # Set coil-type to bipolar.
+        ref_info['chs'][ch_idx]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
+        # Update with info from ch_info-parameter.
+        ref_info['chs'][ch_idx].update(info)
+
+    # Set other info-keys from original instance.
+    pick_info = {k: v for k, v in inst.info.items() if k not in
+                 ['chs', 'ch_names', 'bads', 'nchan', 'sfreq']}
+    with ref_info._unlock():
+        ref_info.update(pick_info)
+
+    # Rereferencing of data.
+    ref_data = multiplier @ inst._data
+
+    if isinstance(inst, BaseRaw):
+        ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp,
+                            copy=None)
+    elif isinstance(inst, BaseEpochs):
+        ref_inst = EpochsArray(ref_data, ref_info, events=inst.events,
+                               tmin=inst.tmin, event_id=inst.event_id,
+                               metadata=inst.metadata)
+    else:
+        ref_inst = EvokedArray(ref_data, ref_info, tmin=inst.tmin,
+                               comment=inst.comment, nave=inst.nave,
+                               kind='average')
+
+    # Add referenced instance to original instance.
+    inst.add_channels([ref_inst], force_update_info=True)
+
+    added_channels = ', '.join([name for name in ch_name])
+    logger.info(f'Added the following bipolar channels:\n{added_channels}')
+
+    for attr_name in ['picks', '_projector']:
+        setattr(inst, attr_name, None)
+
+    # Drop remaining channels.
+    if drop_refs:
+        drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
+        inst.drop_channels(drop_channels)
+
+    return inst
diff --git a/python/libs/mne/io/snirf/__init__.py b/python/libs/mne/io/snirf/__init__.py
new file mode 100644
index 0000000..ea3f11d
--- /dev/null
+++ b/python/libs/mne/io/snirf/__init__.py
@@ -0,0 +1,7 @@
+"""SNIRF module for conversion to FIF."""
+
+# Author: Robert Luke
+#
+# License: BSD-3-Clause
+
+from ._snirf import read_raw_snirf
diff --git a/python/libs/mne/io/snirf/_snirf.py b/python/libs/mne/io/snirf/_snirf.py
new file mode 100644
index 0000000..5d28835
--- /dev/null
+++ b/python/libs/mne/io/snirf/_snirf.py
@@ -0,0 +1,449 @@
+# Authors: Robert Luke
+#
+# License: BSD-3-Clause
+
+import re
+import numpy as np
+import datetime
+
+from ..base import BaseRaw
+from ..meas_info import create_info, _format_dig_points
+from ..utils import _mult_cal_one
+from ...annotations import Annotations
+from ...utils import (logger, verbose, fill_doc, warn, _check_fname,
+                      _import_h5py)
+from ..constants import FIFF
+from .._digitization import _make_dig_points
+from ...transforms import _frame_to_str, apply_trans
+from ..nirx.nirx import _convert_fnirs_to_head
+from ..._freesurfer import get_mni_fiducials
+
+
+@fill_doc
+def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None):
+    """Reader for continuous wave SNIRF data.
+
+    .. note:: This reader supports the .snirf file type only,
+              not the .jnirs version.
+              Files with either 3D or 2D locations can be read.
+              However, we strongly recommend using 3D positions.
+              If 2D positions are used, the behaviour of MNE functions
+              cannot be guaranteed.
+
+    Parameters
+    ----------
+    fname : str
+        Path to the SNIRF data file.
+    optode_frame : str
+        Coordinate frame used for the optode positions. The default is
+        unknown, in which case the positions are not modified. If a known
+        coordinate frame is provided (head, meg, mri), then the positions are
+        transformed into the Neuromag head coordinate frame (head).
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawSNIRF
+        A Raw object containing fNIRS data.
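A usage sketch for the reader documented above (editor's illustration; the path is a placeholder):

import mne

# optode_frame accepts 'unknown' (default), 'head', 'mri', or 'meg';
# 'mri' and 'meg' positions are transformed to the Neuromag head frame on load.
raw = mne.io.read_raw_snirf('recording.snirf', optode_frame='mri',
                            preload=True)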
+ + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawSNIRF(fname, optode_frame, preload, verbose) + + +def _open(fname): + return open(fname, 'r', encoding='latin-1') + + +@fill_doc +class RawSNIRF(BaseRaw): + """Raw object from a continuous wave SNIRF file. + + Parameters + ---------- + fname : str + Path to the SNIRF data file. + optode_frame : str + Coordinate frame used for the optode positions. The default is unknown, + in which case the positions are not modified. If a known coordinate + frame is provided (head, meg, mri), then the positions are transformed + in to the Neuromag head coordinate frame (head). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, fname, optode_frame="unknown", + preload=False, verbose=None): + # Must be here due to circular import error + from ...preprocessing.nirs import _validate_nirs_info + h5py = _import_h5py() + + fname = _check_fname(fname, 'read', True, 'fname') + logger.info('Loading %s' % fname) + + with h5py.File(fname, 'r') as dat: + + if 'data2' in dat['nirs']: + warn("File contains multiple recordings. " + "MNE does not support this feature. " + "Only the first dataset will be processed.") + + snirf_data_type = np.array(dat.get('nirs/data1/measurementList1' + '/dataType')).item() + if snirf_data_type not in [1, 99999]: + # 1 = Continuous Wave + # 99999 = Processed + raise RuntimeError('MNE only supports reading continuous' + ' wave amplitude and processed haemoglobin' + ' SNIRF files. Expected type' + ' code 1 or 99999 but received type ' + f'code {snirf_data_type}') + + last_samps = dat.get('/nirs/data1/dataTimeSeries').shape[0] - 1 + + samplingrate_raw = np.array(dat.get('nirs/data1/time')) + sampling_rate = 0 + if samplingrate_raw.shape == (2, 1): + # specified as onset/samplerate + warn("Onset/sample rate SNIRF not yet supported.") + else: + # specified as time points + fs_diff = np.around(np.diff(samplingrate_raw), decimals=4) + if len(np.unique(fs_diff)) == 1: + # Uniformly sampled data + sampling_rate = 1. / np.unique(fs_diff) + else: + # print(np.unique(fs_diff)) + warn("Non uniform sampled data not supported.") + if sampling_rate == 0: + warn("Unable to extract sample rate from SNIRF file.") + + # Extract wavelengths + fnirs_wavelengths = np.array(dat.get('nirs/probe/wavelengths')) + fnirs_wavelengths = [int(w) for w in fnirs_wavelengths] + if len(fnirs_wavelengths) != 2: + raise RuntimeError(f'The data contains ' + f'{len(fnirs_wavelengths)}' + f' wavelengths: {fnirs_wavelengths}. ' + f'MNE only supports reading continuous' + ' wave amplitude SNIRF files ' + 'with two wavelengths.') + + # Extract channels + def atoi(text): + return int(text) if text.isdigit() else text + + def natural_keys(text): + return [atoi(c) for c in re.split(r'(\d+)', text)] + + channels = np.array([name for name in dat['nirs']['data1'].keys()]) + channels_idx = np.array(['measurementList' in n for n in channels]) + channels = channels[channels_idx] + channels = sorted(channels, key=natural_keys) + + # Source and detector labels are optional fields. + # Use S1, S2, S3, etc if not specified. + if 'sourceLabels_disabled' in dat['nirs/probe']: + # This is disabled as + # MNE-Python does not currently support custom source names. + # Instead, sources must be integer values. 
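The sampling-rate logic above infers the frequency from successive time stamps instead of trusting a stored value; the same check in isolation (illustrative numbers):

import numpy as np

t = np.arange(0.0, 1.0, 0.1)              # uniformly spaced time stamps (s)
fs_diff = np.around(np.diff(t), decimals=4)
assert len(np.unique(fs_diff)) == 1       # uniform sampling
sfreq = 1.0 / np.unique(fs_diff).item()   # 10.0 Hz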
+ sources = np.array(dat.get('nirs/probe/sourceLabels')) + sources = [s.decode('UTF-8') for s in sources] + else: + sources = np.unique([_correct_shape(np.array(dat.get( + 'nirs/data1/' + c + '/sourceIndex')))[0] + for c in channels]) + sources = [f"S{int(s)}" for s in sources] + + if 'detectorLabels_disabled' in dat['nirs/probe']: + # This is disabled as + # MNE-Python does not currently support custom detector names. + # Instead, detector must be integer values. + detectors = np.array(dat.get('nirs/probe/detectorLabels')) + detectors = [d.decode('UTF-8') for d in detectors] + else: + detectors = np.unique([_correct_shape(np.array(dat.get( + 'nirs/data1/' + c + '/detectorIndex')))[0] + for c in channels]) + detectors = [f"D{int(d)}" for d in detectors] + + # Extract source and detector locations + # 3D positions are optional in SNIRF, + # but highly recommended in MNE. + if ('detectorPos3D' in dat['nirs/probe']) &\ + ('sourcePos3D' in dat['nirs/probe']): + # If 3D positions are available they are used even if 2D exists + detPos3D = np.array(dat.get('nirs/probe/detectorPos3D')) + srcPos3D = np.array(dat.get('nirs/probe/sourcePos3D')) + elif ('detectorPos2D' in dat['nirs/probe']) &\ + ('sourcePos2D' in dat['nirs/probe']): + warn('The data only contains 2D location information for the ' + 'optode positions. ' + 'It is highly recommended that data is used ' + 'which contains 3D location information for the ' + 'optode positions. With only 2D locations it can not be ' + 'guaranteed that MNE functions will behave correctly ' + 'and produce accurate results. If it is not possible to ' + 'include 3D positions in your data, please consider ' + 'using the set_montage() function.') + + detPos2D = np.array(dat.get('nirs/probe/detectorPos2D')) + srcPos2D = np.array(dat.get('nirs/probe/sourcePos2D')) + # Set the third dimension to zero. See gh#9308 + detPos3D = np.append(detPos2D, + np.zeros((detPos2D.shape[0], 1)), axis=1) + srcPos3D = np.append(srcPos2D, + np.zeros((srcPos2D.shape[0], 1)), axis=1) + + else: + raise RuntimeError('No optode location information is ' + 'provided. 
MNE requires at least 2D ' + 'location information') + + assert len(sources) == srcPos3D.shape[0] + assert len(detectors) == detPos3D.shape[0] + + chnames = [] + ch_types = [] + for chan in channels: + src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + + chan + '/sourceIndex')))[0]) + det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + + chan + '/detectorIndex')))[0]) + + if snirf_data_type == 1: + wve_idx = int(_correct_shape(np.array( + dat.get('nirs/data1/' + chan + + '/wavelengthIndex')))[0]) + ch_name = sources[src_idx - 1] + '_' +\ + detectors[det_idx - 1] + ' ' +\ + str(fnirs_wavelengths[wve_idx - 1]) + chnames.append(ch_name) + ch_types.append('fnirs_cw_amplitude') + + elif snirf_data_type == 99999: + dt_id = _correct_shape( + np.array(dat.get('nirs/data1/' + chan + + '/dataTypeLabel')))[0].decode('UTF-8') + + # Convert between SNIRF processed names and MNE type names + dt_id = dt_id.lower().replace("dod", "fnirs_od") + + ch_name = sources[src_idx - 1] + '_' + \ + detectors[det_idx - 1] + + if dt_id == "fnirs_od": + wve_idx = int(_correct_shape(np.array( + dat.get('nirs/data1/' + chan + + '/wavelengthIndex')))[0]) + suffix = ' ' + str(fnirs_wavelengths[wve_idx - 1]) + else: + suffix = ' ' + dt_id.lower() + ch_name = ch_name + suffix + + chnames.append(ch_name) + ch_types.append(dt_id) + + # Create mne structure + info = create_info(chnames, + sampling_rate, + ch_types=ch_types) + + subject_info = {} + names = np.array(dat.get('nirs/metaDataTags/SubjectID')) + subject_info['first_name'] = \ + _correct_shape(names)[0].decode('UTF-8') + # Read non standard (but allowed) custom metadata tags + if 'lastName' in dat.get('nirs/metaDataTags/'): + ln = dat.get('/nirs/metaDataTags/lastName')[0].decode('UTF-8') + subject_info['last_name'] = ln + if 'middleName' in dat.get('nirs/metaDataTags/'): + m = dat.get('/nirs/metaDataTags/middleName')[0].decode('UTF-8') + subject_info['middle_name'] = m + if 'sex' in dat.get('nirs/metaDataTags/'): + s = dat.get('/nirs/metaDataTags/sex')[0].decode('UTF-8') + if s in {'M', 'Male', '1', 'm'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE + elif s in {'F', 'Female', '2', 'f'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE + elif s in {'0', 'u'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + # End non standard name reading + # Update info + info.update(subject_info=subject_info) + + LengthUnit = np.array(dat.get('/nirs/metaDataTags/LengthUnit')) + LengthUnit = _correct_shape(LengthUnit)[0].decode('UTF-8') + scal = 1 + if "cm" in LengthUnit: + scal = 100 + elif "mm" in LengthUnit: + scal = 1000 + + srcPos3D /= scal + detPos3D /= scal + + if optode_frame in ["mri", "meg"]: + # These are all in MNI or MEG coordinates, so let's transform + # them to the Neuromag head coordinate frame + srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head( + 'fsaverage', optode_frame, 'head', srcPos3D, detPos3D, []) + else: + head_t = np.eye(4) + + if optode_frame in ["head", "mri", "meg"]: + # Then the transformation to head was performed above + coord_frame = FIFF.FIFFV_COORD_HEAD + elif 'MNE_coordFrame' in dat.get('nirs/metaDataTags/'): + coord_frame = int(dat.get('/nirs/metaDataTags/MNE_coordFrame') + [0]) + else: + coord_frame = FIFF.FIFFV_COORD_UNKNOWN + + for idx, chan in enumerate(channels): + src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + + chan + '/sourceIndex')))[0]) + det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + + chan + '/detectorIndex')))[0]) + + info['chs'][idx]['loc'][3:6] = srcPos3D[src_idx - 
1, :] + info['chs'][idx]['loc'][6:9] = detPos3D[det_idx - 1, :] + # Store channel as mid point + midpoint = (info['chs'][idx]['loc'][3:6] + + info['chs'][idx]['loc'][6:9]) / 2 + info['chs'][idx]['loc'][0:3] = midpoint + info['chs'][idx]['coord_frame'] = coord_frame + + if (snirf_data_type in [1]) or \ + ((snirf_data_type == 99999) and + (ch_types[idx] == "fnirs_od")): + wve_idx = int(_correct_shape(np.array(dat.get( + 'nirs/data1/' + chan + '/wavelengthIndex')))[0]) + info['chs'][idx]['loc'][9] = fnirs_wavelengths[wve_idx - 1] + + if 'landmarkPos3D' in dat.get('nirs/probe/'): + diglocs = np.array(dat.get('/nirs/probe/landmarkPos3D')) + digname = np.array(dat.get('/nirs/probe/landmarkLabels')) + nasion, lpa, rpa, hpi = None, None, None, None + extra_ps = dict() + for idx, dign in enumerate(digname): + if dign == b'LPA': + lpa = diglocs[idx, :3] + elif dign == b'NASION': + nasion = diglocs[idx, :3] + elif dign == b'RPA': + rpa = diglocs[idx, :3] + else: + extra_ps[f'EEG{len(extra_ps) + 1:03d}'] = \ + diglocs[idx, :3] + dig = _make_dig_points( + nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, + dig_ch_pos=extra_ps, + coord_frame=_frame_to_str[coord_frame]) + else: + ch_locs = [info['chs'][idx]['loc'][0:3] + for idx in range(len(channels))] + # Set up digitization + dig = get_mni_fiducials('fsaverage', verbose=False) + for fid in dig: + fid['r'] = apply_trans(head_t, fid['r']) + fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD + for ii, ch_loc in enumerate(ch_locs, 1): + dig.append(dict( + kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + )) + dig = _format_dig_points(dig) + del head_t + with info._unlock(): + info['dig'] = dig + + str_date = _correct_shape(np.array((dat.get( + '/nirs/metaDataTags/MeasurementDate'))))[0].decode('UTF-8') + str_time = _correct_shape(np.array((dat.get( + '/nirs/metaDataTags/MeasurementTime'))))[0].decode('UTF-8') + str_datetime = str_date + str_time + + # Several formats have been observed so we try each in turn + for dt_code in ['%Y-%m-%d%H:%M:%SZ', + '%Y-%m-%d%H:%M:%S']: + try: + meas_date = datetime.datetime.strptime( + str_datetime, dt_code) + except ValueError: + pass + else: + break + else: + warn("Extraction of measurement date from SNIRF file failed. 
" + "The date is being set to January 1st, 2000, " + f"instead of {str_datetime}") + meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0) + meas_date = meas_date.replace(tzinfo=datetime.timezone.utc) + with info._unlock(): + info['meas_date'] = meas_date + + if 'DateOfBirth' in dat.get('nirs/metaDataTags/'): + str_birth = np.array((dat.get('/nirs/metaDataTags/' + 'DateOfBirth')))[0].decode() + birth_matched = re.fullmatch(r'(\d+)-(\d+)-(\d+)', str_birth) + if birth_matched is not None: + birthday = (int(birth_matched.groups()[0]), + int(birth_matched.groups()[1]), + int(birth_matched.groups()[2])) + with info._unlock(): + info["subject_info"]['birthday'] = birthday + + super(RawSNIRF, self).__init__(info, preload, filenames=[fname], + last_samps=[last_samps], + verbose=verbose) + + # Extract annotations + annot = Annotations([], [], []) + for key in dat['nirs']: + if 'stim' in key: + data = np.atleast_2d(np.array( + dat.get('/nirs/' + key + '/data'))) + if data.size > 0: + desc = _correct_shape(np.array(dat.get( + '/nirs/' + key + '/name')))[0] + annot.append(data[:, 0], 1.0, desc.decode('UTF-8')) + self.set_annotations(annot, emit_warning=False) + + # Reorder channels to match expected ordering in MNE if required' + if len(_validate_nirs_info(self.info, throw_errors=False)) == 0: + num_chans = len(self.ch_names) + chans = [] + for idx in range(num_chans // 2): + chans.append(idx) + chans.append(idx + num_chans // 2) + self.pick(picks=chans) + + # Validate that the fNIRS info is correctly formatted + _validate_nirs_info(self.info) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + import h5py + + with h5py.File(self._filenames[0], 'r') as dat: + one = dat['/nirs/data1/dataTimeSeries'][start:stop].T + + _mult_cal_one(data, one, idx, cals, mult) + + +# Helper function for when the numpy array has shape (), i.e. just one element. 
+def _correct_shape(arr): + if arr.shape == (): + arr = arr[np.newaxis] + return arr diff --git a/python/libs/mne/io/snirf/tests/__init__.py b/python/libs/mne/io/snirf/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/snirf/tests/test_snirf.py b/python/libs/mne/io/snirf/tests/test_snirf.py new file mode 100644 index 0000000..356e98c --- /dev/null +++ b/python/libs/mne/io/snirf/tests/test_snirf.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# Authors: Robert Luke +# simplified BSD-3 license + +import os.path as op +import numpy as np +from numpy.testing import assert_allclose, assert_almost_equal, assert_equal +import shutil +import pytest + +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_snirf, read_raw_nirx +from mne.io.tests.test_raw import _test_raw_reader +from mne.preprocessing.nirs import (optical_density, beer_lambert_law, + short_channels, source_detector_distances) +from mne.transforms import apply_trans, _get_trans +from mne.io.constants import FIFF + + +testing_path = data_path(download=False) + +# SfNIRS files +sfnirs_homer_103_wShort = op.join(testing_path, 'SNIRF', 'SfNIRS', + 'snirf_homer3', '1.0.3', + 'snirf_1_3_nirx_15_2_' + 'recording_w_short.snirf') +sfnirs_homer_103_wShort_original = op.join(testing_path, 'NIRx', 'nirscout', + 'nirx_15_2_recording_w_short') + +sfnirs_homer_103_153 = op.join(testing_path, 'SNIRF', 'SfNIRS', 'snirf_homer3', + '1.0.3', 'nirx_15_3_recording.snirf') + +# NIRSport2 files +nirx_nirsport2_103 = op.join(testing_path, 'SNIRF', 'NIRx', 'NIRSport2', + '1.0.3', '2021-04-23_005.snirf') +nirx_nirsport2_103_2 = op.join(testing_path, 'SNIRF', 'NIRx', 'NIRSport2', + '1.0.3', '2021-05-05_001.snirf') +snirf_nirsport2_20219 = op.join(testing_path, 'SNIRF', 'NIRx', 'NIRSport2', + '2021.9', '2021-10-01_002.snirf') +nirx_nirsport2_20219 = op.join(testing_path, 'NIRx', 'nirsport_v2', + 'aurora_2021_9') + +# Kernel +kernel_hb = op.join(testing_path, 'SNIRF', 'Kernel', 'Flow50', + 'Portal_2021_11', 'hb.snirf') + +h5py = pytest.importorskip('h5py') # module-level + +# Fieldtrip +ft_od = op.join(testing_path, 'SNIRF', 'FieldTrip', + '220307_opticaldensity.snirf') + + +@requires_testing_data +@pytest.mark.filterwarnings('ignore:.*contains 2D location.*:') +@pytest.mark.parametrize('fname', ([sfnirs_homer_103_wShort, + nirx_nirsport2_103, + sfnirs_homer_103_153, + nirx_nirsport2_103, + nirx_nirsport2_103_2, + nirx_nirsport2_103_2, + kernel_hb + ])) +def test_basic_reading_and_min_process(fname): + """Test reading SNIRF files and minimum typical processing.""" + raw = read_raw_snirf(fname, preload=True) + # SNIRF data can contain several types, so only apply appropriate functions + if 'fnirs_cw_amplitude' in raw: + raw = optical_density(raw) + if 'fnirs_od' in raw: + raw = beer_lambert_law(raw, ppf=6) + assert 'hbo' in raw + assert 'hbr' in raw + + +@requires_testing_data +def test_snirf_basic(): + """Test reading SNIRF files.""" + raw = read_raw_snirf(sfnirs_homer_103_wShort, preload=True) + + # Test data import + assert raw._data.shape == (26, 145) + assert raw.info['sfreq'] == 12.5 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", + "S1_D9 760", "S1_D9 850"] + assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + # Test source locations + assert_allclose([-8.6765 * 1e-2, 0.0049 * 1e-2, -2.6167 * 
1e-2], + raw.info['chs'][0]['loc'][3:6], rtol=0.02) + assert_allclose([7.9579 * 1e-2, -2.7571 * 1e-2, -2.2631 * 1e-2], + raw.info['chs'][4]['loc'][3:6], rtol=0.02) + assert_allclose([-2.1387 * 1e-2, -8.8874 * 1e-2, 3.8393 * 1e-2], + raw.info['chs'][8]['loc'][3:6], rtol=0.02) + assert_allclose([1.8602 * 1e-2, 9.7164 * 1e-2, 1.7539 * 1e-2], + raw.info['chs'][12]['loc'][3:6], rtol=0.02) + assert_allclose([-0.1108 * 1e-2, 0.7066 * 1e-2, 8.9883 * 1e-2], + raw.info['chs'][16]['loc'][3:6], rtol=0.02) + + # Test detector locations + assert_allclose([-8.0409 * 1e-2, -2.9677 * 1e-2, -2.5415 * 1e-2], + raw.info['chs'][0]['loc'][6:9], rtol=0.02) + assert_allclose([-8.7329 * 1e-2, 0.7577 * 1e-2, -2.7980 * 1e-2], + raw.info['chs'][3]['loc'][6:9], rtol=0.02) + assert_allclose([9.2027 * 1e-2, 0.0161 * 1e-2, -2.8909 * 1e-2], + raw.info['chs'][5]['loc'][6:9], rtol=0.02) + assert_allclose([7.7548 * 1e-2, -3.5901 * 1e-2, -2.3179 * 1e-2], + raw.info['chs'][7]['loc'][6:9], rtol=0.02) + + assert 'fnirs_cw_amplitude' in raw + + +@requires_testing_data +def test_snirf_against_nirx(): + """Test against file snirf was created from.""" + raw = read_raw_snirf(sfnirs_homer_103_wShort, preload=True) + raw_orig = read_raw_nirx(sfnirs_homer_103_wShort_original, preload=True) + + # Check annotations are the same + assert_allclose(raw.annotations.onset, raw_orig.annotations.onset) + assert_allclose([float(d) for d in raw.annotations.description], + [float(d) for d in raw_orig.annotations.description]) + assert_allclose(raw.annotations.duration, raw_orig.annotations.duration) + + # Check names are the same + assert raw.info['ch_names'] == raw_orig.info['ch_names'] + + # Check frequencies are the same + num_chans = len(raw.ch_names) + new_chs = raw.info['chs'] + ori_chs = raw_orig.info['chs'] + assert_allclose([new_chs[idx]['loc'][9] for idx in range(num_chans)], + [ori_chs[idx]['loc'][9] for idx in range(num_chans)]) + + # Check data is the same + assert_allclose(raw.get_data(), raw_orig.get_data()) + + +@requires_testing_data +def test_snirf_nonstandard(tmp_path): + """Test custom tags.""" + shutil.copy(sfnirs_homer_103_wShort, str(tmp_path) + "/mod.snirf") + fname = str(tmp_path) + "/mod.snirf" + # Manually mark up the file to match MNE-NIRS custom tags + with h5py.File(fname, "r+") as f: + f.create_dataset("nirs/metaDataTags/middleName", + data=['X'.encode('UTF-8')]) + f.create_dataset("nirs/metaDataTags/lastName", + data=['Y'.encode('UTF-8')]) + f.create_dataset("nirs/metaDataTags/sex", + data=['1'.encode('UTF-8')]) + raw = read_raw_snirf(fname, preload=True) + assert raw.info["subject_info"]["middle_name"] == 'X' + assert raw.info["subject_info"]["last_name"] == 'Y' + assert raw.info["subject_info"]["sex"] == 1 + with h5py.File(fname, "r+") as f: + del f['nirs/metaDataTags/sex'] + f.create_dataset("nirs/metaDataTags/sex", + data=['2'.encode('UTF-8')]) + raw = read_raw_snirf(fname, preload=True) + assert raw.info["subject_info"]["sex"] == 2 + with h5py.File(fname, "r+") as f: + del f['nirs/metaDataTags/sex'] + f.create_dataset("nirs/metaDataTags/sex", + data=['0'.encode('UTF-8')]) + raw = read_raw_snirf(fname, preload=True) + assert raw.info["subject_info"]["sex"] == 0 + + with h5py.File(fname, "r+") as f: + f.create_dataset("nirs/metaDataTags/MNE_coordFrame", data=[1]) + + +@requires_testing_data +def test_snirf_nirsport2(): + """Test reading SNIRF files.""" + raw = read_raw_snirf(nirx_nirsport2_103, preload=True) + + # Test data import + assert raw._data.shape == (92, 84) + assert_almost_equal(raw.info['sfreq'], 7.6, 
decimal=1) + + # Test channel naming + assert raw.info['ch_names'][:4] == ['S1_D1 760', 'S1_D1 850', + 'S1_D3 760', 'S1_D3 850'] + assert raw.info['ch_names'][24:26] == ['S6_D4 760', 'S6_D4 850'] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + assert sum(short_channels(raw.info)) == 16 + + +@requires_testing_data +def test_snirf_coordframe(): + """Test reading SNIRF files.""" + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="head").\ + info['chs'][3]['coord_frame'] + assert raw == FIFF.FIFFV_COORD_HEAD + + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="mri").\ + info['chs'][3]['coord_frame'] + assert raw == FIFF.FIFFV_COORD_HEAD + + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="unknown").\ + info['chs'][3]['coord_frame'] + assert raw == FIFF.FIFFV_COORD_UNKNOWN + + +@requires_testing_data +def test_snirf_nirsport2_w_positions(): + """Test reading SNIRF files with known positions.""" + raw = read_raw_snirf(nirx_nirsport2_103_2, preload=True, + optode_frame="mri") + + # Test data import + assert raw._data.shape == (40, 128) + assert_almost_equal(raw.info['sfreq'], 10.2, decimal=1) + + # Test channel naming + assert raw.info['ch_names'][:4] == ['S1_D1 760', 'S1_D1 850', + 'S1_D6 760', 'S1_D6 850'] + assert raw.info['ch_names'][24:26] == ['S6_D4 760', 'S6_D4 850'] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + assert sum(short_channels(raw.info)) == 16 + + # Test distance between optodes matches values from + # nirsite https://github.com/mne-tools/mne-testing-data/pull/86 + # figure 3 + allowed_distance_error = 0.005 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2][:14], + [0.0304, 0.0411, 0.008, 0.0400, 0.008, 0.0310, 0.0411, + 0.008, 0.0299, 0.008, 0.0370, 0.008, 0.0404, 0.008], + atol=allowed_distance_error) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on this page... + # https://github.com/mne-tools/mne-testing-data/pull/86 + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][2][3:5] == 'D6' + assert_allclose( + mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) + + assert raw.info['ch_names'][34][3:5] == 'D5' + assert_allclose( + mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) + + # Test location of sensors + # The locations of sensors can be seen in the second + # figure on this page... 
+ # https://github.com/mne-tools/mne-testing-data/pull/86 + allowed_dist_error = 0.0002 + locs = [ch['loc'][3:6] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][:2] == 'S1' + assert_allclose( + mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) + + assert raw.info['ch_names'][9][:2] == 'S2' + assert_allclose( + mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) + + assert raw.info['ch_names'][34][:2] == 'S8' + assert_allclose( + mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) + + mon = raw.get_montage() + assert len(mon.dig) == 43 + + +@requires_testing_data +def test_snirf_fieldtrip_od(): + """Test reading FieldTrip SNIRF files with optical density data.""" + raw = read_raw_snirf(ft_od, preload=True) + + # Test data import + assert raw._data.shape == (72, 500) + assert raw.copy().pick('fnirs')._data.shape == (72, 500) + assert raw.copy().pick('fnirs_od')._data.shape == (72, 500) + with pytest.raises(ValueError, match='not be interpreted as channel'): + raw.copy().pick('hbo') + with pytest.raises(ValueError, match='not be interpreted as channel'): + raw.copy().pick('hbr') + + assert_allclose(raw.info['sfreq'], 50) + + +@requires_testing_data +def test_snirf_kernel_hb(): + """Test reading Kernel SNIRF files with haemoglobin data.""" + raw = read_raw_snirf(kernel_hb, preload=True) + + # Test data import + assert raw._data.shape == (180 * 2, 14) + assert raw.copy().pick('hbo')._data.shape == (180, 14) + assert raw.copy().pick('hbr')._data.shape == (180, 14) + + assert_allclose(raw.info['sfreq'], 8.257638) + + bad_nans = np.isnan(raw.get_data()).any(axis=1) + assert np.sum(bad_nans) == 20 + + assert len(raw.annotations.description) == 2 + assert raw.annotations.onset[0] == 0.036939 + assert raw.annotations.onset[1] == 0.874633 + assert raw.annotations.description[0] == "StartTrial" + assert raw.annotations.description[1] == "StartIti" + + +@requires_testing_data +@pytest.mark.parametrize('fname, boundary_decimal, test_scaling, test_rank', ( + [sfnirs_homer_103_wShort, 0, True, True], + [nirx_nirsport2_103, 0, True, False], # strange rank behavior + [nirx_nirsport2_103_2, 0, False, True], # weirdly small values + [snirf_nirsport2_20219, 0, True, True], +)) +def test_snirf_standard(fname, boundary_decimal, test_scaling, test_rank): + """Test standard operations.""" + _test_raw_reader(read_raw_snirf, fname=fname, + boundary_decimal=boundary_decimal, + test_scaling=test_scaling, + test_rank=test_rank) # low fs + + +@requires_testing_data +def test_annotation_description_from_stim_groups(): + """Test annotation descriptions parsed from stim group names.""" + raw = read_raw_snirf(nirx_nirsport2_103_2, preload=True) + expected_descriptions = ['1', '2', '6'] + assert_equal(expected_descriptions, raw.annotations.description) diff --git a/python/libs/mne/io/tag.py b/python/libs/mne/io/tag.py new file mode 100644 index 0000000..69504a5 --- /dev/null +++ b/python/libs/mne/io/tag.py @@ -0,0 +1,517 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +from functools import partial +import struct + +import numpy as np + +from .constants import (FIFF, _dig_kind_named, _dig_cardinal_named, + _ch_kind_named, _ch_coil_type_named, _ch_unit_named, + _ch_unit_mul_named) +from ..utils.numerics import _julian_to_cal + + +############################################################################## +# HELPERS + +class Tag(object): 
+ """Tag in FIF tree structure. + + Parameters + ---------- + kind : int + Kind of Tag. + type_ : int + Type of Tag. + size : int + Size in bytes. + int : next + Position of next Tag. + pos : int + Position of Tag is the original file. + """ + + def __init__(self, kind, type_, size, next, pos=None): # noqa: D102 + self.kind = int(kind) + self.type = int(type_) + self.size = int(size) + self.next = int(next) + self.pos = pos if pos is not None else next + self.pos = int(self.pos) + self.data = None + + def __repr__(self): # noqa: D105 + out = (" 0: + fid.seek(tag.next, 0) + return tag + + +def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None): + """Get a range of rows from a large tag.""" + if shape is not None: + item_size = np.dtype(dtype).itemsize + if not len(shape) == 2: + raise ValueError('Only implemented for 2D matrices') + want_shape = np.prod(shape) + have_shape = tag_size // item_size + if want_shape != have_shape: + raise ValueError('Wrong shape specified, requested %s have %s' + % (want_shape, have_shape)) + if not len(rlims) == 2: + raise ValueError('rlims must have two elements') + n_row_out = rlims[1] - rlims[0] + if n_row_out <= 0: + raise ValueError('rlims must yield at least one output') + row_size = item_size * shape[1] + # # of bytes to skip at the beginning, # to read, where to end + start_skip = int(rlims[0] * row_size) + read_size = int(n_row_out * row_size) + end_pos = int(fid.tell() + tag_size) + # Move the pointer ahead to the read point + fid.seek(start_skip, 1) + # Do the reading + out = np.frombuffer(fid.read(read_size), dtype=dtype) + # Move the pointer ahead to the end of the tag + fid.seek(end_pos) + else: + out = np.frombuffer(fid.read(tag_size), dtype=dtype) + return out + + +def _loc_to_coil_trans(loc): + """Convert loc vector to coil_trans.""" + assert loc.shape[-1] == 12 + coil_trans = np.zeros(loc.shape[:-1] + (4, 4)) + coil_trans[..., :3, 3] = loc[..., :3] + coil_trans[..., :3, :3] = np.reshape( + loc[..., 3:], loc.shape[:-1] + (3, 3)).swapaxes(-1, -2) + coil_trans[..., -1, -1] = 1. + return coil_trans + + +def _coil_trans_to_loc(coil_trans): + """Convert coil_trans to loc.""" + coil_trans = coil_trans.astype(np.float64) + return np.roll(coil_trans.T[:, :3], 1, 0).flatten() + + +def _loc_to_eeg_loc(loc): + """Convert a loc to an EEG loc.""" + if not np.isfinite(loc[:3]).all(): + raise RuntimeError('Missing EEG channel location') + if np.isfinite(loc[3:6]).all() and (loc[3:6]).any(): + return np.array([loc[0:3], loc[3:6]]).T + else: + return loc[0:3][:, np.newaxis].copy() + + +############################################################################## +# READING FUNCTIONS + +# None of these functions have docstring because it's more compact that way, +# and hopefully it's clear what they do by their names and variable values. +# See ``read_tag`` for variable descriptions. Return values are implied +# by the function names. 
+ +_is_matrix = 4294901760 # ffff0000 +_matrix_coding_dense = 16384 # 4000 +_matrix_coding_CCS = 16400 # 4010 +_matrix_coding_RCS = 16416 # 4020 +_data_type = 65535 # ffff + + +def _read_tag_header(fid): + """Read only the header of a Tag.""" + s = fid.read(4 * 4) + if len(s) == 0: + return None + # struct.unpack faster than np.frombuffer, saves ~10% of time some places + return Tag(*struct.unpack('>iIii', s)) + + +_matrix_bit_dtype = { + FIFF.FIFFT_INT: (4, '>i4'), + FIFF.FIFFT_JULIAN: (4, '>i4'), + FIFF.FIFFT_FLOAT: (4, '>f4'), + FIFF.FIFFT_DOUBLE: (8, '>f8'), + FIFF.FIFFT_COMPLEX_FLOAT: (8, '>f4'), + FIFF.FIFFT_COMPLEX_DOUBLE: (16, '>f8'), +} + + +def _read_matrix(fid, tag, shape, rlims, matrix_coding): + """Read a matrix (dense or sparse) tag.""" + from scipy import sparse + matrix_coding = matrix_coding >> 16 + + # This should be easy to implement (see _frombuffer_rows) + # if we need it, but for now, it's not... + if shape is not None: + raise ValueError('Row reading not implemented for matrices ' + 'yet') + + # Matrices + if matrix_coding == _matrix_coding_dense: + # Find dimensions and return to the beginning of tag data + pos = fid.tell() + fid.seek(tag.size - 4, 1) + ndim = int(np.frombuffer(fid.read(4), dtype='>i4')) + fid.seek(-(ndim + 1) * 4, 1) + dims = np.frombuffer(fid.read(4 * ndim), dtype='>i4')[::-1] + # + # Back to where the data start + # + fid.seek(pos, 0) + + if ndim > 3: + raise Exception('Only 2 or 3-dimensional matrices are ' + 'supported at this time') + + matrix_type = _data_type & tag.type + try: + bit, dtype = _matrix_bit_dtype[matrix_type] + except KeyError: + raise RuntimeError('Cannot handle matrix of type %d yet' + % matrix_type) + data = fid.read(int(bit * dims.prod())) + data = np.frombuffer(data, dtype=dtype) + # Note: we need the non-conjugate transpose here + if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT: + data = data.view('>c8') + elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: + data = data.view('>c16') + data.shape = dims + elif matrix_coding in (_matrix_coding_CCS, _matrix_coding_RCS): + # Find dimensions and return to the beginning of tag data + pos = fid.tell() + fid.seek(tag.size - 4, 1) + ndim = int(np.frombuffer(fid.read(4), dtype='>i4')) + fid.seek(-(ndim + 2) * 4, 1) + dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype='>i4') + if ndim != 2: + raise Exception('Only two-dimensional matrices are ' + 'supported at this time') + + # Back to where the data start + fid.seek(pos, 0) + nnz = int(dims[0]) + nrow = int(dims[1]) + ncol = int(dims[2]) + data = np.frombuffer(fid.read(4 * nnz), dtype='>f4') + shape = (dims[1], dims[2]) + if matrix_coding == _matrix_coding_CCS: + # CCS + tmp_indices = fid.read(4 * nnz) + indices = np.frombuffer(tmp_indices, dtype='>i4') + tmp_ptr = fid.read(4 * (ncol + 1)) + indptr = np.frombuffer(tmp_ptr, dtype='>i4') + if indptr[-1] > len(indices) or np.any(indptr < 0): + # There was a bug in MNE-C that caused some data to be + # stored without byte swapping + indices = np.concatenate( + (np.frombuffer(tmp_indices[:4 * (nrow + 1)], dtype='>i4'), + np.frombuffer(tmp_indices[4 * (nrow + 1):], dtype=' len(indices) or np.any(indptr < 0): + # There was a bug in MNE-C that caused some data to be + # stored without byte swapping + indices = np.concatenate( + (np.frombuffer(tmp_indices[:4 * (ncol + 1)], dtype='>i4'), + np.frombuffer(tmp_indices[4 * (ncol + 1):], dtype='c8") + return d + + +def _read_complex_double(fid, tag, shape, rlims): + """Read complex double tag.""" + # data gets stored twice as large + if shape is not 
None: + shape = (shape[0], shape[1] * 2) + d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims) + d = d.view(">c16") + return d + + +def _read_id_struct(fid, tag, shape, rlims): + """Read ID struct tag.""" + return dict( + version=int(np.frombuffer(fid.read(4), dtype=">i4")), + machid=np.frombuffer(fid.read(8), dtype=">i4"), + secs=int(np.frombuffer(fid.read(4), dtype=">i4")), + usecs=int(np.frombuffer(fid.read(4), dtype=">i4"))) + + +def _read_dig_point_struct(fid, tag, shape, rlims): + """Read dig point struct tag.""" + kind = int(np.frombuffer(fid.read(4), dtype=">i4")) + kind = _dig_kind_named.get(kind, kind) + ident = int(np.frombuffer(fid.read(4), dtype=">i4")) + if kind == FIFF.FIFFV_POINT_CARDINAL: + ident = _dig_cardinal_named.get(ident, ident) + return dict( + kind=kind, ident=ident, + r=np.frombuffer(fid.read(12), dtype=">f4"), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + + +def _read_coord_trans_struct(fid, tag, shape, rlims): + """Read coord trans struct tag.""" + from ..transforms import Transform + fro = int(np.frombuffer(fid.read(4), dtype=">i4")) + to = int(np.frombuffer(fid.read(4), dtype=">i4")) + rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3) + move = np.frombuffer(fid.read(12), dtype=">f4") + trans = np.r_[np.c_[rot, move], + np.array([[0], [0], [0], [1]]).T] + data = Transform(fro, to, trans) + fid.seek(48, 1) # Skip over the inverse transformation + return data + + +_ch_coord_dict = { + FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_ECOG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_SEEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_DBS_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_FNIRS_CH: FIFF.FIFFV_COORD_HEAD +} + + +def _read_ch_info_struct(fid, tag, shape, rlims): + """Read channel info struct tag.""" + d = dict( + scanno=int(np.frombuffer(fid.read(4), dtype=">i4")), + logno=int(np.frombuffer(fid.read(4), dtype=">i4")), + kind=int(np.frombuffer(fid.read(4), dtype=">i4")), + range=float(np.frombuffer(fid.read(4), dtype=">f4")), + cal=float(np.frombuffer(fid.read(4), dtype=">f4")), + coil_type=int(np.frombuffer(fid.read(4), dtype=">i4")), + # deal with really old OSX Anaconda bug by casting to float64 + loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64), + # unit and exponent + unit=int(np.frombuffer(fid.read(4), dtype=">i4")), + unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4")), + ) + # channel name + ch_name = np.frombuffer(fid.read(16), dtype=">c") + ch_name = ch_name[:np.argmax(ch_name == b'')].tobytes() + d['ch_name'] = ch_name.decode() + # coil coordinate system definition + _update_ch_info_named(d) + return d + + +def _update_ch_info_named(d): + d['coord_frame'] = _ch_coord_dict.get(d['kind'], FIFF.FIFFV_COORD_UNKNOWN) + d['kind'] = _ch_kind_named.get(d['kind'], d['kind']) + d['coil_type'] = _ch_coil_type_named.get(d['coil_type'], d['coil_type']) + d['unit'] = _ch_unit_named.get(d['unit'], d['unit']) + d['unit_mul'] = _ch_unit_mul_named.get(d['unit_mul'], d['unit_mul']) + + +def _read_old_pack(fid, tag, shape, rlims): + """Read old pack tag.""" + offset = float(np.frombuffer(fid.read(4), dtype=">f4")) + scale = float(np.frombuffer(fid.read(4), dtype=">f4")) + data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2") + data = data * scale # to float64 + data += offset + return data + + +def _read_dir_entry_struct(fid, tag, shape, rlims): + """Read dir entry struct tag.""" + return [_read_tag_header(fid) for _ in 
range(tag.size // 16 - 1)] + + +def _read_julian(fid, tag, shape, rlims): + """Read julian tag.""" + return _julian_to_cal(int(np.frombuffer(fid.read(4), dtype=">i4"))) + + +# Read types call dict +_call_dict = { + FIFF.FIFFT_STRING: _read_string, + FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float, + FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double, + FIFF.FIFFT_ID_STRUCT: _read_id_struct, + FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct, + FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct, + FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct, + FIFF.FIFFT_OLD_PACK: _read_old_pack, + FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct, + FIFF.FIFFT_JULIAN: _read_julian, +} +_call_dict_names = { + FIFF.FIFFT_STRING: 'str', + FIFF.FIFFT_COMPLEX_FLOAT: 'c8', + FIFF.FIFFT_COMPLEX_DOUBLE: 'c16', + FIFF.FIFFT_ID_STRUCT: 'ids', + FIFF.FIFFT_DIG_POINT_STRUCT: 'dps', + FIFF.FIFFT_COORD_TRANS_STRUCT: 'cts', + FIFF.FIFFT_CH_INFO_STRUCT: 'cis', + FIFF.FIFFT_OLD_PACK: 'op_', + FIFF.FIFFT_DIR_ENTRY_STRUCT: 'dir', + FIFF.FIFFT_JULIAN: 'jul', + FIFF.FIFFT_VOID: 'nul', # 0 +} + +# Append the simple types +_simple_dict = { + FIFF.FIFFT_BYTE: '>B', + FIFF.FIFFT_SHORT: '>i2', + FIFF.FIFFT_INT: '>i4', + FIFF.FIFFT_USHORT: '>u2', + FIFF.FIFFT_UINT: '>u4', + FIFF.FIFFT_FLOAT: '>f4', + FIFF.FIFFT_DOUBLE: '>f8', + FIFF.FIFFT_DAU_PACK16: '>i2', +} +for key, dtype in _simple_dict.items(): + _call_dict[key] = partial(_read_simple, dtype=dtype) + _call_dict_names[key] = dtype + + +def read_tag(fid, pos=None, shape=None, rlims=None): + """Read a Tag from a file at a given position. + + Parameters + ---------- + fid : file + The open FIF file descriptor. + pos : int + The position of the Tag in the file. + shape : tuple | None + If tuple, the shape of the stored matrix. Only to be used with + data stored as a vector (not implemented for matrices yet). + rlims : tuple | None + If tuple, the first (inclusive) and last (exclusive) rows to retrieve. + Note that data are assumed to be stored row-major in the file. Only to + be used with data stored as a vector (not implemented for matrices + yet). + + Returns + ------- + tag : Tag + The Tag read. + """ + if pos is not None: + fid.seek(pos, 0) + tag = _read_tag_header(fid) + if tag is None: + return tag + if tag.size > 0: + matrix_coding = _is_matrix & tag.type + if matrix_coding != 0: + tag.data = _read_matrix(fid, tag, shape, rlims, matrix_coding) + else: + # All other data types + try: + fun = _call_dict[tag.type] + except KeyError: + raise Exception('Unimplemented tag data type %s' % tag.type) + tag.data = fun(fid, tag, shape, rlims) + if tag.next != FIFF.FIFFV_NEXT_SEQ: + # f.seek(tag.next,0) + fid.seek(tag.next, 1) # XXX : fix? pb when tag.next < 0 + + return tag + + +def find_tag(fid, node, findkind): + """Find Tag in an open FIF file descriptor. + + Parameters + ---------- + fid : file-like + Open file. + node : dict + Node to search. + findkind : int + Tag kind to find. + + Returns + ------- + tag : instance of Tag + The first tag found. 
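The `partial` loop above is a compact dispatch-table idiom: one reader per scalar type, all sharing a single implementation. The same pattern in miniature (the FIFF type codes used as keys here are assumptions for illustration):

from functools import partial
import numpy as np

def read_simple(buf, dtype):
    # Toy stand-in for the module's _read_simple helper.
    return np.frombuffer(buf, dtype=dtype)

readers = {code: partial(read_simple, dtype=dt)
           for code, dt in {3: '>i4', 4: '>f4'}.items()}
print(readers[3](np.array([1, 2, 3], dtype='>i4').tobytes()))  # [1 2 3]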
+ """ + if node['directory'] is not None: + for subnode in node['directory']: + if subnode.kind == findkind: + return read_tag(fid, subnode.pos) + return None + + +def has_tag(node, kind): + """Check if the node contains a Tag of a given kind.""" + for d in node['directory']: + if d.kind == kind: + return True + return False + + +def _rename_list(bads, ch_names_mapping): + return [ch_names_mapping.get(bad, bad) for bad in bads] diff --git a/python/libs/mne/io/tests/__init__.py b/python/libs/mne/io/tests/__init__.py new file mode 100644 index 0000000..aba6507 --- /dev/null +++ b/python/libs/mne/io/tests/__init__.py @@ -0,0 +1,3 @@ +import os.path as op + +data_dir = op.join(op.dirname(__file__), 'data') diff --git a/python/libs/mne/io/tests/data/__init__.py b/python/libs/mne/io/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/python/libs/mne/io/tests/test_apply_function.py b/python/libs/mne/io/tests/test_apply_function.py new file mode 100644 index 0000000..887ba6a --- /dev/null +++ b/python/libs/mne/io/tests/test_apply_function.py @@ -0,0 +1,65 @@ +# Authors: Eric Larson +# +# License: BSD-3-Clause + +import numpy as np +import pytest + +from mne import create_info +from mne.io import RawArray +from mne.utils import logger, catch_logging + + +def bad_1(x): + """Fail.""" + return # bad return type + + +def bad_2(x): + """Fail.""" + return x[:-1] # bad shape + + +def bad_3(x): + """Fail.""" + return x[0, :] + + +def printer(x): + """Print.""" + logger.info('exec') + return x + + +@pytest.mark.slowtest +def test_apply_function_verbose(): + """Test apply function verbosity.""" + n_chan = 2 + n_times = 3 + ch_names = [str(ii) for ii in range(n_chan)] + raw = RawArray(np.zeros((n_chan, n_times)), + create_info(ch_names, 1., 'mag')) + # test return types in both code paths (parallel / 1 job) + with pytest.raises(TypeError, match='Return value must be an ndarray'): + raw.apply_function(bad_1) + with pytest.raises(ValueError, match='Return data must have shape'): + raw.apply_function(bad_2) + with pytest.raises(TypeError, match='Return value must be an ndarray'): + raw.apply_function(bad_1, n_jobs=2) + with pytest.raises(ValueError, match='Return data must have shape'): + raw.apply_function(bad_2, n_jobs=2) + + # test return type when `channel_wise=False` + raw.apply_function(printer, channel_wise=False) + with pytest.raises(TypeError, match='Return value must be an ndarray'): + raw.apply_function(bad_1, channel_wise=False) + with pytest.raises(ValueError, match='Return data must have shape'): + raw.apply_function(bad_3, channel_wise=False) + + # check our arguments + with catch_logging() as sio: + out = raw.apply_function(printer, verbose=False) + assert len(sio.getvalue(close=False)) == 0 + assert out is raw + raw.apply_function(printer, verbose=True) + assert sio.getvalue().count('\n') == n_chan diff --git a/python/libs/mne/io/tests/test_compensator.py b/python/libs/mne/io/tests/test_compensator.py new file mode 100644 index 0000000..e5f4c02 --- /dev/null +++ b/python/libs/mne/io/tests/test_compensator.py @@ -0,0 +1,109 @@ +# Author: Alexandre Gramfort +# +# License: BSD-3-Clause + +import os.path as op +import numpy as np +from numpy.testing import assert_allclose +import pytest + +from mne import Epochs, read_evokeds, pick_types +from mne.io.compensator import make_compensator, get_current_comp +from mne.io import read_raw_fif +from mne.utils import requires_mne, run_subprocess + +base_dir = op.join(op.dirname(__file__), 'data') +ctf_comp_fname = 
op.join(base_dir, 'test_ctf_comp_raw.fif') + + +def test_compensation_identity(): + """Test compensation identity.""" + raw = read_raw_fif(ctf_comp_fname) + assert get_current_comp(raw.info) == 3 + comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False) + assert comp1.shape == (340, 340) + comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True) + assert comp2.shape == (311, 340) + + # round-trip + desired = np.eye(340) + for from_ in range(3): + for to in range(3): + if from_ == to: + continue + comp1 = make_compensator(raw.info, from_, to) + comp2 = make_compensator(raw.info, to, from_) + # To get 1e-12 here (instead of 1e-6) we must use the linalg.inv + # method mentioned in compensator.py + assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12) + assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12) + + +@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize('pick', (False, True)) +def test_compensation_apply(tmp_path, preload, pick): + """Test applying compensation.""" + # make sure that changing the comp doesn't modify the original data + raw = read_raw_fif(ctf_comp_fname, preload=preload) + assert raw._comp is None + raw2 = raw.copy() + raw2.apply_gradient_compensation(2) + if pick: + raw2.pick([0] + list(range(2, len(raw.ch_names)))) + raw.pick([0] + list(range(2, len(raw.ch_names)))) + assert get_current_comp(raw2.info) == 2 + if preload: + assert raw2._comp is None + else: + assert raw2._comp.shape == (len(raw2.ch_names),) * 2 + fname = op.join(tmp_path, 'ctf-raw.fif') + raw2.save(fname) + raw2 = read_raw_fif(fname) + assert raw2.compensation_grade == 2 + raw2.apply_gradient_compensation(3) + assert raw2.compensation_grade == 3 + data, _ = raw[:, :] + data2, _ = raw2[:, :] + # channels have norm ~1e-12 + assert_allclose(data, data2, rtol=1e-9, atol=1e-18) + for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']): + assert ch1['coil_type'] == ch2['coil_type'] + + +@requires_mne +def test_compensation_mne(tmp_path): + """Test comensation by comparing with MNE.""" + def make_evoked(fname, comp): + """Make evoked data.""" + raw = read_raw_fif(fname) + if comp is not None: + raw.apply_gradient_compensation(comp) + picks = pick_types(raw.info, meg=True, ref_meg=True) + events = np.array([[0, 0, 1]], dtype=np.int64) + evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks, + baseline=None).average() + return evoked + + def compensate_mne(fname, comp): + """Compensate using MNE-C.""" + tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp) + cmd = ['mne_compensate_data', '--in', fname, + '--out', tmp_fname, '--grad', str(comp)] + run_subprocess(cmd) + return read_evokeds(tmp_fname)[0] + + # save evoked response with default compensation + fname_default = op.join(tmp_path, 'ctf_default-ave.fif') + make_evoked(ctf_comp_fname, None).save(fname_default) + + for comp in [0, 1, 2, 3]: + evoked_py = make_evoked(ctf_comp_fname, comp) + evoked_c = compensate_mne(fname_default, comp) + picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True) + picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True) + assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c], + rtol=1e-3, atol=1e-17) + chs_py = [evoked_py.info['chs'][ii] for ii in picks_py] + chs_c = [evoked_c.info['chs'][ii] for ii in picks_c] + for ch_py, ch_c in zip(chs_py, chs_c): + assert ch_py['coil_type'] == ch_c['coil_type'] diff --git a/python/libs/mne/io/tests/test_constants.py b/python/libs/mne/io/tests/test_constants.py new file mode 100644 index 0000000..b74c4ec --- /dev/null +++ 
b/python/libs/mne/io/tests/test_constants.py @@ -0,0 +1,348 @@ +# Author: Eric Larson +# +# License: BSD-3-Clause + +import os.path as op +import re +import shutil +import zipfile + +import numpy as np +import pooch +import pytest + +from mne.io.constants import (FIFF, FWD, _coord_frame_named, _ch_kind_named, + _ch_unit_named, _ch_unit_mul_named, + _ch_coil_type_named, _dig_kind_named, + _dig_cardinal_named) +from mne.forward._make_forward import _read_coil_defs +from mne.utils import requires_good_network + + +# https://github.com/mne-tools/fiff-constants/commits/master +REPO = 'mne-tools' +COMMIT = 'aa49e20cff5791fbaf01d77ad4ec2e0ecb69840d' + +# These are oddities that we won't address: +iod_dups = (355, 359) # these are in both MEGIN and MNE files +tag_dups = (3501,) # in both MEGIN and MNE files + +_dir_ignore_names = ('clear', 'copy', 'fromkeys', 'get', 'items', 'keys', + 'pop', 'popitem', 'setdefault', 'update', 'values', + 'has_key', 'iteritems', 'iterkeys', 'itervalues', # Py2 + 'viewitems', 'viewkeys', 'viewvalues', # Py2 + ) +_tag_ignore_names = ( +) # for fiff-constants pending updates +_ignore_incomplete_enums = ( # XXX eventually we could complete these + 'bem_surf_id', 'cardinal_point_cardiac', 'cond_model', 'coord', + 'dacq_system', 'diffusion_param', 'gantry_type', 'map_surf', + 'mne_lin_proj', 'mne_ori', 'mri_format', 'mri_pixel', 'proj_by', + 'tags', 'type', 'iod', 'volume_type', 'vol_type', +) +# not in coil_def.dat but in DictionaryTypes:enum(coil) +_missing_coil_def = ( + 0, # The location info contains no data + 1, # EEG electrode position in r0 + 3, # Old 24 channel system in HUT + 4, # The axial devices in the HUCS MCG system + 5, # Bipolar EEG electrode position + 6, # CSD-transformed EEG electrodes + 200, # Time-varying dipole definition + 300, # fNIRS oxyhemoglobin + 301, # fNIRS deoxyhemoglobin + 302, # fNIRS continuous wave + 303, # fNIRS optical density + 304, # fNIRS frequency domain AC amplitude + 305, # fNIRS frequency domain phase + 1000, # For testing the MCG software + 2001, # Generic axial gradiometer + 3011, # VV prototype wirewound planar sensor + 3014, # Vectorview SQ20950N planar gradiometer + 3021, # VV prototype wirewound magnetometer +) +# explicit aliases in constants.py +_aliases = dict( + FIFFV_COIL_MAGNES_R_MAG='FIFFV_COIL_MAGNES_REF_MAG', + FIFFV_COIL_MAGNES_R_GRAD='FIFFV_COIL_MAGNES_REF_GRAD', + FIFFV_COIL_MAGNES_R_GRAD_OFF='FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD', + FIFFV_COIL_FNIRS_RAW='FIFFV_COIL_FNIRS_CW_AMPLITUDE', + FIFFV_MNE_COORD_CTF_HEAD='FIFFV_MNE_COORD_4D_HEAD', + FIFFV_MNE_COORD_KIT_HEAD='FIFFV_MNE_COORD_4D_HEAD', + FIFFV_MNE_COORD_DIGITIZER='FIFFV_COORD_ISOTRAK', + FIFFV_MNE_COORD_SURFACE_RAS='FIFFV_COORD_MRI', + FIFFV_MNE_SENSOR_COV='FIFFV_MNE_NOISE_COV', + FIFFV_POINT_EEG='FIFFV_POINT_ECG', + FIFF_DESCRIPTION='FIFF_COMMENT', + FIFF_REF_PATH='FIFF_MRI_SOURCE_PATH', +) + + +@requires_good_network +def test_constants(tmp_path): + """Test compensation.""" + tmp_path = str(tmp_path) # old pytest... 
+ fname = 'fiff.zip' + dest = op.join(tmp_path, fname) + pooch.retrieve( + url='https://codeload.github.com/' + f'{REPO}/fiff-constants/zip/{COMMIT}', + path=tmp_path, + fname=fname, + known_hash=None + ) + names = list() + with zipfile.ZipFile(dest, 'r') as ff: + for name in ff.namelist(): + if 'Dictionary' in name: + ff.extract(name, tmp_path) + names.append(op.basename(name)) + shutil.move(op.join(tmp_path, name), + op.join(tmp_path, names[-1])) + names = sorted(names) + assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt', + 'DictionaryStructures.txt', + 'DictionaryTags.txt', 'DictionaryTags_MNE.txt', + 'DictionaryTypes.txt', 'DictionaryTypes_MNE.txt'] + # IOD (MEGIN and MNE) + fif = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) + con = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) + fiff_version = None + for name in ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt']: + with open(op.join(tmp_path, name), 'rb') as fid: + for line in fid: + line = line.decode('latin1').strip() + if line.startswith('# Packing revision'): + assert fiff_version is None + fiff_version = line.split()[-1] + if (line.startswith('#') or line.startswith('alias') or + len(line) == 0): + continue + line = line.split('"') + assert len(line) in (1, 2, 3) + desc = '' if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) in (2, 3) + if len(line) == 2: + kind, id_ = line + else: + kind, id_, tagged = line + assert tagged in ('tagged',) + id_ = int(id_) + if id_ not in iod_dups: + assert id_ not in fif['iod'] + fif['iod'][id_] = [kind, desc] + # Tags (MEGIN) + with open(op.join(tmp_path, 'DictionaryTags.txt'), 'rb') as fid: + for line in fid: + line = line.decode('ISO-8859-1').strip() + if (line.startswith('#') or line.startswith('alias') or + line.startswith(':') or len(line) == 0): + continue + line = line.split('"') + assert len(line) in (1, 2, 3), line + desc = '' if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) == 4, line + kind, id_, dtype, unit = line + id_ = int(id_) + val = [kind, dtype, unit] + assert id_ not in fif['tags'], (fif['tags'].get(id_), val) + fif['tags'][id_] = val + # Tags (MNE) + with open(op.join(tmp_path, 'DictionaryTags_MNE.txt'), 'rb') as fid: + for li, line in enumerate(fid): + line = line.decode('ISO-8859-1').strip() + # ignore continuation lines (*) + if (line.startswith('#') or line.startswith('alias') or + line.startswith(':') or line.startswith('*') or + len(line) == 0): + continue + # weird syntax around line 80: + if line in ('/*', '"'): + continue + line = line.split('"') + assert len(line) in (1, 2, 3), line + if len(line) == 3 and len(line[2]) > 0: + l2 = line[2].strip() + assert l2.startswith('/*') and l2.endswith('*/'), l2 + desc = '' if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) == 3, (li + 1, line) + kind, id_, dtype = line + unit = '-' + id_ = int(id_) + val = [kind, dtype, unit] + if id_ not in tag_dups: + assert id_ not in fif['tags'], (fif['tags'].get(id_), val) + fif['tags'][id_] = val + + # Types and enums + in_ = None + re_prim = re.compile(r'^primitive\((.*)\)\s*(\S*)\s*"(.*)"$') + re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$') + re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$') + re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$') + used_enums = list() + for extra in ('', '_MNE'): + with open(op.join(tmp_path, 'DictionaryTypes%s.txt' + % (extra,)), 'rb') as fid: + for li, line in enumerate(fid): + line = line.decode('ISO-8859-1').strip() + if in_ 
is None: + p = re_prim.match(line) + e = re_enum.match(line) + d = re_defi.match(line) + if p is not None: + t, s, d = p.groups() + s = int(s) + assert s not in fif['types'] + fif['types'][s] = [t, d] + elif e is not None: + # entering an enum + this_enum = e.group(1) + if this_enum not in fif: + used_enums.append(this_enum) + fif[this_enum] = dict() + con[this_enum] = dict() + in_ = fif[this_enum] + elif d is not None: + t, s, d = d.groups() + s = int(s) + fif['defines'][t] = [s, d] + else: + assert not line.startswith('enum(') + else: # in an enum + if line == '{': + continue + elif line == '}': + in_ = None + continue + t, s, d = re_enum_entry.match(line).groups() + s = int(s) + if t != 'ecg' and s != 3: # ecg defined the same way + assert s not in in_ + in_[s] = [t, d] + + # + # Assertions + # + + # Version + mne_version = '%d.%d' % (FIFF.FIFFC_MAJOR_VERSION, + FIFF.FIFFC_MINOR_VERSION) + assert fiff_version == mne_version + unknowns = list() + + # Assert that all our constants are in the FIF def + assert 'FIFFV_SSS_JOB_NOTHING' in dir(FIFF) + for name in sorted(dir(FIFF)): + if name.startswith('_') or name in _dir_ignore_names: + continue + check = None + val = getattr(FIFF, name) + if name in fif['defines']: + assert fif['defines'][name][0] == val + elif name.startswith('FIFFC_'): + # Checked above + assert name in ('FIFFC_MAJOR_VERSION', 'FIFFC_MINOR_VERSION', + 'FIFFC_VERSION') + elif name.startswith('FIFFB_'): + check = 'iod' + elif name.startswith('FIFFT_'): + check = 'types' + elif name.startswith('FIFFV_'): + if name.startswith('FIFFV_MNE_') and name.endswith('_ORI'): + check = 'mne_ori' + elif name.startswith('FIFFV_MNE_') and name.endswith('_COV'): + check = 'covariance_type' + elif name.startswith('FIFFV_MNE_COORD'): + check = 'coord' # weird wrapper + elif name.endswith('_CH') or '_QUAT_' in name or name in \ + ('FIFFV_DIPOLE_WAVE', 'FIFFV_GOODNESS_FIT', + 'FIFFV_HPI_ERR', 'FIFFV_HPI_G', 'FIFFV_HPI_MOV'): + check = 'ch_type' + elif name.startswith('FIFFV_SUBJ_'): + check = name.split('_')[2].lower() + elif name in ('FIFFV_POINT_LPA', 'FIFFV_POINT_NASION', + 'FIFFV_POINT_RPA', 'FIFFV_POINT_INION'): + check = 'cardinal_point' + else: + for check in used_enums: + if name.startswith('FIFFV_' + check.upper()): + break + else: + if name not in _tag_ignore_names: + raise RuntimeError('Could not find %s' % (name,)) + assert check in used_enums, name + if 'SSS' in check: + raise RuntimeError + elif name.startswith('FIFF_UNIT'): # units and multipliers + check = name.split('_')[1].lower() + elif name.startswith('FIFF_'): + check = 'tags' + else: + unknowns.append((name, val)) + if check is not None and name not in _tag_ignore_names: + assert val in fif[check], '%s: %s, %s' % (check, val, name) + if val in con[check]: + msg = "%s='%s' ?" 
% (name, con[check][val]) + assert _aliases.get(name) == con[check][val], msg + else: + con[check][val] = name + unknowns = '\n\t'.join('%s (%s)' % u for u in unknowns) + assert len(unknowns) == 0, 'Unknown types\n\t%s' % unknowns + + # Assert that all the FIF defs are in our constants + assert set(fif.keys()) == set(con.keys()) + for key in sorted(set(fif.keys()) - {'defines'}): + this_fif, this_con = fif[key], con[key] + assert len(set(this_fif.keys())) == len(this_fif) + assert len(set(this_con.keys())) == len(this_con) + missing_from_con = sorted(set(this_con.keys()) - set(this_fif.keys())) + assert missing_from_con == [], key + if key not in _ignore_incomplete_enums: + missing_from_fif = sorted(set(this_fif.keys()) - + set(this_con.keys())) + assert missing_from_fif == [], key + + # Assert that `coil_def.dat` has accurate descriptions of all enum(coil) + coil_def = _read_coil_defs() + coil_desc = np.array([c['desc'] for c in coil_def]) + coil_def = np.array([(c['coil_type'], c['accuracy']) + for c in coil_def], int) + mask = (coil_def[:, 1] == FWD.COIL_ACCURACY_ACCURATE) + coil_def = coil_def[mask, 0] + coil_desc = coil_desc[mask] + bad_list = [] + for key in fif['coil']: + if key not in _missing_coil_def and key not in coil_def: + bad_list.append((' %s,' % key).ljust(10) + + ' # ' + fif['coil'][key][1]) + assert len(bad_list) == 0, \ + '\nIn fiff-constants, missing from coil_def:\n' + '\n'.join(bad_list) + # Assert that enum(coil) has all `coil_def.dat` entries + for key, desc in zip(coil_def, coil_desc): + if key not in fif['coil']: + bad_list.append((' %s,' % key).ljust(10) + ' # ' + desc) + assert len(bad_list) == 0, \ + 'In coil_def, missing from fiff-constants:\n' + '\n'.join(bad_list) + + +@pytest.mark.parametrize('dict_, match, extras', [ + ({**_dig_kind_named, **_dig_cardinal_named}, 'FIFFV_POINT_', ()), + (_ch_kind_named, '^FIFFV_.*_CH$', + (FIFF.FIFFV_DIPOLE_WAVE, FIFF.FIFFV_GOODNESS_FIT)), + (_coord_frame_named, 'FIFFV_COORD_', ()), + (_ch_unit_named, 'FIFF_UNIT_', ()), + (_ch_unit_mul_named, 'FIFF_UNITM_', ()), + (_ch_coil_type_named, 'FIFFV_COIL_', ()), +]) +def test_dict_completion(dict_, match, extras): + """Test readable dict completions.""" + regex = re.compile(match) + got = set(FIFF[key] for key in FIFF if regex.search(key) is not None) + for e in extras: + got.add(e) + want = set(dict_) + assert got == want, match diff --git a/python/libs/mne/io/tests/test_meas_info.py b/python/libs/mne/io/tests/test_meas_info.py new file mode 100644 index 0000000..b14cb1a --- /dev/null +++ b/python/libs/mne/io/tests/test_meas_info.py @@ -0,0 +1,1089 @@ +# -*- coding: utf-8 -*- +# # Authors: MNE Developers +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from datetime import datetime, timedelta, timezone, date +import hashlib +import os.path as op +import pickle + +import pytest +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose +from scipy import sparse +import string + +from mne import (Epochs, read_events, pick_info, pick_types, Annotations, + read_evokeds, make_forward_solution, make_sphere_model, + setup_volume_source_space, write_forward_solution, + read_forward_solution, write_cov, read_cov, read_epochs, + compute_covariance) +from mne.channels import (read_polhemus_fastscan, make_standard_montage, + equalize_channels) +from mne.event import make_fixed_length_events +from mne.datasets import testing +from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc, + _loc_to_coil_trans, read_raw_fif, read_info, write_info, + 
meas_info, Projection, BaseRaw, read_raw_ctf, RawArray) +from mne.io.constants import FIFF +from mne.io.write import _generate_meas_id, DATE_NONE +from mne.io.meas_info import (Info, create_info, _merge_info, + _force_update_info, RAW_INFO_FIELDS, + _bad_chans_comp, _get_valid_units, + anonymize_info, _stamp_to_dt, _dt_to_stamp, + _add_timedelta_to_stamp, _read_extended_ch_info) +from mne.minimum_norm import (make_inverse_operator, write_inverse_operator, + read_inverse_operator, apply_inverse) +from mne.io._digitization import _write_dig_points, _make_dig_points, DigPoint +from mne.transforms import Transform +from mne.utils import catch_logging, assert_object_equal, _record_warnings + +fiducials_fname = op.join(op.dirname(__file__), '..', '..', 'data', + 'fsaverage', 'fsaverage-fiducials.fif') +base_dir = op.join(op.dirname(__file__), 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') +chpi_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') +event_name = op.join(base_dir, 'test-eve.fif') + +kit_data_dir = op.join(op.dirname(__file__), '..', 'kit', 'tests', 'data') +hsp_fname = op.join(kit_data_dir, 'test_hsp.txt') +elp_fname = op.join(kit_data_dir, 'test_elp.txt') + +data_path = testing.data_path(download=False) +sss_path = op.join(data_path, 'SSS') +pre = op.join(sss_path, 'test_move_anon_') +sss_ctc_fname = pre + 'crossTalk_raw_sss.fif' +ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf.ds') +raw_invalid_bday_fname = op.join(data_path, 'misc', + 'sample_invalid_birthday_raw.fif') + + +@pytest.mark.parametrize('kwargs, want', [ + (dict(meg=False, eeg=True), [0]), + (dict(meg=False, fnirs=True), [5]), + (dict(meg=False, fnirs='hbo'), [5]), + (dict(meg=False, fnirs='hbr'), []), + (dict(meg=False, misc=True), [1]), + (dict(meg=True), [2, 3, 4]), + (dict(meg='grad'), [2, 3]), + (dict(meg='planar1'), [2]), + (dict(meg='planar2'), [3]), + (dict(meg='mag'), [4]), +]) +def test_create_info_grad(kwargs, want): + """Test create_info behavior with grad coils.""" + info = create_info(6, 256, ["eeg", "misc", "grad", "grad", "mag", "hbo"]) + # Put these in an order such that grads get named "2" and "3", since + # they get picked based first on coil_type then ch_name... 
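As context for the assertion that follows: when create_info is given an integer channel count, MNE auto-names the channels '0'..'n-1', and the Neuromag-style 'planar1'/'planar2' selections then distinguish the two gradiometers by those name suffixes ('2' vs '3'). A minimal standalone sketch of that behavior (plain Python against mne, independent of this test file):

from mne import create_info, pick_types

# Integer ch_names yields auto-generated names '0'..'5', so the two 'grad'
# channels land on the names '2' and '3'.
info = create_info(6, 256., ['eeg', 'misc', 'grad', 'grad', 'mag', 'hbo'])
print([ch['ch_name'] for ch in info['chs']])  # ['0', '1', '2', '3', '4', '5']
print(pick_types(info, meg='planar1'))        # [2] -- name ending in '2'
print(pick_types(info, meg='planar2'))        # [3] -- name ending in '3'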
+ assert [ch['ch_name'] for ch in info['chs'] + if ch['coil_type'] == FIFF.FIFFV_COIL_VV_PLANAR_T1] == ['2', '3'] + picks = pick_types(info, **kwargs) + assert_array_equal(picks, want) + + +def test_get_valid_units(): + """Test the valid units.""" + valid_units = _get_valid_units() + assert isinstance(valid_units, tuple) + assert all(isinstance(unit, str) for unit in valid_units) + assert "n/a" in valid_units + + +def test_coil_trans(): + """Test loc<->coil_trans functions.""" + rng = np.random.RandomState(0) + x = rng.randn(4, 4) + x[3] = [0, 0, 0, 1] + assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x) + x = rng.randn(12) + assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x) + + +def test_make_info(): + """Test some create_info properties.""" + n_ch = np.longlong(1) + info = create_info(n_ch, 1000., 'eeg') + assert set(info.keys()) == set(RAW_INFO_FIELDS) + + coil_types = {ch['coil_type'] for ch in info['chs']} + assert FIFF.FIFFV_COIL_EEG in coil_types + + pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000) + pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000) + pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000, + ch_types=['eeg', 'eeg']) + pytest.raises(TypeError, create_info, ch_names=[np.array([1])], + sfreq=1000) + pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000, + ch_types=np.array([1])) + pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000, + ch_types='awesome') + pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000, + montage=np.array([1])) + m = make_standard_montage('biosemi32') + info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg') + info.set_montage(m) + ch_pos = [ch['loc'][:3] for ch in info['chs']] + ch_pos_mon = m._get_ch_pos() + ch_pos_mon = np.array( + [ch_pos_mon[ch_name] for ch_name in info['ch_names']]) + # transform to head + ch_pos_mon += (0., 0., 0.04014) + assert_allclose(ch_pos, ch_pos_mon, atol=1e-5) + + +def test_duplicate_name_correction(): + """Test duplicate channel names with running number.""" + # When running number is possible + info = create_info(['A', 'A', 'A'], 1000., verbose='error') + assert info['ch_names'] == ['A-0', 'A-1', 'A-2'] + + # When running number is not possible but alpha numeric is + info = create_info(['A', 'A', 'A-0'], 1000., verbose='error') + assert info['ch_names'] == ['A-a', 'A-1', 'A-0'] + + # When a single addition is not sufficient + with pytest.raises(ValueError, match='Adding a single alphanumeric'): + ch_n = ['A', 'A'] + # add all options for first duplicate channel (0) + ch_n.extend([f'{ch_n[0]}-{c}' for c in string.ascii_lowercase + '0']) + create_info(ch_n, 1000., verbose='error') + + +def test_fiducials_io(tmp_path): + """Test fiducials i/o.""" + pts, coord_frame = read_fiducials(fiducials_fname) + assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI + assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL + + temp_fname = tmp_path / 'test.fif' + write_fiducials(temp_fname, pts, coord_frame) + pts_1, coord_frame_1 = read_fiducials(temp_fname) + assert coord_frame == coord_frame_1 + for pt, pt_1 in zip(pts, pts_1): + assert pt['kind'] == pt_1['kind'] + assert pt['ident'] == pt_1['ident'] + assert pt['coord_frame'] == pt_1['coord_frame'] + assert_array_equal(pt['r'], pt_1['r']) + assert isinstance(pt, DigPoint) + assert isinstance(pt_1, DigPoint) + + # test safeguards + pts[0]['coord_frame'] += 1 + with pytest.raises(ValueError, match='coord_frame entries that are incom'): + 
write_fiducials(temp_fname, pts, coord_frame, overwrite=True) + + +def test_info(): + """Test info object.""" + raw = read_raw_fif(raw_fname) + event_id, tmin, tmax = 1, -0.2, 0.5 + events = read_events(event_name) + event_id = int(events[0, 2]) + epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None) + + evoked = epochs.average() + + # Test subclassing was successful. + info = Info(a=7, b='aaaaa') + assert ('a' in info) + assert ('b' in info) + + # Test info attribute in API objects + for obj in [raw, epochs, evoked]: + assert (isinstance(obj.info, Info)) + rep = repr(obj.info) + assert '2002-12-03 19:01:10 UTC' in rep, rep + assert '146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)' in rep + dig_rep = repr(obj.info['dig'][0]) + assert 'LPA' in dig_rep, dig_rep + assert '(-71.4, 0.0, 0.0) mm' in dig_rep, dig_rep + assert 'head frame' in dig_rep, dig_rep + # Test our BunchConstNamed support + for func in (str, repr): + assert '4 (FIFFV_COORD_HEAD)' == \ + func(obj.info['dig'][0]['coord_frame']) + + # Test read-only fields + info = raw.info.copy() + nchan = len(info['chs']) + ch_names = [ch['ch_name'] for ch in info['chs']] + assert info['nchan'] == nchan + assert list(info['ch_names']) == ch_names + + # Deleting of regular fields should work + info['experimenter'] = 'bar' + del info['experimenter'] + + # Test updating of fields + del info['chs'][-1] + info._update_redundant() + assert info['nchan'] == nchan - 1 + assert list(info['ch_names']) == ch_names[:-1] + + info['chs'][0]['ch_name'] = 'foo' + info._update_redundant() + assert info['ch_names'][0] == 'foo' + + # Test casting to and from a dict + info_dict = dict(info) + info2 = Info(info_dict) + assert info == info2 + + +def test_read_write_info(tmp_path): + """Test IO of info.""" + info = read_info(raw_fname) + temp_file = tmp_path / 'info.fif' + # check for bug `#1198` + info['dev_head_t']['trans'] = np.eye(4) + t1 = info['dev_head_t']['trans'] + write_info(temp_file, info) + info2 = read_info(temp_file) + t2 = info2['dev_head_t']['trans'] + assert (len(info['chs']) == len(info2['chs'])) + assert_array_equal(t1, t2) + # proc_history (e.g., GH#1875) + creator = u'é' + info = read_info(chpi_fname) + info['proc_history'][0]['creator'] = creator + info['hpi_meas'][0]['creator'] = creator + info['subject_info']['his_id'] = creator + info['subject_info']['weight'] = 11.1 + info['subject_info']['height'] = 2.3 + + with info._unlock(): + if info['gantry_angle'] is None: # future testing data may include it + info['gantry_angle'] = 0. 
# Elekta supine position + gantry_angle = info['gantry_angle'] + + meas_id = info['meas_id'] + write_info(temp_file, info) + info = read_info(temp_file) + assert info['proc_history'][0]['creator'] == creator + assert info['hpi_meas'][0]['creator'] == creator + assert info['subject_info']['his_id'] == creator + assert info['gantry_angle'] == gantry_angle + assert info['subject_info']['height'] == 2.3 + assert info['subject_info']['weight'] == 11.1 + for key in ['secs', 'usecs', 'version']: + assert info['meas_id'][key] == meas_id[key] + assert_array_equal(info['meas_id']['machid'], meas_id['machid']) + + # Test that writing twice produces the same file + m1 = hashlib.md5() + with open(temp_file, 'rb') as fid: + m1.update(fid.read()) + m1 = m1.hexdigest() + temp_file_2 = tmp_path / 'info2.fif' + assert temp_file_2 != temp_file + write_info(temp_file_2, info) + m2 = hashlib.md5() + with open(str(temp_file_2), 'rb') as fid: + m2.update(fid.read()) + m2 = m2.hexdigest() + assert m1 == m2 + + info = read_info(raw_fname) + with info._unlock(): + info['meas_date'] = None + anonymize_info(info, verbose='error') + assert info['meas_date'] is None + tmp_fname_3 = tmp_path / 'info3.fif' + write_info(tmp_fname_3, info) + assert info['meas_date'] is None + info2 = read_info(tmp_fname_3) + assert info2['meas_date'] is None + + # Check that having a very old date is fine until you try to save it to fif + with info._unlock(check_after=True): + info['meas_date'] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + fname = tmp_path / 'test.fif' + with pytest.raises(RuntimeError, match='must be between '): + write_info(fname, info) + + +def test_io_dig_points(tmp_path): + """Test writing of dig files.""" + points = read_polhemus_fastscan(hsp_fname, on_header_missing='ignore') + + dest = tmp_path / 'test.txt' + dest_bad = tmp_path / 'test.mne' + with pytest.raises(ValueError, match='must be of shape'): + _write_dig_points(dest, points[:, :2]) + with pytest.raises(ValueError, match='extension'): + _write_dig_points(dest_bad, points) + _write_dig_points(dest, points) + points1 = read_polhemus_fastscan( + dest, unit='m', on_header_missing='ignore') + err = "Dig points diverged after writing and reading." + assert_array_equal(points, points1, err) + + points2 = np.array([[-106.93, 99.80], [99.80, 68.81]]) + np.savetxt(dest, points2, delimiter='\t', newline='\n') + with pytest.raises(ValueError, match='must be of shape'): + with pytest.warns(RuntimeWarning, match='FastSCAN header'): + read_polhemus_fastscan(dest, on_header_missing='warn') + + +def test_io_coord_frame(tmp_path): + """Test round trip for coordinate frame.""" + fname = tmp_path / 'test.fif' + for ch_type in ('eeg', 'seeg', 'ecog', 'dbs', 'hbo', 'hbr'): + info = create_info( + ch_names=['Test Ch'], sfreq=1000., ch_types=[ch_type]) + info['chs'][0]['loc'][:3] = [0.05, 0.01, -0.03] + write_info(fname, info) + info2 = read_info(fname) + assert info2['chs'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD + + +def test_make_dig_points(): + """Test application of Polhemus HSP to info.""" + extra_points = read_polhemus_fastscan( + hsp_fname, on_header_missing='ignore') + info = create_info(ch_names=['Test Ch'], sfreq=1000.)
+ assert info['dig'] is None + + with info._unlock(): + info['dig'] = _make_dig_points(extra_points=extra_points) + assert (info['dig']) + assert_allclose(info['dig'][0]['r'], [-.10693, .09980, .06881]) + + elp_points = read_polhemus_fastscan(elp_fname, on_header_missing='ignore') + nasion, lpa, rpa = elp_points[:3] + info = create_info(ch_names=['Test Ch'], sfreq=1000.) + assert info['dig'] is None + + with info._unlock(): + info['dig'] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None) + assert (info['dig']) + idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION) + assert_allclose(info['dig'][idx]['r'], [.0013930, .0131613, -.0046967]) + pytest.raises(ValueError, _make_dig_points, nasion[:2]) + pytest.raises(ValueError, _make_dig_points, None, lpa[:2]) + pytest.raises(ValueError, _make_dig_points, None, None, rpa[:2]) + pytest.raises(ValueError, _make_dig_points, None, None, None, + elp_points[:, :2]) + pytest.raises(ValueError, _make_dig_points, None, None, None, None, + elp_points[:, :2]) + + +def test_redundant(): + """Test some of the redundant properties of info.""" + # Indexing + info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) + assert info['ch_names'][0] == 'a' + assert info['ch_names'][1] == 'b' + assert info['ch_names'][2] == 'c' + + # Equality + assert info['ch_names'] == info['ch_names'] + assert info['ch_names'] == ['a', 'b', 'c'] + + # No channels in info + info = create_info(ch_names=[], sfreq=1000.) + assert info['ch_names'] == [] + + # List should be read-only + info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) + + +def test_merge_info(): + """Test merging of multiple Info objects.""" + info_a = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) + info_b = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.) + info_merged = _merge_info([info_a, info_b]) + assert info_merged['nchan'] == 6 + assert info_merged['ch_names'] == ['a', 'b', 'c', 'd', 'e', 'f'] + pytest.raises(ValueError, _merge_info, [info_a, info_a]) + + # Testing for force updates before merging + info_c = create_info(ch_names=['g', 'h', 'i'], sfreq=500.) + # This will break because sfreq is not equal + pytest.raises(RuntimeError, _merge_info, [info_a, info_c]) + _force_update_info(info_a, info_c) + assert (info_c['sfreq'] == info_a['sfreq']) + assert (info_c['ch_names'][0] != info_a['ch_names'][0]) + # Make sure it works now + _merge_info([info_a, info_c]) + # Check that you must supply Info + pytest.raises(ValueError, _force_update_info, info_a, + dict([('sfreq', 1000.)])) + # KIT System-ID + info_a._unlocked = info_b._unlocked = True + info_a['kit_system_id'] = 50 + assert _merge_info((info_a, info_b))['kit_system_id'] == 50 + info_b['kit_system_id'] = 50 + assert _merge_info((info_a, info_b))['kit_system_id'] == 50 + info_b['kit_system_id'] = 60 + pytest.raises(ValueError, _merge_info, (info_a, info_b)) + + # hpi infos + info_d = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.)
+ info_merged = _merge_info([info_a, info_d]) + assert not info_merged['hpi_meas'] + assert not info_merged['hpi_results'] + info_a['hpi_meas'] = [{'f1': 3, 'f2': 4}] + assert _merge_info([info_a, info_d])['hpi_meas'] == info_a['hpi_meas'] + info_d._unlocked = True + info_d['hpi_meas'] = [{'f1': 3, 'f2': 4}] + assert _merge_info([info_a, info_d])['hpi_meas'] == info_d['hpi_meas'] + # This will break because of inconsistency + info_d['hpi_meas'] = [{'f1': 3, 'f2': 5}] + pytest.raises(ValueError, _merge_info, [info_a, info_d]) + + info_0 = read_info(raw_fname) + info_0['bads'] = ['MEG 2443', 'EEG 053'] + assert len(info_0['chs']) == 376 + assert len(info_0['dig']) == 146 + info_1 = create_info(["STI YYY"], info_0['sfreq'], ['stim']) + assert info_1['bads'] == [] + info_out = _merge_info([info_0, info_1], force_update_to_first=True) + assert len(info_out['chs']) == 377 + assert len(info_out['bads']) == 2 + assert len(info_out['dig']) == 146 + assert len(info_0['chs']) == 376 + assert len(info_0['bads']) == 2 + assert len(info_0['dig']) == 146 + + +def test_check_consistency(): + """Test consistency check of Info objects.""" + info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) + + # This should pass + info._check_consistency() + + # Info without any channels + info_empty = create_info(ch_names=[], sfreq=1000.) + info_empty._check_consistency() + + # Bad channels that are not in the info object + info2 = info.copy() + info2['bads'] = ['b', 'foo', 'bar'] + pytest.raises(RuntimeError, info2._check_consistency) + + # Bad data types + info2 = info.copy() + with info2._unlock(): + info2['sfreq'] = 'foo' + pytest.raises(ValueError, info2._check_consistency) + + info2 = info.copy() + with info2._unlock(): + info2['highpass'] = 'foo' + pytest.raises(ValueError, info2._check_consistency) + + info2 = info.copy() + with info2._unlock(): + info2['lowpass'] = 'foo' + pytest.raises(ValueError, info2._check_consistency) + + # Silent type conversion to float + info2 = info.copy() + with info2._unlock(check_after=True): + info2['sfreq'] = 1 + info2['highpass'] = 2 + info2['lowpass'] = 2 + assert (isinstance(info2['sfreq'], float)) + assert (isinstance(info2['highpass'], float)) + assert (isinstance(info2['lowpass'], float)) + + # Duplicate channel names + info2 = info.copy() + with info2._unlock(): + info2['chs'][2]['ch_name'] = 'b' + pytest.raises(RuntimeError, info2._check_consistency) + + # Duplicates appended with running numbers + with pytest.warns(RuntimeWarning, match='Channel names are not'): + info3 = create_info(ch_names=['a', 'b', 'b', 'c', 'b'], sfreq=1000.) 
+ assert_array_equal(info3['ch_names'], ['a', 'b-0', 'b-1', 'c', 'b-2']) + + # a few bad ones + idx = 0 + ch = info['chs'][idx] + for key, bad, match in (('ch_name', 1., 'not a string'), + ('loc', np.zeros(15), '12 elements'), + ('cal', np.ones(1), 'float or int')): + info._check_consistency() # okay + old = ch[key] + ch[key] = bad + if key == 'ch_name': + info['ch_names'][idx] = bad + with pytest.raises(TypeError, match=match): + info._check_consistency() + ch[key] = old + if key == 'ch_name': + info['ch_names'][idx] = old + + # bad channel entries + info2 = info.copy() + info2['chs'][0]['foo'] = 'bar' + with pytest.raises(KeyError, match='key errantly present'): + info2._check_consistency() + info2 = info.copy() + del info2['chs'][0]['loc'] + with pytest.raises(KeyError, match='key missing'): + info2._check_consistency() + + +def _test_anonymize_info(base_info): + """Test that sensitive information can be anonymized.""" + pytest.raises(TypeError, anonymize_info, 'foo') + + default_anon_dos = datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + default_str = "mne_anonymize" + default_subject_id = 0 + default_desc = ("Anonymized using a time shift" + + " to preserve age at acquisition") + + # Test no error for incomplete info + info = base_info.copy() + info.pop('file_id') + anonymize_info(info) + + # Fake some subject data + meas_date = datetime(2010, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + with base_info._unlock(): + base_info['meas_date'] = meas_date + base_info['subject_info'] = dict(id=1, + his_id='foobar', + last_name='bar', + first_name='bar', + birthday=(1987, 4, 8), + sex=0, hand=1) + + # generate expected info... + # first expected result with no options. + # will move DOS from 2010/1/1 to 2000/1/1 which is 3653 days. + exp_info = base_info.copy() + exp_info._unlocked = True + exp_info['description'] = default_desc + exp_info['experimenter'] = default_str + exp_info['proj_name'] = default_str + exp_info['proj_id'] = np.array([0]) + exp_info['subject_info']['first_name'] = default_str + exp_info['subject_info']['last_name'] = default_str + exp_info['subject_info']['id'] = default_subject_id + exp_info['subject_info']['his_id'] = str(default_subject_id) + exp_info['subject_info']['sex'] = 0 + del exp_info['subject_info']['hand'] # there's no "unknown" setting + + # this bday is 3653 days different. the change in day is due to a + # different number of leap days between 1987 and 1977 than between + # 2010 and 2000. 
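The day counts in the comment above are easy to verify with the standard library alone; a quick sketch of the arithmetic (plain Python, no MNE needed):

from datetime import date, timedelta

# 2000-01-01 .. 2010-01-01 spans three leap days (2000, 2004, 2008): 3653 days.
assert (date(2010, 1, 1) - date(2000, 1, 1)).days == 10 * 365 + 3 == 3653
# 1977..1987 contains only two leap days (1980, 1984), so shifting the
# 1987-04-08 birthday back by the same 3653 days lands one calendar day earlier.
assert date(1987, 4, 8) - timedelta(days=3653) == date(1977, 4, 7)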
+ exp_info['subject_info']['birthday'] = (1977, 4, 7) + exp_info['meas_date'] = default_anon_dos + exp_info._unlocked = False + + # make copies + exp_info_3 = exp_info.copy() + + # adjust each expected outcome + delta_t = timedelta(days=3653) + for key in ('file_id', 'meas_id'): + value = exp_info.get(key) + if value is not None: + assert 'msecs' not in value + tmp = _add_timedelta_to_stamp( + (value['secs'], value['usecs']), -delta_t) + value['secs'] = tmp[0] + value['usecs'] = tmp[1] + value['machid'][:] = 0 + + # exp 2 tests the keep_his option + exp_info_2 = exp_info.copy() + with exp_info_2._unlock(): + exp_info_2['subject_info']['his_id'] = 'foobar' + exp_info_2['subject_info']['sex'] = 0 + exp_info_2['subject_info']['hand'] = 1 + + # exp 3 tests a supplied daysback + delta_t_2 = timedelta(days=43) + with exp_info_3._unlock(): + exp_info_3['subject_info']['birthday'] = (1987, 2, 24) + exp_info_3['meas_date'] = meas_date - delta_t_2 + for key in ('file_id', 'meas_id'): + value = exp_info_3.get(key) + if value is not None: + assert 'msecs' not in value + tmp = _add_timedelta_to_stamp( + (value['secs'], value['usecs']), -delta_t_2) + value['secs'] = tmp[0] + value['usecs'] = tmp[1] + value['machid'][:] = 0 + + # exp 4 tests a supplied daysback + delta_t_3 = timedelta(days=223 + 364 * 500) + + new_info = anonymize_info(base_info.copy()) + assert_object_equal(new_info, exp_info) + + new_info = anonymize_info(base_info.copy(), keep_his=True) + assert_object_equal(new_info, exp_info_2) + + new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) + assert_object_equal(new_info, exp_info_3) + + with pytest.raises(RuntimeError, match='anonymize_info generated'): + anonymize_info(base_info.copy(), daysback=delta_t_3.days) + # assert_object_equal(new_info, exp_info_4) + + # test with meas_date = None + with base_info._unlock(): + base_info['meas_date'] = None + exp_info_3._unlocked = True + exp_info_3['meas_date'] = None + exp_info_3['file_id']['secs'] = DATE_NONE[0] + exp_info_3['file_id']['usecs'] = DATE_NONE[1] + exp_info_3['meas_id']['secs'] = DATE_NONE[0] + exp_info_3['meas_id']['usecs'] = DATE_NONE[1] + exp_info_3['subject_info'].pop('birthday', None) + exp_info_3._unlocked = False + + if base_info['meas_date'] is None: + with pytest.warns(RuntimeWarning, match='all information'): + new_info = anonymize_info(base_info.copy(), + daysback=delta_t_2.days) + else: + new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) + assert_object_equal(new_info, exp_info_3) + + with _record_warnings(): # meas_date is None + new_info = anonymize_info(base_info.copy()) + assert_object_equal(new_info, exp_info_3) + + +@pytest.mark.parametrize('stamp, dt', [ + [(1346981585, 835782), (2012, 9, 7, 1, 33, 5, 835782)], + # test old dates for BIDS anonymization + [(-1533443343, 24382), (1921, 5, 29, 19, 30, 57, 24382)], + # gh-7116 + [(-908196946, 988669), (1941, 3, 22, 11, 4, 14, 988669)], +]) +def test_meas_date_convert(stamp, dt): + """Test conversions of meas_date to datetime objects.""" + meas_datetime = _stamp_to_dt(stamp) + stamp2 = _dt_to_stamp(meas_datetime) + assert stamp == stamp2 + assert meas_datetime == datetime(*dt, tzinfo=timezone.utc) + # smoke test for info __repr__ + info = create_info(1, 1000., 'eeg') + with info._unlock(): + info['meas_date'] = meas_datetime + assert str(dt[0]) in repr(info) + + +def test_anonymize(tmp_path): + """Test that sensitive information can be anonymized.""" + pytest.raises(TypeError, anonymize_info, 'foo') + + # Fake some subject data
+ raw = read_raw_fif(raw_fname) + raw.set_annotations(Annotations(onset=[0, 1], + duration=[1, 1], + description='dummy', + orig_time=None)) + first_samp = raw.first_samp + expected_onset = np.arange(2) + raw._first_time + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + + # test mne.anonymize_info() + events = read_events(event_name) + epochs = Epochs(raw, events[:1], 2, 0., 0.1, baseline=None) + _test_anonymize_info(raw.info.copy()) + _test_anonymize_info(epochs.info.copy()) + + # test instance methods & I/O roundtrip + for inst, keep_his in zip((raw, epochs), (True, False)): + inst = inst.copy() + + subject_info = dict(his_id='Volunteer', sex=2, hand=1) + inst.info['subject_info'] = subject_info + inst.anonymize(keep_his=keep_his) + + si = inst.info['subject_info'] + if keep_his: + assert si == subject_info + else: + assert si['his_id'] == '0' + assert si['sex'] == 0 + assert 'hand' not in si + + # write to disk & read back + inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo' + fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif' + out_path = tmp_path / fname + inst.save(out_path, overwrite=True) + if inst_type == 'raw': + read_raw_fif(out_path) + else: + read_epochs(out_path) + + # test that annotations are correctly zeroed + raw.anonymize() + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + assert raw.annotations.orig_time == raw.info['meas_date'] + stamp = _dt_to_stamp(raw.info['meas_date']) + assert raw.annotations.orig_time == _stamp_to_dt(stamp) + + with raw.info._unlock(): + raw.info['meas_date'] = None + raw.anonymize(daysback=None) + with pytest.warns(RuntimeWarning, match='None'): + raw.anonymize(daysback=123) + assert raw.annotations.orig_time is None + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + + +def test_anonymize_with_io(tmp_path): + """Test that IO does not break anonymization.""" + raw = read_raw_fif(raw_fname) + + temp_path = tmp_path / 'tmp_raw.fif' + raw.save(temp_path) + + raw2 = read_raw_fif(temp_path) + + daysback = (raw2.info['meas_date'].date() - date(1924, 1, 1)).days + raw2.anonymize(daysback=daysback) + + +@testing.requires_testing_data +def test_csr_csc(tmp_path): + """Test CSR and CSC.""" + info = read_info(sss_ctc_fname) + info = pick_info(info, pick_types(info, meg=True, exclude=[])) + sss_ctc = info['proc_history'][0]['max_info']['sss_ctc'] + ct = sss_ctc['decoupler'].copy() + # CSC + assert isinstance(ct, sparse.csc_matrix) + fname = tmp_path / 'test.fif' + write_info(fname, info) + info_read = read_info(fname) + ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] + assert isinstance(ct_read, sparse.csc_matrix) + assert_array_equal(ct_read.toarray(), ct.toarray()) + # Now CSR + csr = ct.tocsr() + assert isinstance(csr, sparse.csr_matrix) + assert_array_equal(csr.toarray(), ct.toarray()) + info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr + fname = tmp_path / 'test1.fif' + write_info(fname, info) + info_read = read_info(fname) + ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] + assert isinstance(ct_read, sparse.csc_matrix) # this gets cast to CSC + assert_array_equal(ct_read.toarray(), ct.toarray()) + + +@testing.requires_testing_data +def test_check_compensation_consistency(): + """Test check picks compensation.""" + raw = read_raw_ctf(ctf_fname, preload=False) + events = make_fixed_length_events(raw, 99999) + picks = 
pick_types(raw.info, meg=True, exclude=[], ref_meg=True) + pick_ch_names = [raw.info['ch_names'][idx] for idx in picks] + for (comp, expected_result) in zip([0, 1], [False, False]): + raw.apply_gradient_compensation(comp) + ret, missing = _bad_chans_comp(raw.info, pick_ch_names) + assert ret == expected_result + assert len(missing) == 0 + Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks) + + picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False) + pick_ch_names = [raw.info['ch_names'][idx] for idx in picks] + + for (comp, expected_result) in zip([0, 1], [False, True]): + raw.apply_gradient_compensation(comp) + ret, missing = _bad_chans_comp(raw.info, pick_ch_names) + assert ret == expected_result + assert len(missing) == 17 + with catch_logging() as log: + Epochs(raw, events, None, -0.2, 0.2, preload=False, + picks=picks, verbose=True) + assert 'Removing 5 compensators' in log.getvalue() + + +def test_field_round_trip(tmp_path): + """Test round-trip for new fields.""" + info = create_info(1, 1000., 'eeg') + with info._unlock(): + for key in ('file_id', 'meas_id'): + info[key] = _generate_meas_id() + info['device_info'] = dict( + type='a', model='b', serial='c', site='d') + info['helium_info'] = dict( + he_level_raw=1., helium_level=2., + orig_file_guid='e', meas_date=(1, 2)) + fname = tmp_path / 'temp-info.fif' + write_info(fname, info) + info_read = read_info(fname) + assert_object_equal(info, info_read) + + +def test_equalize_channels(): + """Test equalization of channels for instances of Info.""" + info1 = create_info(['CH1', 'CH2', 'CH3'], sfreq=1.) + info2 = create_info(['CH4', 'CH2', 'CH1'], sfreq=1.) + info1, info2 = equalize_channels([info1, info2]) + + assert info1.ch_names == ['CH1', 'CH2'] + assert info2.ch_names == ['CH1', 'CH2'] + + +def test_repr(): + """Test Info repr.""" + info = create_info(1, 1000, 'eeg') + assert '7 non-empty values' in repr(info) + + t = Transform('meg', 'head', np.ones((4, 4))) + info['dev_head_t'] = t + assert 'dev_head_t: MEG device -> head transform' in repr(info) + + +def test_repr_html(): + """Test Info HTML repr.""" + info = read_info(raw_fname) + assert 'Projections' in info._repr_html_() + with info._unlock(): + info['projs'] = [] + assert 'Projections' not in info._repr_html_() + info['bads'] = [] + assert 'None' in info._repr_html_() + info['bads'] = ['MEG 2443', 'EEG 053'] + assert 'MEG 2443' in info._repr_html_() + assert 'EEG 053' in info._repr_html_() + + html = info._repr_html_() + for ch in ['204 Gradiometers', '102 Magnetometers', '9 Stimulus', + '60 EEG', '1 EOG']: + assert ch in html + + +@testing.requires_testing_data +def test_invalid_subject_birthday(): + """Test handling of an invalid birthday in the raw file.""" + with pytest.warns(RuntimeWarning, match='No birthday will be set'): + raw = read_raw_fif(raw_invalid_bday_fname) + assert 'birthday' not in raw.info['subject_info'] + + +@pytest.mark.parametrize('fname', [ + pytest.param(ctf_fname, marks=testing._pytest_mark()), + raw_fname, +]) +def test_channel_name_limit(tmp_path, monkeypatch, fname): + """Test that our remapping works properly.""" + # + # raw + # + if fname.endswith('fif'): + raw = read_raw_fif(fname) + raw.pick_channels(raw.ch_names[:3]) + ref_names = [] + data_names = raw.ch_names + else: + assert fname.endswith('.ds') + raw = read_raw_ctf(fname) + ref_names = [raw.ch_names[pick] + for pick in pick_types(raw.info, meg=False, ref_meg=True)] + data_names = raw.ch_names[32:35] + proj = dict(data=np.ones((1, len(data_names))),
col_names=data_names[:2].copy(), row_names=None, nrow=1) + proj = Projection( + data=proj, active=False, desc='test', kind=0, explained_var=0.) + raw.add_proj(proj, remove_existing=True) + raw.info.normalize_proj() + raw.pick_channels(data_names + ref_names).crop(0, 2) + long_names = ['123456789abcdefg' + name for name in raw.ch_names] + fname = tmp_path / 'test-raw.fif' + with catch_logging() as log: + raw.save(fname) + log = log.getvalue() + assert 'truncated' not in log + rename = dict(zip(raw.ch_names, long_names)) + long_data_names = [rename[name] for name in data_names] + long_proj_names = long_data_names[:2] + raw.rename_channels(rename) + for comp in raw.info['comps']: + for key in ('row_names', 'col_names'): + for name in comp['data'][key]: + assert name in raw.ch_names + if raw.info['comps']: + assert raw.compensation_grade == 0 + raw.apply_gradient_compensation(3) + assert raw.compensation_grade == 3 + assert len(raw.info['projs']) == 1 + assert raw.info['projs'][0]['data']['col_names'] == long_proj_names + raw.info['bads'] = bads = long_data_names[2:3] + good_long_data_names = [ + name for name in long_data_names if name not in bads] + with catch_logging() as log: + raw.save(fname, overwrite=True, verbose=True) + log = log.getvalue() + assert 'truncated to 15' in log + for name in raw.ch_names: + assert len(name) > 15 + # first read the full way + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert 'Reading extended channel information' in log + for ra in (raw, raw_read): + assert ra.ch_names == long_names + assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names + del raw_read + # next read as if no longer names could be read + monkeypatch.setattr( + meas_info, '_read_extended_ch_info', lambda x, y, z: None) + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert 'extended' not in log + if raw.info['comps']: + assert raw_read.compensation_grade == 3 + raw_read.apply_gradient_compensation(0) + assert raw_read.compensation_grade == 0 + monkeypatch.setattr( # restore + meas_info, '_read_extended_ch_info', _read_extended_ch_info) + short_proj_names = [ + f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}' + for ni, name in enumerate(long_data_names[:2])] + assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names + # + # epochs + # + epochs = Epochs(raw, make_fixed_length_events(raw)) + fname = tmp_path / 'test-epo.fif' + epochs.save(fname) + epochs_read = read_epochs(fname) + for ep in (epochs, epochs_read): + assert ep.info['ch_names'] == long_names + assert ep.ch_names == long_names + del raw, epochs_read + # cov + epochs.info['bads'] = [] + cov = compute_covariance(epochs, verbose='error') + fname = tmp_path / 'test-cov.fif' + write_cov(fname, cov) + cov_read = read_cov(fname) + for co in (cov, cov_read): + assert co['names'] == long_data_names + assert co['bads'] == [] + del cov_read + + # + # evoked + # + evoked = epochs.average() + evoked.info['bads'] = bads + assert evoked.nave == 1 + fname = tmp_path / 'test-ave.fif' + evoked.save(fname) + evoked_read = read_evokeds(fname)[0] + for ev in (evoked, evoked_read): + assert ev.ch_names == long_names + assert ev.info['bads'] == bads + del evoked_read, epochs + + # + # forward + # + with _record_warnings(): # not enough points for CTF + sphere = make_sphere_model('auto', 'auto', evoked.info) + src = setup_volume_source_space( + pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]])) + fwd 
= make_forward_solution(evoked.info, None, src, sphere) + fname = tmp_path / 'temp-fwd.fif' + write_forward_solution(fname, fwd) + fwd_read = read_forward_solution(fname) + for fw in (fwd, fwd_read): + assert fw['sol']['row_names'] == long_data_names + assert fw['info']['ch_names'] == long_data_names + assert fw['info']['bads'] == bads + del fwd_read + + # + # inv + # + inv = make_inverse_operator(evoked.info, fwd, cov) + fname = tmp_path / 'test-inv.fif' + write_inverse_operator(fname, inv) + inv_read = read_inverse_operator(fname) + for iv in (inv, inv_read): + assert iv['info']['ch_names'] == good_long_data_names + apply_inverse(evoked, inv) # smoke test + + +@pytest.mark.parametrize('fname_info', (raw_fname, 'create_info')) +@pytest.mark.parametrize('unlocked', (True, False)) +def test_pickle(fname_info, unlocked): + """Test that Info can be (un)pickled.""" + if fname_info == 'create_info': + info = create_info(3, 1000., 'eeg') + else: + info = read_info(fname_info) + assert not info._unlocked + info._unlocked = unlocked + data = pickle.dumps(info) + info_un = pickle.loads(data) + assert isinstance(info_un, Info) + assert_object_equal(info, info_un) + assert info_un._unlocked == unlocked + + +def test_info_bad(): + """Test our info sanity checkers.""" + info = create_info(2, 1000., 'eeg') + info['description'] = 'foo' + info['experimenter'] = 'bar' + info['line_freq'] = 50. + info['bads'] = info['ch_names'][:1] + info['temp'] = ('whatever', 1.) + # After 0.24 these should be pytest.raises calls + check, klass = pytest.raises, RuntimeError + with check(klass, match=r"info\['temp'\]"): + info['bad_key'] = 1. + for (key, match) in ([ + ('sfreq', r'inst\.resample'), + ('chs', r'inst\.add_channels')]): + with check(klass, match=match): + info[key] = info[key] + with pytest.raises(ValueError, match='between meg<->head'): + info['dev_head_t'] = Transform('mri', 'head', np.eye(4)) + + +def test_info_pick_channels(): + """Test that info.pick_channels emits a deprecation warning.""" + info = create_info(2, 1000., 'eeg') + with pytest.deprecated_call(match='use inst.pick_channels instead.'): + info.pick_channels(['0']) + + +def test_get_montage(): + """Test ContainsMixin.get_montage().""" + ch_names = make_standard_montage('standard_1020').ch_names + sfreq = 512 + data = np.zeros((len(ch_names), sfreq * 2)) + raw = RawArray(data, create_info(ch_names, sfreq, 'eeg')) + raw.set_montage('standard_1020') + + assert len(raw.get_montage().ch_names) == len(ch_names) + raw.info['bads'] = [ch_names[0]] + assert len(raw.get_montage().ch_names) == len(ch_names) + + # test info + raw = RawArray(data, create_info(ch_names, sfreq, 'eeg')) + raw.set_montage('standard_1020') + + assert len(raw.info.get_montage().ch_names) == len(ch_names) + raw.info['bads'] = [ch_names[0]] + assert len(raw.info.get_montage().ch_names) == len(ch_names) diff --git a/python/libs/mne/io/tests/test_pick.py b/python/libs/mne/io/tests/test_pick.py new file mode 100644 index 0000000..8cd38c2 --- /dev/null +++ b/python/libs/mne/io/tests/test_pick.py @@ -0,0 +1,618 @@ +from copy import deepcopy +import os.path as op + +from numpy.testing import assert_array_equal, assert_equal +import pytest +import numpy as np + +from mne import (pick_channels_regexp, pick_types, Epochs, + read_forward_solution, rename_channels, + pick_info, pick_channels, create_info, make_ad_hoc_cov) +from mne import __file__ as _root_init_fname +from mne.io import (read_raw_fif, RawArray, read_raw_bti, read_raw_kit, + read_info) +from mne.io.pick import 
(channel_indices_by_type, channel_type, + pick_types_forward, _picks_by_type, _picks_to_idx, + _contains_ch_type, pick_channels_cov, + _get_channel_types, get_channel_type_constants, + _DATA_CH_TYPES_SPLIT) +from mne.io.constants import FIFF +from mne.datasets import testing +from mne.utils import catch_logging, assert_object_equal + +data_path = testing.data_path(download=False) +fname_meeg = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_mc = op.join(data_path, 'SSS', 'test_move_anon_movecomp_raw_sss.fif') + +io_dir = op.join(op.dirname(__file__), '..') +ctf_fname = op.join(io_dir, 'tests', 'data', 'test_ctf_raw.fif') +fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif') + + +def _picks_by_type_old(info, meg_combined=False, ref_meg=False, + exclude='bads'): + """Use the old, slower _picks_by_type code.""" + picks_list = [] + has = [_contains_ch_type(info, k) for k in _DATA_CH_TYPES_SPLIT] + has = dict(zip(_DATA_CH_TYPES_SPLIT, has)) + if has['mag'] and (meg_combined is not True or not has['grad']): + picks_list.append( + ('mag', pick_types(info, meg='mag', eeg=False, stim=False, + ref_meg=ref_meg, exclude=exclude)) + ) + if has['grad'] and (meg_combined is not True or not has['mag']): + picks_list.append( + ('grad', pick_types(info, meg='grad', eeg=False, stim=False, + ref_meg=ref_meg, exclude=exclude)) + ) + if has['mag'] and has['grad'] and meg_combined is True: + picks_list.append( + ('meg', pick_types(info, meg=True, eeg=False, stim=False, + ref_meg=ref_meg, exclude=exclude)) + ) + for ch_type in _DATA_CH_TYPES_SPLIT: + if ch_type in ['grad', 'mag']: # exclude just MEG channels + continue + if has[ch_type]: + picks_list.append( + (ch_type, pick_types(info, meg=False, stim=False, + ref_meg=ref_meg, exclude=exclude, + **{ch_type: True})) + ) + return picks_list + + +def _channel_type_old(info, idx): + """Get channel type using old, slower scheme.""" + ch = info['chs'][idx] + + # iterate through all defined channel types until we find a match with ch + # go in order from most specific (most rules entries) to least specific + channel_types = sorted(get_channel_type_constants().items(), + key=lambda x: len(x[1]), reverse=True) + for t, rules in channel_types: + for key, vals in rules.items(): # all keys must match the values + if ch.get(key, None) not in np.array(vals): + break # not channel type t, go to next iteration + else: + return t + + raise ValueError(f'Unknown channel type for {ch["ch_name"]}') + + +def _assert_channel_types(info): + for k in range(info['nchan']): + a, b = channel_type(info, k), _channel_type_old(info, k) + assert a == b + + +def test_pick_refs(): + """Test picking of reference sensors.""" + infos = list() + # KIT + kit_dir = op.join(io_dir, 'kit', 'tests', 'data') + sqd_path = op.join(kit_dir, 'test.sqd') + mrk_path = op.join(kit_dir, 'test_mrk.sqd') + elp_path = op.join(kit_dir, 'test_elp.txt') + hsp_path = op.join(kit_dir, 'test_hsp.txt') + raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path) + infos.append(raw_kit.info) + # BTi + bti_dir = op.join(io_dir, 'bti', 'tests', 'data') + bti_pdf = op.join(bti_dir, 'test_pdf_linux') + bti_config = op.join(bti_dir, 'test_config_linux') + bti_hs = op.join(bti_dir, 'test_hs_linux') + raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False) + infos.append(raw_bti.info) + # CTF + fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif') + raw_ctf = read_raw_fif(fname_ctf_raw) + raw_ctf.apply_gradient_compensation(2) + for info in infos: 
+ info['bads'] = [] + _assert_channel_types(info) + with pytest.raises(ValueError, match="'planar2'] or bool, not foo"): + pick_types(info, meg='foo') + with pytest.raises(ValueError, match="'planar2', 'auto'] or bool,"): + pick_types(info, ref_meg='foo') + picks_meg_ref = pick_types(info, meg=True, ref_meg=True) + picks_meg = pick_types(info, meg=True, ref_meg=False) + picks_ref = pick_types(info, meg=False, ref_meg=True) + assert_array_equal(picks_meg_ref, + np.sort(np.concatenate([picks_meg, picks_ref]))) + picks_grad = pick_types(info, meg='grad', ref_meg=False) + picks_ref_grad = pick_types(info, meg=False, ref_meg='grad') + picks_meg_ref_grad = pick_types(info, meg='grad', ref_meg='grad') + assert_array_equal(picks_meg_ref_grad, + np.sort(np.concatenate([picks_grad, + picks_ref_grad]))) + picks_mag = pick_types(info, meg='mag', ref_meg=False) + picks_ref_mag = pick_types(info, meg=False, ref_meg='mag') + picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag') + assert_array_equal(picks_meg_ref_mag, + np.sort(np.concatenate([picks_mag, + picks_ref_mag]))) + assert_array_equal(picks_meg, + np.sort(np.concatenate([picks_mag, picks_grad]))) + assert_array_equal(picks_ref, + np.sort(np.concatenate([picks_ref_mag, + picks_ref_grad]))) + assert_array_equal(picks_meg_ref, np.sort(np.concatenate( + [picks_grad, picks_mag, picks_ref_grad, picks_ref_mag]))) + + for pick in (picks_meg_ref, picks_meg, picks_ref, + picks_grad, picks_ref_grad, picks_meg_ref_grad, + picks_mag, picks_ref_mag, picks_meg_ref_mag): + if len(pick) > 0: + pick_info(info, pick) + + # test CTF expected failures directly + info = raw_ctf.info + info['bads'] = [] + picks_meg_ref = pick_types(info, meg=True, ref_meg=True) + picks_meg = pick_types(info, meg=True, ref_meg=False) + picks_ref = pick_types(info, meg=False, ref_meg=True) + picks_mag = pick_types(info, meg='mag', ref_meg=False) + picks_ref_mag = pick_types(info, meg=False, ref_meg='mag') + picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag') + for pick in (picks_meg_ref, picks_ref, picks_ref_mag, picks_meg_ref_mag): + if len(pick) > 0: + pick_info(info, pick) + + for pick in (picks_meg, picks_mag): + if len(pick) > 0: + with catch_logging() as log: + pick_info(info, pick, verbose=True) + assert ('Removing {} compensators'.format(len(info['comps'])) + in log.getvalue()) + picks_ref_grad = pick_types(info, meg=False, ref_meg='grad') + assert set(picks_ref_mag) == set(picks_ref) + assert len(picks_ref_grad) == 0 + all_meg = np.arange(3, 306) + assert_array_equal(np.concatenate([picks_ref, picks_meg]), all_meg) + assert_array_equal(picks_meg_ref_mag, all_meg) + + +def test_pick_channels_regexp(): + """Test pick with regular expression.""" + ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333'] + assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0]) + assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2]) + assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2]) + + +def assert_indexing(info, picks_by_type, ref_meg=False, all_data=True): + """Assert our indexing functions work properly.""" + # First that our old and new channel typing functions are equivalent + _assert_channel_types(info) + # Next that channel_indices_by_type works + if not ref_meg: + idx = channel_indices_by_type(info) + for key in idx: + for p in picks_by_type: + if key == p[0]: + assert_array_equal(idx[key], p[1]) + break + else: + assert len(idx[key]) == 0 + # Finally, picks_by_type (if relevant) + if not all_data: + picks_by_type = [p for p 
in picks_by_type + if p[0] in _DATA_CH_TYPES_SPLIT] + picks_by_type = [(p[0], np.array(p[1], int)) for p in picks_by_type] + actual = _picks_by_type(info, ref_meg=ref_meg) + assert_object_equal(actual, picks_by_type) + if not ref_meg and idx['hbo']: # our old code had a bug + with pytest.raises(TypeError, match='unexpected keyword argument'): + _picks_by_type_old(info, ref_meg=ref_meg) + else: + old = _picks_by_type_old(info, ref_meg=ref_meg) + assert_object_equal(old, picks_by_type) + # test bads + info = info.copy() + info['bads'] = [info['chs'][picks_by_type[0][1][0]]['ch_name']] + picks_by_type = deepcopy(picks_by_type) + picks_by_type[0] = (picks_by_type[0][0], picks_by_type[0][1][1:]) + actual = _picks_by_type(info, ref_meg=ref_meg) + assert_object_equal(actual, picks_by_type) + + +def test_pick_seeg_ecog(): + """Test picking with sEEG and ECoG.""" + names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split() + types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), + ('seeg', [4, 5, 7]), ('ecog', [6, 8, 9])] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7]) + for i, t in enumerate(types): + assert_equal(channel_type(info, i), types[i]) + raw = RawArray(np.zeros((len(names), 10)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs(raw, events=events, event_id={'event': 0}, + tmin=-1e-5, tmax=1e-5, + baseline=(0, 0)) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True)) + e_seeg = evoked.copy().pick_types(meg=False, seeg=True) + for lt, rt in zip(e_seeg.ch_names, [names[4], names[5], names[7]]): + assert lt == rt + # Deal with constant debacle + raw = read_raw_fif(op.join(io_dir, 'tests', 'data', + 'test_chpi_raw_sss.fif')) + assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0) + + +def test_pick_dbs(): + """Test picking with DBS.""" + # gh-8739 + names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split() + types = 'mag mag eeg eeg dbs dbs dbs'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('dbs', [4, 5, 6])] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6]) + for i, t in enumerate(types): + assert channel_type(info, i) == types[i] + raw = RawArray(np.zeros((len(names), 7)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs(raw, events=events, event_id={'event': 0}, + tmin=-1e-5, tmax=1e-5, + baseline=(0, 0)) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True)) + e_dbs = evoked.copy().pick_types(meg=False, dbs=True) + for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]): + assert lt == rt + raw = read_raw_fif(op.join(io_dir, 'tests', 'data', + 'test_chpi_raw_sss.fif')) + assert len(pick_types(raw.info, meg=False, dbs=True)) == 0 + + +def test_pick_chpi(): + """Test picking cHPI.""" + # Make sure we don't mis-classify cHPI channels + info = read_info(op.join(io_dir, 'tests', 'data', 'test_chpi_raw_sss.fif')) + _assert_channel_types(info) + channel_types = _get_channel_types(info) + assert 'chpi' in channel_types + assert 'seeg' not in channel_types + assert 'ecog' not in channel_types + + +def test_pick_csd(): + """Test picking current source density channels.""" + # Make sure we don't mis-classify CSD channels + names = ['MEG 2331', 'MEG 2332', 'MEG 2333', 'A1', 'A2', 'Fz'] + types = 'mag
mag grad csd csd csd'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('grad', [2]), ('csd', [3, 4, 5])] + assert_indexing(info, picks_by_type, all_data=False) + + +def test_pick_bio(): + """Test picking BIO channels.""" + names = 'A1 A2 Fz O BIO1 BIO2 BIO3'.split() + types = 'mag mag eeg eeg bio bio bio'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('bio', [4, 5, 6])] + assert_indexing(info, picks_by_type, all_data=False) + + +def test_pick_fnirs(): + """Test picking fNIRS channels.""" + names = 'A1 A2 Fz O hbo1 hbo2 hbr1 fnirsRaw1 fnirsRaw2 fnirsOD1'.split() + types = 'mag mag eeg eeg hbo hbo hbr fnirs_cw_' \ + 'amplitude fnirs_cw_amplitude fnirs_od'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), + ('hbo', [4, 5]), ('hbr', [6]), + ('fnirs_cw_amplitude', [7, 8]), ('fnirs_od', [9])] + assert_indexing(info, picks_by_type) + + +def test_pick_ref(): + """Test picking ref_meg channels.""" + info = read_info(ctf_fname) + picks_by_type = [('stim', [0]), ('eog', [306, 307]), ('ecg', [308]), + ('misc', [1]), + ('mag', np.arange(31, 306)), + ('ref_meg', np.arange(2, 31))] + assert_indexing(info, picks_by_type, all_data=False) + picks_by_type.append(('mag', np.concatenate([picks_by_type.pop(-1)[1], + picks_by_type.pop(-1)[1]]))) + assert_indexing(info, picks_by_type, ref_meg=True, all_data=False) + + +def _check_fwd_n_chan_consistent(fwd, n_expected): + n_ok = len(fwd['info']['ch_names']) + n_sol = fwd['sol']['data'].shape[0] + assert_equal(n_expected, n_sol) + assert_equal(n_expected, n_ok) + + +@testing.requires_testing_data +def test_pick_forward_seeg_ecog(): + """Test picking forward with SEEG and ECoG.""" + fwd = read_forward_solution(fname_meeg) + counts = channel_indices_by_type(fwd['info']) + for key in counts.keys(): + counts[key] = len(counts[key]) + counts['meg'] = counts['mag'] + counts['grad'] + fwd_ = pick_types_forward(fwd, meg=True) + _check_fwd_n_chan_consistent(fwd_, counts['meg']) + fwd_ = pick_types_forward(fwd, meg=False, eeg=True) + _check_fwd_n_chan_consistent(fwd_, counts['eeg']) + # should raise exception related to emptiness + pytest.raises(ValueError, pick_types_forward, fwd, meg=False, seeg=True) + pytest.raises(ValueError, pick_types_forward, fwd, meg=False, ecog=True) + # change last chan from EEG to sEEG, second-to-last to ECoG + ecog_name = 'E1' + seeg_name = 'OTp1' + rename_channels(fwd['info'], {'EEG 059': ecog_name}) + rename_channels(fwd['info'], {'EEG 060': seeg_name}) + for ch in fwd['info']['chs']: + if ch['ch_name'] == seeg_name: + ch['kind'] = FIFF.FIFFV_SEEG_CH + ch['coil_type'] = FIFF.FIFFV_COIL_EEG + elif ch['ch_name'] == ecog_name: + ch['kind'] = FIFF.FIFFV_ECOG_CH + ch['coil_type'] = FIFF.FIFFV_COIL_EEG + fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name'] + fwd['sol']['row_names'][-2] = fwd['info']['chs'][-2]['ch_name'] + counts['eeg'] -= 2 + counts['seeg'] += 1 + counts['ecog'] += 1 + # repick & check + fwd_seeg = pick_types_forward(fwd, meg=False, seeg=True) + assert_equal(fwd_seeg['sol']['row_names'], [seeg_name]) + assert_equal(fwd_seeg['info']['ch_names'], [seeg_name]) + # should work fine + fwd_ = pick_types_forward(fwd, meg=True) + _check_fwd_n_chan_consistent(fwd_, counts['meg']) + fwd_ = pick_types_forward(fwd, meg=False, eeg=True) + _check_fwd_n_chan_consistent(fwd_, counts['eeg']) + fwd_ = pick_types_forward(fwd, meg=False, seeg=True) + _check_fwd_n_chan_consistent(fwd_, 
counts['seeg']) + fwd_ = pick_types_forward(fwd, meg=False, ecog=True) + _check_fwd_n_chan_consistent(fwd_, counts['ecog']) + + +def test_picks_by_channels(): + """Test creating pick_lists.""" + rng = np.random.RandomState(909) + + test_data = rng.random_sample((4, 2000)) + ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]] + ch_types = ['grad', 'mag', 'mag', 'eeg'] + sfreq = 250.0 + info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + _assert_channel_types(info) + raw = RawArray(test_data, info) + + pick_list = _picks_by_type(raw.info) + assert_equal(len(pick_list), 3) + assert_equal(pick_list[0][0], 'mag') + pick_list2 = _picks_by_type(raw.info, meg_combined=False) + assert_equal(len(pick_list), len(pick_list2)) + assert_equal(pick_list2[0][0], 'mag') + + pick_list2 = _picks_by_type(raw.info, meg_combined=True) + assert_equal(len(pick_list), len(pick_list2) + 1) + assert_equal(pick_list2[0][0], 'meg') + + test_data = rng.random_sample((4, 2000)) + ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]] + ch_types = ['mag', 'mag', 'mag', 'mag'] + sfreq = 250.0 + info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + raw = RawArray(test_data, info) + # This acts as a set, not an order + assert_array_equal(pick_channels(info['ch_names'], ['MEG 002', 'MEG 001']), + [0, 1]) + + # Make sure checks for list input work. + pytest.raises(ValueError, pick_channels, ch_names, 'MEG 001') + pytest.raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi') + + pick_list = _picks_by_type(raw.info) + assert_equal(len(pick_list), 1) + assert_equal(pick_list[0][0], 'mag') + pick_list2 = _picks_by_type(raw.info, meg_combined=True) + assert_equal(len(pick_list), len(pick_list2)) + assert_equal(pick_list2[0][0], 'mag') + + # pick_types type check + with pytest.raises(ValueError, match='must be of type'): + raw.pick_types(eeg='string') + + # duplicate check + names = ['MEG 002', 'MEG 002'] + assert len(pick_channels(raw.info['ch_names'], names)) == 1 + assert len(raw.copy().pick_channels(names)[0][0]) == 1 + + +def test_clean_info_bads(): + """Test cleaning info['bads'] when bad_channels are excluded.""" + raw_file = op.join(op.dirname(_root_init_fname), 'io', 'tests', 'data', + 'test_raw.fif') + raw = read_raw_fif(raw_file) + _assert_channel_types(raw.info) + + # select eeg channels + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + + # select 3 eeg channels as bads + idx_eeg_bad_ch = picks_eeg[[1, 5, 14]] + eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch] + + # select meg channels + picks_meg = pick_types(raw.info, meg=True, eeg=False) + + # select randomly 3 meg channels as bads + idx_meg_bad_ch = picks_meg[[0, 15, 34]] + meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch] + + # simulate the bad channels + raw.info['bads'] = eeg_bad_ch + meg_bad_ch + + # simulate the call to pick_info excluding the bad eeg channels + info_eeg = pick_info(raw.info, picks_eeg) + + # simulate the call to pick_info excluding the bad meg channels + info_meg = pick_info(raw.info, picks_meg) + + assert_equal(info_eeg['bads'], eeg_bad_ch) + assert_equal(info_meg['bads'], meg_bad_ch) + + info = pick_info(raw.info, picks_meg) + info._check_consistency() + info['bads'] += ['EEG 053'] + pytest.raises(RuntimeError, info._check_consistency) + with pytest.raises(ValueError, match='unique'): + pick_info(raw.info, [0, 0]) + + +@testing.requires_testing_data +def test_picks_to_idx(): + """Test checking type integrity checks of picks.""" + info = create_info(12, 1000., 'eeg') + 
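# A rough sketch of what _picks_to_idx does (hedged summary, not upstream + # documentation): it normalizes every public picks value (None, int, list of + # ints, channel name, or channel-type string) to an array of indices, e.g.: + # _picks_to_idx(info, None) -> np.arange(info['nchan']), minus bads + # _picks_to_idx(info, 'eeg') -> indices of all EEG channels + # _picks_to_idx(info, -1) -> np.array([info['nchan'] - 1]) +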
_assert_channel_types(info) + picks = np.arange(info['nchan']) + # Array and list + assert_array_equal(picks, _picks_to_idx(info, picks)) + assert_array_equal(picks, _picks_to_idx(info, list(picks))) + with pytest.raises(TypeError, match='data type of float64'): + _picks_to_idx(info, 1.) + # None + assert_array_equal(picks, _picks_to_idx(info, None)) + # Type indexing + assert_array_equal(picks, _picks_to_idx(info, 'eeg')) + assert_array_equal(picks, _picks_to_idx(info, ['eeg'])) + # Negative indexing + assert_array_equal([len(picks) - 1], _picks_to_idx(info, len(picks) - 1)) + assert_array_equal([len(picks) - 1], _picks_to_idx(info, -1)) + assert_array_equal([len(picks) - 1], _picks_to_idx(info, [-1])) + # Name indexing + assert_array_equal([2], _picks_to_idx(info, info['ch_names'][2])) + assert_array_equal(np.arange(5, 9), + _picks_to_idx(info, info['ch_names'][5:9])) + with pytest.raises(ValueError, match='must be >= '): + _picks_to_idx(info, -len(picks) - 1) + with pytest.raises(ValueError, match='must be < '): + _picks_to_idx(info, len(picks)) + with pytest.raises(ValueError, match='could not be interpreted'): + _picks_to_idx(info, ['a', 'b']) + with pytest.raises(ValueError, match='could not be interpreted'): + _picks_to_idx(info, 'b') + # bads behavior + info['bads'] = info['ch_names'][1:2] + picks_good = np.array([0] + list(range(2, 12))) + assert_array_equal(picks_good, _picks_to_idx(info, None)) + assert_array_equal(picks_good, _picks_to_idx(info, None, + exclude=info['bads'])) + assert_array_equal(picks, _picks_to_idx(info, None, exclude=())) + with pytest.raises(ValueError, match=' 1D, got'): + _picks_to_idx(info, [[1]]) + # MEG types + info = read_info(fname_mc) + meg_picks = np.arange(306) + mag_picks = np.arange(2, 306, 3) + grad_picks = np.setdiff1d(meg_picks, mag_picks) + assert_array_equal(meg_picks, _picks_to_idx(info, 'meg')) + assert_array_equal(meg_picks, _picks_to_idx(info, ('mag', 'grad'))) + assert_array_equal(mag_picks, _picks_to_idx(info, 'mag')) + assert_array_equal(grad_picks, _picks_to_idx(info, 'grad')) + + info = create_info(['eeg', 'foo'], 1000., 'eeg') + with pytest.raises(RuntimeError, match='equivalent to channel types'): + _picks_to_idx(info, 'eeg') + with pytest.raises(ValueError, match='same length'): + create_info(['a', 'b'], 1000., dict(hbo=['a'], hbr=['b'])) + info = create_info(['a', 'b'], 1000., ['hbo', 'hbr']) + assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs')) + assert_array_equal([0], _picks_to_idx(info, 'hbo')) + assert_array_equal([1], _picks_to_idx(info, 'hbr')) + info = create_info(['a', 'b'], 1000., ['hbo', 'misc']) + assert_array_equal(np.arange(len(info['ch_names'])), + _picks_to_idx(info, 'all')) + assert_array_equal([0], _picks_to_idx(info, 'data')) + info = create_info(['a', 'b'], 1000., ['fnirs_cw_amplitude', 'fnirs_od']) + assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs')) + assert_array_equal([0], _picks_to_idx(info, 'fnirs_cw_amplitude')) + assert_array_equal([1], _picks_to_idx(info, 'fnirs_od')) + info = create_info(['a', 'b'], 1000., ['fnirs_cw_amplitude', 'misc']) + assert_array_equal(np.arange(len(info['ch_names'])), + _picks_to_idx(info, 'all')) + assert_array_equal([0], _picks_to_idx(info, 'data')) + info = create_info(['a', 'b'], 1000., ['fnirs_od', 'misc']) + assert_array_equal(np.arange(len(info['ch_names'])), + _picks_to_idx(info, 'all')) + assert_array_equal([0], _picks_to_idx(info, 'data')) + + +def test_pick_channels_cov(): + """Test picking channels from a Covariance object.""" + info = 
create_info(['CH1', 'CH2', 'CH3'], 1., ch_types='eeg') + cov = make_ad_hoc_cov(info) + cov['data'] = np.array([1., 2., 3.]) + + cov_copy = pick_channels_cov(cov, ['CH2', 'CH1'], ordered=False, copy=True) + assert cov_copy.ch_names == ['CH1', 'CH2'] + assert_array_equal(cov_copy['data'], [1., 2.]) + + # Test re-ordering channels + cov_copy = pick_channels_cov(cov, ['CH2', 'CH1'], ordered=True, copy=True) + assert cov_copy.ch_names == ['CH2', 'CH1'] + assert_array_equal(cov_copy['data'], [2., 1.]) + + # Test picking in-place + pick_channels_cov(cov, ['CH2', 'CH1'], copy=False) + assert cov.ch_names == ['CH1', 'CH2'] + assert_array_equal(cov['data'], [1., 2.]) + + # Test whether `method` and `loglik` are dropped when None + cov['method'] = None + cov['loglik'] = None + cov_copy = pick_channels_cov(cov, ['CH1', 'CH2'], copy=True) + assert 'method' not in cov_copy + assert 'loglik' not in cov_copy + + +def test_pick_types_meg(): + """Test pick_types(meg=True).""" + # info with MEG channels at indices 1, 2, and 4 + info1 = create_info(6, 256, ["eeg", "mag", "grad", "misc", "grad", "hbo"]) + + assert list(pick_types(info1, meg=True)) == [1, 2, 4] + assert list(pick_types(info1, meg=True, eeg=True)) == [0, 1, 2, 4] + + assert list(pick_types(info1, meg=True)) == [1, 2, 4] + assert not list(pick_types(info1, meg=False)) # empty + assert list(pick_types(info1, meg='planar1')) == [2] + assert not list(pick_types(info1, meg='planar2')) # empty + + # info without any MEG channels + info2 = create_info(6, 256, ["eeg", "eeg", "eog", "misc", "stim", "hbo"]) + + assert not list(pick_types(info2)) # empty + assert list(pick_types(info2, eeg=True)) == [0, 1] + + +@pytest.mark.parametrize('meg', [True, False, 'grad', 'mag']) +@pytest.mark.parametrize('eeg', [True, False]) +@pytest.mark.parametrize('ordered', [True, False]) +def test_get_channel_types_equiv(meg, eeg, ordered): + """Test equivalence of get_channel_types.""" + raw = read_raw_fif(fif_fname) + pick_types(raw.info, meg=meg, eeg=eeg) + picks = pick_types(raw.info, meg=meg, eeg=eeg) + if not ordered: + picks = np.random.RandomState(0).permutation(picks) + if not meg and not eeg: + with pytest.raises(ValueError, match='No appropriate channels'): + raw.get_channel_types(picks=picks) + return + types = np.array(raw.get_channel_types(picks=picks)) + types_iter = np.array([channel_type(raw.info, idx) for idx in picks]) + assert_array_equal(types, types_iter) diff --git a/python/libs/mne/io/tests/test_proc_history.py b/python/libs/mne/io/tests/test_proc_history.py new file mode 100644 index 0000000..a1234ea --- /dev/null +++ b/python/libs/mne/io/tests/test_proc_history.py @@ -0,0 +1,38 @@ +# Authors: Denis A. Engemann +# Eric Larson +# License: Simplified BSD + +import os.path as op + +import numpy as np +from numpy.testing import assert_array_equal + +from mne.io import read_info +from mne.io.constants import FIFF + +base_dir = op.join(op.dirname(__file__), 'data') +raw_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') + + +def test_maxfilter_io(): + """Test maxfilter io.""" + info = read_info(raw_fname) + mf = info['proc_history'][1]['max_info'] + + assert mf['sss_info']['frame'] == FIFF.FIFFV_COORD_HEAD + # based on manual 2.0, rev. 
5.0 page 23 + assert 5 <= mf['sss_info']['in_order'] <= 11 + assert mf['sss_info']['out_order'] <= 5 + assert mf['sss_info']['nchan'] > len(mf['sss_info']['components']) + + assert (info['ch_names'][:mf['sss_info']['nchan']] == + mf['sss_ctc']['proj_items_chs']) + assert (mf['sss_ctc']['decoupler'].shape == + (mf['sss_info']['nchan'], mf['sss_info']['nchan'])) + assert_array_equal( + np.unique(np.diag(mf['sss_ctc']['decoupler'].toarray())), + np.array([1.], dtype=np.float32)) + assert mf['sss_cal']['cal_corrs'].shape == (306, 14) + assert mf['sss_cal']['cal_chans'].shape == (306, 2) + vv_coils = [v for k, v in FIFF.items() if 'FIFFV_COIL_VV' in k] + assert all(k in vv_coils for k in set(mf['sss_cal']['cal_chans'][:, 1])) diff --git a/python/libs/mne/io/tests/test_raw.py b/python/libs/mne/io/tests/test_raw.py new file mode 100644 index 0000000..6c74484 --- /dev/null +++ b/python/libs/mne/io/tests/test_raw.py @@ -0,0 +1,820 @@ +# -*- coding: utf-8 -*- +"""Generic tests that all raw classes should run.""" +# Authors: MNE Developers +# Stefan Appelhoff +# +# License: BSD-3-Clause + +from contextlib import redirect_stdout +from io import StringIO +import math +import os +from os import path as op +from pathlib import Path +import re + +import pytest +import numpy as np +from numpy.testing import (assert_allclose, assert_array_almost_equal, + assert_array_equal, assert_array_less) + +from mne import concatenate_raws, create_info, Annotations, pick_types +from mne.datasets import testing +from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5 +from mne.io.base import _get_scaling +from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt, + object_diff, check_version, requires_pandas, + _import_h5io_funcs) +from mne.io.meas_info import _get_valid_units +from mne.io._digitization import DigPoint +from mne.io.proj import Projection +from mne.io.utils import _mult_cal_one +from mne.io.constants import FIFF + + +def assert_named_constants(info): + """Assert that info['chs'] has named constants.""" + # for now we just check one + __tracebackhide__ = True + r = repr(info['chs'][0]) + for check in ('.*FIFFV_COORD_.*', '.*FIFFV_COIL_.*', '.*FIFF_UNIT_.*', + '.*FIFF_UNITM_.*',): + assert re.match(check, r, re.DOTALL) is not None, (check, r) + + +def test_orig_units(): + """Test the error handling for original units.""" + # Should work fine + info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') + BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'}) + + # Should complain that channel Cz does not have a corresponding original + # unit. + with pytest.raises(ValueError, match='has no associated original unit.'): + info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') + BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'}) + + # Test that a non-dict orig_units argument raises a ValueError + with pytest.raises(ValueError, match='orig_units must be of type dict'): + info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') + BaseRaw(info, last_samps=[1], orig_units=True) + + +def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, + boundary_decimal=2, test_scaling=True, test_rank=True, + **kwargs): + """Test reading, writing and slicing of raw classes. + + Parameters + ---------- + reader : function + Function to test. + test_preloading : bool + Whether the reader supports ``preload=False``. If True, preloaded, + non-preloaded, and memory-mapped-from-file reads are all tested and + compared. + test_kwargs : bool + Whether to test that the reader can be re-created from + ``raw._init_kwargs``.
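+ test_scaling : bool + Whether to check that the loaded data fall in a plausible SI-unit + range (description inferred from the atol-based checks in the body). + test_rank : bool | str + Expected effect of the rank-1 projector on the data rank: True or + 'equal' for equality, 'less' for an upper bound, False to skip + (description inferred from how ``test_rank`` is consumed in the body).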
+ boundary_decimal : int + Number of decimals up to which the boundary should match. + **kwargs : + Arguments for the reader. Note: Do not use preload as kwarg. + Use ``test_preloading`` instead. + + Returns + ------- + raw : instance of Raw + A preloaded Raw object. + """ + tempdir = _TempDir() + rng = np.random.RandomState(0) + montage = None + if "montage" in kwargs: + montage = kwargs['montage'] + del kwargs['montage'] + if test_preloading: + raw = reader(preload=True, **kwargs) + rep = repr(raw) + assert rep.count('<') == 1 + assert rep.count('>') == 1 + if montage is not None: + raw.set_montage(montage) + # don't assume the first is preloaded + buffer_fname = op.join(tempdir, 'buffer') + picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10] + picks = np.append(picks, len(raw.ch_names) - 1) # test trigger channel + bnd = min(int(round(raw.buffer_size_sec * + raw.info['sfreq'])), raw.n_times) + slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), + slice(3, 300), slice(None), slice(1, bnd)] + if raw.n_times >= 2 * bnd: # at least two complete blocks + slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1), + slice(0, bnd + 100)] + other_raws = [reader(preload=buffer_fname, **kwargs), + reader(preload=False, **kwargs)] + for sl_time in slices: + data1, times1 = raw[picks, sl_time] + for other_raw in other_raws: + data2, times2 = other_raw[picks, sl_time] + assert_allclose(data1, data2) + assert_allclose(times1, times2) + + # test projection vs cals and data units + other_raw = reader(preload=False, **kwargs) + other_raw.del_proj() + eeg = meg = fnirs = False + if 'eeg' in raw: + eeg, atol = True, 1e-18 + elif 'grad' in raw: + meg, atol = 'grad', 1e-24 + elif 'mag' in raw: + meg, atol = 'mag', 1e-24 + elif 'hbo' in raw: + fnirs, atol = 'hbo', 1e-10 + elif 'hbr' in raw: + fnirs, atol = 'hbr', 1e-10 + else: + assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?' + fnirs, atol = 'fnirs_cw_amplitude', 1e-10 + picks = pick_types( + other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs) + col_names = [other_raw.ch_names[pick] for pick in picks] + proj = np.ones((1, len(picks))) + proj /= np.sqrt(proj.shape[1]) + proj = Projection( + data=dict(data=proj, nrow=1, row_names=None, + col_names=col_names, ncol=len(picks)), + active=False) + assert len(other_raw.info['projs']) == 0 + other_raw.add_proj(proj) + assert len(other_raw.info['projs']) == 1 + # Orders of projector application, data loading, and reordering + # equivalent: + # 1. load->apply->get + data_load_apply_get = \ + other_raw.copy().load_data().apply_proj().get_data(picks) + # 2. apply->get (and don't allow apply->pick) + apply = other_raw.copy().apply_proj() + data_apply_get = apply.get_data(picks) + data_apply_get_0 = apply.get_data(picks[0])[0] + with pytest.raises(RuntimeError, match='loaded'): + apply.copy().pick(picks[0]).get_data() + # 3. apply->load->get + data_apply_load_get = apply.copy().load_data().get_data(picks) + data_apply_load_get_0, data_apply_load_get_1 = \ + apply.copy().load_data().pick(picks[:2]).get_data() + # 4. 
reorder->apply->load->get + all_picks = np.arange(len(other_raw.ch_names)) + reord = np.concatenate(( + picks[1::2], + picks[0::2], + np.setdiff1d(all_picks, picks))) + rev = np.argsort(reord) + assert_array_equal(reord[rev], all_picks) + assert_array_equal(rev[reord], all_picks) + reorder = other_raw.copy().pick(reord) + assert reorder.ch_names == [other_raw.ch_names[r] for r in reord] + assert reorder.ch_names[0] == other_raw.ch_names[picks[1]] + assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1])) + reorder_apply = reorder.copy().apply_proj() + assert reorder_apply.ch_names == reorder.ch_names + assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]] + assert_allclose(reorder_apply.get_data([0]), apply.get_data(picks[1]), + atol=1e-18) + data_reorder_apply_load_get = \ + reorder_apply.load_data().get_data(rev[:len(picks)]) + data_reorder_apply_load_get_1 = \ + reorder_apply.copy().load_data().pick([0]).get_data()[0] + assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]] + assert (data_load_apply_get.shape == + data_apply_get.shape == + data_apply_load_get.shape == + data_reorder_apply_load_get.shape) + del apply + # first check that our data are (probably) in the right units + data = data_load_apply_get.copy() + data = data - np.mean(data, axis=1, keepdims=True) # can be offsets + np.abs(data, out=data) + if test_scaling: + maxval = atol * 1e16 + assert_array_less(data, maxval) + minval = atol * 1e6 + assert_array_less(minval, np.median(data)) + else: + atol = 1e-7 * np.median(data) # 1e-7 * MAD + # ranks should all be reduced by 1 + if test_rank == 'less': + cmp = np.less + elif test_rank is False: + cmp = None + else: # anything else is like True or 'equal' + assert test_rank is True or test_rank == 'equal', test_rank + cmp = np.equal + rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get) + rank_apply_get = np.linalg.matrix_rank(data_apply_get) + rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get) + if cmp is not None: + assert cmp(rank_load_apply_get, len(col_names) - 1) + assert cmp(rank_apply_get, len(col_names) - 1) + assert cmp(rank_apply_load_get, len(col_names) - 1) + # and they should all match + t_kw = dict( + atol=atol, err_msg='before != after, likely _mult_cal_one prob') + assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw) + assert_allclose(data_apply_load_get_1, + data_reorder_apply_load_get_1, **t_kw) + assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw) + assert_allclose(data_load_apply_get, data_apply_get, **t_kw) + assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw) + if 'eeg' in raw: + other_raw.del_proj() + direct = \ + other_raw.copy().load_data().set_eeg_reference().get_data() + other_raw.set_eeg_reference(projection=True) + assert len(other_raw.info['projs']) == 1 + this_proj = other_raw.info['projs'][0]['data'] + assert this_proj['col_names'] == col_names + assert this_proj['data'].shape == proj['data']['data'].shape + assert_allclose( + np.linalg.norm(proj['data']['data']), 1., atol=1e-6) + assert_allclose( + np.linalg.norm(this_proj['data']), 1., atol=1e-6) + assert_allclose(this_proj['data'], proj['data']['data']) + proj = other_raw.apply_proj().get_data() + assert_allclose(proj[picks], data_load_apply_get, atol=1e-10) + assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg']) + else: + raw = reader(**kwargs) + n_samp = len(raw.times) + assert_named_constants(raw.info) + # smoke test for gh #9743 + ids = [id(ch['loc']) for ch in raw.info['chs']] + assert 
len(set(ids)) == len(ids) + + full_data = raw._data + assert raw.__class__.__name__ in repr(raw) # to test repr + assert raw.info.__class__.__name__ in repr(raw.info) + assert isinstance(raw.info['dig'], (type(None), list)) + data_max = full_data.max() + data_min = full_data.min() + # these limits could be relaxed if we actually find data with + # huge values (in SI units) + assert data_max < 1e5 + assert data_min > -1e5 + if isinstance(raw.info['dig'], list): + for di, d in enumerate(raw.info['dig']): + assert isinstance(d, DigPoint), (di, d) + + # gh-5604 + meas_date = raw.info['meas_date'] + assert meas_date is None or meas_date >= _stamp_to_dt((0, 0)) + + # test repr_html + assert 'Good channels' in raw.info._repr_html_() + + # test resetting raw + if test_kwargs: + raw2 = reader(**raw._init_kwargs) + assert set(raw.info.keys()) == set(raw2.info.keys()) + assert_array_equal(raw.times, raw2.times) + + # Test saving and reading + out_fname = op.join(tempdir, 'test_raw.fif') + raw = concatenate_raws([raw]) + raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1) + + # Test saving with not correct extension + out_fname_h5 = op.join(tempdir, 'test_raw.h5') + with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'): + raw.save(out_fname_h5) + + raw3 = read_raw_fif(out_fname) + assert_named_constants(raw3.info) + assert set(raw.info.keys()) == set(raw3.info.keys()) + assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6, + atol=1e-20) # atol is very small but > 0 + assert_allclose(raw.times, raw3.times, atol=1e-6, rtol=1e-6) + + assert not math.isnan(raw3.info['highpass']) + assert not math.isnan(raw3.info['lowpass']) + assert not math.isnan(raw.info['highpass']) + assert not math.isnan(raw.info['lowpass']) + + assert raw3.info['kit_system_id'] == raw.info['kit_system_id'] + + # Make sure concatenation works + first_samp = raw.first_samp + last_samp = raw.last_samp + concat_raw = concatenate_raws([raw.copy(), raw]) + assert concat_raw.n_times == 2 * raw.n_times + assert concat_raw.first_samp == first_samp + assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1 + idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0] + + expected_bad_boundary_onset = raw._last_time + + assert_array_almost_equal(concat_raw.annotations.onset[idx], + expected_bad_boundary_onset, + decimal=boundary_decimal) + + if raw.info['meas_id'] is not None: + for key in ['secs', 'usecs', 'version']: + assert raw.info['meas_id'][key] == raw3.info['meas_id'][key] + assert_array_equal(raw.info['meas_id']['machid'], + raw3.info['meas_id']['machid']) + + assert isinstance(raw.annotations, Annotations) + + # Make a "soft" test on units: They have to be valid SI units as in + # mne.io.meas_info.valid_units, but we accept any lower/upper case for now. 
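+ # e.g. an _orig_units entry of {'Cz': 'V'} passes because 'v' is the + # lowercased form of a valid unit, whereas {'Cz': 'bogus'} would trip the + # assertion below (illustrative values only, not from any particular reader)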
+ valid_units = _get_valid_units() + valid_units_lower = [unit.lower() for unit in valid_units] + if raw._orig_units is not None: + assert isinstance(raw._orig_units, dict) + for ch_name, unit in raw._orig_units.items(): + assert unit.lower() in valid_units_lower, ch_name + + # Test picking with and without preload + if test_preloading: + preload_kwargs = (dict(preload=True), dict(preload=False)) + else: + preload_kwargs = (dict(),) + n_ch = len(raw.ch_names) + picks = rng.permutation(n_ch) + for preload_kwarg in preload_kwargs: + these_kwargs = kwargs.copy() + these_kwargs.update(preload_kwarg) + # don't use the same filename or it could create problems + if isinstance(these_kwargs.get('preload', None), str) and \ + op.isfile(these_kwargs['preload']): + these_kwargs['preload'] += '-1' + whole_raw = reader(**these_kwargs) + print(whole_raw) # __repr__ + assert n_ch >= 2 + picks_1 = picks[:n_ch // 2] + picks_2 = picks[n_ch // 2:] + raw_1 = whole_raw.copy().pick(picks_1) + raw_2 = whole_raw.copy().pick(picks_2) + data, times = whole_raw[:] + data_1, times_1 = raw_1[:] + data_2, times_2 = raw_2[:] + assert_array_equal(times, times_1) + assert_array_equal(data[picks_1], data_1) + assert_array_equal(times, times_2,) + assert_array_equal(data[picks_2], data_2) + + # Make sure that writing info to h5 format + # (all fields should be compatible) + if check_version('h5io'): + read_hdf5, write_hdf5 = _import_h5io_funcs() + fname_h5 = op.join(tempdir, 'info.h5') + with _writing_info_hdf5(raw.info): + write_hdf5(fname_h5, raw.info) + new_info = Info(read_hdf5(fname_h5)) + assert object_diff(new_info, raw.info) == '' + + # Make sure that changing directory does not break anything + if test_preloading: + these_kwargs = kwargs.copy() + key = None + for key in ('fname', + 'input_fname', # artemis123 + 'vhdr_fname', # BV + 'pdf_fname', # BTi + 'directory', # CTF + 'filename', # nedf + ): + try: + fname = kwargs[key] + except KeyError: + key = None + else: + break + # len(kwargs) == 0 for the fake arange reader + if len(kwargs): + assert key is not None, sorted(kwargs.keys()) + dirname = op.dirname(fname) + these_kwargs[key] = op.basename(fname) + these_kwargs['preload'] = False + orig_dir = os.getcwd() + try: + os.chdir(dirname) + raw_chdir = reader(**these_kwargs) + finally: + os.chdir(orig_dir) + raw_chdir.load_data() + + # make sure that cropping works (with first_samp shift) + if n_samp >= 50: # we crop to this number of samples below + for t_prop in (0., 0.5): + _test_raw_crop(reader, t_prop, kwargs) + if test_preloading: + use_kwargs = kwargs.copy() + use_kwargs['preload'] = True + _test_raw_crop(reader, t_prop, use_kwargs) + + return raw + + +def _test_raw_crop(reader, t_prop, kwargs): + raw_1 = reader(**kwargs) + n_samp = 50 # crop to this number of samples (per instance) + crop_t = n_samp / raw_1.info['sfreq'] + t_start = t_prop * crop_t # also crop to some fraction into the first inst + extra = f' t_start={t_start}, preload={kwargs.get("preload", False)}' + stop = (n_samp - 1) / raw_1.info['sfreq'] + raw_1.crop(0, stop) + assert len(raw_1.times) == 50 + first_time = raw_1.first_time + atol = 0.5 / raw_1.info['sfreq'] + assert_allclose(raw_1.times[-1], stop, atol=atol) + raw_2, raw_3 = raw_1.copy(), raw_1.copy() + t_tot = raw_1.times[-1] * 3 + 2. 
/ raw_1.info['sfreq'] + raw_concat = concatenate_raws([raw_1, raw_2, raw_3]) + assert len(raw_concat._filenames) == 3 + assert_allclose(raw_concat.times[-1], t_tot) + assert_allclose(raw_concat.first_time, first_time) + # keep all instances, but crop to t_start at the beginning + raw_concat.crop(t_start, None) + assert len(raw_concat._filenames) == 3 + assert_allclose(raw_concat.times[-1], t_tot - t_start, atol=atol) + assert_allclose( + raw_concat.first_time, first_time + t_start, atol=atol, + err_msg=f'Base concat, {extra}') + # drop the first instance + raw_concat.crop(crop_t, None) + assert len(raw_concat._filenames) == 2 + assert_allclose( + raw_concat.times[-1], t_tot - t_start - crop_t, atol=atol) + assert_allclose( + raw_concat.first_time, first_time + t_start + crop_t, + atol=atol, err_msg=f'Dropping one, {extra}') + # drop the second instance, leaving just one + raw_concat.crop(crop_t, None) + assert len(raw_concat._filenames) == 1 + assert_allclose( + raw_concat.times[-1], t_tot - t_start - 2 * crop_t, atol=atol) + assert_allclose( + raw_concat.first_time, first_time + t_start + 2 * crop_t, + atol=atol, err_msg=f'Dropping two, {extra}') + + +def _test_concat(reader, *args): + """Test concatenation of raw classes that allow not preloading.""" + data = None + + for preload in (True, False): + raw1 = reader(*args, preload=preload) + raw2 = reader(*args, preload=preload) + raw1.append(raw2) + raw1.load_data() + if data is None: + data = raw1[:, :][0] + assert_allclose(data, raw1[:, :][0]) + + for first_preload in (True, False): + raw = reader(*args, preload=first_preload) + data = raw[:, :][0] + for preloads in ((True, True), (True, False), (False, False)): + for last_preload in (True, False): + t_crops = raw.times[np.argmin(np.abs(raw.times - 0.5)) + + [0, 1]] + raw1 = raw.copy().crop(0, t_crops[0]) + if preloads[0]: + raw1.load_data() + raw2 = raw.copy().crop(t_crops[1], None) + if preloads[1]: + raw2.load_data() + raw1.append(raw2) + if last_preload: + raw1.load_data() + assert_allclose(data, raw1[:, :][0]) + + +@testing.requires_testing_data +def test_time_as_index(): + """Test indexing of raw times.""" + raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', + 'data', 'test_raw.fif') + raw = read_raw_fif(raw_fname) + + # Test original (non-rounding) indexing behavior + orig_inds = raw.time_as_index(raw.times) + assert(len(set(orig_inds)) != len(orig_inds)) + + # Test new (rounding) indexing behavior + new_inds = raw.time_as_index(raw.times, use_rounding=True) + assert_array_equal(new_inds, np.arange(len(raw.times))) + + +@pytest.mark.parametrize('offset, origin', [ + pytest.param(0, None, id='times in s. relative to first_samp (default)'), + pytest.param(0, 2.0, id='times in s. relative to first_samp'), + pytest.param(1, 1.0, id='times in s. relative to meas_date'), + pytest.param(2, 0.0, id='absolute times in s. relative to 0')]) +def test_time_as_index_ref(offset, origin): + """Test indexing of raw times.""" + info = create_info(ch_names=10, sfreq=10.) 
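+ # Sample i sits at absolute time meas_date + first_samp / sfreq + i / sfreq + # (= 2.0 s + i / 10 here); every (offset, origin) pair above encodes that + # same instant (offset + origin == 2.0 s, with None meaning the default + # first_samp-relative origin), so time_as_index must return 0..n_times-1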
+ raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10) + raw.set_meas_date(1) + + relative_times = raw.times + inds = raw.time_as_index(relative_times + offset, + use_rounding=True, + origin=origin) + assert_array_equal(inds, np.arange(raw.n_times)) + + +def test_meas_date_orig_time(): + """Test the relation between meas_date and orig_time.""" + # meas_date is set and orig_time is set: + # clip the annotations based on raw.data and reset the annotations based + # on raw.info['meas_date'] + raw = _raw_annot(1, 1.5) + assert raw.annotations.orig_time == _stamp_to_dt((1, 0)) + assert raw.annotations.onset[0] == 1 + + # meas_date is set and orig_time is None: + # consider annot.orig_time to be raw.first_samp, clip and reset the + # annotations so that raw.annotations.orig_time == raw.info['meas_date'] + raw = _raw_annot(1, None) + assert raw.annotations.orig_time == _stamp_to_dt((1, 0)) + assert raw.annotations.onset[0] == 1.5 + + # meas_date is None and orig_time is set: + # raise an error; it makes no sense to attach annotations with a known + # acquisition time to a raw object that does not know when it was acquired + with pytest.raises(RuntimeError, match='Ambiguous operation'): + _raw_annot(None, 1.5) + + # meas_date is None and orig_time is None: + # consider annot.orig_time to be raw.first_samp and clip + raw = _raw_annot(None, None) + assert raw.annotations.orig_time is None + assert raw.annotations.onset[0] == 1.5 + assert raw.annotations.duration[0] == 0.2 + + +def test_get_data_reject(): + """Test if reject_by_annotation is working correctly.""" + fs = 256 + ch_names = ["C3", "Cz", "C4"] + info = create_info(ch_names, sfreq=fs) + raw = RawArray(np.zeros((len(ch_names), 10 * fs)), info) + raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2], + description="bad")) + + with catch_logging() as log: + data = raw.get_data(reject_by_annotation="omit", verbose=True) + msg = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536' + + ' (60.00%) samples.') + assert log.getvalue().strip() == msg + assert data.shape == (len(ch_names), 1536) + with catch_logging() as log: + data = raw.get_data(reject_by_annotation="nan", verbose=True) + msg = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536' + + ' (60.00%) samples.') + assert log.getvalue().strip() == msg + assert data.shape == (len(ch_names), 2560) # shape doesn't change + assert np.isnan(data).sum() == 3072 # but NaNs are introduced instead + + +def test_5839(): + """Test concatenating raw objects with annotations.""" + # Global Time 0 1 2 3 4 + # . + # raw_A |---------XXXXXXXXXX + # annot |--------------AA + # latency . 0 0 1 1 2 2 3 + # . 5 0 5 0 5 0 + # + # raw_B . |---------YYYYYYYYYY + # annot . |--------------AA + # latency . 0 1 + # . 5 0 + # . + # output |---------XXXXXXXXXXYYYYYYYYYY + # annot |--------------AA---|----AA + # latency . 0 0 1 1 2 2 3 + # . 
5 0 5 0 5 0 + # + EXPECTED_ONSET = [1.5, 2., 2., 2.5] + EXPECTED_DURATION = [0.2, 0., 0., 0.2] + EXPECTED_DESCRIPTION = ['dummy', 'BAD boundary', 'EDGE boundary', 'dummy'] + + def raw_factory(meas_date): + raw = RawArray(data=np.empty((10, 10)), + info=create_info(ch_names=10, sfreq=10.), + first_samp=10) + raw.set_meas_date(meas_date) + raw.set_annotations(annotations=Annotations(onset=[.5], + duration=[.2], + description='dummy', + orig_time=None)) + return raw + + raw_A, raw_B = [raw_factory((x, 0)) for x in [0, 2]] + raw_A.append(raw_B) + + assert_array_equal(raw_A.annotations.onset, EXPECTED_ONSET) + assert_array_equal(raw_A.annotations.duration, EXPECTED_DURATION) + assert_array_equal(raw_A.annotations.description, EXPECTED_DESCRIPTION) + assert raw_A.annotations.orig_time == _stamp_to_dt((0, 0)) + + +def test_repr(): + """Test repr of Raw.""" + sfreq = 256 + info = create_info(3, sfreq) + raw = RawArray(np.zeros((3, 10 * sfreq)), info) + r = repr(raw) + assert re.search('', + r) is not None, r + assert raw._repr_html_() + + +# A class that sets channel data to np.arange, for testing _test_raw_reader +class _RawArange(BaseRaw): + + def __init__(self, preload=False, verbose=None): + info = create_info(list(str(x) for x in range(1, 9)), 1000., 'eeg') + super().__init__(info, preload, last_samps=(999,), verbose=verbose) + assert len(self.times) == 1000 + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + one = np.full((8, stop - start), np.nan) + one[idx] = np.arange(1, 9)[idx, np.newaxis] + _mult_cal_one(data, one, idx, cals, mult) + + +def _read_raw_arange(preload=False, verbose=None): + return _RawArange(preload, verbose) + + +def test_test_raw_reader(): + """Test _test_raw_reader.""" + _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank='less') + + +@pytest.mark.slowtest +def test_describe_print(): + """Test print output of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + # test print output + f = StringIO() + with redirect_stdout(f): + raw.describe() + s = f.getvalue().strip().split("\n") + assert len(s) == 378 + # Can be 3.1, 3.3, etc. 
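+ # s[0] is the one-line Raw summary (e.g. '<Raw | test_raw.fif, ...>'); the + # reported size varies across platforms/versions, hence the loose match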
+ assert re.match( + r'', s[0]) is not None, s[0] + assert s[1] == " ch name type unit min Q1 median Q3 max" # noqa + assert s[2] == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa + assert s[-1] == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa + + +@requires_pandas +@pytest.mark.slowtest +def test_describe_df(): + """Test returned data frame of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + df = raw.describe(data_frame=True) + assert df.shape == (376, 8) + assert (df.columns.tolist() == ["name", "type", "unit", "min", "Q1", + "median", "Q3", "max"]) + assert df.index.name == "ch" + assert_allclose(df.iloc[0, 3:].astype(float), + np.array([-2.218017605790535e-11, + -3.857421923113974e-12, + -9.643554807784935e-13, + 1.928710961556987e-12, + 4.146728567347522e-11])) + + +def test_get_data_units(): + """Test the "units" argument of get_data method.""" + # Test the unit conversion function + assert _get_scaling('eeg', 'uV') == 1e6 + assert _get_scaling('eeg', 'dV') == 1e1 + assert _get_scaling('eeg', 'pV') == 1e12 + assert _get_scaling('mag', 'fT') == 1e15 + assert _get_scaling('grad', 'T/m') == 1 + assert _get_scaling('grad', 'T/mm') == 1e-3 + assert _get_scaling('grad', 'fT/m') == 1e15 + assert _get_scaling('grad', 'fT/cm') == 1e13 + assert _get_scaling('csd', 'uV/cm²') == 1e2 + + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + last = np.array([4.63803098e-05, 7.66563736e-05, 2.71933595e-04]) + last_eeg = np.array([7.12207023e-05, 4.63803098e-05, 7.66563736e-05]) + last_grad = np.array([-3.85742192e-12, 9.64355481e-13, -1.06079103e-11]) + + # None + data_none = raw.get_data() + assert data_none.shape == (376, 14400) + assert_array_almost_equal(data_none[-3:, -1], last) + + # str: unit no conversion + data_str_noconv = raw.get_data(picks=['eeg'], units='V') + assert data_str_noconv.shape == (60, 14400) + assert_array_almost_equal(data_str_noconv[-3:, -1], last_eeg) + # str: simple unit + data_str_simple = raw.get_data(picks=['eeg'], units='uV') + assert data_str_simple.shape == (60, 14400) + assert_array_almost_equal(data_str_simple[-3:, -1], last_eeg * 1e6) + # str: fraction unit + data_str_fraction = raw.get_data(picks=['grad'], units='fT/cm') + assert data_str_fraction.shape == (204, 14400) + assert_array_almost_equal(data_str_fraction[-3:, -1], + last_grad * (1e15 / 1e2)) + # str: more than one channel type but one with unit + data_str_simplestim = raw.get_data(picks=['eeg', 'stim'], units='V') + assert data_str_simplestim.shape == (69, 14400) + assert_array_almost_equal(data_str_simplestim[-3:, -1], last_eeg) + # str: too many channels + with pytest.raises(ValueError, match='more than one channel'): + raw.get_data(units='uV') + # str: invalid unit + with pytest.raises(ValueError, match='is not a valid unit'): + raw.get_data(picks=['eeg'], units='fV/cm') + + # dict: combination of simple and fraction units + data_dict = raw.get_data(units=dict(grad='fT/cm', mag='fT', eeg='uV')) + assert data_dict.shape == (376, 14400) + assert_array_almost_equal(data_dict[0, -1], + -3.857421923113974e-12 * (1e15 / 1e2)) + assert_array_almost_equal(data_dict[2, -1], -2.1478272253525944e-13 * 1e15) + assert_array_almost_equal(data_dict[-2, -1], 7.665637356879529e-05 * 1e6) + # dict: channel type not in instance + data_dict_notin = raw.get_data(units=dict(hbo='uM')) + assert data_dict_notin.shape == (376, 14400) + assert_array_almost_equal(data_dict_notin[-3:, -1], last) 
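+ # To summarize the contract exercised above: units may be None (data as + # stored, in SI), a single str (only if exactly one picked channel type + # carries a unit, e.g. raw.get_data(picks=['eeg'], units='uV')), or a dict + # mapping channel type -> unit for mixed picks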
+ # dict: one invalid unit + with pytest.raises(ValueError, match='is not a valid unit'): + raw.get_data(units=dict(grad='fT/cV', mag='fT', eeg='uV')) + # dict: one invalid channel type + with pytest.raises(KeyError, match='is not a channel type'): + raw.get_data(units=dict(bad_type='fT/cV', mag='fT', eeg='uV')) + + # not the good type + with pytest.raises(TypeError, match='instance of None, str, or dict'): + raw.get_data(units=['fT/cm', 'fT', 'uV']) + + +def test_repr_dig_point(): + """Test printing of DigPoint.""" + dp = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_COORD_HEAD, + kind=FIFF.FIFFV_POINT_EEG, ident=0) + assert 'mm' in repr(dp) + + dp = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_MNE_COORD_MRI_VOXEL, + kind=FIFF.FIFFV_POINT_CARDINAL, ident=0) + assert 'mm' not in repr(dp) + assert 'voxel' in repr(dp) + + +def test_get_data_tmin_tmax(): + """Test tmin and tmax parameters of get_data method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + # tmin and tmax just use time_as_index under the hood + tmin, tmax = (1, 9) + d1 = raw.get_data() + d2 = raw.get_data(tmin=tmin, tmax=tmax) + + idxs = raw.time_as_index([tmin, tmax]) + assert_allclose(d1[:, idxs[0]:idxs[1]], d2) + + # specifying a too low tmin truncates to idx 0 + d3 = raw.get_data(tmin=-5) + assert_allclose(d3, d1) + + # specifying a too high tmax truncates to idx n_times + d4 = raw.get_data(tmax=1e6) + assert_allclose(d4, d1) + + # when start/stop are passed, tmin/tmax are ignored + d5 = raw.get_data(start=1, stop=2, tmin=tmin, tmax=tmax) + assert d5.shape[1] == 1 + + # validate inputs are properly raised + with pytest.raises(TypeError, match='start must be .* int'): + raw.get_data(start=None) + + with pytest.raises(TypeError, match='stop must be .* int'): + raw.get_data(stop=2.3) + + with pytest.raises(TypeError, match='tmin must be .* float'): + raw.get_data(tmin=[1, 2]) + + with pytest.raises(TypeError, match='tmax must be .* float'): + raw.get_data(tmax=[1, 2]) diff --git a/python/libs/mne/io/tests/test_read_raw.py b/python/libs/mne/io/tests/test_read_raw.py new file mode 100644 index 0000000..3cf3852 --- /dev/null +++ b/python/libs/mne/io/tests/test_read_raw.py @@ -0,0 +1,46 @@ +"""Test generic read_raw function.""" + +# Authors: Clemens Brunner +# +# License: BSD-3-Clause + +from pathlib import Path + +import pytest + +from mne.io import read_raw +from mne.datasets import testing + + +base = Path(__file__).parent.parent +test_base = Path(testing.data_path(download=False)) + + +@pytest.mark.parametrize('fname', ['x.xxx', 'x']) +def test_read_raw_unsupported(fname): + """Test handling of unsupported file types.""" + with pytest.raises(ValueError, match='Unsupported file type'): + read_raw(fname) + + +@pytest.mark.parametrize('fname', ['x.vmrk', 'x.eeg']) +def test_read_raw_suggested(fname): + """Test handling of unsupported file types with suggested alternatives.""" + with pytest.raises(ValueError, match='Try reading'): + read_raw(fname) + + +@pytest.mark.parametrize('fname', [ + base / 'edf/tests/data/test.edf', + base / 'edf/tests/data/test.bdf', + base / 'brainvision/tests/data/test.vhdr', + base / 'kit/tests/data/test.sqd', + pytest.param(test_base / 'KIT/data_berlin.con', + marks=testing._pytest_mark()), +]) +def test_read_raw_supported(fname): + """Test supported file types.""" + read_raw(fname) + read_raw(fname, verbose=False) + raw = read_raw(fname, preload=True) + assert "data loaded" in str(raw) diff --git a/python/libs/mne/io/tests/test_reference.py 
b/python/libs/mne/io/tests/test_reference.py new file mode 100644 index 0000000..567c01e --- /dev/null +++ b/python/libs/mne/io/tests/test_reference.py @@ -0,0 +1,676 @@ +# Authors: Marijn van Vliet +# Alexandre Gramfort +# Teon Brooks +# +# License: BSD-3-Clause + +from contextlib import nullcontext +import itertools +import os.path as op + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose, assert_equal +import pytest + +from mne import (pick_channels, pick_types, Epochs, read_events, + set_eeg_reference, set_bipolar_reference, + add_reference_channels, create_info, make_sphere_model, + make_forward_solution, setup_volume_source_space, + pick_channels_forward, read_evokeds, + find_events) +from mne.epochs import BaseEpochs +from mne.io import RawArray, read_raw_fif +from mne.io.constants import FIFF +from mne.io.proj import _has_eeg_average_ref_proj, Projection +from mne.io.reference import _apply_reference +from mne.datasets import testing +from mne.utils import catch_logging, _record_warnings + +base_dir = op.join(op.dirname(__file__), 'data') +raw_fname = op.join(base_dir, 'test_raw.fif') + +data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample') +fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif') +eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif') +ave_fname = op.join(data_dir, 'sample_audvis-ave.fif') + + +def _test_reference(raw, reref, ref_data, ref_from): + """Test whether a reference has been correctly applied.""" + # Separate EEG channels from other channel types + picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads') + picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True, + stim=True, exclude='bads') + + # Calculate indices of reference channels + picks_ref = [raw.ch_names.index(ch) for ch in ref_from] + + # Get data + _data = raw._data + _reref = reref._data + + # Check that the ref has been properly computed + if ref_data is not None: + assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2)) + + # Get the raw EEG data and other channel data + raw_eeg_data = _data[..., picks_eeg, :] + raw_other_data = _data[..., picks_other, :] + + # Get the rereferenced EEG data + reref_eeg_data = _reref[..., picks_eeg, :] + reref_other_data = _reref[..., picks_other, :] + + # Check that non-EEG channels are untouched + assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15) + + # Undo rereferencing of EEG channels if possible + if ref_data is not None: + if isinstance(raw, BaseEpochs): + unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :] + else: + unref_eeg_data = reref_eeg_data + ref_data + assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15) + + +@testing.requires_testing_data +def test_apply_reference(): + """Test base function for rereferencing.""" + raw = read_raw_fif(fif_fname, preload=True) + + # Rereference raw data by creating a copy of original data + reref, ref_data = _apply_reference( + raw.copy(), ref_from=['EEG 001', 'EEG 002']) + assert (reref.info['custom_ref_applied']) + _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) + + # The CAR reference projection should have been removed by the function + assert (not _has_eeg_average_ref_proj(reref.info['projs'])) + + # Test that data is modified in place when copy=False + reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002']) + assert (raw is reref) + + # Test that disabling the reference does not change anything + reref, ref_data = _apply_reference(raw.copy(), []) + 
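# an empty ref_from list is a no-op, so the data must be bit-identical +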
assert_array_equal(raw._data, reref._data) + + # Test re-referencing Epochs object + raw = read_raw_fif(fif_fname, preload=False) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True) + reref, ref_data = _apply_reference( + epochs.copy(), ref_from=['EEG 001', 'EEG 002']) + assert (reref.info['custom_ref_applied']) + _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002']) + + # Test re-referencing Evoked object + evoked = epochs.average() + reref, ref_data = _apply_reference( + evoked.copy(), ref_from=['EEG 001', 'EEG 002']) + assert (reref.info['custom_ref_applied']) + _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002']) + + # Referencing needs data to be preloaded + raw_np = read_raw_fif(fif_fname, preload=False) + pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001']) + + # Test having inactive SSP projections that deal with channels involved + # during re-referencing + raw = read_raw_fif(fif_fname, preload=True) + raw.add_proj( + Projection( + active=False, + data=dict( + col_names=['EEG 001', 'EEG 002'], + row_names=None, + data=np.array([[1, 1]]), + ncol=2, + nrow=1 + ), + desc='test', + kind=1, + ) + ) + # Projection concerns channels mentioned in projector + with pytest.raises(RuntimeError, match='Inactive signal space'): + _apply_reference(raw, ['EEG 001']) + + # Projection does not concern channels mentioned in projector, no error + _apply_reference(raw, ['EEG 003'], ['EEG 004']) + + # CSD cannot be rereferenced + with raw.info._unlock(): + raw.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD + with pytest.raises(RuntimeError, match="Cannot set.* type 'CSD'"): + raw.set_eeg_reference() + + +@testing.requires_testing_data +def test_set_eeg_reference(): + """Test rereference eeg data.""" + raw = read_raw_fif(fif_fname, preload=True) + with raw.info._unlock(): + raw.info['projs'] = [] + + # Test setting an average reference projection + assert (not _has_eeg_average_ref_proj(raw.info['projs'])) + reref, ref_data = set_eeg_reference(raw, projection=True) + assert (_has_eeg_average_ref_proj(reref.info['projs'])) + assert (not reref.info['projs'][0]['active']) + assert (ref_data is None) + reref.apply_proj() + eeg_chans = [raw.ch_names[ch] + for ch in pick_types(raw.info, meg=False, eeg=True)] + _test_reference(raw, reref, ref_data, + [ch for ch in eeg_chans if ch not in raw.info['bads']]) + + # Test setting an average reference when one was already present + with pytest.warns(RuntimeWarning, match='untouched'): + reref, ref_data = set_eeg_reference(raw, copy=False, projection=True) + assert ref_data is None + + # Test setting an average reference on non-preloaded data + raw_nopreload = read_raw_fif(fif_fname, preload=False) + with raw_nopreload.info._unlock(): + raw_nopreload.info['projs'] = [] + reref, ref_data = set_eeg_reference(raw_nopreload, projection=True) + assert _has_eeg_average_ref_proj(reref.info['projs']) + assert not reref.info['projs'][0]['active'] + + # Rereference raw data by creating a copy of original data + reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True) + assert reref.info['custom_ref_applied'] + _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) + + # Test that data is modified in place when copy=False + reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], + copy=False) + assert raw is reref + + # Test moving from custom to average 
reference + reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002']) + reref, _ = set_eeg_reference(reref, projection=True) + assert _has_eeg_average_ref_proj(reref.info['projs']) + assert not reref.info['custom_ref_applied'] + + # When creating an average reference fails, make sure the + # custom_ref_applied flag remains untouched. + reref = raw.copy() + with reref.info._unlock(): + reref.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + reref.pick_types(meg=True, eeg=False) # Cause making average ref fail + pytest.raises(ValueError, set_eeg_reference, reref, projection=True) + assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON + + # Test moving from average to custom reference + reref, ref_data = set_eeg_reference(raw, projection=True) + reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002']) + assert not _has_eeg_average_ref_proj(reref.info['projs']) + assert len(reref.info['projs']) == 0 + assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON + + # Test that disabling the reference does not change the data + assert _has_eeg_average_ref_proj(raw.info['projs']) + reref, _ = set_eeg_reference(raw, []) + assert_array_equal(raw._data, reref._data) + assert not _has_eeg_average_ref_proj(reref.info['projs']) + + # make sure ref_channels=[] removes average reference projectors + assert _has_eeg_average_ref_proj(raw.info['projs']) + reref, _ = set_eeg_reference(raw, []) + assert (not _has_eeg_average_ref_proj(reref.info['projs'])) + + # Test that average reference gives identical results when calculated + # via SSP projection (projection=True) or directly (projection=False) + with raw.info._unlock(): + raw.info['projs'] = [] + reref_1, _ = set_eeg_reference(raw.copy(), projection=True) + reref_1.apply_proj() + reref_2, _ = set_eeg_reference(raw.copy(), projection=False) + assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15) + + # Test average reference without projection + reref, ref_data = set_eeg_reference(raw.copy(), ref_channels="average", + projection=False) + _test_reference(raw, reref, ref_data, eeg_chans) + + with pytest.raises(ValueError, match='supported for ref_channels="averag'): + set_eeg_reference(raw, [], True, True) + with pytest.raises(ValueError, match='supported for ref_channels="averag'): + set_eeg_reference(raw, ['EEG 001'], True, True) + + +@pytest.mark.parametrize('ch_type, msg', + [('auto', ('ECoG',)), + ('ecog', ('ECoG',)), + ('dbs', ('DBS',)), + (['ecog', 'dbs'], ('ECoG', 'DBS'))]) +def test_set_eeg_reference_ch_type(ch_type, msg): + """Test setting EEG reference for ECoG or DBS.""" + # gh-6454 + # gh-8739 added DBS + ch_names = ['ECOG01', 'ECOG02', 'DBS01', 'DBS02', 'MISC'] + rng = np.random.RandomState(0) + data = rng.randn(5, 1000) + raw = RawArray(data, create_info(ch_names, 1000., ['ecog'] * 2 + + ['dbs'] * 2 + ['misc'])) + if ch_type == 'auto': + ref_ch = ch_names[:2] + else: + ref_ch = raw.copy().pick(picks=ch_type).ch_names + with catch_logging() as log: + reref, ref_data = set_eeg_reference(raw.copy(), ch_type=ch_type, + verbose=True) + assert f"Applying a custom {msg}" in log.getvalue() + assert reref.info['custom_ref_applied'] # gh-7350 + _test_reference(raw, reref, ref_data, ref_ch) + with pytest.raises(ValueError, match='No channels supplied'): + set_eeg_reference(raw, ch_type='eeg') + # gh-8739 + raw2 = RawArray(data, create_info(5, 1000., ['mag'] * 4 + ['misc'])) + with pytest.raises(ValueError, match='No EEG, ECoG, sEEG or DBS channels ' + 'found to rereference.'): + 
set_eeg_reference(raw2, ch_type='auto') + + +@testing.requires_testing_data +def test_set_eeg_reference_rest(): + """Test setting a REST reference.""" + raw = read_raw_fif(fif_fname).crop(0, 1).pick_types( + meg=False, eeg=True, exclude=()).load_data() + raw.info['bads'] = ['EEG 057'] # should be excluded + same = [raw.ch_names.index(raw.info['bads'][0])] + picks = np.setdiff1d(np.arange(len(raw.ch_names)), same) + trans = None + sphere = make_sphere_model('auto', 'auto', raw.info) + src = setup_volume_source_space(pos=20., sphere=sphere, exclude=30.) + assert src[0]['nuse'] == 223 # low but fast + fwd = make_forward_solution(raw.info, trans, src, sphere) + orig_data = raw.get_data() + avg_data = raw.copy().set_eeg_reference('average').get_data() + assert_array_equal(avg_data[same], orig_data[same]) # not processed + raw.set_eeg_reference('REST', forward=fwd) + rest_data = raw.get_data() + assert_array_equal(rest_data[same], orig_data[same]) + # should be more similar to an avg ref than nose ref + orig_corr = np.corrcoef(rest_data[picks].ravel(), + orig_data[picks].ravel())[0, 1] + avg_corr = np.corrcoef(rest_data[picks].ravel(), + avg_data[picks].ravel())[0, 1] + assert -0.6 < orig_corr < -0.5 + assert 0.1 < avg_corr < 0.2 + # and applying an avg ref after should work + avg_after = raw.set_eeg_reference('average').get_data() + assert_allclose(avg_after, avg_data, atol=1e-12) + with pytest.raises(TypeError, match='forward when ref_channels="REST"'): + raw.set_eeg_reference('REST') + fwd_bad = pick_channels_forward(fwd, raw.ch_names[:-1]) + with pytest.raises(ValueError, match='Missing channels'): + raw.set_eeg_reference('REST', forward=fwd_bad) + # compare to FieldTrip + evoked = read_evokeds(ave_fname, baseline=(None, 0))[0] + evoked.info['bads'] = [] + evoked.pick_types(meg=False, eeg=True, exclude=()) + assert len(evoked.ch_names) == 60 + # Data obtained from FieldTrip with something like (after evoked.save'ing + # then scipy.io.savemat'ing fwd['sol']['data']): + # dat = ft_read_data('ft-ave.fif'); + # load('leadfield.mat', 'G'); + # dat_ref = ft_preproc_rereference(dat, 'all', 'rest', true, G); + # sprintf('%g ', dat_ref(:, 171)); + want = np.array('-3.3265e-05 -3.2419e-05 -3.18758e-05 -3.24079e-05 -3.39801e-05 -3.40573e-05 -3.24163e-05 -3.26896e-05 -3.33814e-05 -3.54734e-05 -3.51289e-05 -3.53229e-05 -3.51532e-05 -3.53149e-05 -3.4505e-05 -3.03462e-05 -2.81848e-05 -3.08895e-05 -3.27158e-05 -3.4605e-05 -3.47728e-05 -3.2459e-05 -3.06552e-05 -2.53255e-05 -2.69671e-05 -2.83425e-05 -3.12836e-05 -3.30965e-05 -3.34099e-05 -3.32766e-05 -3.32256e-05 -3.36385e-05 -3.20796e-05 -2.7108e-05 -2.47054e-05 -2.49589e-05 -2.7382e-05 -3.09774e-05 -3.12003e-05 -3.1246e-05 -3.07572e-05 -2.64942e-05 -2.25505e-05 -2.67194e-05 -2.86e-05 -2.94903e-05 -2.96249e-05 -2.92653e-05 -2.86472e-05 -2.81016e-05 -2.69737e-05 -2.48076e-05 -3.00473e-05 -2.73404e-05 -2.60153e-05 -2.41608e-05 -2.61937e-05 -2.5539e-05 -2.47104e-05 -2.35194e-05'.split(' '), float) # noqa: E501 + norm = np.linalg.norm(want) + idx = np.argmin(np.abs(evoked.times - 0.083)) + assert idx == 170 + old = evoked.data[:, idx].ravel() + exp_var = 1 - np.linalg.norm(want - old) / norm + assert 0.006 < exp_var < 0.008 + evoked.set_eeg_reference('REST', forward=fwd) + exp_var_old = 1 - np.linalg.norm(evoked.data[:, idx] - old) / norm + assert 0.005 < exp_var_old <= 0.009 + exp_var = 1 - np.linalg.norm(evoked.data[:, idx] - want) / norm + assert 0.995 < exp_var <= 1 + + +@testing.requires_testing_data +@pytest.mark.parametrize('inst_type', ('raw', 'epochs', 
'evoked')) +def test_set_bipolar_reference(inst_type): + """Test bipolar referencing.""" + raw = read_raw_fif(fif_fname, preload=True) + raw.apply_proj() + + if inst_type == 'raw': + inst = raw + del raw + elif inst_type in ['epochs', 'evoked']: + events = find_events(raw, stim_channel='STI 014') + epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True) + inst = epochs + if inst_type == 'evoked': + inst = epochs.average() + del epochs + + ch_info = {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'} + with pytest.raises(KeyError, match='key errantly present'): + set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) + ch_info.pop('extra') + reref = set_bipolar_reference( + inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) + assert (reref.info['custom_ref_applied']) + + # Compare result to a manual calculation + a = inst.copy().pick_channels(['EEG 001', 'EEG 002']) + a = a._data[..., 0, :] - a._data[..., 1, :] + b = reref.copy().pick_channels(['bipolar'])._data[..., 0, :] + assert_allclose(a, b) + + # Original channels should be replaced by a virtual one + assert ('EEG 001' not in reref.ch_names) + assert ('EEG 002' not in reref.ch_names) + assert ('bipolar' in reref.ch_names) + + # Check channel information + bp_info = reref.info['chs'][reref.ch_names.index('bipolar')] + an_info = inst.info['chs'][inst.ch_names.index('EEG 001')] + for key in bp_info: + if key == 'coil_type': + assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key + elif key == 'kind': + assert bp_info[key] == FIFF.FIFFV_EOG_CH, key + elif key != 'ch_name': + assert_equal(bp_info[key], an_info[key], err_msg=key) + + # Minimalist call + reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002') + assert ('EEG 001-EEG 002' in reref.ch_names) + + # Minimalist call with twice the same anode + reref = set_bipolar_reference(inst, + ['EEG 001', 'EEG 001', 'EEG 002'], + ['EEG 002', 'EEG 003', 'EEG 003']) + assert ('EEG 001-EEG 002' in reref.ch_names) + assert ('EEG 001-EEG 003' in reref.ch_names) + + # Set multiple references at once + reref = set_bipolar_reference( + inst, + ['EEG 001', 'EEG 003'], + ['EEG 002', 'EEG 004'], + ['bipolar1', 'bipolar2'], + [{'kind': FIFF.FIFFV_EOG_CH}, + {'kind': FIFF.FIFFV_EOG_CH}], + ) + a = inst.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']) + a = np.concatenate( + [a._data[..., :1, :] - a._data[..., 1:2, :], + a._data[..., 2:3, :] - a._data[..., 3:4, :]], + axis=-2 + ) + b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data + assert_allclose(a, b) + + # Test creating a bipolar reference that doesn't involve EEG channels: + # it should not set the custom_ref_applied flag + reref = set_bipolar_reference(inst, 'MEG 0111', 'MEG 0112', + ch_info={'kind': FIFF.FIFFV_MEG_CH}, + verbose='error') + assert (not reref.info['custom_ref_applied']) + assert ('MEG 0111-MEG 0112' in reref.ch_names) + + # Test a battery of invalid inputs + pytest.raises(ValueError, set_bipolar_reference, inst, + 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar') + pytest.raises(ValueError, set_bipolar_reference, inst, + ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar') + pytest.raises(ValueError, set_bipolar_reference, inst, + 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2']) + pytest.raises(ValueError, set_bipolar_reference, inst, + 'EEG 001', 'EEG 002', 'bipolar', + ch_info=[{'foo': 'bar'}, {'foo': 'bar'}]) + pytest.raises(ValueError, set_bipolar_reference, inst, + 'EEG 001', 'EEG 002', ch_name='EEG 003') + + +def _check_channel_names(inst, ref_names): + """Check channel names.""" + 
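# accepts a single name or a list of names, e.g. + # _check_channel_names(raw, 'Ref') or _check_channel_names(raw, ['M1', 'M2']) +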
if isinstance(ref_names, str): + ref_names = [ref_names] + + # Test that the names of the reference channels are present in `ch_names` + ref_idx = pick_channels(inst.info['ch_names'], ref_names) + assert len(ref_idx) == len(ref_names) + + # Test that the names of the reference channels are present in the `chs` + # list + inst.info._check_consistency() # Should raise no exceptions + + +@testing.requires_testing_data +def test_add_reference(): + """Test adding a reference.""" + raw = read_raw_fif(fif_fname, preload=True) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # check if channel already exists + pytest.raises(ValueError, add_reference_channels, + raw, raw.info['ch_names'][0]) + # add reference channel to Raw + raw_ref = add_reference_channels(raw, 'Ref', copy=True) + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + _check_channel_names(raw_ref, 'Ref') + + orig_nchan = raw.info['nchan'] + raw = add_reference_channels(raw, 'Ref', copy=False) + assert_array_equal(raw._data, raw_ref._data) + assert_equal(raw.info['nchan'], orig_nchan + 1) + _check_channel_names(raw, 'Ref') + + # for Neuromag fif's, the reference electrode location is placed in + # elements [3:6] of each "data" electrode location + assert_allclose(raw.info['chs'][-1]['loc'][:3], + raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6) + + ref_idx = raw.ch_names.index('Ref') + ref_data, _ = raw[ref_idx] + assert_array_equal(ref_data, 0) + + # add reference channel to Raw when no digitization points exist + raw = read_raw_fif(fif_fname).crop(0, 1).load_data() + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + del raw.info['dig'] + + raw_ref = add_reference_channels(raw, 'Ref', copy=True) + + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + _check_channel_names(raw_ref, 'Ref') + + orig_nchan = raw.info['nchan'] + raw = add_reference_channels(raw, 'Ref', copy=False) + assert_array_equal(raw._data, raw_ref._data) + assert_equal(raw.info['nchan'], orig_nchan + 1) + _check_channel_names(raw, 'Ref') + + # Test adding an existing channel as reference channel + pytest.raises(ValueError, add_reference_channels, raw, + raw.info['ch_names'][0]) + + # add two reference channels to Raw + raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True) + _check_channel_names(raw_ref, ['M1', 'M2']) + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + assert_array_equal(raw_ref._data[-2:, :], 0) + + raw = add_reference_channels(raw, ['M1', 'M2'], copy=False) + _check_channel_names(raw, ['M1', 'M2']) + ref_idx = raw.ch_names.index('M1') + ref_idy = raw.ch_names.index('M2') + ref_data, _ = raw[[ref_idx, ref_idy]] + assert_array_equal(ref_data, 0) + + # add reference channel to epochs + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True) + # default: proj=True, after which adding a Ref channel is prohibited + pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref') + + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True, proj='delayed') + epochs_ref = 
add_reference_channels(epochs, 'Ref', copy=True) + + assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) + _check_channel_names(epochs_ref, 'Ref') + ref_idx = epochs_ref.ch_names.index('Ref') + ref_data = epochs_ref.get_data()[:, ref_idx, :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(epochs.info, meg=False, eeg=True) + assert_array_equal(epochs.get_data()[:, picks_eeg, :], + epochs_ref.get_data()[:, picks_eeg, :]) + + # add two reference channels to epochs + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True, proj='delayed') + with pytest.warns(RuntimeWarning, match='reference channels are ignored'): + epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True) + assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) + _check_channel_names(epochs_ref, ['M1', 'M2']) + ref_idx = epochs_ref.ch_names.index('M1') + ref_idy = epochs_ref.ch_names.index('M2') + assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1') + assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2') + ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(epochs.info, meg=False, eeg=True) + assert_array_equal(epochs.get_data()[:, picks_eeg, :], + epochs_ref.get_data()[:, picks_eeg, :]) + + # add reference channel to evoked + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True, proj='delayed') + evoked = epochs.average() + evoked_ref = add_reference_channels(evoked, 'Ref', copy=True) + assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1) + _check_channel_names(evoked_ref, 'Ref') + ref_idx = evoked_ref.ch_names.index('Ref') + ref_data = evoked_ref.data[ref_idx, :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(evoked.info, meg=False, eeg=True) + assert_array_equal(evoked.data[picks_eeg, :], + evoked_ref.data[picks_eeg, :]) + + # add two reference channels to evoked + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, + picks=picks_eeg, preload=True, proj='delayed') + evoked = epochs.average() + with pytest.warns(RuntimeWarning, match='reference channels are ignored'): + evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True) + assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) + _check_channel_names(evoked_ref, ['M1', 'M2']) + ref_idx = evoked_ref.ch_names.index('M1') + ref_idy = evoked_ref.ch_names.index('M2') + ref_data = evoked_ref.data[[ref_idx, ref_idy], :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(evoked.info, meg=False, eeg=True) + assert_array_equal(evoked.data[picks_eeg, :], + evoked_ref.data[picks_eeg, :]) + + # Test invalid inputs + raw = read_raw_fif(fif_fname, preload=False) + with pytest.raises(RuntimeError, match='loaded'): + add_reference_channels(raw, ['Ref']) + raw.load_data() + 
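+    # Both re-adding an existing channel name and passing a non-string
+    # channel argument must be rejected once the data are loaded.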
with pytest.raises(ValueError, match='Channel.*already.*'): + add_reference_channels(raw, raw.ch_names[:1]) + with pytest.raises(TypeError, match='instance of'): + add_reference_channels(raw, 1) + + +@pytest.mark.parametrize('n_ref', (1, 2)) +def test_add_reorder(n_ref): + """Test that a reference channel can be added and then data reordered.""" + # gh-8300 + raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj().pick('eeg') + assert len(raw.ch_names) == 60 + chs = ['EEG %03d' % (60 + ii) for ii in range(1, n_ref)] + ['EEG 000'] + with pytest.raises(RuntimeError, match='preload'): + with _record_warnings(): # ignore multiple warning + add_reference_channels(raw, chs, copy=False) + raw.load_data() + if n_ref == 1: + ctx = nullcontext() + else: + assert n_ref == 2 + ctx = pytest.warns(RuntimeWarning, match='locations of multiple') + with ctx: + add_reference_channels(raw, chs, copy=False) + data = raw.get_data() + assert_array_equal(data[-1], 0.) + assert raw.ch_names[-n_ref:] == chs + raw.reorder_channels(raw.ch_names[-1:] + raw.ch_names[:-1]) + assert raw.ch_names == ['EEG %03d' % ii for ii in range(60 + n_ref)] + data_new = raw.get_data() + data_new = np.concatenate([data_new[1:], data_new[:1]]) + assert_allclose(data, data_new) + + +def test_bipolar_combinations(): + """Test bipolar channel generation.""" + ch_names = ['CH' + str(ni + 1) for ni in range(10)] + info = create_info( + ch_names=ch_names, sfreq=1000., ch_types=['eeg'] * len(ch_names)) + raw_data = np.random.randn(len(ch_names), 1000) + raw = RawArray(raw_data, info) + + def _check_bipolar(raw_test, ch_a, ch_b): + picks = [raw_test.ch_names.index(ch_a + '-' + ch_b)] + get_data_res = raw_test.get_data(picks=picks)[0, :] + manual_a = raw_data[ch_names.index(ch_a), :] + manual_b = raw_data[ch_names.index(ch_b), :] + assert_array_equal(get_data_res, manual_a - manual_b) + + # test classic EOG/ECG bipolar reference (only two channels per pair). + raw_test = set_bipolar_reference(raw, ['CH2'], ['CH1'], copy=True) + _check_bipolar(raw_test, 'CH2', 'CH1') + + # test all combinations. + a_channels, b_channels = zip(*itertools.combinations(ch_names, 2)) + a_channels, b_channels = list(a_channels), list(b_channels) + raw_test = set_bipolar_reference(raw, a_channels, b_channels, copy=True) + for ch_a, ch_b in zip(a_channels, b_channels): + _check_bipolar(raw_test, ch_a, ch_b) + # check if reference channels have been dropped. + assert (len(raw_test.ch_names) == len(a_channels)) + + raw_test = set_bipolar_reference( + raw, a_channels, b_channels, drop_refs=False, copy=True) + # check if reference channels have been kept correctly. + assert (len(raw_test.ch_names) == len(a_channels) + len(ch_names)) + for idx, ch_label in enumerate(ch_names): + manual_ch = raw_data[np.newaxis, idx] + assert_array_equal(raw_test.get_data(ch_label), manual_ch) + + # test bipolars with a channel in both list (anode & cathode). 
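+    # (CH1 is cathode of the first pair and anode of the second; each
+    # virtual channel is computed from the original, unmodified data)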
+ raw_test = set_bipolar_reference( + raw, ['CH2', 'CH1'], ['CH1', 'CH2'], copy=True) + _check_bipolar(raw_test, 'CH2', 'CH1') + _check_bipolar(raw_test, 'CH1', 'CH2') diff --git a/python/libs/mne/io/tests/test_show_fiff.py b/python/libs/mne/io/tests/test_show_fiff.py new file mode 100644 index 0000000..6e7228e --- /dev/null +++ b/python/libs/mne/io/tests/test_show_fiff.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Author: Eric Larson +# +# License: BSD-3-Clause + +import os.path as op + +from mne.io import show_fiff + +base_dir = op.join(op.dirname(__file__), 'data') +fname_evoked = op.join(base_dir, 'test-ave.fif') +fname_raw = op.join(base_dir, 'test_raw.fif') +fname_c_annot = op.join(base_dir, 'test_raw-annot.fif') + + +def test_show_fiff(): + """Test show_fiff.""" + # this is not exhaustive, but hopefully bugs will be found in use + info = show_fiff(fname_evoked) + assert 'BAD' not in info + keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM', + 'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE', + 'FIFF_EPOCH', 'COORD_TRANS'] + assert all(key in info for key in keys) + info = show_fiff(fname_raw, read_limit=1024) + assert 'BAD' not in info + info = show_fiff(fname_c_annot) + assert 'BAD' not in info + assert '>B' in info, info diff --git a/python/libs/mne/io/tests/test_utils.py b/python/libs/mne/io/tests/test_utils.py new file mode 100644 index 0000000..6bfe5ae --- /dev/null +++ b/python/libs/mne/io/tests/test_utils.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +"""Run tests for the utilities.""" +# Author: Stefan Appelhoff +# +# License: BSD-3-Clause + +from mne.io.utils import _check_orig_units + + +def test_check_orig_units(): + """Test the checking of original units.""" + orig_units = dict(FC1='nV', Hfp3erz='n/a', Pz='uV', greekMu='μV', + microSign='µV') + orig_units = _check_orig_units(orig_units) + assert orig_units['FC1'] == 'nV' + assert orig_units['Hfp3erz'] == 'n/a' + assert orig_units['Pz'] == 'µV' + assert orig_units['greekMu'] == 'µV' + assert orig_units['microSign'] == 'µV' diff --git a/python/libs/mne/io/tests/test_what.py b/python/libs/mne/io/tests/test_what.py new file mode 100644 index 0000000..f94f401 --- /dev/null +++ b/python/libs/mne/io/tests/test_what.py @@ -0,0 +1,52 @@ +# Authors: Eric Larson +# License: BSD + +import glob +import os.path as op + +import numpy as np +import pytest + +from mne import what, create_info +from mne.datasets import testing +from mne.io import RawArray +from mne.preprocessing import ICA +from mne.utils import requires_sklearn, _record_warnings + +data_path = testing.data_path(download=False) + + +@pytest.mark.slowtest +@requires_sklearn +@testing.requires_testing_data +def test_what(tmp_path, verbose_debug): + """Test mne.what.""" + # ICA + ica = ICA(max_iter=1) + raw = RawArray(np.random.RandomState(0).randn(3, 10), + create_info(3, 1000., 'eeg')) + with _record_warnings(): # convergence sometimes + ica.fit(raw) + fname = op.join(str(tmp_path), 'x-ica.fif') + ica.save(fname) + assert what(fname) == 'ica' + # test files + fnames = glob.glob( + op.join(data_path, 'MEG', 'sample', '*.fif')) + fnames += glob.glob( + op.join(data_path, 'subjects', 'sample', 'bem', '*.fif')) + fnames = sorted(fnames) + want_dict = dict(eve='events', ave='evoked', cov='cov', inv='inverse', + fwd='forward', trans='transform', proj='proj', + raw='raw', meg='raw', sol='bem solution', + bem='bem surfaces', src='src', dense='bem surfaces', + sparse='bem surfaces', head='bem surfaces', + fiducials='fiducials') + for fname in fnames: + kind = 
op.splitext(fname)[0].split('-')[-1]
+        if len(kind) > 5:
+            kind = kind.split('_')[-1]
+        this = what(fname)
+        assert this == want_dict[kind]
+    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave_xfit.dip')
+    assert what(fname) == 'unknown'
diff --git a/python/libs/mne/io/tests/test_write.py b/python/libs/mne/io/tests/test_write.py
new file mode 100644
index 0000000..2a67566
--- /dev/null
+++ b/python/libs/mne/io/tests/test_write.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+"""Run tests for writing."""
+# Author: Eric Larson
+#
+# License: BSD-3-Clause
+
+import pytest
+
+from mne.io.constants import FIFF
+from mne.io.write import start_file, write_int
+
+
+def test_write_int(tmp_path):
+    """Test that write_int raises an error on bad values."""
+    with start_file(tmp_path / 'temp.fif') as fid:
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483647])  # 2 ** 31 - 1
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [])  # empty list is okay
+        with pytest.raises(TypeError, match=r'.*exceeds max.*EVENT_LIST\)'):
+            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483648])  # 2 ** 31
+        with pytest.raises(TypeError, match='Cannot safely write'):
+            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [0.])  # float
diff --git a/python/libs/mne/io/tree.py b/python/libs/mne/io/tree.py
new file mode 100644
index 0000000..16293df
--- /dev/null
+++ b/python/libs/mne/io/tree.py
@@ -0,0 +1,153 @@
+# Authors: Alexandre Gramfort
+#          Matti Hämäläinen
+#
+# License: BSD-3-Clause
+
+import numpy as np
+
+from .constants import FIFF
+from .tag import Tag
+from .tag import read_tag
+from .write import write_id, start_block, end_block, _write
+from ..utils import logger, verbose
+
+
+def dir_tree_find(tree, kind):
+    """Find nodes of the given kind from a directory tree structure.
+
+    Parameters
+    ----------
+    tree : dict
+        Directory tree.
+    kind : int
+        Kind to find.
+
+    Returns
+    -------
+    nodes : list
+        List of matching nodes.
+    """
+    nodes = []
+
+    if isinstance(tree, list):
+        for t in tree:
+            nodes += dir_tree_find(t, kind)
+    else:
+        # Am I desirable myself?
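+        # (each node is a dict with keys 'block', 'id', 'parent_id', 'nent',
+        #  'nchild', 'directory' and 'children' -- see make_dir_tree below;
+        #  e.g. dir_tree_find(tree, FIFF.FIFFB_MEAS) collects every
+        #  measurement block in the file)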
+ if tree['block'] == kind: + nodes.append(tree) + + # Search the subtrees + for child in tree['children']: + nodes += dir_tree_find(child, kind) + return nodes + + +@verbose +def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): + """Create the directory tree structure.""" + FIFF_BLOCK_START = 104 + FIFF_BLOCK_END = 105 + FIFF_FILE_ID = 100 + FIFF_BLOCK_ID = 103 + FIFF_PARENT_BLOCK_ID = 110 + + if directory[start].kind == FIFF_BLOCK_START: + tag = read_tag(fid, directory[start].pos) + block = tag.data + else: + block = 0 + + logger.debug(' ' * indent + 'start { %d' % block) + + this = start + + tree = dict() + tree['block'] = block + tree['id'] = None + tree['parent_id'] = None + tree['nent'] = 0 + tree['nchild'] = 0 + tree['directory'] = directory[this] + tree['children'] = [] + + while this < len(directory): + if directory[this].kind == FIFF_BLOCK_START: + if this != start: + child, this = make_dir_tree(fid, directory, this, indent + 1) + tree['nchild'] += 1 + tree['children'].append(child) + elif directory[this].kind == FIFF_BLOCK_END: + tag = read_tag(fid, directory[start].pos) + if tag.data == block: + break + else: + tree['nent'] += 1 + if tree['nent'] == 1: + tree['directory'] = list() + tree['directory'].append(directory[this]) + + # Add the id information if available + if block == 0: + if directory[this].kind == FIFF_FILE_ID: + tag = read_tag(fid, directory[this].pos) + tree['id'] = tag.data + else: + if directory[this].kind == FIFF_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree['id'] = tag.data + elif directory[this].kind == FIFF_PARENT_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree['parent_id'] = tag.data + + this += 1 + + # Eliminate the empty directory + if tree['nent'] == 0: + tree['directory'] = None + + logger.debug(' ' * (indent + 1) + 'block = %d nent = %d nchild = %d' + % (tree['block'], tree['nent'], tree['nchild'])) + logger.debug(' ' * indent + 'end } %d' % block) + last = this + return tree, last + + +############################################################################### +# Writing + +def copy_tree(fidin, in_id, nodes, fidout): + """Copy directory subtrees from fidin to fidout.""" + if len(nodes) <= 0: + return + + if not isinstance(nodes, list): + nodes = [nodes] + + for node in nodes: + start_block(fidout, node['block']) + if node['id'] is not None: + if in_id is not None: + write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id) + + write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id) + write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id']) + + if node['directory'] is not None: + for d in node['directory']: + # Do not copy these tags + if d.kind == FIFF.FIFF_BLOCK_ID or \ + d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \ + d.kind == FIFF.FIFF_PARENT_FILE_ID: + continue + + # Read and write tags, pass data through transparently + fidin.seek(d.pos, 0) + tag = Tag(*np.fromfile(fidin, ('>i4,>I4,>i4,>i4'), 1)[0]) + tag.data = np.fromfile(fidin, '>B', tag.size) + _write(fidout, tag.data, tag.kind, 1, tag.type, '>B') + + for child in node['children']: + copy_tree(fidin, in_id, child, fidout) + + end_block(fidout, node['block']) diff --git a/python/libs/mne/io/utils.py b/python/libs/mne/io/utils.py new file mode 100644 index 0000000..4db2b40 --- /dev/null +++ b/python/libs/mne/io/utils.py @@ -0,0 +1,315 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# Martin Luessi +# Denis Engemann +# Teon Brooks +# Marijn van Vliet +# Mainak Jas +# Stefan Appelhoff +# +# License: BSD-3-Clause + +import numpy as np +import os 
+import os.path as op
+
+from .constants import FIFF
+from .meas_info import _get_valid_units
+
+
+def _check_orig_units(orig_units):
+    """Check original units from a raw file.
+
+    Units that are close to a valid_unit but not equal can be remapped to fit
+    into the valid_units. All other units that are not valid will be replaced
+    with "n/a".
+
+    Parameters
+    ----------
+    orig_units : dict
+        Dictionary mapping channel names to their units as specified in
+        the header file. Example: {'FC1': 'nV'}
+
+    Returns
+    -------
+    orig_units_remapped : dict
+        Dictionary mapping channel names to their VALID units as specified in
+        the header file. Invalid units are now labeled "n/a".
+        Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'}
+    """
+    if orig_units is None:
+        return
+    valid_units = _get_valid_units()
+    valid_units_lowered = [unit.lower() for unit in valid_units]
+    orig_units_remapped = dict(orig_units)
+    for ch_name, unit in orig_units.items():
+
+        # Be lenient: we ignore case for now.
+        if unit.lower() in valid_units_lowered:
+            continue
+
+        # Common "invalid units" can be remapped to their valid equivalent
+        remap_dict = dict()
+        remap_dict['uv'] = 'µV'
+        remap_dict['μv'] = 'µV'  # greek letter mu vs micro sign. use micro
+        remap_dict['\x83\xeav'] = 'µV'  # for shift-jis mu, use micro
+        if unit.lower() in remap_dict:
+            orig_units_remapped[ch_name] = remap_dict[unit.lower()]
+            continue
+
+        # Some units cannot be saved, they are invalid: assign "n/a"
+        orig_units_remapped[ch_name] = 'n/a'
+
+    return orig_units_remapped
+
+
+def _find_channels(ch_names, ch_type='EOG'):
+    """Find channels of type ``ch_type`` by name (EOG also matches 'EYE')."""
+    substrings = (ch_type,)
+    substrings = [s.upper() for s in substrings]
+    if ch_type == 'EOG':
+        substrings = ('EOG', 'EYE')
+    eog_idx = [idx for idx, ch in enumerate(ch_names) if
+               any(substring in ch.upper() for substring in substrings)]
+    return eog_idx
+
+
+def _mult_cal_one(data_view, one, idx, cals, mult):
+    """Take a chunk of raw data, multiply by mult or cals, and store."""
+    one = np.asarray(one, dtype=data_view.dtype)
+    assert data_view.shape[1] == one.shape[1], (data_view.shape[1], one.shape[1])  # noqa: E501
+    if mult is not None:
+        assert mult.ndim == one.ndim == 2
+        data_view[:] = mult @ one[idx]
+    else:
+        assert cals is not None
+        if isinstance(idx, slice):
+            data_view[:] = one[idx]
+        else:
+            # faster than doing one = one[idx]
+            np.take(one, idx, axis=0, out=data_view)
+        data_view *= cals
+
+
+def _blk_read_lims(start, stop, buf_len):
+    """Deal with indexing in the middle of a data block.
+
+    Parameters
+    ----------
+    start : int
+        Starting index.
+    stop : int
+        Ending index (exclusive).
+    buf_len : int
+        Buffer size in samples.
+
+    Returns
+    -------
+    block_start_idx : int
+        The first block to start reading from.
+    r_lims : list
+        The read limits.
+    d_lims : list
+        The write limits.
+
+    Notes
+    -----
+    Consider this example::
+
+        >>> start, stop, buf_len = 2, 27, 10
+
+                    +---------+---------+---------
+    File structure: |  buf0   |  buf1   |  buf2   |
+                    +---------+---------+---------
+    File time:      0        10        20        30
+                    +---------+---------+---------
+    Requested time:    2                       27
+
+                    |                             |
+                blockstart                    blockstop
+                      |                         |
+                    start                     stop
+
+    We need 27 - 2 = 25 samples (per channel) to store our data, and
+    we need to read from 3 buffers (30 samples) to get all of our data.
+
+    On all reads but the first, the data we read starts at
+    the first sample of the buffer. On all reads but the last,
+    the data we read ends on the last sample of the buffer.
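+
+    (With these numbers the 25 output samples come from three reads: 8 from
+    buf0, 10 from buf1, and 7 from buf2.)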
+ + We call ``this_data`` the variable that stores the current buffer's data, + and ``data`` the variable that stores the total output. + + On the first read, we need to do this:: + + >>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP + + On the second read, we need to do:: + + >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP + + On the final read, we need to do:: + + >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP + + This function encapsulates this logic to allow a loop over blocks, where + data is stored using the following limits:: + + >>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP + + """ # noqa: E501 + # this is used to deal with indexing in the middle of a sampling period + assert all(isinstance(x, int) for x in (start, stop, buf_len)) + block_start_idx = (start // buf_len) + block_start = block_start_idx * buf_len + last_used_samp = stop - 1 + block_stop = last_used_samp - last_used_samp % buf_len + buf_len + read_size = block_stop - block_start + n_blk = read_size // buf_len + (read_size % buf_len != 0) + start_offset = start - block_start + end_offset = block_stop - stop + d_lims = np.empty((n_blk, 2), int) + r_lims = np.empty((n_blk, 2), int) + for bi in range(n_blk): + # Triage start (sidx) and end (eidx) indices for + # data (d) and read (r) + if bi == 0: + d_sidx = 0 + r_sidx = start_offset + else: + d_sidx = bi * buf_len - start_offset + r_sidx = 0 + if bi == n_blk - 1: + d_eidx = stop - start + r_eidx = buf_len - end_offset + else: + d_eidx = (bi + 1) * buf_len - start_offset + r_eidx = buf_len + d_lims[bi] = [d_sidx, d_eidx] + r_lims[bi] = [r_sidx, r_eidx] + return block_start_idx, r_lims, d_lims + + +def _file_size(fname): + """Get the file size in bytes.""" + with open(fname, 'rb') as f: + f.seek(0, os.SEEK_END) + return f.tell() + + +def _read_segments_file(raw, data, idx, fi, start, stop, cals, mult, + dtype, n_channels=None, offset=0, trigger_ch=None): + """Read a chunk of raw data.""" + if n_channels is None: + n_channels = raw._raw_extras[fi]['orig_nchan'] + + n_bytes = np.dtype(dtype).itemsize + # data_offset and data_left count data samples (channels x time points), + # not bytes. 
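+    # (strictly, data_offset below is in bytes -- note the n_bytes factor --
+    #  while data_left is in samples)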
+ data_offset = n_channels * start * n_bytes + offset + data_left = (stop - start) * n_channels + + # Read up to 100 MB of data at a time, block_size is in data samples + block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels + block_size = min(data_left, block_size) + with open(raw._filenames[fi], 'rb', buffering=0) as fid: + fid.seek(data_offset) + # extract data in chunks + for sample_start in np.arange(0, data_left, block_size) // n_channels: + count = min(block_size, data_left - sample_start * n_channels) + block = np.fromfile(fid, dtype, count) + if block.size != count: + raise RuntimeError('Incorrect number of samples (%s != %s), ' + 'please report this error to MNE-Python ' + 'developers' % (block.size, count)) + block = block.reshape(n_channels, -1, order='F') + n_samples = block.shape[1] # = count // n_channels + sample_stop = sample_start + n_samples + if trigger_ch is not None: + stim_ch = trigger_ch[start:stop][sample_start:sample_stop] + block = np.vstack((block, stim_ch)) + data_view = data[:, sample_start:sample_stop] + _mult_cal_one(data_view, block, idx, cals, mult) + + +def read_str(fid, count=1): + """Read string from a binary file in a python version compatible way.""" + dtype = np.dtype('>S%i' % count) + string = fid.read(dtype.itemsize) + data = np.frombuffer(string, dtype=dtype)[0] + bytestr = b''.join([data[0:data.index(b'\x00') if + b'\x00' in data else count]]) + + return str(bytestr.decode('ascii')) # Return native str type for Py2/3 + + +def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): + """Initialize info['chs'] for eeg channels.""" + chs = list() + for idx, ch_name in enumerate(ch_names): + if ch_name in eog or idx in eog: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EOG_CH + elif ch_name in ecg or idx in ecg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_ECG_CH + elif ch_name in emg or idx in emg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EMG_CH + elif ch_name in misc or idx in misc: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_MISC_CH + else: + coil_type = ch_coil + kind = ch_kind + + chan_info = {'cal': cals[idx], 'logno': idx + 1, 'scanno': idx + 1, + 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, + 'ch_name': ch_name, 'unit': FIFF.FIFF_UNIT_V, + 'coord_frame': FIFF.FIFFV_COORD_HEAD, + 'coil_type': coil_type, 'kind': kind, 'loc': np.zeros(12)} + chs.append(chan_info) + return chs + + +def _synthesize_stim_channel(events, n_samples): + """Synthesize a stim channel from events read from an event file. + + Parameters + ---------- + events : array, shape (n_events, 3) + Each row representing an event. + n_samples : int + The number of samples. + + Returns + ------- + stim_channel : array, shape (n_samples,) + An array containing the whole recording's event marking. 
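+
+    Examples
+    --------
+    A single event with onset 2, duration 3, and trigger value 5::
+
+        >>> _synthesize_stim_channel(np.array([[2, 3, 5]]), 10)  # doctest: +SKIP
+        array([0, 0, 5, 5, 5, 0, 0, 0, 0, 0])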
+ """ + # select events overlapping buffer + events = events.copy() + events[events[:, 1] < 1, 1] = 1 + # create output buffer + stim_channel = np.zeros(n_samples, int) + for onset, duration, trigger in events: + stim_channel[onset:onset + duration] = trigger + return stim_channel + + +def _construct_bids_filename(base, ext, part_idx, validate=True): + """Construct a BIDS compatible filename for split files.""" + # insert index in filename + dirname = op.dirname(base) + base = op.basename(base) + deconstructed_base = base.split('_') + if len(deconstructed_base) < 2 and validate: + raise ValueError('Filename base must end with an underscore followed ' + f'by the modality (e.g., _eeg or _meg), got {base}') + suffix = deconstructed_base[-1] + base = '_'.join(deconstructed_base[:-1]) + use_fname = '{}_split-{:02}_{}{}'.format(base, part_idx, suffix, ext) + if dirname: + use_fname = op.join(dirname, use_fname) + return use_fname diff --git a/python/libs/mne/io/what.py b/python/libs/mne/io/what.py new file mode 100644 index 0000000..bf3305d --- /dev/null +++ b/python/libs/mne/io/what.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson +# +# License: BSD-3-Clause + +from collections import OrderedDict + +from ..fixes import _get_args +from ..utils import _check_fname, logger + + +def what(fname): + """Try to determine the type of the FIF file. + + Parameters + ---------- + fname : str + The filename. Should end in .fif or .fif.gz. + + Returns + ------- + what : str | None + The type of the file. Will be 'unknown' if it could not be determined. + + Notes + ----- + .. versionadded:: 0.19 + """ + from .fiff import read_raw_fif + from ..epochs import read_epochs + from ..evoked import read_evokeds + from ..preprocessing import read_ica + from ..forward import read_forward_solution + from ..minimum_norm import read_inverse_operator + from ..source_space import read_source_spaces + from ..bem import read_bem_solution, read_bem_surfaces + from ..cov import read_cov + from ..transforms import read_trans + from ..event import read_events + from ..proj import read_proj + from .meas_info import read_fiducials + _check_fname(fname, overwrite='read', must_exist=True) + checks = OrderedDict() + checks['raw'] = read_raw_fif + checks['ica'] = read_ica + checks['epochs'] = read_epochs + checks['evoked'] = read_evokeds + checks['forward'] = read_forward_solution + checks['inverse'] = read_inverse_operator + checks['src'] = read_source_spaces + checks['bem solution'] = read_bem_solution + checks['bem surfaces'] = read_bem_surfaces + checks['cov'] = read_cov + checks['transform'] = read_trans + checks['events'] = read_events + checks['fiducials'] = read_fiducials + checks['proj'] = read_proj + for what, func in checks.items(): + args = _get_args(func) + assert 'verbose' in args, func + kwargs = dict(verbose='error') + if 'preload' in args: + kwargs['preload'] = False + try: + func(fname, **kwargs) + except Exception as exp: + logger.debug('Not %s: %s' % (what, exp)) + else: + return what + return 'unknown' diff --git a/python/libs/mne/io/write.py b/python/libs/mne/io/write.py new file mode 100644 index 0000000..aa56088 --- /dev/null +++ b/python/libs/mne/io/write.py @@ -0,0 +1,486 @@ +# Authors: Alexandre Gramfort +# Matti Hämäläinen +# +# License: BSD-3-Clause + +from contextlib import contextmanager +from gzip import GzipFile +import os.path as op +import re +import time +import uuid + +import numpy as np + +from .constants import FIFF +from ..utils import logger, _file_like +from ..utils.numerics 
import _cal_to_julian + +# We choose a "magic" date to store (because meas_date is obligatory) +# to treat as meas_date=None. This one should be impossible for systems +# to write -- the second field is microseconds, so anything >= 1e6 +# should be moved into the first field (seconds). +DATE_NONE = (0, 2 ** 31 - 1) + + +def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype): + """Write data.""" + if isinstance(data, np.ndarray): + data_size *= data.size + + # XXX for string types the data size is used as + # computed in ``write_string``. + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_TYPE, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(data, dtype=dtype).tobytes()) + + +def _get_split_size(split_size): + """Convert human-readable bytes to machine-readable bytes.""" + if isinstance(split_size, str): + exp = dict(MB=20, GB=30).get(split_size[-2:], None) + if exp is None: + raise ValueError('split_size has to end with either' + '"MB" or "GB"') + split_size = int(float(split_size[:-2]) * 2 ** exp) + + if split_size > 2147483648: + raise ValueError('split_size cannot be larger than 2GB') + return split_size + + +_NEXT_FILE_BUFFER = 1048576 # 2 ** 20 extra cushion for last post-data tags + + +def write_nop(fid, last=False): + """Write a FIFF_NOP.""" + fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tobytes()) + fid.write(np.array(0, dtype='>i4').tobytes()) + next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ + fid.write(np.array(next_, dtype='>i4').tobytes()) + + +INT32_MAX = 2147483647 + + +def write_int(fid, kind, data): + """Write a 32-bit integer tag to a fif file.""" + data_size = 4 + data = np.asarray(data) + if data.dtype.kind not in 'uib' and data.size > 0: + raise TypeError( + f'Cannot safely write data with dtype {data.dtype} as int') + max_val = data.max() if data.size > 0 else 0 + if max_val > INT32_MAX: + raise TypeError( + f'Value {max_val} exceeds maximum allowed ({INT32_MAX}) for ' + f'tag {kind}') + data = data.astype('>i4').T + _write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4') + + +def write_double(fid, kind, data): + """Write a double-precision floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype='>f8').T + _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8') + + +def write_float(fid, kind, data): + """Write a single-precision floating point tag to a fif file.""" + data_size = 4 + data = np.array(data, dtype='>f4').T + _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4') + + +def write_dau_pack16(fid, kind, data): + """Write a dau_pack16 tag to a fif file.""" + data_size = 2 + data = np.array(data, dtype='>i2').T + _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2') + + +def write_complex64(fid, kind, data): + """Write a 64 bit complex floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype='>c8').T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8') + + +def write_complex128(fid, kind, data): + """Write a 128 bit complex floating point tag to a fif file.""" + data_size = 16 + data = np.array(data, dtype='>c16').T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c16') + + +def write_julian(fid, kind, data): + """Write a Julian-formatted date to a FIF file.""" + assert len(data) == 3 + data_size = 4 + jd = 
np.sum(_cal_to_julian(*data)) + data = np.array(jd, dtype='>i4') + _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4') + + +def write_string(fid, kind, data): + """Write a string tag.""" + str_data = data.encode('latin1') + data_size = len(str_data) # therefore compute size here + my_dtype = '>a' # py2/3 compatible on writing -- don't ask me why + if data_size > 0: + _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype) + + +def write_name_list(fid, kind, data): + """Write a colon-separated list of names. + + Parameters + ---------- + data : list of strings + """ + write_string(fid, kind, ':'.join(data)) + + +def write_float_matrix(fid, kind, mat): + """Write a single-precision floating-point matrix tag.""" + FIFFT_MATRIX = 1 << 30 + FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX + + data_size = 4 * mat.size + 4 * (mat.ndim + 1) + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(mat, dtype='>f4').tobytes()) + + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[:mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def write_double_matrix(fid, kind, mat): + """Write a double-precision floating-point matrix tag.""" + FIFFT_MATRIX = 1 << 30 + FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX + + data_size = 8 * mat.size + 4 * (mat.ndim + 1) + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(mat, dtype='>f8').tobytes()) + + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[:mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def write_int_matrix(fid, kind, mat): + """Write integer 32 matrix tag.""" + FIFFT_MATRIX = 1 << 30 + FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX + + data_size = 4 * mat.size + 4 * 3 + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(mat, dtype='>i4').tobytes()) + + dims = np.empty(3, dtype=np.int32) + dims[0] = mat.shape[1] + dims[1] = mat.shape[0] + dims[2] = 2 + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def write_complex_float_matrix(fid, kind, mat): + """Write complex 64 matrix tag.""" + FIFFT_MATRIX = 1 << 30 + FIFFT_MATRIX_COMPLEX_FLOAT = FIFF.FIFFT_COMPLEX_FLOAT | FIFFT_MATRIX + + data_size = 4 * 2 * mat.size + 4 * (mat.ndim + 1) + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_COMPLEX_FLOAT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(mat, dtype='>c8').tobytes()) + + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[:mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def write_complex_double_matrix(fid, kind, mat): + """Write complex 128 matrix tag.""" + FIFFT_MATRIX = 1 << 
30 + FIFFT_MATRIX_COMPLEX_DOUBLE = FIFF.FIFFT_COMPLEX_DOUBLE | FIFFT_MATRIX + + data_size = 8 * 2 * mat.size + 4 * (mat.ndim + 1) + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_COMPLEX_DOUBLE, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(mat, dtype='>c16').tobytes()) + + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[:mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def get_machid(): + """Get (mostly) unique machine ID. + + Returns + ------- + ids : array (length 2, int32) + The machine identifier used in MNE. + """ + mac = b'%012x' % uuid.getnode() # byte conversion for Py3 + mac = re.findall(b'..', mac) # split string + mac += [b'00', b'00'] # add two more fields + + # Convert to integer in reverse-order (for some reason) + from codecs import encode + mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]]) + ids = np.flipud(np.frombuffer(mac, np.int32, count=2)) + return ids + + +def get_new_file_id(): + """Create a new file ID tag.""" + secs, usecs = divmod(time.time(), 1.) + secs, usecs = int(secs), int(usecs * 1e6) + return {'machid': get_machid(), 'version': FIFF.FIFFC_VERSION, + 'secs': secs, 'usecs': usecs} + + +def write_id(fid, kind, id_=None): + """Write fiff id.""" + id_ = _generate_meas_id() if id_ is None else id_ + + data_size = 5 * 4 # The id comprises five integers + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + + # Collect the bits together for one write + arr = np.array([id_['version'], + id_['machid'][0], id_['machid'][1], + id_['secs'], id_['usecs']], dtype='>i4') + fid.write(arr.tobytes()) + + +def start_block(fid, kind): + """Write a FIFF_BLOCK_START tag.""" + write_int(fid, FIFF.FIFF_BLOCK_START, kind) + + +def end_block(fid, kind): + """Write a FIFF_BLOCK_END tag.""" + write_int(fid, FIFF.FIFF_BLOCK_END, kind) + + +def start_file(fname, id_=None): + """Open a fif file for writing and writes the compulsory header tags. + + Parameters + ---------- + fname : string | fid + The name of the file to open. It is recommended + that the name ends with .fif or .fif.gz. Can also be an + already opened file. + id_ : dict | None + ID to use for the FIFF_FILE_ID. + """ + if _file_like(fname): + logger.debug('Writing using %s I/O' % type(fname)) + fid = fname + fid.seek(0) + else: + fname = str(fname) + if op.splitext(fname)[1].lower() == '.gz': + logger.debug('Writing using gzip') + # defaults to compression level 9, which is barely smaller but much + # slower. 2 offers a good compromise. 
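+            # (plain GzipFile output; MNE's readers accept .fif.gz directly)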
+ fid = GzipFile(fname, "wb", compresslevel=2) + else: + logger.debug('Writing using normal I/O') + fid = open(fname, "wb") + # Write the compulsory items + write_id(fid, FIFF.FIFF_FILE_ID, id_) + write_int(fid, FIFF.FIFF_DIR_POINTER, -1) + write_int(fid, FIFF.FIFF_FREE_LIST, -1) + return fid + + +@contextmanager +def start_and_end_file(fname, id_=None): + """Start and (if successfully written) close the file.""" + with start_file(fname, id_=id_) as fid: + yield fid + end_file(fid) # we only hit this line if the yield does not err + + +def check_fiff_length(fid, close=True): + """Ensure our file hasn't grown too large to work properly.""" + if fid.tell() > 2147483648: # 2 ** 31, FIFF uses signed 32-bit locations + if close: + fid.close() + raise IOError('FIFF file exceeded 2GB limit, please split file, reduce' + ' split_size (if possible), or save to a different ' + 'format') + + +def end_file(fid): + """Write the closing tags to a fif file and closes the file.""" + write_nop(fid, last=True) + check_fiff_length(fid) + fid.close() + + +def write_coord_trans(fid, trans): + """Write a coordinate transformation structure.""" + data_size = 4 * 2 * 12 + 4 * 2 + fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + fid.write(np.array(trans['from'], dtype='>i4').tobytes()) + fid.write(np.array(trans['to'], dtype='>i4').tobytes()) + + # The transform... + rot = trans['trans'][:3, :3] + move = trans['trans'][:3, 3] + fid.write(np.array(rot, dtype='>f4').tobytes()) + fid.write(np.array(move, dtype='>f4').tobytes()) + + # ...and its inverse + trans_inv = np.linalg.inv(trans['trans']) + rot = trans_inv[:3, :3] + move = trans_inv[:3, 3] + fid.write(np.array(rot, dtype='>f4').tobytes()) + fid.write(np.array(move, dtype='>f4').tobytes()) + + +def write_ch_info(fid, ch): + """Write a channel information record to a fif file.""" + data_size = 4 * 13 + 4 * 7 + 16 + + fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + + # Start writing fiffChInfoRec + fid.write(np.array(ch['scanno'], dtype='>i4').tobytes()) + fid.write(np.array(ch['logno'], dtype='>i4').tobytes()) + fid.write(np.array(ch['kind'], dtype='>i4').tobytes()) + fid.write(np.array(ch['range'], dtype='>f4').tobytes()) + fid.write(np.array(ch['cal'], dtype='>f4').tobytes()) + fid.write(np.array(ch['coil_type'], dtype='>i4').tobytes()) + fid.write(np.array(ch['loc'], dtype='>f4').tobytes()) # writing 12 values + + # unit and unit multiplier + fid.write(np.array(ch['unit'], dtype='>i4').tobytes()) + fid.write(np.array(ch['unit_mul'], dtype='>i4').tobytes()) + + # Finally channel name + ch_name = ch['ch_name'][:15] + fid.write(np.array(ch_name, dtype='>c').tobytes()) + fid.write(b'\0' * (16 - len(ch_name))) + + +def write_dig_points(fid, dig, block=False, coord_frame=None): + """Write a set of digitizer data points into a fif file.""" + if dig is not None: + data_size = 5 * 4 + if block: + start_block(fid, FIFF.FIFFB_ISOTRAK) + if coord_frame is not None: + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame) + for d in dig: + fid.write(np.array(FIFF.FIFF_DIG_POINT, '>i4').tobytes()) + fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, 
'>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, '>i4').tobytes()) + # Start writing fiffDigPointRec + fid.write(np.array(d['kind'], '>i4').tobytes()) + fid.write(np.array(d['ident'], '>i4').tobytes()) + fid.write(np.array(d['r'][:3], '>f4').tobytes()) + if block: + end_block(fid, FIFF.FIFFB_ISOTRAK) + + +def write_float_sparse_rcs(fid, kind, mat): + """Write a single-precision sparse compressed row matrix tag.""" + return write_float_sparse(fid, kind, mat, fmt='csr') + + +def write_float_sparse_ccs(fid, kind, mat): + """Write a single-precision sparse compressed column matrix tag.""" + return write_float_sparse(fid, kind, mat, fmt='csc') + + +def write_float_sparse(fid, kind, mat, fmt='auto'): + """Write a single-precision floating-point sparse matrix tag.""" + from scipy import sparse + from .tag import _matrix_coding_CCS, _matrix_coding_RCS + if fmt == 'auto': + fmt = 'csr' if isinstance(mat, sparse.csr_matrix) else 'csc' + if fmt == 'csr': + need = sparse.csr_matrix + bits = _matrix_coding_RCS + else: + need = sparse.csc_matrix + bits = _matrix_coding_CCS + if not isinstance(mat, need): + raise TypeError('Must write %s, got %s' % (fmt.upper(), type(mat),)) + FIFFT_MATRIX = bits << 16 + FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX + + nnzm = mat.nnz + nrow = mat.shape[0] + data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4 + + fid.write(np.array(kind, dtype='>i4').tobytes()) + fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tobytes()) + fid.write(np.array(data_size, dtype='>i4').tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) + + fid.write(np.array(mat.data, dtype='>f4').tobytes()) + fid.write(np.array(mat.indices, dtype='>i4').tobytes()) + fid.write(np.array(mat.indptr, dtype='>i4').tobytes()) + + dims = [nnzm, mat.shape[0], mat.shape[1], 2] + fid.write(np.array(dims, dtype='>i4').tobytes()) + check_fiff_length(fid) + + +def _generate_meas_id(): + """Generate a new meas_id dict.""" + id_ = dict() + id_['version'] = FIFF.FIFFC_VERSION + id_['machid'] = get_machid() + id_['secs'], id_['usecs'] = DATE_NONE + return id_ diff --git a/python/libs/mne/label.py b/python/libs/mne/label.py new file mode 100644 index 0000000..ce03df5 --- /dev/null +++ b/python/libs/mne/label.py @@ -0,0 +1,2739 @@ +# Authors: Alexandre Gramfort +# Martin Luessi +# Denis Engemann +# +# License: BSD-3-Clause + +from collections import defaultdict +from colorsys import hsv_to_rgb, rgb_to_hsv +import copy as cp +import os +import os.path as op +import re + +import numpy as np + +from .morph_map import read_morph_map +from .parallel import parallel_func, check_n_jobs +from .source_estimate import (SourceEstimate, VolSourceEstimate, + _center_of_mass, extract_label_time_course, + spatial_src_adjacency) +from .source_space import (add_source_space_distances, SourceSpaces, + _ensure_src) +from .stats.cluster_level import _find_clusters, _get_components +from .surface import (complete_surface_info, read_surface, fast_cross_3d, + _mesh_borders, mesh_edges, mesh_dist) +from .utils import (get_subjects_dir, _check_subject, logger, verbose, warn, + check_random_state, _validate_type, fill_doc, + _check_option, check_version, _check_fname, _VerboseDep) + + +def _blend_colors(color_1, color_2): + """Blend two colors in HSV space. + + Parameters + ---------- + color_1, color_2 : None | tuple + RGBA tuples with values between 0 and 1. None if no color is available. + If both colors are None, the output is None. 
If only one is None, the + output is the other color. + + Returns + ------- + color : None | tuple + RGBA tuple of the combined color. Saturation, value and alpha are + averaged, whereas the new hue is determined as angle half way between + the two input colors' hues. + """ + if color_1 is None and color_2 is None: + return None + elif color_1 is None: + return color_2 + elif color_2 is None: + return color_1 + + r_1, g_1, b_1, a_1 = color_1 + h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1) + r_2, g_2, b_2, a_2 = color_2 + h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2) + hue_diff = abs(h_1 - h_2) + if hue_diff < 0.5: + h = min(h_1, h_2) + hue_diff / 2. + else: + h = max(h_1, h_2) + (1. - hue_diff) / 2. + h %= 1. + s = (s_1 + s_2) / 2. + v = (v_1 + v_2) / 2. + r, g, b = hsv_to_rgb(h, s, v) + a = (a_1 + a_2) / 2. + color = (r, g, b, a) + return color + + +def _split_colors(color, n): + """Create n colors in HSV space that occupy a gradient in value. + + Parameters + ---------- + color : tuple + RGBA tuple with values between 0 and 1. + n : int >= 2 + Number of colors on the gradient. + + Returns + ------- + colors : tuple of tuples, len = n + N RGBA tuples that occupy a gradient in value (low to high) but share + saturation and hue with the input color. + """ + r, g, b, a = color + h, s, v = rgb_to_hsv(r, g, b) + gradient_range = np.sqrt(n / 10.) + if v > 0.5: + v_max = min(0.95, v + gradient_range / 2) + v_min = max(0.05, v_max - gradient_range) + else: + v_min = max(0.05, v - gradient_range / 2) + v_max = min(0.95, v_min + gradient_range) + + hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n)) + rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors) + rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors) + return tuple(rgba_colors) + + +def _n_colors(n, bytes_=False, cmap='hsv'): + """Produce a list of n unique RGBA color tuples based on a colormap. + + Parameters + ---------- + n : int + Number of colors. + bytes : bool + Return colors as integers values between 0 and 255 (instead of floats + between 0 and 1). + cmap : str + Which colormap to use. + + Returns + ------- + colors : array, shape (n, 4) + RGBA color values. + """ + n_max = 2 ** 10 + if n > n_max: + raise NotImplementedError("Can't produce more than %i unique " + "colors" % n_max) + + from matplotlib.cm import get_cmap + cm = get_cmap(cmap, n_max) + pos = np.linspace(0, 1, n, False) + colors = cm(pos, bytes=bytes_) + if bytes_: + # make sure colors are unique + for ii, c in enumerate(colors): + if np.any(np.all(colors[:ii] == c, 1)): + raise RuntimeError('Could not get %d unique colors from %s ' + 'colormap. Try using a different colormap.' + % (n, cmap)) + return colors + + +@fill_doc +class Label(_VerboseDep): + """A FreeSurfer/MNE label with vertices restricted to one hemisphere. + + Labels can be combined with the ``+`` operator: + + * Duplicate vertices are removed. + * If duplicate vertices have conflicting position values, an error + is raised. + * Values of duplicate vertices are summed. + + Parameters + ---------- + vertices : array, shape (N,) + Vertex indices (0 based). + pos : array, shape (N, 3) | None + Locations in meters. If None, then zeros are used. + values : array, shape (N,) | None + Values at the vertices. If None, then ones are used. + hemi : 'lh' | 'rh' + Hemisphere to which the label applies. + comment : str + Kept as information but not used by the object itself. + name : str + Kept as information but not used by the object itself. 
+ filename : str + Kept as information but not used by the object itself. + %(subject_label)s + color : None | matplotlib color + Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red). + %(verbose)s + + Attributes + ---------- + color : None | tuple + Default label color, represented as RGBA tuple with values between 0 + and 1. + comment : str + Comment from the first line of the label file. + hemi : 'lh' | 'rh' + Hemisphere. + name : None | str + A name for the label. It is OK to change that attribute manually. + pos : array, shape (N, 3) + Locations in meters. + subject : str | None + The label subject. + It is best practice to set this to the proper + value on initialization, but it can also be set manually. + values : array, shape (N,) + Values at the vertices. + vertices : array, shape (N,) + Vertex indices (0 based) + """ + + @verbose + def __init__(self, vertices=(), pos=None, values=None, hemi=None, + comment="", name=None, filename=None, subject=None, + color=None, *, verbose=None): # noqa: D102 + # check parameters + if not isinstance(hemi, str): + raise ValueError('hemi must be a string, not %s' % type(hemi)) + vertices = np.asarray(vertices, int) + if np.any(np.diff(vertices.astype(int)) <= 0): + raise ValueError('Vertices must be ordered in increasing order.') + + if color is not None: + from matplotlib.colors import colorConverter + color = colorConverter.to_rgba(color) + + if values is None: + values = np.ones(len(vertices)) + else: + values = np.asarray(values) + + if pos is None: + pos = np.zeros((len(vertices), 3)) + else: + pos = np.asarray(pos) + + if not (len(vertices) == len(values) == len(pos)): + raise ValueError("vertices, values and pos need to have same " + "length (number of vertices)") + + # name + if name is None and filename is not None: + name = op.basename(filename[:-6]) + + self.vertices = vertices + self.pos = pos + self.values = values + self.hemi = hemi + self.comment = comment + self.subject = _check_subject(None, subject, raise_error=False) + self.color = color + self.name = name + self.filename = filename + + def __setstate__(self, state): # noqa: D105 + self.vertices = state['vertices'] + self.pos = state['pos'] + self.values = state['values'] + self.hemi = state['hemi'] + self.comment = state['comment'] + self.subject = state.get('subject', None) + self.color = state.get('color', None) + self.name = state['name'] + self.filename = state['filename'] + + def __getstate__(self): # noqa: D105 + out = dict(vertices=self.vertices, + pos=self.pos, + values=self.values, + hemi=self.hemi, + comment=self.comment, + subject=self.subject, + color=self.color, + name=self.name, + filename=self.filename) + return out + + def __repr__(self): # noqa: D105 + name = 'unknown, ' if self.subject is None else self.subject + ', ' + name += repr(self.name) if self.name is not None else "unnamed" + n_vert = len(self) + return "