diff --git a/rsHRF/CLI.py b/rsHRF/CLI.py index c561d5d..ed087d4 100755 --- a/rsHRF/CLI.py +++ b/rsHRF/CLI.py @@ -4,12 +4,19 @@ import json from argparse import ArgumentParser from bids.layout import BIDSLayout +from bids.config import set_option from pathlib import Path from rsHRF import spm_dep, fourD_rsHRF, utils +try: + from .rsHRF_GUI import run +except ModuleNotFoundError: + run = None + import warnings +from .utils.default_parameters import default_parameters, available_estimations -warnings.filterwarnings("ignore") +set_option("extension_initial_dot", True) with open(op.join(op.dirname(op.realpath(__file__)), "VERSION"), "r") as fh: __version__ = fh.read().strip("\n") @@ -22,41 +29,37 @@ def get_parser(): "voxel-wise signal" ) - group_input = parser.add_mutually_exclusive_group(required=True) - - group_input.add_argument( - "--ts", - action="store", - type=op.abspath, - help="the absolute path to a single data file", + parser.add_argument( + "bids_dir", + help="the input data for the analysis: a path to a data file or the root " + "folder of a BIDS valid dataset, or 'GUI' to run in graphical user interface " + "mode", + default="GUI", ) - group_input.add_argument( - "--input_file", + parser.add_argument( + "output_dir", action="store", type=op.abspath, - help="the absolute path to a single data file", - ) - - group_input.add_argument( - "--bids_dir", + help="the output path for the outcomes of processing", nargs="?", - action="store", - type=op.abspath, - help="the root folder of a BIDS valid dataset " - "(sub-XXXXX folders should be found at the " - "top level in this folder).", ) - group_input.add_argument( - "--GUI", action="store_true", help="to execute the toolbox in GUI mode" + parser.add_argument( + "analysis_level", + help="Level of the analysis that will be performed. " + "Multiple participant level analyses can be run independently " + "(in parallel) using the same output_dir. 
Only 'participant' level analysis is" + " allowed.", + choices=["participant"], + nargs="?", ) parser.add_argument( - "--output_dir", - action="store", - type=op.abspath, - help="the output path for the outcomes of processing", + "--no-bids", + action="store_true", + help="Explicitly disable bids input. Necessary when using txt or nii/gii files" + " directly.", ) parser.add_argument( @@ -64,7 +67,8 @@ def get_parser(): action="store", type=int, default=-1, - help="the number of parallel processing elements", + help="the number of parallel processing elements to use, default is -1 (use " + "all available cores)", ) parser.add_argument( @@ -75,16 +79,7 @@ def get_parser(): ) parser.add_argument( - "--analysis_level", - help="Level of the analysis that will be performed. " - "Multiple participant level analyses can be run independently " - "(in parallel) using the same output_dir.", - choices=["participant"], - nargs="?", - ) - - parser.add_argument( - "--participant_label", + "--participant-label", help="The label(s) of the participant(s) that should be analyzed. The label " "corresponds to sub- from the BIDS spec " '(so it does not include "sub-"). If this parameter is not ' @@ -94,26 +89,21 @@ def get_parser(): ) parser.add_argument( - "--bids_filter_file", + "--bids-filter-file", action="store", type=op.abspath, help="a JSON file describing custom BIDS input filters using PyBIDS. 
" "For further details, please check out http://bids-apps.neuroimaging.io/rsHRF/", ) - group_mask = parser.add_mutually_exclusive_group(required=False) - - group_mask.add_argument( - "--atlas", + parser.add_argument( + "-m", + "--mask", action="store", - type=op.abspath, - help="the absolute path to a single atlas file", - ) - - group_mask.add_argument( - "--brainmask", - action="store_true", - help="to enable the use of mask files present in the BIDS " "directory itself", + type=str, + help="the absolute path to a single mask file, which should be of the same " + "type as the input file (NIfTI or GIfTI). Use 'BIDS' to enable the use of " + "mask files present in the BIDS directory itself.", ) group_para = parser.add_argument_group("Parameters") @@ -121,14 +111,15 @@ def get_parser(): group_para.add_argument( "--estimation", action="store", - choices=["canon2dd", "sFIR", "FIR", "fourier", "hanning", "gamma"], + choices=available_estimations, help="Choose the estimation procedure from " "canon2dd (canonical shape with 2 derivatives), " "sFIR (smoothed Finite Impulse Response), " "FIR (Finite Impulse Response), " "fourier (Fourier Basis Set), " "hanning (Fourier Basis w Hanning), " - "gamma (Gamma Basis Set)", + f"gamma (Gamma Basis Set).", + default=default_parameters["estimation"], ) group_para.add_argument( @@ -137,80 +128,122 @@ def get_parser(): type=float, nargs=2, metavar=("LOW_FREQ", "HIGH_FREQ"), - default=[0.01, 0.08], + default=default_parameters["passband"], help="set intervals for bandpass filter, default is 0.01 - 0.08", ) group_para.add_argument( - "--passband_deconvolve", + "--passband-deconvolve", action="store", type=float, nargs=2, metavar=("LOW_FREQ", "HIGH_FREQ"), - default=[0.0, sys.float_info.max], - help="set intervals for bandpass filter (used while deconvolving BOLD), default is no-filtering", + default=default_parameters["passband_deconvolve"], + help="set intervals for bandpass filter (used while deconvolving BOLD), " + "default is 
no-filtering", ) group_para.add_argument( - "-TR", action="store", type=float, default=-1, help="set TR parameter" + "--TR", + action="store", + type=float, + help="set TR parameter", + default=default_parameters["TR"], ) group_para.add_argument( - "-T", action="store", type=int, default=3, help="set T parameter" + "--T", + "-T", + action="store", + type=int, + help=f"set T parameter, default is {default_parameters['T']}", + default=default_parameters["T"], ) group_para.add_argument( - "-T0", action="store", type=int, default=1, help="set T0 parameter" + "--T0", + action="store", + type=int, + default=default_parameters["T0"], + help=f"set T0 parameter, default is {default_parameters['T0']}", ) group_para.add_argument( - "-TD_DD", action="store", type=int, default=2, help="set TD_DD parameter" + "--TD", + action="store", + dest="TD_DD", + type=int, + default=default_parameters["TD_DD"], + help=f"set TD_DD parameter, default is {default_parameters['TD_DD']}", ) group_para.add_argument( - "-AR_lag", action="store", type=int, default=1, help="set AR_lag parameter" + "--AR-lag", + action="store", + type=int, + default=default_parameters["AR_lag"], + help=f"set AR_lag parameter, default is {default_parameters['AR_lag']}", ) group_para.add_argument( - "--thr", action="store", type=float, default=1, help="set thr parameter" + "--thr", + "--threshold", + action="store", + type=float, + default=default_parameters["thr"], + help=f"set thr parameter, default is {default_parameters['thr']}", ) group_para.add_argument( - "--temporal_mask", + "--temporal-mask", + "--tmask", action="store", type=op.abspath, - help='the path for the (temporal) mask file.\n The mask file should be a ".dat" file, consisting of a binary string of the same length as the signal', + help="the path for the (temporal) mask file.\n The mask file should be a text " + "file, a sequence of 0s and 1s of the same length as the signal", ) group_para.add_argument( "--order", action="store", type=int, - default=3, 
- help="set the number of basis vectors", + default=default_parameters["order"], + help=f"set the number of basis vectors, default is " + f"{default_parameters['order']} (used for fourier, hanning and gamma basis " + "functions)", ) group_para.add_argument( - "--len", action="store", type=int, default=24, help="set len parameter" + "--len", + action="store", + type=int, + default=default_parameters["len"], + help=f"set len parameter, default is {default_parameters['len']}s", ) group_para.add_argument( - "--min_onset_search", + "--min-onset-search", action="store", type=int, - default=4, - help="set min_onset_search parameter", + default=default_parameters["min_onset_search"], + help=f"set min_onset_search parameter, default is {default_parameters['min_onset_search']}s", ) group_para.add_argument( - "--max_onset_search", + "--max-onset-search", action="store", type=int, - default=8, - help="set max_onset_search parameter", + default=default_parameters["max_onset_search"], + help=f"set max_onset_search parameter, default is {default_parameters['max_onset_search']}s", ) - group_para.add_argument("--localK", action="store", type=int, help="set localK") + group_para.add_argument( + "--localK", + action="store", + type=int, + help=f"set localK, default is {default_parameters['localK']}", + default=default_parameters["localK"], + ) group_para.add_argument( "--wiener", @@ -229,391 +262,309 @@ def run_rsHRF(): group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions} arg_groups[group.title] = group_dict para = arg_groups["Parameters"] - nargs = len(sys.argv) temporal_mask = [] - if (not args.GUI) and (args.output_dir is None): - parser.error( - "--output_dir is required when executing in command-line interface" - ) + if args.bids_dir == "GUI" and args.no_bids: + if run is not None: - if (not args.GUI) and (args.estimation is None): - parser.error( - "--estimation rule is required when executing in command-line interface" - ) - - if args.GUI: - if 
nargs == 2: - try: - from .rsHRF_GUI import run - - run.run() - except ModuleNotFoundError: - parser.error("--GUI should not be used inside a Docker container") - else: - parser.error("--no other arguments should be supplied with --GUI") - - if (args.input_file is not None or args.ts is not None) and args.analysis_level: - parser.error( - "analysis_level cannot be used with --input_file or --ts, do not supply it" - ) - - if (args.input_file is not None or args.ts is not None) and args.participant_label: - parser.error( - "participant_labels are not to be used with --input_file or --ts, do not supply it" - ) - - if args.input_file is not None and args.brainmask: - parser.error( - "--brainmask cannot be used with --input_file, use --atlas instead" - ) - - if args.ts is not None and (args.brainmask or args.atlas): - parser.error( - "--atlas or --brainmask cannot be used with --ts, do not supply it" - ) - - if args.bids_dir is not None and not (args.brainmask or args.atlas): - parser.error("--atlas or --brainmask needs to be supplied with --bids_dir") - - if args.bids_dir is not None and not args.analysis_level: - parser.error( - "analysis_level needs to be supplied with bids_dir, choices=[participant]" - ) - - if args.input_file is not None and ( - not args.input_file.endswith((".nii", ".nii.gz", ".gii", ".gii.gz")) - ): - parser.error("--input_file should end with .gii, .gii.gz, .nii or .nii.gz") - - if args.atlas is not None and ( - not args.atlas.endswith((".nii", ".nii.gz", ".gii", ".gii.gz")) - ): - parser.error("--atlas should end with .gii, .gii.gz, .nii or .nii.gz") - - if args.ts is not None and (not args.ts.endswith((".txt"))): - parser.error("--ts file should end with .txt") - - if args.temporal_mask is not None and (not args.temporal_mask.endswith((".dat"))): - parser.error('--temporal_mask ile should end with ".dat"') - - if args.temporal_mask is not None: - f = open(args.temporal_mask, "r") - for line in f: - for each in line: - if each in ["0", "1"]: - 
temporal_mask.append(int(each)) - - if args.estimation == "sFIR" or args.estimation == "FIR": - para["T"] = 1 - - if args.ts is not None: - file_type = op.splitext(args.ts) - if para["TR"] <= 0: - parser.error("Please supply a valid TR using -TR argument") + run.run(para) + return 0 else: - TR = para["TR"] - para["dt"] = para["TR"] / para["T"] - para["lag"] = np.arange( - np.fix(para["min_onset_search"] / para["dt"]), - np.fix(para["max_onset_search"] / para["dt"]) + 1, - dtype="int", - ) - fourD_rsHRF.demo_rsHRF( - args.ts, - None, - args.output_dir, - para, - args.n_jobs, - file_type, - mode="time-series", - temporal_mask=temporal_mask, - wiener=args.wiener, - ) - - if args.input_file is not None: - if args.atlas is not None: - if ( - args.input_file.endswith((".nii", ".nii.gz")) - and args.atlas.endswith((".gii", ".gii.gz")) - ) or ( - args.input_file.endswith((".gii", ".gii.gz")) - and args.atlas.endswith((".nii", ".nii.gz")) - ): - parser.error( - "--atlas and input_file should be of the same type [NIfTI or GIfTI]" - ) + parser.error("--GUI should not be used inside a Docker container") + else: + if args.output_dir is None: + parser.error( + "--output_dir is required when executing in command-line interface" + ) + elif not op.isdir(args.output_dir): + parser.error( + "--output_dir must be a valid directory path that already exists." 
+ ) - # carry analysis with input_file and atlas - file_type = op.splitext(args.input_file) - if file_type[-1] == ".gz": - file_type = op.splitext(file_type[-2])[-1] + file_type[-1] - else: - file_type = file_type[-1] - if ".nii" in file_type: - TR = (spm_dep.spm.spm_vol(args.input_file).header.get_zooms())[-1] - else: - if para["TR"] == -1: - parser.error("Please supply a valid TR using -TR argument") - else: - TR = para["TR"] - if TR <= 0: - if para["TR"] <= 0: - parser.error("Please supply a valid TR using -TR argument") - else: - if para["TR"] == -1: - para["TR"] = TR - elif para["TR"] <= 0: - print("Invalid TR supplied, using implicit TR: {0}".format(TR)) - para["TR"] = TR - para["dt"] = para["TR"] / para["T"] - para["lag"] = np.arange( - np.fix(para["min_onset_search"] / para["dt"]), - np.fix(para["max_onset_search"] / para["dt"]) + 1, - dtype="int", - ) - fourD_rsHRF.demo_rsHRF( - args.input_file, - args.atlas, - args.output_dir, - para, - args.n_jobs, - file_type, - mode="input", - temporal_mask=temporal_mask, - wiener=args.wiener, - ) - - if args.bids_dir is not None: - utils.bids.write_derivative_description(args.bids_dir, args.output_dir) - bids_dir = Path(args.bids_dir) - fname = bids_dir / "dataset_description.json" - - if fname.exists(): - desc = json.loads(Path(fname).read_text()) - if "DataType" in desc: - if desc["DataType"] != "derivative": - parser.error( - "Input data is not a derivative dataset" - ' (DataType in dataset_description.json is not equal to "derivative")' - ) + if not op.exists(args.bids_dir): + parser.error( + "The input path provided does not exist, please provide a valid path." + ) - else: + if op.isdir(args.bids_dir): + input_type = "BIDS" + if args.analysis_level is None: parser.error( - "DataType is not defined in the dataset_description.json file. Please make sure DataType is defined. 
" - "Information on the dataset_description.json file can be found online " - "(https://bids-specification.readthedocs.io/en/stable/03-modality-agnostic-files.html" - "#derived-dataset-and-pipeline-description)" + "When running BIDS analysis you must provide the analysis level 'participant'." ) + elif ( + args.bids_dir.endswith((".nii", ".nii.gz", ".gii", ".gii.gz")) + and args.no_bids + ): + input_type = "4Dimage" + elif args.bids_dir.endswith(".txt") and args.no_bids: + input_type = "text" else: parser.error( - "Could not find dataset_description.json file. Please make sure the BIDS data " - "structure is present and correct. Datasets can be validated online " - "using the BIDS Validator (http://incf.github.io/bids-validator/)." + "When not using BIDS structure you must specify --no-bids and the input file " + "should be a 4D NIfTI or GIfTI file, or a text file containing the time-series" ) - if args.bids_dir is not None and args.atlas is not None: - # carry analysis with bids_dir and 1 atlas - layout = BIDSLayout( - args.bids_dir, validate=False, config=["bids", "derivatives"] - ) - - if args.participant_label: - input_subjects = args.participant_label - subjects_to_analyze = layout.get_subjects(subject=input_subjects) - else: - subjects_to_analyze = layout.get_subjects() - - if not subjects_to_analyze: - parser.error( - "Could not find participants. Please make sure the BIDS data " - "structure is present and correct. Datasets can be validated online " - "using the BIDS Validator (http://incf.github.io/bids-validator/)." 
+ if input_type != "BIDS" and args.participant_label is not None: + warnings.warn( + "Participant_labels are not to be used with 4Dimage or text input, do not supply it", ) - if not args.atlas.endswith((".nii", ".nii.gz")): - parser.error("--atlas should end with .nii or .nii.gz") + if input_type == "text" and args.mask is not None: + warnings.warn( + "No brainmask can be applied with text input, ignoring it.", + ) + args.mask = None - if args.bids_filter_file is not None: - filter_list = json.loads(Path(args.bids_filter_file).read_text()) + if input_type == "4Dimage" and args.mask is not None: + if args.mask == "BIDS": + warnings.warn( + "BIDS masks cannot be applied with 4D image input, ignoring it.", + ) + args.mask = None + elif ("nii" in args.bids_dir and "gii" in args.mask) or ( + "gii" in args.bids_dir and "nii" in args.mask + ): + parser.error( + "The mask file should be of the same type as the input file (NIfTI or GIfTI)" + ) - default_input = { - "extension": "nii.gz", - "datatype": "func", - "desc": "preproc", - "task": "rest", - "suffix": "bold", - } - default_input["subject"] = subjects_to_analyze - default_input.update(filter_list["bold"]) + if args.mask is not None and args.mask != "BIDS": + args.mask = op.abspath(args.mask) + if not args.mask.endswith((".nii", ".nii.gz", ".gii", ".gii.gz")): + parser.error( + "The mask file should be of the same type as the input file (NIfTI or GIfTI)" + ) + if not op.isfile(args.mask): + parser.error( + "The mask file provided does not exist, please provide a valid path." + ) - all_inputs = layout.get(return_type="filename", **default_input) + if args.temporal_mask is not None: + try: + f = open(args.temporal_mask, "r") + for line in f: + for each in line: + if each in ["0", "1"]: + temporal_mask.append(int(each)) + except: + parser.error( + "Unable to read temporal mask file. 
Please make sure the file is a text file, consisting of a sequence of 0s and 1s of the same length as the signal" + ) - else: - all_inputs = layout.get( - return_type="filename", - datatype="func", - subject=subjects_to_analyze, - task="rest", - desc="preproc", - suffix="bold", - extension=["nii", "nii.gz"], + if input_type != "BIDS": + if para["TR"] <= 0: + if input_type == "text": + parser.error("Please supply a valid TR using -TR argument") + else: # it's 4D image + if ".nii" in args.bids_dir: + TR = (spm_dep.spm.spm_vol(args.bids_dir).header.get_zooms())[-1] + else: + parser.error("Please supply a valid TR using -TR argument") + if TR <= 0: + parser.error("Please supply a valid TR using -TR argument") + else: + print( + "Invalid or no TR supplied, using implicit TR: {0}".format( + TR + ), + file=sys.stderr, + ) + para["TR"] = TR + para["dt"] = para["TR"] / para["T"] + para["lag"] = np.arange( + np.trunc(para["min_onset_search"] / para["dt"]), + np.trunc(para["max_onset_search"] / para["dt"]) + 1, + dtype="int", ) - if not all_inputs != []: - parser.error( - "There are no files of type *bold.nii / *bold.nii.gz " - "Please make sure to have at least one file of the above type " - "in the BIDS specification" - ) - else: - num_errors = 0 - for file_count in range(len(all_inputs)): - try: - TR = layout.get_metadata(all_inputs[file_count])["RepetitionTime"] - except KeyError as e: - TR = spm_dep.spm.spm_vol(all_inputs[file_count]).header.get_zooms()[ - -1 - ] - para["TR"] = TR - para["dt"] = para["TR"] / para["T"] - para["lag"] = np.arange( - np.fix(para["min_onset_search"] / para["dt"]), - np.fix(para["max_onset_search"] / para["dt"]) + 1, - dtype="int", + if input_type == "text": + file_type = op.splitext(args.bids_dir)[-1] + fourD_rsHRF.demo_rsHRF( + args.bids_dir, + None, + args.output_dir, + para, + args.n_jobs, + file_type, + mode="time-series", + temporal_mask=temporal_mask, + wiener=args.wiener, ) - num_errors += 1 - try: - fourD_rsHRF.demo_rsHRF( - 
all_inputs[file_count], - args.atlas, - args.output_dir, - para, - args.n_jobs, - file_type, - mode="bids w/ atlas", - temporal_mask=temporal_mask, - wiener=args.wiener, - ) - num_errors -= 1 - except ValueError as err: - print(err.args[0]) - except: - print("Unexpected error:", sys.exc_info()[0]) - success = len(all_inputs) - num_errors - if success == 0: - raise RuntimeError( - "Dimensions were inconsistent for all input-mask pairs; \n" - "No inputs were processed!" + return 0 + + else: # it's 4D image + # carry analysis with input_file and atlas + file_type = op.splitext(args.bids_dir) + if file_type[-1] == ".gz": + file_type = op.splitext(file_type[-2])[-1] + file_type[-1] + else: + file_type = file_type[-1] + + fourD_rsHRF.demo_rsHRF( + args.bids_dir, + args.mask, + args.output_dir, + para, + args.n_jobs, + file_type, + mode="input", + temporal_mask=temporal_mask, + wiener=args.wiener, ) + return 0 + + else: # it's BIDS + utils.bids.write_derivative_description(args.bids_dir, args.output_dir) + bids_dir = Path(args.bids_dir) + fname = bids_dir / "dataset_description.json" + + if fname.exists(): + desc = json.loads(Path(fname).read_text()) + if "DataType" in desc: + if desc["DataType"] != "derivative": + parser.error( + "Input data is not a derivative dataset" + ' (DataType in dataset_description.json is not equal to "derivative")' + ) - if args.bids_dir is not None and args.brainmask: - # carry analysis with bids_dir and brainmask - layout = BIDSLayout( - args.bids_dir, validate=False, config=["bids", "derivatives"] - ) + else: + parser.error( + "DataType is not defined in the dataset_description.json file. Please make sure DataType is defined. " + "Information on the dataset_description.json file can be found online " + "(https://bids-specification.readthedocs.io/en/stable/03-modality-agnostic-files.html" + "#derived-dataset-and-pipeline-description)" + ) + else: + parser.error( + "Could not find dataset_description.json file. 
Please make sure the BIDS data " + "structure is present and correct. Datasets can be validated online " + "using the BIDS Validator (http://incf.github.io/bids-validator/)." + ) - if args.participant_label: - input_subjects = args.participant_label - subjects_to_analyze = layout.get_subjects(subject=input_subjects) - else: - subjects_to_analyze = layout.get_subjects() + if para["TR"] >= 0: + warnings.warn( + "Explicit TR value is ignored when input is BIDS, as TR will be " + "read from the metadata of the input files." + ) - if not subjects_to_analyze: - parser.error( - "Could not find participants. Please make sure the BIDS data " - "structure is present and correct. Datasets can be validated online " - "using the BIDS Validator (http://incf.github.io/bids-validator/)." + layout = BIDSLayout( + args.bids_dir, validate=False, config=["bids", "derivatives"] ) - if args.bids_filter_file is not None: - filter_list = json.loads(Path(args.bids_filter_file).read_text()) - - default_input = { - "extension": "nii.gz", - "datatype": "func", - "desc": "preproc", - "task": "rest", - "suffix": "bold", - } - default_input["subject"] = subjects_to_analyze - default_input.update(filter_list["bold"]) - - all_inputs = layout.get(return_type="filename", **default_input) - - default_mask = { - "extension": "nii.gz", - "datatype": "func", - "desc": "brain", - "task": "rest", - "suffix": "mask", - } - default_mask["subject"] = subjects_to_analyze - default_mask.update(filter_list["mask"]) - - all_masks = layout.get(return_type="filename", **default_mask) - - else: - all_inputs = layout.get( - return_type="filename", - datatype="func", - subject=subjects_to_analyze, - task="rest", - desc="preproc", - suffix="bold", - extension=["nii", "nii.gz"], - ) - all_masks = layout.get( - return_type="filename", - datatype="func", - subject=subjects_to_analyze, - task="rest", - desc="brain", - suffix="mask", - extension=["nii", "nii.gz"], - ) + if args.participant_label: + input_subjects = 
args.participant_label + subjects_to_analyze = layout.get_subjects(subject=input_subjects) + else: + subjects_to_analyze = layout.get_subjects() - if not all_inputs != []: - parser.error( - "There are no files of type *bold.nii / *bold.nii.gz " - "Please make sure to have at least one file of the above type " - "in the BIDS specification" - ) - if not all_masks != []: - parser.error( - "There are no files of type *mask.nii / *mask.nii.gz " - "Please make sure to have at least one file of the above type " - "in the BIDS specification" - ) - if len(all_inputs) != len(all_masks): - parser.error( - "The number of *bold.nii / .nii.gz and the number of " - "*mask.nii / .nii.gz are different. Please make sure that " - "there is one mask for each input_file present" - ) + if len(subjects_to_analyze) == 0: + parser.error( + "Could not find participants. Please make sure the BIDS data " + "structure is present and correct. Datasets can be validated online " + "using the BIDS Validator (http://incf.github.io/bids-validator/)." 
+ ) - all_inputs.sort() - all_masks.sort() + if ( + args.mask is not None + and args.mask != "BIDS" + and not args.mask.endswith((".nii", ".nii.gz")) + ): + parser.error("Mask for BIDS input should end with .nii or .nii.gz") + + if args.bids_filter_file is not None: + filter_list = json.loads(Path(args.bids_filter_file).read_text()) + + default_input = { + "extension": "nii.gz", + "datatype": "func", + "desc": "preproc", + "task": "rest", + "suffix": "bold", + } + default_input["subject"] = subjects_to_analyze + default_input.update(filter_list["bold"]) + + all_inputs = layout.get(return_type="filename", **default_input) + + if args.mask == "BIDS": + default_mask = { + "extension": "nii.gz", + "datatype": "func", + "desc": "brain", + "task": "rest", + "suffix": "mask", + } + default_mask["subject"] = subjects_to_analyze + default_mask.update(filter_list["mask"]) + + all_masks = layout.get(return_type="filename", **default_mask) - all_prefix_match = False - prefix_match_count = 0 - for i in range(len(all_inputs)): - input_prefix = all_inputs[i].split("/")[-1].split("_desc")[0] - mask_prefix = all_masks[i].split("/")[-1].split("_desc")[0] - if input_prefix == mask_prefix: - prefix_match_count += 1 else: + all_inputs = layout.get( + return_type="filename", + datatype="func", + subject=subjects_to_analyze, + task="rest", + desc="preproc", + suffix="bold", + extension=["nii", "nii.gz"], + ) + if args.mask == "BIDS": + all_masks = layout.get( + return_type="filename", + datatype="func", + subject=subjects_to_analyze, + task="rest", + desc="brain", + suffix="mask", + extension=["nii", "nii.gz"], + ) + + if not all_inputs != []: + parser.error( + "There are no files of type *bold.nii / *bold.nii.gz " + "Please make sure to have at least one file of the above type " + "in the BIDS specification" + ) + all_inputs.sort() + + if args.mask == "BIDS": + if not all_masks != []: + parser.error( + "There are no files of type *mask.nii / *mask.nii.gz " + "Please make sure to have at 
least one file of the above type " + "in the BIDS specification" + ) + if len(all_inputs) != len(all_masks): + parser.error( + "The number of *bold.nii / .nii.gz and the number of " + "*mask.nii / .nii.gz are different. Please make sure that " + "there is one mask for each input_file present" + ) + + all_masks.sort() + all_prefix_match = False - break - if prefix_match_count == len(all_inputs): - all_prefix_match = True - if not all_prefix_match: - parser.error( - "The mask and input files should have the same prefix for correspondence. " - "Please consider renaming your files" - ) - else: + prefix_match_count = 0 + for i in range(len(all_inputs)): + input_prefix = all_inputs[i].split("/")[-1].split("_desc")[0] + mask_prefix = all_masks[i].split("/")[-1].split("_desc")[0] + if input_prefix == mask_prefix: + prefix_match_count += 1 + else: + all_prefix_match = False + break + if prefix_match_count == len(all_inputs): + all_prefix_match = True + + if not all_prefix_match: + parser.error( + "The mask and input files should have the same prefix for correspondence. 
" + "Please consider renaming your files" + ) + num_errors = 0 for file_count in range(len(all_inputs)): file_type = all_inputs[file_count].split("bold")[1] @@ -638,19 +589,20 @@ def run_rsHRF(): para["dt"] = para["TR"] / para["T"] para["lag"] = np.arange( - np.fix(para["min_onset_search"] / para["dt"]), - np.fix(para["max_onset_search"] / para["dt"]) + 1, + np.trunc(para["min_onset_search"] / para["dt"]), + np.trunc(para["max_onset_search"] / para["dt"]) + 1, dtype="int", ) num_errors += 1 try: fourD_rsHRF.demo_rsHRF( all_inputs[file_count], - all_masks[file_count], + all_masks[file_count] if args.mask == "BIDS" else args.mask, args.output_dir, para, args.n_jobs, - mode="bids", + file_type, + mode="bids" + (" w/ atlas" if args.mask != "BIDS" else ""), temporal_mask=temporal_mask, wiener=args.wiener, ) @@ -665,10 +617,10 @@ def run_rsHRF(): "Dimensions were inconsistent for all input-mask pairs; \n" "No inputs were processed!" ) + return 0 def main(): - warnings.filterwarnings("ignore") run_rsHRF() diff --git a/rsHRF/rsHRF_GUI/datatypes/misc/parameters.py b/rsHRF/rsHRF_GUI/datatypes/misc/parameters.py index d87b86c..d7a2b6d 100755 --- a/rsHRF/rsHRF_GUI/datatypes/misc/parameters.py +++ b/rsHRF/rsHRF_GUI/datatypes/misc/parameters.py @@ -2,6 +2,7 @@ import numpy as np from copy import deepcopy from ...misc.status import Status +from ....utils.default_parameters import default_parameters class Parameters: @@ -12,31 +13,11 @@ class Parameters: def __init__(self): # initialize default parameters - self.estimation = "canon2dd" - self.passband = [0.01, 0.08] - self.passband_deconvolve = [0.0, sys.float_info.max] - self.TR = 2.0 - self.localK = 1 - self.T = 3 - self.T0 = 1 - self.TD_DD = 2 - self.AR_lag = 1 - self.thr = 1 - self.order = 3 - self.volterra = 0 - self.len = 24 - self.temporal_mask = [] - self.min_onset_search = 4 - self.max_onset_search = 8 - self.dt = self.TR / self.T - self.lag = np.arange( - np.fix(self.min_onset_search / self.dt), - 
np.fix(self.max_onset_search / self.dt) + 1, - dtype="int", - ) + for key, value in default_parameters.items(): + setattr(self, key, value) # getters - def get_estimation(self): + def get_estimation(self) -> str: return self.estimation def get_passband(self): @@ -281,7 +262,7 @@ def set_Volterra(self, volterra): """ Sets Volterra if the estimation rule is canon2dd """ - if self.estimation == "canon": + if "canon" in self.estimation: try: volterra = int(volterra) except: @@ -373,8 +354,8 @@ def update_lag(self): Re-calculating lag """ self.lag = np.arange( - np.fix(self.min_onset_search / self.dt), - np.fix(self.max_onset_search / self.dt) + 1, + np.trunc(self.min_onset_search / self.dt), + np.trunc(self.max_onset_search / self.dt) + 1, dtype="int", ) diff --git a/rsHRF/rsHRF_GUI/gui_windows/inputWindow.py b/rsHRF/rsHRF_GUI/gui_windows/inputWindow.py index 26985e5..1746c3a 100755 --- a/rsHRF/rsHRF_GUI/gui_windows/inputWindow.py +++ b/rsHRF/rsHRF_GUI/gui_windows/inputWindow.py @@ -11,6 +11,7 @@ StringVar, Label, ) +from ...utils.default_parameters import available_estimations class InputWindow: @@ -146,14 +147,7 @@ def getOutputDir(): maskFileLabel = Label(window, text="") outputPathLabel = Label(window, text="") estimationDropDown = OptionMenu( - window, - self.estimationOption, - "canon2dd", - "sFIR", - "FIR", - "gamma", - "fourier", - "fourier w/ hanning", + window, self.estimationOption, *available_estimations ) # placing widgets inputFormat.grid(row=0, column=0, padx=(5, 5), pady=(5, 5)) diff --git a/rsHRF/rsHRF_GUI/gui_windows/main.py b/rsHRF/rsHRF_GUI/gui_windows/main.py index d1b85e6..6834d67 100755 --- a/rsHRF/rsHRF_GUI/gui_windows/main.py +++ b/rsHRF/rsHRF_GUI/gui_windows/main.py @@ -13,7 +13,7 @@ class Main: - def __init__(self): + def __init__(self, parameters=None): # main window root = Tk() root.title("rsHRF Toolbox") @@ -46,7 +46,9 @@ def __init__(self): input = () # receives the input from the input window output = {} # receives the output from the 
core # initializing parameter window - parameter_window.setParameters(core.get_parameters()) + parameter_window.setParameters( + parameters if parameters is not None else core.get_parameters() + ) parameter_window.display() """ Gets the input from the input toplevel. diff --git a/rsHRF/rsHRF_GUI/gui_windows/parameterWindow.py b/rsHRF/rsHRF_GUI/gui_windows/parameterWindow.py index a89f452..8381602 100755 --- a/rsHRF/rsHRF_GUI/gui_windows/parameterWindow.py +++ b/rsHRF/rsHRF_GUI/gui_windows/parameterWindow.py @@ -7,8 +7,8 @@ def __init__(self): self.window = Toplevel() self.window.title("Parameters") self.parameters = {} - self.labels = [] - self.entries = [] + self.labels: list[Label] = [] + self.entries: list[Entry] = [] # get screen width and height screen_width = self.window.winfo_screenwidth() screen_height = self.window.winfo_screenheight() diff --git a/rsHRF/rsHRF_GUI/run.py b/rsHRF/rsHRF_GUI/run.py index 04e788f..a2cfa06 100755 --- a/rsHRF/rsHRF_GUI/run.py +++ b/rsHRF/rsHRF_GUI/run.py @@ -1,5 +1,5 @@ from .gui_windows.main import Main -def run(): - Main() +def run(parameters=None): + Main(parameters) diff --git a/rsHRF/unit_tests/test_cli.py b/rsHRF/unit_tests/test_cli.py new file mode 100644 index 0000000..369f010 --- /dev/null +++ b/rsHRF/unit_tests/test_cli.py @@ -0,0 +1,797 @@ +from typing import Any + +import sys +import pytest +from unittest import mock +from .. 
import CLI +import numpy as np +import nibabel as nib +from ..utils.default_parameters import default_parameters +import pandas as pd +import json +from pathlib import Path + +SHAPE = (10, 10, 10, 10) +mockTR = 2 + + +def get_data(image_type, TR=mockTR): + data = np.zeros(SHAPE, dtype=np.int16) + + if image_type == "nifti": + data = nib.Nifti1Image(data, np.eye(4)) + hdr = data.header + hdr.set_zooms(TR * np.ones(len(SHAPE))) + data = nib.Nifti1Image(data, np.eye(4), hdr) + else: + data = nib.gifti.GiftiDataArray(data.astype(np.float32), datatype="float32") + return data + + +def fake_BIDS_dataset( + path: Path, + participants: list[str], + description: dict[str, str], + inner_files: dict[str, str], +) -> Path: + root = path / "dsFAKE_BIDS" + root.mkdir() + partic = pd.DataFrame( + { + "participant_id": participants, + "sex": [ + "F", + ] + * len(participants), + "age": [ + 10, + ] + * len(participants), + } + ) + partic.to_csv(root / "participants.tsv", sep="\t") + descri = { + "BIDSVersion": "1.0.0", + "License": " ", + "Name": "rest", + "ReferencesAndLinks": ["References", "Links"], + } + df = root / "dataset_description.json" + df.write_text(json.dumps(descri, indent=1)) + der = root / "derivatives" + der.mkdir() + rshrf = der / "rsHRF" + rshrf.mkdir() + fmrip = der / "fmriprep" + fmrip.mkdir() + descri.update(description) + df = fmrip / "dataset_description.json" + df.write_text(json.dumps(descri, indent=1)) + for subject in participants: + sf = fmrip / f"sub-{subject}" + sf.mkdir() + func = sf / "func" + func.mkdir() + for file in inner_files: + fil = func / file.format(subject) + fil.write_text(inner_files[file]) + return root + + +def test_GUI(monkeypatch): + monkeypatch.setattr(sys, "argv", ["rsHRF", "GUI", "--no-bids"]) + with mock.patch("rsHRF.rsHRF_GUI.run.run") as mock_call: + CLI.run_rsHRF() + mock_call.assert_called_once() + + +def test_text(monkeypatch, tmp_path): + d = tmp_path / "sub" + d.mkdir() + p = d / "hello.txt" + p.write_text("mock", 
encoding="utf-8") + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, "argv", ["rsHRF", p._str, d._str, "--no-bids", "--TR", "2"] + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + monkeypatch.setattr(sys, "argv", ["rsHRF", p._str, d._str, "--TR", "2"]) + with pytest.raises(SystemExit): + CLI.run_rsHRF() + + monkeypatch.setattr(sys, "argv", ["rsHRF", p._str, d._str, "--no-bids"]) + with pytest.raises(SystemExit): + CLI.run_rsHRF() + + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + with pytest.warns(Warning): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + "--no-bids", + "--TR", + "2", + "--participant-label", + "001", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + +def test_temporal_mask(monkeypatch, tmp_path): + d = tmp_path / "sub" + d.mkdir() + p = d / "hello.txt" + p.write_text("mock", encoding="utf-8") + m = d / "mask.txt" + m.write_text("01011100", encoding="utf-8") + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + "--no-bids", + "--TR", + "2", + "--temporal-mask", + m._str, + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + n = d / "mask2.txt" + n.write_bytes(np.arange(5, dtype=float).tobytes()) + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + "--no-bids", + "--TR", + "2", + "--temporal-mask", + n._str, + ], + ) + CLI.run_rsHRF() + + +def test_bad_paths(monkeypatch, tmp_path): + di = tmp_path / "inp" + di.mkdir() + p = di / "hello.txt" + p.write_text("mock", encoding="utf-8") + do = tmp_path / "outp" + do.mkdir() + + cli_inputs = [ + ["rsHRF", di._str, do._str], # assumes BIDS input but "participant" is missing + [ + "rsHRF", + p._str, + "--no-bids", + "--TR", + "2", + ], # assumes text but no output directory + [ + "rsHRF", + p._str, + str(tmp_path / "fake"), + "--no-bids", + "--TR", 
+ "2", + ], # assumes text but missing output directory + [ + "rsHRF", + str(di / "fake.txt"), + do._str, + "--no-bids", + "--TR", + "2", + ], # assumes text but missing input file + ] + for cli_input in cli_inputs: + monkeypatch.setattr(sys, "argv", cli_input) + with pytest.raises(SystemExit): + CLI.run_rsHRF() + + +def test_gnii(monkeypatch, tmp_path): + test_file_1 = "test.gii" + test_file_2 = "test.gii.gz" + test_file_3 = "test.nii" + test_file_4 = "test.nii.gz" + test_files = [test_file_1, test_file_2, test_file_3, test_file_4] + di = tmp_path / "inp" + di.mkdir() + with mock.patch("nibabel.load") as load_mock: + for test_file in test_files: + if "nii" in test_file: + load_mock.return_value = get_data("nifti") + elif "gii" in test_file: + load_mock.return_value = get_data("gifti") + p = di / test_file + p.write_text("mock", encoding="utf-8") + + monkeypatch.setattr( + sys, + "argv", + ["rsHRF", p._str, di._str, "--no-bids"] + + (["--TR", "2"] if "gii" in test_file else []), + ) + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + CLI.run_rsHRF() + para = { + k: default_parameters[k] + for k in [ + "estimation", + "passband", + "passband_deconvolve", + "T", + "T0", + "TD_DD", + "AR_lag", + "thr", + "order", + "len", + "min_onset_search", + "max_onset_search", + "localK", + "wiener", + ] + } + para["temporal_mask"] = None + para["TR"] = mockTR + para["dt"] = para["TR"] / para["T"] + para["lag"] = np.arange( + np.trunc(para["min_onset_search"] / para["dt"]), + np.trunc(para["max_onset_search"] / para["dt"]) + 1, + dtype="int", + ) + mock_call.assert_called_once() + + call_args = mock_call.call_args + print("call_args[0]", call_args[0]) + for good, mocked in zip( + [ + p._str, + None, + di._str, + para, + -1, + test_file[4:], + "input", + [], + False, + ], + call_args[0], + ): + print("good", good) + print("mocked", mocked) + if isinstance(good, dict): + good = { + k: v.tolist() if isinstance(v, np.ndarray) else v + for k, v in good.items() + } + 
mocked = { + k: v.tolist() if isinstance(v, np.ndarray) else v + for k, v in mocked.items() + } + assert good == mocked + else: + assert good == mocked + + +def test_gnii_bad_TR(monkeypatch, tmp_path): + di = tmp_path / "inp" + di.mkdir() + with mock.patch("nibabel.load") as load_mock: + load_mock.return_value = get_data("nifti", 0) + p = di / "test.nii" + p.write_text("mock", encoding="utf-8") + + monkeypatch.setattr(sys, "argv", ["rsHRF", p._str, di._str, "--no-bids"]) + with pytest.raises(SystemExit): + CLI.run_rsHRF() + + with mock.patch("nibabel.load") as load_mock: + load_mock.return_value = get_data("gifti") + p = di / "test.gii" + p.write_text("mock", encoding="utf-8") + + monkeypatch.setattr(sys, "argv", ["rsHRF", p._str, di._str, "--no-bids"]) + with pytest.raises(SystemExit): + CLI.run_rsHRF() + + +def test_bad_masks(monkeypatch, tmp_path): + d = tmp_path / "sub" + d.mkdir() + p = d / "hello.txt" + p.write_text("mock", encoding="utf-8") + m = d / "mask.bad" + m.write_text("bad", encoding="utf-8") + with pytest.warns(Warning): + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + ["rsHRF", p._str, d._str, "--no-bids", "--TR", "2", "-m", m._str], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + with mock.patch("nibabel.load") as load_mock: + load_mock.return_value = get_data("nifti") + p = d / "test.nii" + p.write_text("mock", encoding="utf-8") + + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, "argv", ["rsHRF", p._str, d._str, "--no-bids", "-m", "BIDS"] + ) + with pytest.warns(Warning): + CLI.run_rsHRF() + mock_call.assert_called_once() + + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + "--no-bids", + "-m", + str(d / "fake.gii"), + ], # format mismatch + ) + CLI.run_rsHRF() + + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + 
"--no-bids", + "-m", + str(d / "fake.nii"), + ], # missing file + ) + CLI.run_rsHRF() + + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + p._str, + d._str, + "--no-bids", + "-m", + m._str, + ], # wrong extension + ) + CLI.run_rsHRF() + + +def test_BIDS(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + with pytest.warns(Warning): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "--TR", + "2", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + +def test_BIDS_failedComp(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + with mock.patch( + "rsHRF.fourD_rsHRF.demo_rsHRF", side_effect=ValueError("mock error") + ) as mock_call: + with pytest.raises(RuntimeError): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + with mock.patch( + "rsHRF.fourD_rsHRF.demo_rsHRF", side_effect=KeyError("mock error") + ) as mock_call: + with pytest.raises(RuntimeError): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.main() + 
mock_call.assert_called_once() + + +def test_BIDS_TR(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest"} + ), + }, + ) + with mock.patch("nibabel.load") as load_mock: + load_mock.return_value = get_data("nifti") + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + +def test_BIDS_nonDerivative(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "original"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.run_rsHRF() + + +def test_BIDS_noDerivativeInfo(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.run_rsHRF() + + +def test_BIDS_noDatasetDescription(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], 
+ {}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + (ds / "derivatives" / "fmriprep" / "dataset_description.json").unlink() + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + ], + ) + CLI.run_rsHRF() + + +def test_BIDS_participantLabels(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + }, + ) + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "--participant-label", + "01", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "--participant-label", + "02", + ], + ) + CLI.run_rsHRF() + + +def test_BIDS_mask(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz": "fake", + }, + ) + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + 
sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "-m", + "BIDS", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "-m", + str( + ds + / "derivatives" + / "fmriprep" + / "sub-01" + / "func" + / "sub-01_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz" + ), + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + +def test_BIDS_mask_bad(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.gii.gz": "fake", + }, + ) + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "-m", + "BIDS", + ], + ) + CLI.run_rsHRF() + + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "-m", + str( + ds + / "derivatives" + / "fmriprep" + / "sub-01" + / "func" + / "sub-01_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz" + ), + ], + ) + CLI.run_rsHRF() + + with pytest.raises(SystemExit): + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "-m", + str( + ds + / "derivatives" + / "fmriprep" + / "sub-01" + / "func" + / 
"sub-01_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.gii.gz" + ), + ], + ) + CLI.run_rsHRF() + + +def test_BIDS_filters(monkeypatch, tmp_path): + ds = fake_BIDS_dataset( + tmp_path, + ["01"], + {"DataType": "derivative"}, + { + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz": "fake", + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold.json": json.dumps( + {"TaskName": "Rest", "RepetitionTime": 2} + ), + "sub-{}_task-rest_run-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz": "fake", + }, + ) + filter = ds / "filter.json" + filter.write_text(json.dumps({"bold": {}, "mask": {}})) + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "--bids-filter-file", + filter._str, + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() + + with mock.patch("rsHRF.fourD_rsHRF.demo_rsHRF") as mock_call: + monkeypatch.setattr( + sys, + "argv", + [ + "rsHRF", + str(ds / "derivatives" / "fmriprep"), + str(ds / "derivatives" / "rsHRF"), + "participant", + "--bids-filter-file", + filter._str, + "-m", + "BIDS", + ], + ) + CLI.run_rsHRF() + mock_call.assert_called_once() diff --git a/rsHRF/utils/default_parameters.py b/rsHRF/utils/default_parameters.py new file mode 100644 index 0000000..2d4377b --- /dev/null +++ b/rsHRF/utils/default_parameters.py @@ -0,0 +1,29 @@ +import numpy as np +import sys + +default_parameters = {} +default_parameters["estimation"] = "canon2dd" # why? +default_parameters["passband"] = [0.01, 0.08] # why? +default_parameters["passband_deconvolve"] = [0.0, sys.float_info.max] # why? +default_parameters["TR"] = -1 +default_parameters["localK"] = 1 # why? +default_parameters["T"] = 3 # why? +default_parameters["T0"] = 1 # why? +default_parameters["TD_DD"] = 2 # why? +default_parameters["AR_lag"] = 1 # why? 
+default_parameters["thr"] = 1  # TODO: document rationale for this default
+default_parameters["order"] = 3  # TODO: document rationale for this default
+default_parameters["volterra"] = 0  # TODO: document rationale for this default
+default_parameters["len"] = 24  # TODO: document rationale for this default
+default_parameters["temporal_mask"] = []  # TODO: document rationale for this default
+default_parameters["min_onset_search"] = 4  # TODO: document rationale for this default
+default_parameters["max_onset_search"] = 8  # TODO: document rationale for this default
+default_parameters["wiener"] = False
+default_parameters["dt"] = -1
+default_parameters["lag"] = np.arange(
+    0,
+    0,
+    dtype="int",
+)
+
+available_estimations = ["canon2dd", "sFIR", "FIR", "fourier", "hanning", "gamma"]