diff --git a/.gitignore b/.gitignore
index 701f9ad..b250319 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,7 @@ docs/source/generated
 docs/source/sg_execution_times.rst
 docs-mk/site
 docs-mk/docs/generated
+src/osipi/DRO/data
+src/osipi/DRO/__pycache__/
+src/osipi/DRO/ROI_saved/
+output
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c2429e4..0f7ecb4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,7 +3,7 @@ repos:
     rev: v0.4.8 # Use the latest version
     hooks:
       - id: ruff
-        args: [--fix] # Optional: to enable lint fixes
+        args: ["--fix"]
      - id: ruff-format
  - repo: https://github.com/pycqa/flake8
    rev: 7.0.0
@@ -12,7 +12,7 @@
        args:
          - --exclude=venv,.git,__pycache__,.eggs,.mypy_cache,.pytest_cache
          - --max-line-length=100
-          - --ignore=E203
+          - --ignore=E203, W503
          - --per-file-ignores=__init__.py:F401
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
diff --git a/poetry.lock b/poetry.lock
index 99c34be..d7e5951 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1754,6 +1754,20 @@ doc = ["ablog (>=0.11.8)", "colorama", "graphviz", "ipykernel", "ipyleaflet", "i
 i18n = ["Babel", "jinja2"]
 test = ["pytest", "pytest-cov", "pytest-regressions", "sphinx[test]"]
 
+[[package]]
+name = "pydicom"
+version = "2.4.4"
+description = "A pure Python package for reading and writing DICOM data"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "pydicom-2.4.4-py3-none-any.whl", hash = "sha256:f9f8e19b78525be57aa6384484298833e4d06ac1d6226c79459131ddb0bd7c42"},
+    {file = "pydicom-2.4.4.tar.gz", hash = "sha256:90b4801d851ce65be3df520e16bbfa3d6c767cf2a3a3b1c18f6780e6b670b87a"},
+]
+
+[package.extras]
+docs = ["matplotlib", "numpy", "numpydoc", "pillow", "sphinx", "sphinx-copybutton", "sphinx-gallery", "sphinx_rtd_theme", "sphinxcontrib-napoleon"]
+
 [[package]]
 name = "pyflakes"
 version = "3.2.0"
@@ -2599,4 +2613,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "6b6056507de0b7072e84eff4a02da7c1ac369939f4f234c861fa44df3b3c18df"
+content-hash = "0665b4c4d2ef51dc8d6b4144a4953cc11fe83d3c3ac3f8a0e748d9202ed0a4bc"
diff --git a/pyproject.toml b/pyproject.toml
index 7ce1c7b..5357a3e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -114,6 +114,7 @@ numpy = "^1.21.2"
 scipy = "^1.7.3"
 matplotlib = "^3.4.3"
 requests = "^2.32.3"
+pydicom = "^2.4.4"
 
 [tool.poetry.group.dev.dependencies]
 flake8 = "^7.0.0"
diff --git a/src/osipi/DRO/__init__.py b/src/osipi/DRO/__init__.py
new file mode 100644
index 0000000..59bfd88
--- /dev/null
+++ b/src/osipi/DRO/__init__.py
@@ -0,0 +1,18 @@
+from ._model import (extended_tofts_model,
+                     tofts_model,
+                     extended_tofts_model_1vox,
+                     forward_extended_tofts)
+
+from ._dicom_processing import (read_dicom_slices_as_4d_signal,
+                                signal_enhancement_extract)
+
+from ._roi_selection import (ic_from_roi, roi)
+
+from ._utils import (animate_mri, save_dicoms)
+from ._filters_and_noise import median_filter
+
+from ._conc_2_dro_signal import (Conc2Sig_Linear,
+                                 calcR1_R2,
+                                 STDmap,
+                                 createR10_withref,
+                                 addnoise)
diff --git a/src/osipi/DRO/_conc_2_dro_signal.py b/src/osipi/DRO/_conc_2_dro_signal.py
new file mode 100644
index 0000000..5a9cdd1
--- /dev/null
+++ b/src/osipi/DRO/_conc_2_dro_signal.py
@@ -0,0 +1,46 @@
+"""
+@author: eveshalom
+"""
+
+import numpy as np
+
+
+def createR10_withref(S0ref, S0, Tr, fa, T1ref, datashape):
+    R10_ref = 1 / T1ref
+    ref_frac = (1 - np.cos(fa) * np.exp(-Tr * R10_ref)) / (1 - np.exp(-Tr * R10_ref))
+    R10 = (-1 / Tr) * np.log((S0 - (ref_frac * S0ref)) / ((S0 * np.cos(fa)) - (ref_frac * S0ref)))
+    R10 = np.tile(R10[:, :, :, np.newaxis], (1, 1, 1, datashape[-1]))
+    return R10
+
+
+def calcR1_R2(R10, R20st, r1, r2st, Ctiss):
+    R1 = R10 + r1 * Ctiss
+    R2st = R20st + r2st * Ctiss
+    return R1, R2st
+
+
+def Conc2Sig_Linear(Tr, Te, fa, R1, R2st, S, scale, scalevisit1):
+    dro_S = ((1 - np.exp(-Tr * R1)) / (1 - (np.cos(fa) * np.exp(-Tr * R1)))) * (
+        np.sin(fa) * np.exp(-Te * R2st)
+    )
+
+    if scale == 1:
+        ScaleConst = np.percentile(S, 98) / np.percentile(dro_S, 98)
+    elif scale == 2:
+        ScaleConst = scalevisit1
+
+    dro_S = dro_S * ScaleConst
+    return dro_S, ScaleConst
+
+
+def STDmap(S, t0):
+    stdev = np.std(S[:, :, :, 0:t0], axis=3)
+    return stdev
+
+
+def addnoise(a, std, Sexact, Nt):
+    from numpy.random import normal as gaussian
+
+    std = np.tile(std[:, :, :, np.newaxis], (1, 1, 1, Nt))
+    Snoise = abs(Sexact + (a * std * gaussian(0, 1, Sexact.shape)))
+    return Snoise
diff --git a/src/osipi/DRO/_dicom_processing.py b/src/osipi/DRO/_dicom_processing.py
new file mode 100644
index 0000000..8ba2b10
--- /dev/null
+++ b/src/osipi/DRO/_dicom_processing.py
@@ -0,0 +1,55 @@
+import os
+
+import numpy as np
+import pydicom
+
+from osipi.DRO._filters_and_noise import median_filter
+
+
+def read_dicom_slices_as_4d_signal(folder_path):
+    """
+    Read a DICOM series from a folder path.
+    Returns the signal data as a 4D numpy array (x, y, z, t).
+    """
+    slices = {}
+    for root, _, files in os.walk(folder_path):
+        for file in files:
+            if file.endswith(".dcm"):
+                dicom_file = os.path.join(root, file)
+                slice = pydicom.read_file(dicom_file)
+                if slice.SliceLocation not in slices:
+                    slices[slice.SliceLocation] = []
+                slices[slice.SliceLocation].append((slice.AcquisitionTime, slice))
+
+    # Sort each list of slices by the first element (AcquisitionTime)
+    for slice_location in slices:
+        slices[slice_location].sort(key=lambda x: x[0])
+
+    spatial_shape = slices[slice_location][0][1].pixel_array.shape
+
+    data_shape = (spatial_shape[0], spatial_shape[1], len(slices), len(slices[slice_location]))
+
+    signal = np.zeros(data_shape)
+
+    for z, slice_location in enumerate(sorted(slices.keys())):  # Sort by slice location
+        slices[z] = slices.pop(slice_location)
+        for t, (_, slice) in enumerate(
+            sorted(slices[z], key=lambda x: x[0])
+        ):  # Sort by acquisition time
+            signal[:, :, z, t] = slice.pixel_array
+            slices[z][t] = slice
+
+    return signal, slices, slices[0][0]
+
+
+def signal_enhancement_extract(S, datashape, baselinepoints):
+    # Take baseline average
+    S0 = np.average(S[:, :, :, 0:baselinepoints], axis=3)  # Take baseline signal
+    E = np.zeros_like(S)
+
+    # Calculate signal enhancement
+    for i in range(0, datashape[-1]):
+        E[:, :, :, i] = S[:, :, :, i] - S0
+        E[:, :, :, i] = median_filter(E[:, :, :, i])  # Median filter size (3,3)
+
+    return E, S0, S
diff --git a/src/osipi/DRO/_filters_and_noise.py b/src/osipi/DRO/_filters_and_noise.py
new file mode 100644
index 0000000..1111cc0
--- /dev/null
+++ b/src/osipi/DRO/_filters_and_noise.py
@@ -0,0 +1,28 @@
+import numpy as np
+from scipy import ndimage
+
+
+def median_filter(param_map):
+    """
+    Apply a median filter to a signal.
+    """
+    for i in range(param_map.shape[-1]):
+        param_map[:, :, i] = ndimage.median_filter(param_map[:, :, i], size=(3, 3))
+    return param_map
+
+
+def add_gaussian_noise(signal, mean=0, std=1):
+    """
+    Add Gaussian noise to the 4D MR data.
+    """
+    return signal + np.random.normal(mean, std, signal.shape)
+
+
+def add_rician_noise(signal, mean=0, std=1):
+    """
+    Add Rician noise to the 4D MR data.
+    """
+    noise_real = np.random.normal(0, std, signal.shape)
+    noise_imag = np.random.normal(0, std, signal.shape)
+    # Rician noise: magnitude of the signal with Gaussian noise on the real and imaginary channels
+    noisy_signal = np.sqrt((signal + noise_real) ** 2 + noise_imag**2)
+    return noisy_signal
diff --git a/src/osipi/DRO/_model.py b/src/osipi/DRO/_model.py
new file mode 100644
index 0000000..d6781dc
--- /dev/null
+++ b/src/osipi/DRO/_model.py
@@ -0,0 +1,103 @@
+import multiprocessing as mp
+
+import numpy as np
+from scipy.optimize import curve_fit
+
+import osipi
+
+
+def fit_single_voxel_extended_tofts(ct, ca, time):
+    def fit_func_ET(t, kt, ve, vp):
+        return osipi.extended_tofts(t, ca, kt, ve, vp)
+
+    ini = [0, 0, 0]
+    popt, pcov = curve_fit(fit_func_ET, time, ct, p0=ini)
+    return popt
+
+
+def fit_single_voxel_tofts(ct, ca, time):
+    def fit_func_T(t, kt, ve):
+        return osipi.tofts(t, ca, kt, ve)
+
+    ini = [0, 0]
+    popt, pcov = curve_fit(fit_func_T, time, ct, p0=ini)
+    return popt
+
+
+def process_voxel(j, i, k, c_tiss, ca, t, type="ET"):
+    ct = c_tiss[j, i, k, :]
+    if type == "ET":
+        popt = fit_single_voxel_extended_tofts(ct, ca, t)
+    elif type == "T":
+        popt = fit_single_voxel_tofts(ct, ca, t)
+    return j, i, k, popt
+
+
+def extended_tofts_model(ca, c_tiss, t):
+    ktrans = np.zeros(c_tiss.shape[:-1])
+    ve = np.zeros(c_tiss.shape[:-1])
+    vp = np.zeros(c_tiss.shape[:-1])
+
+    tasks = [
+        (j, i, k, c_tiss, ca, t)
+        for k in range(c_tiss.shape[2])
+        for j in range(c_tiss.shape[0])
+        for i in range(c_tiss.shape[1])
+    ]
+
+    with mp.Pool(processes=mp.cpu_count()) as pool:
+        results = pool.starmap(process_voxel, tasks)
+
+    for j, i, k, popt in results:
+        ktrans[j, i, k], ve[j, i, k], vp[j, i, k] = popt
+
+    return ktrans, ve, vp
+
+
+def extended_tofts_model_1vox(ca, c_tiss, t):
+    """
+    Extended Tofts model for DCE-MRI DRO
+    ca -- arterial input function
+    c_tiss -- 1D array of tissue concentration data (time)
+    t -- time points of the samples
+    """
+
+    ct = c_tiss[:]
+    popt = fit_single_voxel_extended_tofts(ct, ca, t)
+
+    return popt
+
+
+def forward_extended_tofts(K1, Ve, Vp, Ca, time):
+    x, y, z = K1.shape
+    t = Ca.shape[0]
+    c_tiss = np.zeros((y, x, z, t))
+
+    for k in range(0, K1.shape[2]):
+        for j in range(0, K1.shape[0]):
+            for i in range(0, K1.shape[1]):
+                c_tiss[i, j, k, :] = osipi.extended_tofts(
+                    time, Ca, K1[i, j, k], Ve[i, j, k], Vp[i, j, k]
+                )
+
+    return c_tiss
+
+
+def tofts_model(ca, c_tiss, t):
+    ktrans = np.zeros(c_tiss.shape[:-1])
+    ve = np.zeros(c_tiss.shape[:-1])
+
+    tasks = [
+        (j, i, k, c_tiss, ca, t, "T")
+        for k in range(c_tiss.shape[2])
+        for j in range(c_tiss.shape[0])
+        for i in range(c_tiss.shape[1])
+    ]
+
+    with mp.Pool(processes=mp.cpu_count()) as pool:
+        results = pool.starmap(process_voxel, tasks)
+
+    for j, i, k, popt in results:
+        ktrans[j, i, k], ve[j, i, k] = popt
+
+    return ktrans, ve
diff --git a/src/osipi/DRO/_roi_selection.py b/src/osipi/DRO/_roi_selection.py
new file mode 100644
index 0000000..32018fd
--- /dev/null
+++ b/src/osipi/DRO/_roi_selection.py
@@ -0,0 +1,60 @@
+import numpy as np
+from matplotlib import path
+from matplotlib import pyplot as plt
+from matplotlib.widgets import LassoSelector
+
+
+def roi(signal, slice_num, data_shape, save=False):
+    """
+    Select a region of interest (ROI) in a slice of a 4D signal.
+    """
+
+    fig = plt.figure()
+    ax1 = fig.add_subplot(121)
+    ax1.set_title("Slice:")
+    ax1.imshow(signal[:, :, slice_num], cmap="gray", interpolation="nearest")
+
+    # Empty array to be filled with lasso selector
+    array = np.zeros_like(signal[:, :, slice_num])
+    ax2 = fig.add_subplot(122)
+    ax2.set_title("numpy array:")
+    mask = ax2.imshow(array, vmax=1, interpolation="nearest")
+
+    # Pixel coordinates
+    pix = np.arange(signal.shape[1])
+    xv, yv = np.meshgrid(pix, pix)
+    pix = np.vstack((xv.flatten(), yv.flatten())).T
+
+    def updateArray(array, indices):
+        lin = np.arange(array.size)
+        newArray = array.flatten()
+        newArray[lin[indices]] = 1
+        return newArray.reshape(array.shape)
+
+    def onselect(verts):
+        array = mask._A._data
+        p = path.Path(verts)
+        ind = p.contains_points(pix, radius=1)
+        array = updateArray(array, ind)
+        mask.set_data(array)
+        fig.canvas.draw_idle()
+
+    lasso = LassoSelector(ax1, onselect, button=1)
+    plt.show()
+    mask2d = mask._A._data.copy()
+    roivox = np.sum(mask2d)
+    timemask = np.tile(mask2d[:, :, np.newaxis], (1, 1, signal.shape[-1]))
+    mask4d = np.zeros(data_shape)
+    mask4d[:, :, slice_num, :] = timemask
+    if save:
+        np.save("ROI_saved/aif_mask.npy", mask4d)
+        np.save("ROI_saved/roi_voxels.npy", roivox)
+    return mask4d, roivox, lasso
+
+
+def ic_from_roi(E, mask, roi_vox, numaxis):
+    E_roi = (
+        np.sum(mask * E, axis=tuple(range(0, numaxis)))
+    ) / roi_vox  # calculates average roi signal enhancement
+
+    return E_roi
diff --git a/src/osipi/DRO/_utils.py b/src/osipi/DRO/_utils.py
new file mode 100644
index 0000000..5b88e4e
--- /dev/null
+++ b/src/osipi/DRO/_utils.py
@@ -0,0 +1,83 @@
+import os
+
+import numpy as np
+import pydicom
+from matplotlib import pyplot as plt
+from matplotlib.animation import FuncAnimation
+
+
+def animate_mri(slices, mode="time", slice_index=0, time_index=0):
+    fig, ax = plt.subplots()
+    if mode == "time":
+        frames = slices.shape[-1]
+
+        def init():
+            ax.imshow(slices[:, :, slice_index, 0], cmap="gray")
+            ax.set_title(f"Slice: {slice_index}, Time: 0")
+
+        def animate(t):
+            ax.clear()
+            ax.imshow(slices[:, :, slice_index, t], cmap="gray")
+            ax.set_title(f"Slice: {slice_index}, Time: {t}")
+
+    elif mode == "slice":
+        frames = slices.shape[2]
+
+        def init():
+            ax.imshow(slices[:, :, 0, time_index], cmap="gray")
+            ax.set_title(f"Slice: 0, Time: {time_index}")
+
+        def animate(z):
+            ax.clear()
+            ax.imshow(slices[:, :, z, time_index], cmap="gray")
+            ax.set_title(f"Slice: {z}, Time: {time_index}")
+
+    anim = FuncAnimation(
+        fig=fig, func=animate, frames=frames, init_func=init, interval=100, blit=False
+    )
+    plt.show()
+    return anim
+
+
+def save_dicoms(outdir, original_dicom, signal, patient_id_num, study_instance_uid):
+    new_dicoms = original_dicom.copy()
+    patient_id_num = "RIDER Neuro MRI-{}".format(patient_id_num)
+
+    uidprefix = "1.3.6.1.4.1.9328.50.16."
+
+    SeriesInstanceUID = pydicom.uid.generate_uid(prefix=uidprefix)
+    StorageMediaFileSetUID = pydicom.uid.generate_uid(prefix=uidprefix)
+    FrameOfReferenceUID = pydicom.uid.generate_uid(prefix=uidprefix)
+    SeriesID = SeriesInstanceUID[-5:]
+    Seriesdir = "{}.000000-perfusion-{}".format(original_dicom[0][0].SeriesNumber, SeriesID)
+    StudyDate = "19040323"
+    DateID = study_instance_uid[-5:]
+    Datedir = "23-03-1904-BRAINRESEARCH-{}".format(DateID)
+
+    # Create the full output directory tree (exist_ok avoids failing when outdir already exists)
+    os.makedirs("{}/{}/{}/{}".format(outdir, patient_id_num, Datedir, Seriesdir), exist_ok=True)
+
+    z = 0
+    for entries in new_dicoms:
+        t = 0
+        for snap in new_dicoms[z]:
+            SOPInstanceUID = pydicom.uid.generate_uid(prefix=uidprefix)
+            snap.PatientID = patient_id_num
+            snap.SOPInstanceUID = SOPInstanceUID
+            snap.StudyInstanceUID = study_instance_uid
+            snap.SeriesInstanceUID = SeriesInstanceUID
+            snap.StorageMediaFileSetUID = StorageMediaFileSetUID
+            snap.FrameOfReferenceUID = FrameOfReferenceUID
+            snap.StudyDate = StudyDate
+            snap.ContentDate = StudyDate
+            Sout = abs(signal[:, :, z, t])
+            Sout = Sout.astype(np.uint16)
+            snap.PixelData = Sout.tobytes()
+            fname = str(t + 1).zfill(2) + "-" + str(z + 1).zfill(4)
+            snap.save_as(
+                "{}/{}/{}/{}/{}.dcm".format(outdir, patient_id_num, Datedir, Seriesdir, fname),
+                write_like_original=False,
+            )
+            t += 1
+        z += 1
+    return
diff --git a/src/osipi/DRO/main.py b/src/osipi/DRO/main.py
new file mode 100644
index 0000000..3d84ee0
--- /dev/null
+++ b/src/osipi/DRO/main.py
@@ -0,0 +1,184 @@
+import numpy as np
+import pydicom
+
+import osipi.DRO as dro
+
+if __name__ == "__main__":
+    uidprefix = "1.3.6.1.4.1.9328.50.16."
+    study_instance_uid = pydicom.uid.generate_uid(prefix=uidprefix)
+    dro_IDnum = "9215224289"
+
+    signal, slices, dicom_ref = dro.read_dicom_slices_as_4d_signal(
+        "data/subject2/12.000000-perfusion-17557"
+    )
+    # shape of the mri data
+    data_shape = signal.shape
+
+    # extract the signal enhancement
+    # E is the signal enhancement, S0 is the baseline signal, signal is the original signal
+    E, S0, signal = dro.signal_enhancement_extract(signal, data_shape, 5)
+
+    dt = 4.8 / 60  # mins from the RIDER website DCE description
+    t = np.linspace(0, dt * signal.shape[-1], signal.shape[-1])  # time series points
+
+    # load the ROI masks
+    # TODO: create ROI masks from the GUI rather than only loading saved ones
+
+    aif_dir = "ROI_saved/"
+
+    aifmask = np.load("{}aifmask.npy".format(aif_dir))
+    sagmask = np.load("{}sagmask.npy".format(aif_dir))
+    roivoxa = np.load("{}aifmaskvox.npy".format(aif_dir))
+    roivoxv = np.load("{}sagmaskvox.npy".format(aif_dir))
+
+    z = 5
+
+    Ea = dro.ic_from_roi(E, aifmask, roivoxa, aifmask.ndim - 1)
+    Ev = dro.ic_from_roi(E, sagmask, roivoxv, sagmask.ndim - 1)
+    S0ref = dro.ic_from_roi(S0[:, :, z], sagmask[:, :, z, 0], roivoxv, sagmask.ndim - 2)
+
+    # choose a voxel with maximum enhancement to display the fitting process
+    max_index = np.unravel_index(np.argmax(E * aifmask, axis=None), E.shape)
+
+    # hematocrit value for partial volume correction
+    hct = 0.45
+
+    # fitting using extended tofts model for first 8 slices
+    # choose only first 8 slices due to memory constraints
+    kt, ve, vp = dro.extended_tofts_model(Ea, E[:, :, :8, :], t)
+
+    # Apply a partial volume correction using the sagittal sinus signal.
+    pvc_K1, pvc_Ve, pvc_Vp = dro.extended_tofts_model_1vox(Ea, Ev, t)
+    pvc = abs(
+        (1 - hct) / pvc_Vp
+    )  # sagittal sinus Vp should equal 1 - hct = 0.55, so correct the fit if it does not
+
+    # Apply correction factor to fitted parameters
+    cor_Kt = kt * pvc
+    cor_Ve = ve * pvc
+    cor_Vp = vp * pvc
+
+    # Apply Median Filter to parameters all with footprint (3,3)
+    mf_Kt = dro.median_filter(cor_Kt)
+    mf_Ve = dro.median_filter(cor_Ve)
+    mf_Vp = dro.median_filter(cor_Vp)
+
+    # Apply thresholds to remove negative values
+    # limit volume fraction to max value of 1
+    mf_Vp[mf_Vp <= 0] = 0
+    mf_Vp[mf_Vp > 1] = 1.0
+    mf_Ve[mf_Ve > 1] = 1.0
+    mf_Kt[mf_Kt <= 0] = 0
+    mf_Ve[mf_Ve <= 0] = 0
+
+    # the aif signal is corrected with hematocrit, scaled to 6mM as a realistic value
+
+    aif_cor = np.max((Ea / (1 - hct)) * pvc) / 6
+
+    # calculate enhancement to concentration correction
+    # Cp = (
+    #     (Ea / (1 - hct)) * pvc
+    # ) / aif_cor  # calculate Cp using signal enhancement aif and concentration conversion factor
+
+    # calculate the concentration in the tissue using the fitted parameters
+    c_tissue = dro.forward_extended_tofts(mf_Kt, mf_Ve, mf_Vp, (Ea / aif_cor), t)
+
+    # Choosing a specific voxel to plot Concentration curves and the fitting process
+    kt_vox1 = mf_Kt[96, 118, 5]
+    ve_vox1 = mf_Ve[96, 118, 5]
+    vp_vox1 = mf_Vp[96, 118, 5]
+
+    # calculate the relaxation rates R1 and R2* for the tissue
+
+    r1 = 3.9  # longitudinal relaxivity Gd-DTPA (Hz/mM) source: (Pintaske,2006)
+    r2st = 10  # transverse relaxivity Gd-DTPA (Hz/mM)
+    # roughly estimated using (Pintaske,2006) and (Siemonsen, 2008)
+
+    fa = np.deg2rad(float(dicom_ref.FlipAngle))  # flip angle (rads)
+
+    Te = 1e-3 * float(dicom_ref.EchoTime)  # Echo Time (s)
+    Tr = 1e-3 * float(dicom_ref.RepetitionTime)  # Repetition Time (s)
+    T1b = 1.48  # T1 for blood measured in sagittal sinus @ 1.5T (s) (Zhang et al., 2013)
+
+    R10_value = 1.18  # precontrast T1 relaxation rate (Hz) brain avg (radiopedia)
+    R20st_value = (
+        17.24  # precontrast T2* relaxation rate (Hz) brain avg estimated from (Siemonsen, 2008)
+    )
+    R10 = dro.createR10_withref(
+        S0ref, S0[:, :, :8], Tr, fa, T1b, data_shape
+    )  # precontrast R1 map (normalised to sagittal sinus)
+
+    R1, R2st = dro.calcR1_R2(R10, R20st_value, r1, r2st, c_tissue)  # returns R1 and R2* maps
+
+    # calculate the signal using the concentration and relaxation rates
+    # spoiled gradient echo sequence
+    dro_s, M = dro.Conc2Sig_Linear(Tr, Te, fa, R1, R2st, signal[:, :, :8, :], 1, 0)
+
+    stdS = dro.STDmap(signal[:, :, :8, :], t0=5)  # calculate standard deviation for original data
+    # add noise to the signal
+    dro_s_noise = dro.addnoise(1, stdS, dro_s, Nt=data_shape[-1])
+
+    trans_Kt = mf_Kt.copy()
+    trans_Ve = mf_Ve.copy()
+    trans_Vp = mf_Vp.copy()
+
+    vmax_Kt, vmax_ke, vmax_Vp = 1, 1, 0.2
+    vmin_Kt, vmin_ke, vmin_Vp = 0.2, 0.2, 0.01
+    lb_Kt, lb_ke, lb_Vp = 0.54, 0.52, 0.49
+    ub_Kt, ub_ke, ub_Vp = 1.52, 1.5, 1.43
+    lim_Kt, lim_ke, lim_Vp = vmax_Kt + 0.5, vmax_ke + 0.1, vmax_Vp + 0.5
+    ub_lim = 1.01
+
+    trans_Kt[trans_Kt <= vmin_Kt] = trans_Kt[trans_Kt <= vmin_Kt] * lb_Kt
+    trans_Kt[trans_Kt >= lim_Kt] = trans_Kt[trans_Kt >= lim_Kt] * ub_lim
+    trans_Kt[(trans_Kt >= vmax_Kt) & (trans_Kt < lim_Kt)] = (
+        trans_Kt[(trans_Kt >= vmax_Kt) & (trans_Kt < lim_Kt)] * ub_Kt
+    )
+    trans_Kt[(trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt)] = trans_Kt[
+        (trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt)
+    ] * (
+        lb_Kt
+        + (
+            (
+                (trans_Kt[(trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt)] - vmin_Kt)
+                / (vmax_Kt - vmin_Kt)
+            )
+            * (ub_Kt - lb_Kt)
+        )
+    )
+
+    trans_Vp[trans_Vp > 1] = 1
+
+    trans_Vp[trans_Vp <= vmin_Vp] = trans_Vp[trans_Vp <= vmin_Vp] * lb_Vp
+    trans_Vp[(trans_Vp >= lim_Vp)] = trans_Vp[trans_Vp >= lim_Vp] * ub_lim
+    trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] = (
+        trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] * ub_Vp
+    )
+    trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] = trans_Vp[
+        (trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)
+    ] * (
+        lb_Vp
+        + (
+            (
+                (trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] - vmin_Vp)
+                / (vmax_Vp - vmin_Vp)
+            )
+            * (ub_Vp - lb_Vp)
+        )
+    )
+
+    c_tissue_tr = dro.forward_extended_tofts(trans_Kt, trans_Ve, trans_Vp, (Ea / aif_cor), t)
+
+    R1_tr, R2st_tr = dro.calcR1_R2(R10, R20st_value, r1, r2st, c_tissue_tr)
+    dro_s_tr, M_tr = dro.Conc2Sig_Linear(Tr, Te, fa, R1_tr, R2st_tr, signal[:, :, :8, :], 1, M)
+
+    dro_s_noise_tr = dro.addnoise(1, stdS, dro_s_tr, Nt=data_shape[-1])
+
+    dro.animate_mri(signal, mode="time", slice_index=7, time_index=5)
+    dro.animate_mri(dro_s_noise_tr, mode="time", slice_index=7, time_index=5)
+    dro.animate_mri(dro_s_noise, mode="time", slice_index=7, time_index=5)
+
+    save = True
+
+    if save:
+        dro.save_dicoms("output", slices, dro_s_noise, dro_IDnum, study_instance_uid)
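
Note on the signal model: createR10_withref, calcR1_R2, and Conc2Sig_Linear implement the steady-state spoiled gradient-echo (SPGR) equation with a linear relaxivity model. The short standalone sketch below restates that equation; the TR, TE, flip angle, and precontrast rates are placeholders, not values taken from this change, and it only illustrates that at a short TE the T1 shortening dominates so the signal rises with concentration.

# SPGR signal model used by Conc2Sig_Linear (illustrative parameter values only):
#   R1  = R10  + r1  * C
#   R2* = R20* + r2* * C
#   S ~ sin(fa) * (1 - exp(-TR * R1)) / (1 - cos(fa) * exp(-TR * R1)) * exp(-TE * R2*)
import numpy as np

TR, TE, fa = 3.8e-3, 1.7e-3, np.deg2rad(25.0)  # placeholder acquisition settings (s, s, rad)
r1, r2st = 3.9, 10.0  # Gd-DTPA relaxivities, as in main.py (Hz/mM)
R10, R20st = 1 / 1.4, 17.24  # placeholder precontrast relaxation rates (Hz)

C = np.array([0.0, 0.5, 1.0, 2.0])  # tissue concentration (mM)
R1 = R10 + r1 * C
R2st_total = R20st + r2st * C
S = np.sin(fa) * (1 - np.exp(-TR * R1)) / (1 - np.cos(fa) * np.exp(-TR * R1)) * np.exp(-TE * R2st_total)
print(S)  # increases with C: T1 shortening outweighs the extra T2* decay at this short TE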
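
For reviewers who want to exercise the new osipi.DRO API without the RIDER DICOM data, here is a minimal synthetic sketch of the forward half of the pipeline. The grid size, toy AIF, and acquisition parameters below are invented for illustration only, and it assumes the osipi package itself is importable, since forward_extended_tofts calls osipi.extended_tofts internally.

# Build a tiny synthetic DRO: parameter maps -> concentration -> SPGR signal -> noisy signal
import numpy as np

import osipi.DRO as dro

t = np.linspace(0, 5, 60)  # acquisition times (min), following main.py's convention
Ca = 6 * np.exp(-t / 1.5) * (1 - np.exp(-5 * t))  # toy AIF (mM), not a measured curve

Kt = np.full((4, 4, 2), 0.2)   # Ktrans map (1/min)
Ve = np.full((4, 4, 2), 0.3)   # extravascular extracellular volume fraction
Vp = np.full((4, 4, 2), 0.05)  # plasma volume fraction

C = dro.forward_extended_tofts(Kt, Ve, Vp, Ca, t)  # (x, y, z, t) tissue concentration

Tr, Te, fa = 3.8e-3, 1.7e-3, np.deg2rad(25.0)  # placeholder acquisition settings (s, s, rad)
R10 = np.full(C.shape, 1 / 1.4)  # placeholder precontrast R1 map (Hz)
R1, R2st = dro.calcR1_R2(R10, 17.24, 3.9, 10.0, C)

S_ref = 1000 * np.random.rand(*C.shape)  # stand-in for a measured signal; only sets the scale
S, scale = dro.Conc2Sig_Linear(Tr, Te, fa, R1, R2st, S_ref, 1, 0)
S_noisy = dro.addnoise(1, dro.STDmap(S_ref, t0=5), S, Nt=len(t))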