diff --git a/MANIFEST.in b/MANIFEST.in index 2074477..a6eb818 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,12 @@ -include emll/test_models/*.json -include emll/test_models/BIOMD0000000064.xml -include emll/test_models/jol2012_vstar.p +include *.txt +include *.yml +include tox.ini +recursive-include notebooks *.csv +recursive-include notebooks *.ipynb +recursive-include notebooks *.pgz +recursive-include notebooks *.py +recursive-include notebooks *.yaml +recursive-include src *.json +recursive-include src *.p +recursive-include src *.xml +recursive-include tests *.py \ No newline at end of file diff --git a/notebooks/contador.ipynb b/notebooks/contador.ipynb index ba7b0ea..d8a23dd 100644 --- a/notebooks/contador.ipynb +++ b/notebooks/contador.ipynb @@ -27,7 +27,8 @@ ], "source": [ "import os\n", - "os.environ['MKL_THREADING_LAYER'] = 'GNU'\n", + "\n", + "os.environ[\"MKL_THREADING_LAYER\"] = \"GNU\"\n", "\n", "\n", "import numpy as np\n", @@ -36,18 +37,21 @@ "\n", "np.random.seed(0)\n", "\n", + "import matplotlib.pyplot as plt\n", "import pymc as pm\n", - "\n", "import pytensor\n", "import pytensor.tensor as T\n", + "import seaborn as sns\n", "from pytensor import sparse\n", - "\n", "from tqdm import tqdm\n", "\n", - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "sns.set(context='notebook', style='ticks', color_codes=True, font_scale=1.2,\n", - " rc={'legend.frameon': False})\n", + "sns.set(\n", + " context=\"notebook\",\n", + " style=\"ticks\",\n", + " color_codes=True,\n", + " font_scale=1.2,\n", + " rc={\"legend.frameon\": False},\n", + ")\n", "\n", "%matplotlib inline" ] @@ -66,18 +70,19 @@ "outputs": [], "source": [ "import cobra\n", + "\n", "import emll\n", "from emll.test_models import models\n", "from emll.util import create_elasticity_matrix\n", "\n", - "model, N, v_star = models['contador']()\n", + "model, N, v_star = models[\"contador\"]()\n", "\n", "with model:\n", " model.reactions.EX_glc.lower_bound = -1.243\n", - " model.reactions.EX_lys.lower_bound = .30 * 1.243\n", - " model.reactions.zwf.bounds = (.778, .778)\n", + " model.reactions.EX_lys.lower_bound = 0.30 * 1.243\n", + " model.reactions.zwf.bounds = (0.778, 0.778)\n", "\n", - "# model.summary()\n", + " # model.summary()\n", "\n", " fluxes = model.optimize().fluxes\n", "\n", @@ -93,17 +98,18 @@ "m_labels = [m.id for m in model.metabolites]\n", "r_labels = [r.id for r in model.reactions]\n", "\n", - "ex_labels = np.array([['$\\epsilon_{' + '{0},{1}'.format(rlabel, mlabel) + '}$'\n", - " for mlabel in m_labels] for rlabel in r_labels]).flatten()\n", + "ex_labels = np.array(\n", + " [\n", + " [\"$\\epsilon_{\" + \"{0},{1}\".format(rlabel, mlabel) + \"}$\" for mlabel in m_labels]\n", + " for rlabel in r_labels\n", + " ]\n", + ").flatten()\n", "\n", "r_compartments = [\n", - " list(r.compartments)[0] if len(r.compartments) == 1 else 't'\n", - " for r in model.reactions\n", + " list(r.compartments)[0] if len(r.compartments) == 1 else \"t\" for r in model.reactions\n", "]\n", "\n", - "m_compartments = [\n", - " m.compartment for m in model.metabolites\n", - "]\n", + "m_compartments = [m.compartment for m in model.metabolites]\n", "\n", "# Add inhibitors from Contador\n", "# NADH inhibition of CS\n", @@ -113,7 +119,7 @@ "# Ex[model.reactions.index('pfk'),\n", "# model.metabolites.index('PEP')] = -1\n", "\n", - "Ex[22] = 0. 
# Glucose import" + "Ex[22] = 0.0 # Glucose import" ] }, { @@ -131,29 +137,29 @@ "source": [ "# flipped reaction order\n", "ox_order = [\n", - " ['dapD'],\n", - " ['dapE'],\n", - " ['dapD', 'dapE'],\n", - " ['dapD', 'dapE', 'dapB'],\n", - " ['dapD', 'dapE', 'dapB', 'lysC'],\n", - " ['dapD', 'dapE', 'dapB', 'lysC', 'dapA']\n", + " [\"dapD\"],\n", + " [\"dapE\"],\n", + " [\"dapD\", \"dapE\"],\n", + " [\"dapD\", \"dapE\", \"dapB\"],\n", + " [\"dapD\", \"dapE\", \"dapB\", \"lysC\"],\n", + " [\"dapD\", \"dapE\", \"dapB\", \"lysC\", \"dapA\"],\n", "]\n", "\n", "strains = [\n", - " 'dapA, lysC, dapB, dapE',\n", - " 'dapA, lysC, dapB, dapD',\n", - " 'dapA, lysC, dapB',\n", - " 'dapA, lysC',\n", - " 'dapA',\n", - " 'Wild Type'\n", + " \"dapA, lysC, dapB, dapE\",\n", + " \"dapA, lysC, dapB, dapD\",\n", + " \"dapA, lysC, dapB\",\n", + " \"dapA, lysC\",\n", + " \"dapA\",\n", + " \"Wild Type\",\n", "]\n", "\n", "n_exp = len(strains)\n", "nm, nr = N.shape\n", - " \n", + "\n", "# Assumed 20% improvement from each doubling\n", "ox_results = np.array([27.3, 27.2, 26.5, 22.5, 8.9, 0.2]) / 30.0\n", - "#ox_results = np.array([8.9, 0.2]) / 30.0" + "# ox_results = np.array([8.9, 0.2]) / 30.0" ] }, { @@ -207,7 +213,7 @@ "metadata": {}, "outputs": [], "source": [ - "ll = emll.LinLogLeastNorm(N, Ex, Ey, v_star, driver='gelsy')\n", + "ll = emll.LinLogLeastNorm(N, Ex, Ey, v_star, driver=\"gelsy\")\n", "from emll.util import initialize_elasticity" ] }, @@ -217,13 +223,7 @@ "metadata": {}, "outputs": [], "source": [ - "rxn_index = {\n", - " 'dapD': 0,\n", - " 'dapE': 1,\n", - " 'dapB': 2,\n", - " 'lysC': 3,\n", - " 'dapA': 4\n", - "}" + "rxn_index = {\"dapD\": 0, \"dapE\": 1, \"dapB\": 2, \"lysC\": 3, \"dapA\": 4}" ] }, { @@ -244,31 +244,36 @@ "source": [ "np.random.seed(1)\n", "\n", - "lys_index = model.reactions.index('EX_lys')\n", + "lys_index = model.reactions.index(\"EX_lys\")\n", "\n", "with pm.Model() as pymc_model:\n", - " \n", - " Ex_t = pm.Deterministic('Ex', initialize_elasticity(\n", - " ll.N, b=0.05, sigma=1, alpha=None,\n", - " m_compartments=m_compartments,\n", - " r_compartments=r_compartments\n", - " ))\n", - " Ey_t = T.zeros_like(ll.Ey, dtype='float32')\n", - " \n", + " Ex_t = pm.Deterministic(\n", + " \"Ex\",\n", + " initialize_elasticity(\n", + " ll.N,\n", + " b=0.05,\n", + " sigma=1,\n", + " alpha=None,\n", + " m_compartments=m_compartments,\n", + " r_compartments=r_compartments,\n", + " ),\n", + " )\n", + " Ey_t = T.zeros_like(ll.Ey, dtype=\"float32\")\n", + "\n", " # We don't know the expression (here, underexpression vs. reference) of\n", " # each protein as a result of the plasmid integration. 
So, we use a\n", " # symbolic variable for each protein in the enzyme expression matrix.\n", " e_hat = T.ones((n_exp, nr))\n", - " e_hat_entries = pm.Uniform('e_hat_entries', lower=0, upper=1, shape=(5,))\n", - " \n", + " e_hat_entries = pm.Uniform(\"e_hat_entries\", lower=0, upper=1, shape=(5,))\n", + "\n", " for i, rxns in enumerate(ox_order):\n", " for rxn in rxns:\n", " e_hat = T.set_subtensor(\n", - " e_hat[i, model.reactions.index(rxn)],\n", - " e_hat_entries[rxn_index[rxn]])\n", - " \n", + " e_hat[i, model.reactions.index(rxn)], e_hat_entries[rxn_index[rxn]]\n", + " )\n", + "\n", " # Store the fitted e_hat in the trace object\n", - " e_hat = pm.Deterministic('e_hat', e_hat) \n", + " e_hat = pm.Deterministic(\"e_hat\", e_hat)\n", "\n", "with pymc_model:\n", " trace_prior = pm.sample_prior_predictive()" @@ -297,15 +302,14 @@ ], "source": [ "with pymc_model:\n", - " \n", " # Calculate steady-state concentrations and fluxes from elasticities\n", " xn, vn = ll.steady_state_pytensor(Ex_t, Ey_t, e_hat, np.ones((n_exp, ll.ny)))\n", "\n", " # Error distribution for observed steady-state lysine out\n", - " lys_mean = pm.Deterministic('lys_mean', vn[:, lys_index])\n", - " ox_result = pm.Normal('ox_result', mu=lys_mean, sigma=0.01, observed=ox_results)\n", + " lys_mean = pm.Deterministic(\"lys_mean\", vn[:, lys_index])\n", + " ox_result = pm.Normal(\"ox_result\", mu=lys_mean, sigma=0.01, observed=ox_results)\n", "\n", - "#print(pymc_model.logpt.tag.test_value)" + "# print(pymc_model.logpt.tag.test_value)" ] }, { @@ -387,8 +391,7 @@ "source": [ "with pymc_model:\n", " approx = pm.ADVI(random_seed=1)\n", - " hist = approx.fit(n=25000, obj_optimizer=pm.adagrad_window(learning_rate=0.01),\n", - " obj_n_mc=1)" + " hist = approx.fit(n=25000, obj_optimizer=pm.adagrad_window(learning_rate=0.01), obj_n_mc=1)" ] }, { @@ -408,12 +411,12 @@ } ], "source": [ - "fig = plt.figure(figsize=(5,4))\n", - "plt.semilogy(approx.hist, '.', ms=1)\n", + "fig = plt.figure(figsize=(5, 4))\n", + "plt.semilogy(approx.hist, \".\", ms=1)\n", "sns.despine(offset=10)\n", "\n", - "plt.ylabel('-ELBO')\n", - "plt.xlabel('Iteration')\n", + "plt.ylabel(\"-ELBO\")\n", + "plt.xlabel(\"Iteration\")\n", "plt.tight_layout()\n", "\n", "# plt.savefig('contador_elbo.svg')" @@ -436,12 +439,12 @@ } ], "source": [ - "fig = plt.figure(figsize=(5,4))\n", - "plt.semilogy(approx.hist, '.', ms=1)\n", + "fig = plt.figure(figsize=(5, 4))\n", + "plt.semilogy(approx.hist, \".\", ms=1)\n", "sns.despine(offset=10)\n", "\n", - "plt.ylabel('-ELBO')\n", - "plt.xlabel('Iteration')\n", + "plt.ylabel(\"-ELBO\")\n", + "plt.xlabel(\"Iteration\")\n", "plt.tight_layout()\n", "\n", "# plt.savefig('contador_elbo.svg')" @@ -945,7 +948,7 @@ } ], "source": [ - "ppc_vi.posterior_predictive['ox_result'].mean()" + "ppc_vi.posterior_predictive[\"ox_result\"].mean()" ] }, { @@ -1333,7 +1336,7 @@ } ], "source": [ - "trace_vi.posterior['e_hat_entries'].mean()" + "trace_vi.posterior[\"e_hat_entries\"].mean()" ] }, { @@ -3492,7 +3495,7 @@ } ], "source": [ - "prior_vi.prior['lys_mean']" + "prior_vi.prior[\"lys_mean\"]" ] }, { @@ -3891,7 +3894,7 @@ } ], "source": [ - "trace_vi.posterior['lys_mean'][:,0]" + "trace_vi.posterior[\"lys_mean\"][:, 0]" ] }, { @@ -4294,7 +4297,7 @@ } ], "source": [ - "pm.hdi(trace_vi, var_names='lys_mean',hdi_prob = 0.94).as_numpy()" + "pm.hdi(trace_vi, var_names=\"lys_mean\", hdi_prob=0.94).as_numpy()" ] }, { @@ -4686,7 +4689,7 @@ } ], "source": [ - "trace_vi.posterior['lys_mean'][:,1].mean()" + "trace_vi.posterior[\"lys_mean\"][:, 1].mean()" ] }, { 
@@ -4706,35 +4709,40 @@ } ], "source": [ - "fig = plt.figure(figsize=(3.5,3.5))\n", + "fig = plt.figure(figsize=(3.5, 3.5))\n", "ax = fig.add_subplot(111)\n", "\n", "for i in range(n_exp):\n", - " \n", - " #hpd = pm.hpd(trace_vi['lys_mean'][:, i]) * v_star[lys_index]\n", - " mean = trace_vi.posterior['lys_mean'][:, i].mean() * v_star[lys_index]\n", - " #ax.errorbar(ox_results[i] * v_star[lys_index], mean,\n", + " # hpd = pm.hpd(trace_vi['lys_mean'][:, i]) * v_star[lys_index]\n", + " mean = trace_vi.posterior[\"lys_mean\"][:, i].mean() * v_star[lys_index]\n", + " # ax.errorbar(ox_results[i] * v_star[lys_index], mean,\n", " # yerr=np.atleast_2d([mean-hpd[0], hpd[1]-mean]).T, marker='o',\n", " # label=strains[i], capsize=4, capthick=2, ls='none', ms=7.5)\n", - " \n", - "ax.plot(v_star[lys_index], v_star[lys_index], 'o', color='.4', ms=7.5,\n", - " label='dapA, lysC, dapB, dapD, dapE\\n(reference)')\n", "\n", - "legend = ax.legend(loc='center left', bbox_to_anchor=(1., .5), title='Strain')\n", - "plt.setp(legend.get_title(),fontsize='x-large')\n", + "ax.plot(\n", + " v_star[lys_index],\n", + " v_star[lys_index],\n", + " \"o\",\n", + " color=\".4\",\n", + " ms=7.5,\n", + " label=\"dapA, lysC, dapB, dapD, dapE\\n(reference)\",\n", + ")\n", "\n", - "ax.plot([0, .4], [0, .4], ls='--', color='.8', zorder=0, lw=2)\n", + "legend = ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5), title=\"Strain\")\n", + "plt.setp(legend.get_title(), fontsize=\"x-large\")\n", "\n", - "ax.set_xlabel('Measured lysene flux (mol L$^{-1}$ week$^{-1}$)')\n", - "ax.set_ylabel('Predicted lysene flux\\n(mol L$^{-1}$ week$^{-1}$)')\n", + "ax.plot([0, 0.4], [0, 0.4], ls=\"--\", color=\".8\", zorder=0, lw=2)\n", + "\n", + "ax.set_xlabel(\"Measured lysene flux (mol L$^{-1}$ week$^{-1}$)\")\n", + "ax.set_ylabel(\"Predicted lysene flux\\n(mol L$^{-1}$ week$^{-1}$)\")\n", "\n", "ax.set_xlim((-0.025, 0.4))\n", "ax.set_ylim((-0.025, 0.4))\n", "\n", - "ax.set_xticks([0, .1, .2, .3, .4])\n", + "ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4])\n", "\n", "sns.despine(trim=True)\n", - "fig.savefig('lysine_flux_prediction.svg')" + "fig.savefig(\"lysine_flux_prediction.svg\")" ] }, { @@ -4751,10 +4759,10 @@ "# for amount in ox_results:\n", "# ax.axvline(amount, lw=2.5, color='.4', ls='--')\n", "\n", - "# for i in range(n_exp): \n", + "# for i in range(n_exp):\n", "# pm.kdeplot(trace_vi['lys_mean'][:, i], shade=.1, ax=ax, label=strains[i])\n", - " \n", - " \n", + "\n", + "\n", "# legend = ax.legend(loc='center left', bbox_to_anchor=(1., .5), title='Strain')\n", "# plt.setp(legend.get_title(),fontsize='x-large')\n", "\n", @@ -4823,7 +4831,9 @@ } ], "source": [ - "(np.diff(pm.hdi(trace_vi.posterior['ex_kinetic_entries']).to_array().to_numpy()[0])<0.75).flatten()" + "(\n", + " np.diff(pm.hdi(trace_vi.posterior[\"ex_kinetic_entries\"]).to_array().to_numpy()[0]) < 0.75\n", + ").flatten()" ] }, { @@ -5240,7 +5250,7 @@ } ], "source": [ - "(trace_vi.posterior['ex_kinetic_entries'] * e_sign)[0][:,identifiable_elasticies]" + "(trace_vi.posterior[\"ex_kinetic_entries\"] * e_sign)[0][:, identifiable_elasticies]" ] }, { @@ -5249,16 +5259,24 @@ "metadata": {}, "outputs": [], "source": [ - "identifiable_elasticies = (np.diff(pm.hdi(trace_vi.posterior['ex_kinetic_entries']).to_array().to_numpy()[0]) < .75).flatten()\n", + "identifiable_elasticies = (\n", + " np.diff(pm.hdi(trace_vi.posterior[\"ex_kinetic_entries\"]).to_array().to_numpy()[0]) < 0.75\n", + ").flatten()\n", "\n", "mlabels = [m.id for m in model.metabolites]\n", "rlabels = [r.id for r in 
model.reactions]\n", "\n", - "e_labels = np.array([['$\\epsilon_{' + '{0},{1}'.format(rlabel, mlabel) + '}$'\n", - " for mlabel in mlabels] for rlabel in rlabels]).flatten()\n", + "e_labels = np.array(\n", + " [\n", + " [\"$\\epsilon_{\" + \"{0},{1}\".format(rlabel, mlabel) + \"}$\" for mlabel in mlabels]\n", + " for rlabel in rlabels\n", + " ]\n", + ").flatten()\n", "\n", - "elast_nonzero = pd.DataFrame((trace_vi.posterior['ex_kinetic_entries'] * e_sign)[0][:, identifiable_elasticies],\n", - " columns=e_labels[nonzero_inds][identifiable_elasticies])" + "elast_nonzero = pd.DataFrame(\n", + " (trace_vi.posterior[\"ex_kinetic_entries\"] * e_sign)[0][:, identifiable_elasticies],\n", + " columns=e_labels[nonzero_inds][identifiable_elasticies],\n", + ")" ] }, { @@ -5284,7 +5302,7 @@ } ], "source": [ - "pm.hdi(trace_vi.posterior['ex_capacity_entries']).to_array().to_numpy()[0]" + "pm.hdi(trace_vi.posterior[\"ex_capacity_entries\"]).to_array().to_numpy()[0]" ] }, { @@ -5304,7 +5322,7 @@ } ], "source": [ - "null = pd.DataFrame(pm.hdi(trace_vi.posterior['ex_capacity_entries']).to_array().to_numpy()[0])\n", + "null = pd.DataFrame(pm.hdi(trace_vi.posterior[\"ex_capacity_entries\"]).to_array().to_numpy()[0])\n", "sig = np.sign(null)[0] == np.sign(null)[1]\n", "sum(sig)" ] @@ -5315,7 +5333,9 @@ "metadata": {}, "outputs": [], "source": [ - "elast_zero = pd.DataFrame(trace_vi.posterior['ex_capacity_entries'][0][:, sig], columns=e_labels[zero_inds[sig]])\n", + "elast_zero = pd.DataFrame(\n", + " trace_vi.posterior[\"ex_capacity_entries\"][0][:, sig], columns=e_labels[zero_inds[sig]]\n", + ")\n", "elast_posterior = elast_nonzero.iloc[:, elast_nonzero.mean().argsort()].join(elast_zero)" ] }, @@ -5326,7 +5346,7 @@ "outputs": [], "source": [ "elast_prior = pd.DataFrame(\n", - " trace_prior.prior['Ex'].to_numpy().reshape(500, -1), columns=e_labels\n", + " trace_prior.prior[\"Ex\"].to_numpy().reshape(500, -1), columns=e_labels\n", ").reindex(columns=elast_posterior.columns)" ] }, @@ -5371,30 +5391,38 @@ "ax = fig.add_subplot(111)\n", "\n", "_ = sns.boxplot(data=elast_posterior, fliersize=0, ax=ax, zorder=2)\n", - "_ = sns.boxplot(data=elast_prior, fliersize=0, zorder=0, showmeans=False,\n", - " capprops=dict(color='.9', zorder=0), medianprops=dict(color='.9', zorder=0.5),\n", - " whiskerprops=dict(color='.9', zorder=0), boxprops=dict(color='.9', facecolor='w', zorder=0), ax=ax)\n", + "_ = sns.boxplot(\n", + " data=elast_prior,\n", + " fliersize=0,\n", + " zorder=0,\n", + " showmeans=False,\n", + " capprops=dict(color=\".9\", zorder=0),\n", + " medianprops=dict(color=\".9\", zorder=0.5),\n", + " whiskerprops=dict(color=\".9\", zorder=0),\n", + " boxprops=dict(color=\".9\", facecolor=\"w\", zorder=0),\n", + " ax=ax,\n", + ")\n", "\n", "\n", "_ = ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\n", - "ax.axhline(0, ls='--', color='.5', zorder=1)\n", - "ax.axvline(elast_nonzero.shape[1] - .5, color='.5', ls='--')\n", + "ax.axhline(0, ls=\"--\", color=\".5\", zorder=1)\n", + "ax.axvline(elast_nonzero.shape[1] - 0.5, color=\".5\", ls=\"--\")\n", "\n", - "ax.set_ylabel('Elasticity')\n", + "ax.set_ylabel(\"Elasticity\")\n", "sns.despine(trim=True)\n", "\n", "ax.set_yticks([-3, -2, -1, 0, 1, 2, 3])\n", "ax.set_ylim([-3, 3])\n", "\n", "\n", - "ax.set_xlim(-.75, elast_nonzero.shape[1] + elast_zero.shape[1] - .5)\n", + "ax.set_xlim(-0.75, elast_nonzero.shape[1] + elast_zero.shape[1] - 0.5)\n", "sns.despine(ax=ax, trim=True)\n", "\n", - "#for tick in ax.xaxis.get_major_ticks():\n", - "# tick.label.set_fontsize(18) \n", 
+ "# for tick in ax.xaxis.get_major_ticks():\n", + "# tick.label.set_fontsize(18)\n", "\n", "plt.tight_layout()\n", - "plt.savefig('contador_elasticities.svg')" + "plt.savefig(\"contador_elasticities.svg\")" ] }, { @@ -5412,14 +5440,25 @@ } ], "source": [ - "fcc = pd.DataFrame(np.array([ll.flux_control_coefficient(Ex=trace_vi.posterior['Ex'].to_numpy()[0][i])[lys_index]\n", - " for i in range(len(trace_vi))]),\n", - " columns=[r.id for r in model.reactions])\n", + "fcc = pd.DataFrame(\n", + " np.array(\n", + " [\n", + " ll.flux_control_coefficient(Ex=trace_vi.posterior[\"Ex\"].to_numpy()[0][i])[lys_index]\n", + " for i in range(len(trace_vi))\n", + " ]\n", + " ),\n", + " columns=[r.id for r in model.reactions],\n", + ")\n", "\n", - "fcc_prior = pd.DataFrame(np.array([ll.flux_control_coefficient(\n", - " Ex=trace_prior.prior['Ex'].to_numpy()[0][i])[lys_index]\n", - " for i in range(len(trace_prior))]),\n", - " columns=[r.id for r in model.reactions])\n", + "fcc_prior = pd.DataFrame(\n", + " np.array(\n", + " [\n", + " ll.flux_control_coefficient(Ex=trace_prior.prior[\"Ex\"].to_numpy()[0][i])[lys_index]\n", + " for i in range(len(trace_prior))\n", + " ]\n", + " ),\n", + " columns=[r.id for r in model.reactions],\n", + ")\n", "\n", "# Calculate the fcc values that have 95% Highest Posterior Densities with a consistent direction.\n", "hpd = pm.hdi(fcc.values)\n", @@ -5467,51 +5506,58 @@ "ax = fig.add_subplot(111)\n", "\n", "for i, (name, col) in enumerate(fcc.items()):\n", - " \n", " if name in fcc.columns[fcc_consistent]:\n", - " color='g'\n", - " \n", + " color = \"g\"\n", + "\n", " l_g = ax.plot([i, i], pm.hdi(col.to_numpy()), color=color)\n", - " dot_g = ax.plot(i, col.median(), '.', color=color, ms=10)\n", + " dot_g = ax.plot(i, col.median(), \".\", color=color, ms=10)\n", "\n", " else:\n", - " color='.4'\n", - " \n", + " color = \".4\"\n", + "\n", " l_4 = ax.plot([i, i], pm.hdi(col.to_numpy()), color=color)\n", - " dot_4 = ax.plot(i, col.median(), '.', color=color, ms=10)\n", - " \n", - " l_p = ax.plot([i, i], pm.hdi(fcc_prior[name].to_numpy()), color='.8', zorder=0)\n", - " \n", + " dot_4 = ax.plot(i, col.median(), \".\", color=color, ms=10)\n", + "\n", + " l_p = ax.plot([i, i], pm.hdi(fcc_prior[name].to_numpy()), color=\".8\", zorder=0)\n", "\n", "\n", - "ax.axhline(0, ls='--', color='.8', zorder=0)\n", + "ax.axhline(0, ls=\"--\", color=\".8\", zorder=0)\n", "\n", - "#ax.set_ylim([-5E-5, 1E-4])\n", + "# ax.set_ylim([-5E-5, 1E-4])\n", "# ax.set_xlim([-1, 16])\n", "\n", - "plt.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))\n", - "ax.set_ylabel('Flux control coefficient\\n(lysene out)')\n", + "plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(-2, 2))\n", + "ax.set_ylabel(\"Flux control coefficient\\n(lysene out)\")\n", "\n", - "ax.set_xlabel('Reaction')\n", + "ax.set_xlabel(\"Reaction\")\n", "\n", - "plt.legend(((l_4[0], dot_4[0]), l_p[0], (l_g[0], dot_g[0])),\n", - " ('Posterior\\n(Overlaps zero)', 'Prior', 'Posterior\\n(Consistent)',),\n", - " borderaxespad=0, borderpad=0, ncol=2, fontsize='medium')\n", + "plt.legend(\n", + " ((l_4[0], dot_4[0]), l_p[0], (l_g[0], dot_g[0])),\n", + " (\n", + " \"Posterior\\n(Overlaps zero)\",\n", + " \"Prior\",\n", + " \"Posterior\\n(Consistent)\",\n", + " ),\n", + " borderaxespad=0,\n", + " borderpad=0,\n", + " ncol=2,\n", + " fontsize=\"medium\",\n", + ")\n", "\n", "\n", "xs = np.where(fcc_consistent)[0]\n", "ys = fcc.loc[:, fcc_consistent].mean(0).values\n", "labels = fcc.columns[fcc_consistent]\n", "\n", - "plt.ylim([-.6, .6])\n", 
+ "plt.ylim([-0.6, 0.6])\n", "plt.xlim([-1, ll.nr])\n", "\n", - "#ax.set_xticks([0, 10, 20, 30, 40, 45])\n", + "# ax.set_xticks([0, 10, 20, 30, 40, 45])\n", "\n", - "#plt.tight_layout()\n", + "# plt.tight_layout()\n", "\n", "sns.despine(trim=False, offset=10)\n", - "plt.savefig('fccs_contador.svg', transparent=True)" + "plt.savefig(\"fccs_contador.svg\", transparent=True)" ] }, { @@ -5560,14 +5606,25 @@ "colors = sns.color_palette(\"hls\", len(rxn))\n", "\n", "for rxn, index in rxn_index.items():\n", - " pm.plot_kde(trace_vi.posterior['e_hat_entries'].to_numpy()[0][:, index], ax=ax, label=rxn, fill_kwargs={\"facecolor\": cm.viridis(np.linspace(0, 1, num=trace_vi.posterior['e_hat_entries'].to_numpy()[0][:, index].shape[0]))})\n", - "plt.legend(loc='upper right')\n", - "plt.ylabel('Frequency')\n", - "plt.xlabel('Enzyme expression in WT / expression after amplification')\n", + " pm.plot_kde(\n", + " trace_vi.posterior[\"e_hat_entries\"].to_numpy()[0][:, index],\n", + " ax=ax,\n", + " label=rxn,\n", + " fill_kwargs={\n", + " \"facecolor\": cm.viridis(\n", + " np.linspace(\n", + " 0, 1, num=trace_vi.posterior[\"e_hat_entries\"].to_numpy()[0][:, index].shape[0]\n", + " )\n", + " )\n", + " },\n", + " )\n", + "plt.legend(loc=\"upper right\")\n", + "plt.ylabel(\"Frequency\")\n", + "plt.xlabel(\"Enzyme expression in WT / expression after amplification\")\n", "\n", "sns.despine(trim=True)\n", "plt.tight_layout()\n", - "plt.savefig('posterior_enzyme_expression.svg')" + "plt.savefig(\"posterior_enzyme_expression.svg\")" ] }, { @@ -5597,7 +5654,7 @@ ], "source": [ "# Create a list of colormaps\n", - "colormaps = [cm.viridis, cm.coolwarm,cm.coolwarm, cm.inferno, cm.inferno]\n", + "colormaps = [cm.viridis, cm.coolwarm, cm.coolwarm, cm.inferno, cm.inferno]\n", "\n", "fig, ax = plt.subplots()\n", "\n", diff --git a/notebooks/hackett.ipynb b/notebooks/hackett.ipynb index 8e99758..2fe076e 100644 --- a/notebooks/hackett.ipynb +++ b/notebooks/hackett.ipynb @@ -32,6 +32,7 @@ ], "source": [ "import sys\n", + "\n", "print(sys.version)" ] }, @@ -60,7 +61,8 @@ "source": [ "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", - "sns.set(context='talk', style='ticks', color_codes=True)\n", + "\n", + "sns.set(context=\"talk\", style=\"ticks\", color_codes=True)\n", "\n", "from tqdm import tqdm\n", "\n", @@ -90,9 +92,10 @@ "outputs": [], "source": [ "import gzip\n", + "\n", "import cloudpickle\n", "\n", - "with gzip.open('/Users/mcna892/Downloads/hackett_advi.pgz', 'rb') as f:\n", + "with gzip.open(\"/Users/mcna892/Downloads/hackett_advi.pgz\", \"rb\") as f:\n", " hist = cloudpickle.load(f)" ] }, @@ -123,13 +126,13 @@ "fig = plt.figure()\n", "ax = fig.add_subplot(111)\n", "\n", - "ax.semilogy(hist['hist'].hist, '-', ms=1, rasterized=True)\n", + "ax.semilogy(hist[\"hist\"].hist, \"-\", ms=1, rasterized=True)\n", "ax.set_xlim([0, 40000])\n", "\n", "sns.despine(offset=10)\n", - "ax.set_xlabel('Iteration')\n", - "ax.set_ylabel('-ELBO')\n", - "ax.set_title('Multiomics ADVI convergence')\n", + "ax.set_xlabel(\"Iteration\")\n", + "ax.set_ylabel(\"-ELBO\")\n", + "ax.set_title(\"Multiomics ADVI convergence\")\n", "\n", "plt.tight_layout()\n", "\n", @@ -202,8 +205,7 @@ ], "source": [ "with pymc_model:\n", - " \n", - " trace = hist['hist'].sample(500)\n", + " trace = hist[\"hist\"].sample(500)\n", " ppc = pm.sample_posterior_predictive(trace)" ] }, @@ -646,7 +648,7 @@ } ], "source": [ - "trace.posterior['vn_ss'][:, :, :,197]" + "trace.posterior[\"vn_ss\"][:, :, :, 197]" ] }, { @@ -655,11 +657,17 @@ "metadata": {}, 
"outputs": [], "source": [ - "def plot_hpd(ax, real, ppc, error=True, ms=3, alpha=0.5, color='b'):\n", + "def plot_hpd(ax, real, ppc, error=True, ms=3, alpha=0.5, color=\"b\"):\n", " if error:\n", - " ax.plot(np.vstack([real.values.flatten(), real.values.flatten()]),\n", - " pm.hdi(ppc).to_array().to_numpy().reshape(-1, 2).T, color=color, lw=1, alpha=0.05, zorder=0)\n", - " ax.plot(real, ppc.median(dim=['chain','draw']), '.', ms=ms, color=color, alpha=alpha, zorder=0)" + " ax.plot(\n", + " np.vstack([real.values.flatten(), real.values.flatten()]),\n", + " pm.hdi(ppc).to_array().to_numpy().reshape(-1, 2).T,\n", + " color=color,\n", + " lw=1,\n", + " alpha=0.05,\n", + " zorder=0,\n", + " )\n", + " ax.plot(real, ppc.median(dim=[\"chain\", \"draw\"]), \".\", ms=ms, color=color, alpha=alpha, zorder=0)" ] }, { @@ -696,72 +704,96 @@ "source": [ "plt.rcParams[\"axes.axisbelow\"] = False\n", "\n", - "fig, ax_matrix = plt.subplots(ncols=3, nrows=2, figsize=(6.5, 5),\n", - " sharex='row', sharey='row')\n", + "fig, ax_matrix = plt.subplots(ncols=3, nrows=2, figsize=(6.5, 5), sharex=\"row\", sharey=\"row\")\n", "\n", - "for ax in ax_matrix[1,:].flatten():\n", - " ax.set_aspect('equal')\n", + "for ax in ax_matrix[1, :].flatten():\n", + " ax.set_aspect(\"equal\")\n", "\n", - "_ = ax_matrix[0,0].hist(xn.values.flatten(), bins=15, lw=1,\n", - " edgecolor='w', density=True, facecolor='.4')\n", - "_ = ax_matrix[0,1].hist(np.log(np.clip(vn.values.flatten(), 1E-8, 1E8)), bins=15, lw=1,\n", - " edgecolor='w', density=True, facecolor='.4')\n", - "_ = ax_matrix[0,2].hist(np.log(en.values.flatten()), bins=15, lw=1, edgecolor='w',\n", - " density=True, facecolor='.4')\n", + "_ = ax_matrix[0, 0].hist(\n", + " xn.values.flatten(), bins=15, lw=1, edgecolor=\"w\", density=True, facecolor=\".4\"\n", + ")\n", + "_ = ax_matrix[0, 1].hist(\n", + " np.log(np.clip(vn.values.flatten(), 1e-8, 1e8)),\n", + " bins=15,\n", + " lw=1,\n", + " edgecolor=\"w\",\n", + " density=True,\n", + " facecolor=\".4\",\n", + ")\n", + "_ = ax_matrix[0, 2].hist(\n", + " np.log(en.values.flatten()), bins=15, lw=1, edgecolor=\"w\", density=True, facecolor=\".4\"\n", + ")\n", "\n", - "plot_hpd(ax_matrix[1,0], xn,\n", - " trace.posterior['chi_ss'][:, :, :, x_inds])\n", - "plot_hpd(ax_matrix[1,1], np.log(vn),\n", - " np.log(np.clip(trace.posterior['vn_ss'][:, :, :, v_inds], 1E-8, 1E8)))\n", - "plot_hpd(ax_matrix[1,2], np.log(en),\n", - " trace.posterior['log_en_t'][:, :, :, e_inds])\n", + "plot_hpd(ax_matrix[1, 0], xn, trace.posterior[\"chi_ss\"][:, :, :, x_inds])\n", + "plot_hpd(\n", + " ax_matrix[1, 1],\n", + " np.log(vn),\n", + " np.log(np.clip(trace.posterior[\"vn_ss\"][:, :, :, v_inds], 1e-8, 1e8)),\n", + ")\n", + "plot_hpd(ax_matrix[1, 2], np.log(en), trace.posterior[\"log_en_t\"][:, :, :, e_inds])\n", "\n", - "for ax in ax_matrix[1,:]:\n", + "for ax in ax_matrix[1, :]:\n", " ax.set_rasterization_zorder(1)\n", "\n", - "ax_matrix[1,0].set_xlim([-4, 4])\n", - "ax_matrix[1,1].set_xlim([-4, 4])\n", - "ax_matrix[1,2].set_xlim([-4, 4])\n", - "ax_matrix[1,0].set_ylim([-4, 4])\n", - "ax_matrix[1,1].set_ylim([-4, 4])\n", - "ax_matrix[1,2].set_ylim([-4, 4])\n", + "ax_matrix[1, 0].set_xlim([-4, 4])\n", + "ax_matrix[1, 1].set_xlim([-4, 4])\n", + "ax_matrix[1, 2].set_xlim([-4, 4])\n", + "ax_matrix[1, 0].set_ylim([-4, 4])\n", + "ax_matrix[1, 1].set_ylim([-4, 4])\n", + "ax_matrix[1, 2].set_ylim([-4, 4])\n", "\n", - "for ax in ax_matrix[0,:]:\n", + "for ax in ax_matrix[0, :]:\n", " ax.set_xlim([-4, 4])\n", " ax.set_xticks([-4, -2, 0, 2, 4])\n", "\n", - "for ax 
in ax_matrix[1,:]:\n", - " ax.plot([-4, 4], [-4, 4], '--', color='.3', zorder=4, lw=1.5)\n", + "for ax in ax_matrix[1, :]:\n", + " ax.plot([-4, 4], [-4, 4], \"--\", color=\".3\", zorder=4, lw=1.5)\n", " ax.set_xlim([-4, 4])\n", " ax.set_ylim([-4, 4])\n", "\n", " ax.set_xticks([-4, -2, 0, 2, 4])\n", " ax.set_yticks([-4, -2, 0, 2, 4])\n", - " \n", - " \n", - "ax_matrix[1,0].fill_between([-1.5, 1.5], [1.5, 1.5], [-1.5, -1.5],\n", - " zorder=4, color='k', alpha=.1)\n", - "ax_matrix[1,1].fill_between([-1.5, 1.5], [1.5, 1.5], [-1.5, -1.5],\n", - " zorder=4, color='k', alpha=.1)\n", "\n", - "ax_matrix[0, 0].set_ylim([0, 1.])\n", "\n", - "ax_matrix[0,0].set_title('Metabolomics', fontsize=13)\n", - "ax_matrix[0,1].set_title('Fluxomics', fontsize=13)\n", - "ax_matrix[0,2].set_title('Proteomics', fontsize=13)\n", + "ax_matrix[1, 0].fill_between([-1.5, 1.5], [1.5, 1.5], [-1.5, -1.5], zorder=4, color=\"k\", alpha=0.1)\n", + "ax_matrix[1, 1].fill_between([-1.5, 1.5], [1.5, 1.5], [-1.5, -1.5], zorder=4, color=\"k\", alpha=0.1)\n", + "\n", + "ax_matrix[0, 0].set_ylim([0, 1.0])\n", + "\n", + "ax_matrix[0, 0].set_title(\"Metabolomics\", fontsize=13)\n", + "ax_matrix[0, 1].set_title(\"Fluxomics\", fontsize=13)\n", + "ax_matrix[0, 2].set_title(\"Proteomics\", fontsize=13)\n", "\n", "\n", - "ax_matrix[0,0].text(0.5, 1., '$\\chi$, n={}'.format(xn.shape[0] * xn.shape[1]),\n", - " ha='center', va='top', transform=ax_matrix[0,0].transAxes)\n", - "ax_matrix[0,1].text(0.5, 1., '$\\log\\; \\hat{v}$, n='+ str(vn.shape[0] * vn.shape[1]),\n", - " ha='center', va='top', transform=ax_matrix[0,1].transAxes)\n", - "ax_matrix[0,2].text(0.5, 1., '$\\log\\; \\hat{e}$, n=' + str(en.shape[0] * en.shape[1]),\n", - " ha='center', va='top', transform=ax_matrix[0,2].transAxes)\n", + "ax_matrix[0, 0].text(\n", + " 0.5,\n", + " 1.0,\n", + " \"$\\chi$, n={}\".format(xn.shape[0] * xn.shape[1]),\n", + " ha=\"center\",\n", + " va=\"top\",\n", + " transform=ax_matrix[0, 0].transAxes,\n", + ")\n", + "ax_matrix[0, 1].text(\n", + " 0.5,\n", + " 1.0,\n", + " \"$\\log\\; \\hat{v}$, n=\" + str(vn.shape[0] * vn.shape[1]),\n", + " ha=\"center\",\n", + " va=\"top\",\n", + " transform=ax_matrix[0, 1].transAxes,\n", + ")\n", + "ax_matrix[0, 2].text(\n", + " 0.5,\n", + " 1.0,\n", + " \"$\\log\\; \\hat{e}$, n=\" + str(en.shape[0] * en.shape[1]),\n", + " ha=\"center\",\n", + " va=\"top\",\n", + " transform=ax_matrix[0, 2].transAxes,\n", + ")\n", "\n", - "ax_matrix[0,1].set_xlabel('Measured')\n", - "ax_matrix[-1,1].set_xlabel('Measured')\n", - "ax_matrix[1,0].set_ylabel('Predicted')\n", - "ax_matrix[0,0].set_ylabel('Frequency')\n", + "ax_matrix[0, 1].set_xlabel(\"Measured\")\n", + "ax_matrix[-1, 1].set_xlabel(\"Measured\")\n", + "ax_matrix[1, 0].set_ylabel(\"Predicted\")\n", + "ax_matrix[0, 0].set_ylabel(\"Frequency\")\n", "\n", "sns.despine(offset=2.5, trim=True)\n", "# plt.tight_layout()\n", @@ -807,10 +839,9 @@ } ], "source": [ - "chi_median = trace.posterior[\"chi_ss\"][:, :, :, x_inds].median(dim=[\"chain\",\"draw\"])\n", + "chi_median = trace.posterior[\"chi_ss\"][:, :, :, x_inds].median(dim=[\"chain\", \"draw\"])\n", "\n", - "np.nanmedian((xn[(xn <= 1.5) & (xn >= -1.5)]\n", - " - chi_median.to_numpy()).abs().values)" + "np.nanmedian((xn[(xn <= 1.5) & (xn >= -1.5)] - chi_median.to_numpy()).abs().values)" ] }, { @@ -830,11 +861,16 @@ } ], "source": [ - "vn_median = trace.posterior[\"vn_ss\"][:, :, :, v_inds].median(dim=[\"chain\",\"draw\"])\n", + "vn_median = trace.posterior[\"vn_ss\"][:, :, :, v_inds].median(dim=[\"chain\", \"draw\"])\n", "\n", - 
"np.nanmedian((np.log(vn)[(np.log(vn) <= 1.5) & (np.log(vn) >= -1.5)]\n", - " - np.median(np.log(np.clip(vn_median.to_numpy(),\n", - " 1E-8, 1E8)), 0)).abs().values)" + "np.nanmedian(\n", + " (\n", + " np.log(vn)[(np.log(vn) <= 1.5) & (np.log(vn) >= -1.5)]\n", + " - np.median(np.log(np.clip(vn_median.to_numpy(), 1e-8, 1e8)), 0)\n", + " )\n", + " .abs()\n", + " .values\n", + ")" ] }, { @@ -854,10 +890,11 @@ } ], "source": [ - "en_median = trace.posterior[\"log_en_t\"][:, :, :, e_inds].median(dim=[\"chain\",\"draw\"])\n", + "en_median = trace.posterior[\"log_en_t\"][:, :, :, e_inds].median(dim=[\"chain\", \"draw\"])\n", "\n", - "np.nanmedian((np.log(en)[(np.log(en) <= 1.5) & (np.log(en) >= -1.5)]\n", - " - en_median.to_numpy()).abs().values)" + "np.nanmedian(\n", + " (np.log(en)[(np.log(en) <= 1.5) & (np.log(en) >= -1.5)] - en_median.to_numpy()).abs().values\n", + ")" ] }, { @@ -873,8 +910,10 @@ "metadata": {}, "outputs": [], "source": [ - "e_unmeasured_hpd = pm.hdi(trace.posterior['log_e_unmeasured'])\n", - "e_consistent = np.sign(e_unmeasured_hpd.to_array()[:, :, 0]) == np.sign(e_unmeasured_hpd.to_array()[:, :, 1])" + "e_unmeasured_hpd = pm.hdi(trace.posterior[\"log_e_unmeasured\"])\n", + "e_consistent = np.sign(e_unmeasured_hpd.to_array()[:, :, 0]) == np.sign(\n", + " e_unmeasured_hpd.to_array()[:, :, 1]\n", + ")" ] }, { @@ -908,7 +947,7 @@ "metadata": {}, "outputs": [], "source": [ - "capacity_hpd = pm.hdi(trace.posterior['ex_capacity_entries'])" + "capacity_hpd = pm.hdi(trace.posterior[\"ex_capacity_entries\"])" ] }, { @@ -917,7 +956,9 @@ "metadata": {}, "outputs": [], "source": [ - "capacity_consistent = np.sign(capacity_hpd.to_array()[:, :, 0][0]) == np.sign(capacity_hpd.to_array()[:, :, 1][0])" + "capacity_consistent = np.sign(capacity_hpd.to_array()[:, :, 0][0]) == np.sign(\n", + " capacity_hpd.to_array()[:, :, 1][0]\n", + ")" ] }, { @@ -5766,8 +5807,12 @@ } ], "source": [ - "fccs = np.array([ll.flux_control_coefficient(Ex=ex) for ex in tqdm(trace.posterior['Ex'][0].to_numpy())])\n", - "fccs_link = np.array([ll_link.flux_control_coefficient(Ex=ex) for ex in tqdm(trace.posterior['Ex'][0].to_numpy())])" + "fccs = np.array(\n", + " [ll.flux_control_coefficient(Ex=ex) for ex in tqdm(trace.posterior[\"Ex\"][0].to_numpy())]\n", + ")\n", + "fccs_link = np.array(\n", + " [ll_link.flux_control_coefficient(Ex=ex) for ex in tqdm(trace.posterior[\"Ex\"][0].to_numpy())]\n", + ")" ] }, { @@ -5799,10 +5844,14 @@ "r_med = np.median(fccs, 0).flatten()\n", "l_med = np.median(fccs_link, 0).flatten()\n", "\n", - "r_err = [r_med - np.percentile(fccs, 25, axis=0).flatten(),\n", - " np.percentile(fccs, 75, axis=0).flatten() - r_med]\n", - "l_err = [l_med - np.percentile(fccs_link, 25, axis=0).flatten(),\n", - " np.percentile(fccs_link, 75, axis=0).flatten() - l_med]" + "r_err = [\n", + " r_med - np.percentile(fccs, 25, axis=0).flatten(),\n", + " np.percentile(fccs, 75, axis=0).flatten() - r_med,\n", + "]\n", + "l_err = [\n", + " l_med - np.percentile(fccs_link, 25, axis=0).flatten(),\n", + " np.percentile(fccs_link, 75, axis=0).flatten() - l_med,\n", + "]" ] }, { @@ -5841,19 +5890,30 @@ } ], "source": [ - "fig = plt.figure(figsize=(5,4))\n", - "ax = fig.add_subplot(111, aspect='equal', adjustable='box')\n", + "fig = plt.figure(figsize=(5, 4))\n", + "ax = fig.add_subplot(111, aspect=\"equal\", adjustable=\"box\")\n", "\n", - "ax.set_aspect('equal')\n", - "ax.errorbar(r_med, l_med, ms=5, lw=0, xerr=r_err, yerr=l_err,\n", - " marker='.', color='.4', zorder=3, alpha=0.5)\n", - "ax.errorbar(r_med, l_med, ms=5, 
lw=0, xerr=r_err, yerr=l_err,\n", - " elinewidth=.5, color='.8', alpha=0.5, zorder=2)\n", + "ax.set_aspect(\"equal\")\n", + "ax.errorbar(\n", + " r_med, l_med, ms=5, lw=0, xerr=r_err, yerr=l_err, marker=\".\", color=\".4\", zorder=3, alpha=0.5\n", + ")\n", + "ax.errorbar(\n", + " r_med,\n", + " l_med,\n", + " ms=5,\n", + " lw=0,\n", + " xerr=r_err,\n", + " yerr=l_err,\n", + " elinewidth=0.5,\n", + " color=\".8\",\n", + " alpha=0.5,\n", + " zorder=2,\n", + ")\n", "\n", - "ax.plot([-5, 5], [-5, 5], '--', color='.8', zorder=0)\n", + "ax.plot([-5, 5], [-5, 5], \"--\", color=\".8\", zorder=0)\n", "\n", - "ax.set_xlabel('FCC (Pseudoinverse)')\n", - "ax.set_ylabel('FCC (Link Matrix)')\n", + "ax.set_xlabel(\"FCC (Pseudoinverse)\")\n", + "ax.set_ylabel(\"FCC (Link Matrix)\")\n", "\n", "ax.set_rasterization_zorder(4)\n", "\n", @@ -5863,8 +5923,14 @@ "\n", "plt.tight_layout()\n", "sns.despine(trim=True, offset=10)\n", - "ax.text(1, -1, '$\\\\rho = {:.2f}$'.format(\n", - " pd.Series(r_med).corr(pd.Series(l_med))), ha='right', va='bottom', fontsize=12)\n", + "ax.text(\n", + " 1,\n", + " -1,\n", + " \"$\\\\rho = {:.2f}$\".format(pd.Series(r_med).corr(pd.Series(l_med))),\n", + " ha=\"right\",\n", + " va=\"bottom\",\n", + " fontsize=12,\n", + ")\n", "\n", "# plt.savefig('fcc_comparison.svg', dpi=300)" ] @@ -5897,12 +5963,13 @@ "\n", " dom = (left.shape[1] - 1) * left_tiled.std(-1) * right_tiled.std(-1)\n", " correl = num / dom\n", - " \n", + "\n", " if not df:\n", " return correl\n", - " else: \n", + " else:\n", " return pd.DataFrame(correl, index=left.index, columns=right.index)\n", "\n", + "\n", "corr_df = corrwith(e, v).T" ] }, @@ -5913,8 +5980,11 @@ "outputs": [], "source": [ "def random_corr_iterator():\n", - " return corrwith(e.loc[:, np.random.permutation(e.columns)],\n", - " v.loc[:, np.random.permutation(e.columns)], df=False)" + " return corrwith(\n", + " e.loc[:, np.random.permutation(e.columns)],\n", + " v.loc[:, np.random.permutation(e.columns)],\n", + " df=False,\n", + " )" ] }, { @@ -5946,10 +6016,11 @@ "z_scores = (corr_df - permuted_corr_mat.mean(0).T) / permuted_corr_mat.std(0).T\n", "p_vals = 2 * z_scores.abs().apply(scipy.stats.norm.sf)\n", "\n", + "\n", "def sort_df(df):\n", " sort_x = cluster.hierarchy.leaves_list(cluster.hierarchy.linkage(df, \"complete\"))\n", " sort_y = cluster.hierarchy.leaves_list(cluster.hierarchy.linkage(df.T, \"complete\"))\n", - " \n", + "\n", " return df.iloc[sort_x, sort_y]" ] }, @@ -5986,7 +6057,7 @@ "for i in range(fccs.shape[1]):\n", " for j in range(fccs.shape[2]):\n", " hdi = pm.hdi(fccs[:, i, j]) # Compute HDI for each element (across the 500 dimension)\n", - " fccs_hdi[i, j] = hdi # Store HDI results\n" + " fccs_hdi[i, j] = hdi # Store HDI results" ] }, { @@ -6007,10 +6078,10 @@ "source": [ "sorted_corr_df = sort_df(corr_df)\n", "\n", - "fcc_med_measured = fcc_med.reindex(\n", - " columns=sorted_corr_df.columns, index=sorted_corr_df.index)\n", + "fcc_med_measured = fcc_med.reindex(columns=sorted_corr_df.columns, index=sorted_corr_df.index)\n", "fcc_consistent_measured = fcc_consistent_df.reindex(\n", - " columns=sorted_corr_df.columns, index=sorted_corr_df.index)" + " columns=sorted_corr_df.columns, index=sorted_corr_df.index\n", + ")" ] }, { @@ -6048,10 +6119,10 @@ "\n", "sorted_corr_df = sort_df(corr_df)\n", "\n", - "fcc_med_measured = fcc_med.reindex(\n", - " columns=sorted_corr_df.columns, index=sorted_corr_df.index)\n", + "fcc_med_measured = fcc_med.reindex(columns=sorted_corr_df.columns, index=sorted_corr_df.index)\n", "fcc_consistent_measured 
= fcc_consistent_df.reindex(\n", - " columns=sorted_corr_df.columns, index=sorted_corr_df.index)" + " columns=sorted_corr_df.columns, index=sorted_corr_df.index\n", + ")" ] }, { @@ -6071,20 +6142,27 @@ } ], "source": [ - "with sns.plotting_context('notebook'):\n", - " fig = plt.figure(figsize=(40,8))\n", - " ax = fig.add_subplot(111, aspect='equal', adjustable='box')\n", + "with sns.plotting_context(\"notebook\"):\n", + " fig = plt.figure(figsize=(40, 8))\n", + " ax = fig.add_subplot(111, aspect=\"equal\", adjustable=\"box\")\n", "\n", - "sns.heatmap(sorted_corr_df[p_vals < 0.05].values, vmin=-1, vmax=1,\n", - " cmap='RdBu', cbar=True, rasterized=True, lw=1)\n", + "sns.heatmap(\n", + " sorted_corr_df[p_vals < 0.05].values,\n", + " vmin=-1,\n", + " vmax=1,\n", + " cmap=\"RdBu\",\n", + " cbar=True,\n", + " rasterized=True,\n", + " lw=1,\n", + ")\n", "\n", - "_= ax.set_yticks(np.arange(sorted_corr_df.shape[0]) + 0.5)\n", - "_= ax.set_xticks(np.arange(sorted_corr_df.shape[1]) + 0.5)\n", + "_ = ax.set_yticks(np.arange(sorted_corr_df.shape[0]) + 0.5)\n", + "_ = ax.set_xticks(np.arange(sorted_corr_df.shape[1]) + 0.5)\n", "_ = ax.set_yticklabels(sorted_corr_df.index, rotation=0)\n", "_ = ax.set_xticklabels(sorted_corr_df.columns)\n", "\n", - "ax.set_xlabel('Enzyme')\n", - "ax.set_ylabel('Boundary Flux')\n", + "ax.set_xlabel(\"Enzyme\")\n", + "ax.set_ylabel(\"Boundary Flux\")\n", "\n", "plt.tight_layout()\n", "# plt.savefig('corr_heatmap_large_confident.svg', dpi=75)" @@ -6107,21 +6185,27 @@ } ], "source": [ - "with sns.plotting_context('notebook'):\n", - "\n", - " fig = plt.figure(figsize=(40,8))\n", - " ax = fig.add_subplot(111, aspect='equal', adjustable='box')\n", + "with sns.plotting_context(\"notebook\"):\n", + " fig = plt.figure(figsize=(40, 8))\n", + " ax = fig.add_subplot(111, aspect=\"equal\", adjustable=\"box\")\n", "\n", - "sns.heatmap(fcc_med_measured[fcc_consistent_measured].values,\n", - " center=0, robust=True, cmap='RdBu', cbar=True, rasterized=True, lw=1)\n", + "sns.heatmap(\n", + " fcc_med_measured[fcc_consistent_measured].values,\n", + " center=0,\n", + " robust=True,\n", + " cmap=\"RdBu\",\n", + " cbar=True,\n", + " rasterized=True,\n", + " lw=1,\n", + ")\n", "\n", - "_= ax.set_yticks(np.arange(sorted_corr_df.shape[0]) + 0.5)\n", - "_= ax.set_xticks(np.arange(sorted_corr_df.shape[1]) + 0.5)\n", + "_ = ax.set_yticks(np.arange(sorted_corr_df.shape[0]) + 0.5)\n", + "_ = ax.set_xticks(np.arange(sorted_corr_df.shape[1]) + 0.5)\n", "_ = ax.set_yticklabels(sorted_corr_df.index, rotation=0)\n", "_ = ax.set_xticklabels(sorted_corr_df.columns)\n", "\n", - "ax.set_xlabel('Enzyme')\n", - "ax.set_ylabel('Boundary Flux')\n", + "ax.set_xlabel(\"Enzyme\")\n", + "ax.set_ylabel(\"Boundary Flux\")\n", "\n", "plt.tight_layout()\n", "# plt.savefig('fcc_heatmap_large.svg', dpi=75)" diff --git a/notebooks/run_hackett_inference.py b/notebooks/run_hackett_inference.py index 25d1fae..4ac3c45 100644 --- a/notebooks/run_hackett_inference.py +++ b/notebooks/run_hackett_inference.py @@ -8,11 +8,13 @@ # I guess that's expected import os -import pandas as pd + +import cobra import numpy as np +import pandas as pd import pymc as pm import pytensor.tensor as T -import cobra + import emll from emll.util import initialize_elasticity @@ -149,6 +151,7 @@ # ppc = pm.sample_ppc(trace) import gzip + import cloudpickle with gzip.open("data/hackett_advi.pgz", "wb") as f: diff --git a/notebooks/wu2004.ipynb b/notebooks/wu2004.ipynb index 588fcf1..600f3df 100644 --- a/notebooks/wu2004.ipynb +++ b/notebooks/wu2004.ipynb @@ 
-7,7 +7,8 @@ "outputs": [], "source": [ "import os\n", - "os.environ['MKL_THREADING_LAYER'] = 'GNU'" + "\n", + "os.environ[\"MKL_THREADING_LAYER\"] = \"GNU\"" ] }, { @@ -44,14 +45,15 @@ ], "source": [ "import numpy as np\n", + "\n", "np.random.seed(0)\n", "\n", "import pandas as pd\n", - "import scipy\n", - "\n", "import pymc as pm\n", "import pytensor\n", "import pytensor.tensor as T\n", + "import scipy\n", + "\n", "floatX = pytensor.config.floatX\n", "\n", "%matplotlib inline" @@ -63,12 +65,11 @@ "metadata": {}, "outputs": [], "source": [ - "from tqdm import tqdm\n", - "\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", - "sns.set(context='talk', style='ticks',\n", - " color_codes=True, rc={'legend.frameon': False})" + "from tqdm import tqdm\n", + "\n", + "sns.set(context=\"talk\", style=\"ticks\", color_codes=True, rc={\"legend.frameon\": False})" ] }, { @@ -88,22 +89,26 @@ "\n", "model = cobra.Model()\n", "\n", - "model.add_metabolites([\n", - " cobra.Metabolite('2pg'),\n", - " cobra.Metabolite('pep'),\n", - " ])\n", + "model.add_metabolites(\n", + " [\n", + " cobra.Metabolite(\"2pg\"),\n", + " cobra.Metabolite(\"pep\"),\n", + " ]\n", + ")\n", "\n", - "model.add_reactions([\n", - " cobra.Reaction('PGM'),\n", - " cobra.Reaction('ENO'),\n", - " cobra.Reaction('PK'),\n", - " ])\n", + "model.add_reactions(\n", + " [\n", + " cobra.Reaction(\"PGM\"),\n", + " cobra.Reaction(\"ENO\"),\n", + " cobra.Reaction(\"PK\"),\n", + " ]\n", + ")\n", "\n", - "model.reactions.PGM.reaction = '<=> 2pg'\n", - "model.reactions.ENO.reaction = '2pg <=> pep'\n", - "model.reactions.PK.reaction = 'pep <=>'\n", + "model.reactions.PGM.reaction = \"<=> 2pg\"\n", + "model.reactions.ENO.reaction = \"2pg <=> pep\"\n", + "model.reactions.PK.reaction = \"pep <=>\"\n", "\n", - "N = cobra.util.create_stoichiometric_matrix(model, dtype='int')" + "N = cobra.util.create_stoichiometric_matrix(model, dtype=\"int\")" ] }, { @@ -224,7 +229,7 @@ } ], "source": [ - "giersch = pd.read_csv('data/giersch.csv').astype(float)\n", + "giersch = pd.read_csv(\"data/giersch.csv\").astype(float)\n", "n_exp = len(giersch)\n", "giersch.head()" ] @@ -242,10 +247,10 @@ "metadata": {}, "outputs": [], "source": [ - "e = giersch.loc[:, ['PGM', 'ENO', 'PK']]\n", - "y = giersch.loc[:, ['BPG', 'ADP']]\n", - "v = giersch.loc[:, ['Flux']]\n", - "x = giersch.loc[:, ['2PG', 'PEP']]\n", + "e = giersch.loc[:, [\"PGM\", \"ENO\", \"PK\"]]\n", + "y = giersch.loc[:, [\"BPG\", \"ADP\"]]\n", + "v = giersch.loc[:, [\"Flux\"]]\n", + "x = giersch.loc[:, [\"2PG\", \"PEP\"]]\n", "\n", "ref_ind = 1\n", "\n", @@ -295,6 +300,7 @@ "outputs": [], "source": [ "import emll\n", + "\n", "Ex = emll.create_elasticity_matrix(model)\n", "\n", "Ey = np.zeros((3, 2))\n", @@ -365,14 +371,14 @@ "s_dist = pm.Laplace.dist(mu=0, b=0.05)\n", "\n", "x_eval = np.linspace(-3, 3, 300)\n", - "plt.plot(x_eval, T.exp(pm.logp(normal_dist,x_eval)).eval(), lw=2.)\n", - "plt.plot(x_eval, T.exp(pm.logp(t_dist, x_eval)).eval(), lw=2.)\n", - "plt.plot(x_eval, T.exp(pm.logp(s_dist,x_eval)).eval(), lw=2.)\n", + "plt.plot(x_eval, T.exp(pm.logp(normal_dist, x_eval)).eval(), lw=2.0)\n", + "plt.plot(x_eval, T.exp(pm.logp(t_dist, x_eval)).eval(), lw=2.0)\n", + "plt.plot(x_eval, T.exp(pm.logp(s_dist, x_eval)).eval(), lw=2.0)\n", "\n", "plt.ylim([0, 2])\n", "\n", - "plt.xlabel('x')\n", - "plt.ylabel('Probability density')" + "plt.xlabel(\"x\")\n", + "plt.ylabel(\"Probability density\")" ] }, { @@ -451,11 +457,10 @@ ], "source": [ "with pm.Model() as pymc_model:\n", - " \n", " # Initialize 
elasticities\n", - " Ex_t = pm.Deterministic('Ex', initialize_elasticity(N, 'ex', b=0.05, sigma=1, alpha=5))\n", - " Ey_t = pm.Deterministic('Ey', initialize_elasticity(-Ey.T, 'ey', b=0.05, sigma=1, alpha=5))\n", - " \n", + " Ex_t = pm.Deterministic(\"Ex\", initialize_elasticity(N, \"ex\", b=0.05, sigma=1, alpha=5))\n", + " Ey_t = pm.Deterministic(\"Ey\", initialize_elasticity(-Ey.T, \"ey\", b=0.05, sigma=1, alpha=5))\n", + "\n", "with pymc_model:\n", " trace_prior = pm.sample_prior_predictive()" ] @@ -467,20 +472,20 @@ "outputs": [], "source": [ "with pymc_model:\n", - " \n", - " # Error priors. \n", - " v_err = pm.HalfNormal('v_error', sigma=0.05, initval=.1)\n", - " x_err = pm.HalfNormal('x_error', sigma=0.05, initval=.1)\n", + " # Error priors.\n", + " v_err = pm.HalfNormal(\"v_error\", sigma=0.05, initval=0.1)\n", + " x_err = pm.HalfNormal(\"x_error\", sigma=0.05, initval=0.1)\n", "\n", " # Calculate steady-state concentrations and fluxes from elasticities\n", " chi_ss, v_hat_ss = ll.steady_state_pytensor(Ex_t, Ey_t, en, yn)\n", "\n", " # Error distributions for observed steady-state concentrations and fluxes\n", - " chi_obs = pm.Normal('chi_obs', mu=chi_ss, sigma=x_err, observed=xn)\n", - " v_hat_obs = pm.Normal('v_hat_obs', mu=v_hat_ss[:, 0].squeeze(),\n", - " sigma=v_err, observed=vn.squeeze())\n", + " chi_obs = pm.Normal(\"chi_obs\", mu=chi_ss, sigma=x_err, observed=xn)\n", + " v_hat_obs = pm.Normal(\n", + " \"v_hat_obs\", mu=v_hat_ss[:, 0].squeeze(), sigma=v_err, observed=vn.squeeze()\n", + " )\n", "\n", - "#print(pymc_model.logpt.tag.test_value)" + "# print(pymc_model.logpt.tag.test_value)" ] }, { @@ -558,9 +563,16 @@ ], "source": [ "with pymc_model:\n", - " trace = pm.sample(500, random_seed=123, tune=1000,step=pm.NUTS(), n_init=1000, nuts_sampler_kwargs=dict(njobs=4))\n", - "\n", - " #trace = pm.sample(500, random_seed=123, tune=1000,\n", + " trace = pm.sample(\n", + " 500,\n", + " random_seed=123,\n", + " tune=1000,\n", + " step=pm.NUTS(),\n", + " n_init=1000,\n", + " nuts_sampler_kwargs=dict(njobs=4),\n", + " )\n", + "\n", + " # trace = pm.sample(500, random_seed=123, tune=1000,\n", " # init='nuts', n_init=1000)" ] }, @@ -596,8 +608,9 @@ "outputs": [], "source": [ "# Values for wu 'optimized elasticity' from the same reference state\n", - "e_wu = np.array([-0.55 , -1.30 , 1.53 , 0 , 0.76 , -3.10 ,\n", - " 1.86 , 0 , -0.25 , 0 , 0.54 , 0.70]).reshape((3, -1))\n", + "e_wu = np.array([-0.55, -1.30, 1.53, 0, 0.76, -3.10, 1.86, 0, -0.25, 0, 0.54, 0.70]).reshape(\n", + " (3, -1)\n", + ")\n", "\n", "ex_wu = e_wu[:, :2]\n", "ey_wu = e_wu[:, 2:]" @@ -628,8 +641,12 @@ } ], "source": [ - "tp = pm.plot_trace(trace, var_names=['Ex', 'Ey', 'v_error', 'x_error'], combined=True,\n", - " lines={'Ex': ex_wu.T.flatten(), 'Ey': ey_wu.T.flatten()})\n", + "tp = pm.plot_trace(\n", + " trace,\n", + " var_names=[\"Ex\", \"Ey\", \"v_error\", \"x_error\"],\n", + " combined=True,\n", + " lines={\"Ex\": ex_wu.T.flatten(), \"Ey\": ey_wu.T.flatten()},\n", + ")\n", "sns.despine()\n", "\n", "for ax in tp.flatten():\n", @@ -644,8 +661,8 @@ "metadata": {}, "outputs": [], "source": [ - "ex = trace.posterior['Ex'].to_numpy().reshape((2000, -1))\n", - "ey = trace.posterior['Ey'].to_numpy().reshape((2000, -1))\n", + "ex = trace.posterior[\"Ex\"].to_numpy().reshape((2000, -1))\n", + "ey = trace.posterior[\"Ey\"].to_numpy().reshape((2000, -1))\n", "e_all = np.hstack([ex, ey])" ] }, @@ -657,12 +674,20 @@ "source": [ "m_labels = [m.id for m in model.metabolites]\n", "r_labels = [r.id for r in model.reactions]\n", - 
"y_labels = ['BPG', 'ADP']\n", - "\n", - "ex_labels = np.array([['$\\epsilon_{' + '{0},{1}'.format(rlabel, mlabel) + '}$'\n", - " for mlabel in m_labels] for rlabel in r_labels]).flatten()\n", - "ey_labels = np.array([['$\\epsilon_{' + '{0},{1}'.format(rlabel, mlabel) + '}$'\n", - " for mlabel in y_labels] for rlabel in r_labels]).flatten()\n", + "y_labels = [\"BPG\", \"ADP\"]\n", + "\n", + "ex_labels = np.array(\n", + " [\n", + " [\"$\\epsilon_{\" + \"{0},{1}\".format(rlabel, mlabel) + \"}$\" for mlabel in m_labels]\n", + " for rlabel in r_labels\n", + " ]\n", + ").flatten()\n", + "ey_labels = np.array(\n", + " [\n", + " [\"$\\epsilon_{\" + \"{0},{1}\".format(rlabel, mlabel) + \"}$\" for mlabel in y_labels]\n", + " for rlabel in r_labels\n", + " ]\n", + ").flatten()\n", "\n", "e_labels = np.hstack((ex_labels, ey_labels))" ] @@ -694,19 +719,19 @@ } ], "source": [ - "fig = plt.figure(figsize=(6,3))\n", + "fig = plt.figure(figsize=(6, 3))\n", "\n", "ax = plt.subplot2grid((1, 4), (0, 0), colspan=3)\n", "\n", - "ax.plot(ex, '.', ms=4, alpha=.25, rasterized=True)\n", - "ax.axvspan(500, 1000, zorder=0, color='.85')\n", - "ax.axvspan(1500, 2000, zorder=0, color='.85')\n", + "ax.plot(ex, \".\", ms=4, alpha=0.25, rasterized=True)\n", + "ax.axvspan(500, 1000, zorder=0, color=\".85\")\n", + "ax.axvspan(1500, 2000, zorder=0, color=\".85\")\n", "ax.set_xlim([0, 2000])\n", "\n", "\n", "sns.despine(trim=True, offset=10, ax=ax)\n", - "ax.set_ylabel('Elasticity')\n", - "ax.set_xlabel('Sample')\n", + "ax.set_ylabel(\"Elasticity\")\n", + "ax.set_xlabel(\"Sample\")\n", "\n", "ax2 = plt.subplot2grid((1, 4), (0, 3), colspan=1, fig=fig, sharey=ax)\n", "ax2.xaxis.set_visible(False)\n", @@ -719,9 +744,9 @@ "\n", "plt.setp(ax2.get_yticklabels(), visible=False)\n", "plt.tight_layout()\n", - " \n", - "ax2.legend(loc='center left', fontsize=12, bbox_to_anchor=(1.2, .5), labelspacing=0)\n", - " \n", + "\n", + "ax2.legend(loc=\"center left\", fontsize=12, bbox_to_anchor=(1.2, 0.5), labelspacing=0)\n", + "\n", "# plt.savefig('wu_trace_subset.svg', dpi=200)" ] }, @@ -1121,7 +1146,7 @@ } ], "source": [ - "pm.hdi(trace.posterior['ey_kinetic_entries'])" + "pm.hdi(trace.posterior[\"ey_kinetic_entries\"])" ] }, { @@ -1522,7 +1547,7 @@ } ], "source": [ - "pm.hdi(trace.posterior['ey_capacity_entries'])" + "pm.hdi(trace.posterior[\"ey_capacity_entries\"])" ] }, { @@ -1571,11 +1596,10 @@ "for i in range(12):\n", " for j in range(12):\n", " if i is not j:\n", - " axmatrix[i,j].plot(e_all[:, i], e_all[:, j],\n", - " 'k.', alpha=0.1, ms=1, rasterized=True)\n", + " axmatrix[i, j].plot(e_all[:, i], e_all[:, j], \"k.\", alpha=0.1, ms=1, rasterized=True)\n", " else:\n", - " axmatrix[i,j].hist(e_all[:, i], bins=5, color='k', edgecolor='w', lw=1)\n", - " \n", + " axmatrix[i, j].hist(e_all[:, i], bins=5, color=\"k\", edgecolor=\"w\", lw=1)\n", + "\n", "_ = ax_all = fig.add_subplot(111, frameon=False)\n", "_ = ax_all.set_xticks(np.arange(12) + 0.5)\n", "_ = ax_all.set_xticklabels(e_labels, rotation=90)\n", @@ -1764,9 +1788,8 @@ "source": [ "with pymc_model:\n", " approx = pm.ADVI()\n", - " hist = approx.fit(n=25000, obj_optimizer=pm.adagrad_window(learning_rate=5E-3),\n", - " obj_n_mc=1)\n", - " \n", + " hist = approx.fit(n=25000, obj_optimizer=pm.adagrad_window(learning_rate=5e-3), obj_n_mc=1)\n", + "\n", "with pymc_model:\n", " trace_vi = hist.sample(1000)\n", " ppc_vi = pm.sample_posterior_predictive(trace_vi)" @@ -1796,17 +1819,16 @@ } ], "source": [ - "with sns.plotting_context('notebook', font_scale=1.2):\n", - "\n", - " fig = 
plt.figure(figsize=(5,4))\n", - " plt.plot(approx.hist + 30, '.', rasterized=True, ms=1)\n", - " plt.ylim([-1E1, 1E3])\n", + "with sns.plotting_context(\"notebook\", font_scale=1.2):\n", + " fig = plt.figure(figsize=(5, 4))\n", + " plt.plot(approx.hist + 30, \".\", rasterized=True, ms=1)\n", + " plt.ylim([-1e1, 1e3])\n", " plt.xlim([0, 25000])\n", " sns.despine(trim=True, offset=10)\n", "\n", - " plt.ylabel('-ELBO')\n", - " plt.xlabel('Iteration')\n", - " plt.title('in vitro ADVI convergence')\n", + " plt.ylabel(\"-ELBO\")\n", + " plt.xlabel(\"Iteration\")\n", + " plt.title(\"in vitro ADVI convergence\")\n", " plt.tight_layout()\n", " # plt.savefig('wu_elbo.svg', transparent=True, dpi=200)" ] @@ -1817,7 +1839,7 @@ "metadata": {}, "outputs": [], "source": [ - "x = giersch.loc[:, ['2PG', 'PEP']]" + "x = giersch.loc[:, [\"2PG\", \"PEP\"]]" ] }, { @@ -1837,57 +1859,73 @@ } ], "source": [ - "with sns.plotting_context('notebook', font_scale=1.2):\n", - "\n", - " fig, axmatrix = plt.subplots(ncols=3, figsize=(8,3.5))\n", + "with sns.plotting_context(\"notebook\", font_scale=1.2):\n", + " fig, axmatrix = plt.subplots(ncols=3, figsize=(8, 3.5))\n", "\n", " for ax in axmatrix.flatten():\n", - " ax.set_aspect('equal')\n", + " ax.set_aspect(\"equal\")\n", "\n", " def plot_hpd_err(truth, ppc, ax, color=None):\n", - " median = ppc.median(dim=['chain','draw'])\n", + " median = ppc.median(dim=[\"chain\", \"draw\"])\n", " hpd = pm.hdi(ppc)\n", - " err = np.abs(hpd - median).to_array()[0]#median[:, np.newaxis])\n", - "\n", - " return ax.errorbar(truth, median, yerr=err.T, ls='',\n", - " marker='.', elinewidth=1., color=color)\n", - "\n", - " plot_hpd_err(vn * v_star[0] - 2, ppc.posterior_predictive['v_hat_obs'] * v_star[0], axmatrix[0])\n", - " plot_hpd_err(x['2PG'].values - 3, x_star[0] * np.exp(ppc.posterior_predictive['chi_obs'][:,:,:,0]),\n", - " axmatrix[1])\n", - " l0 = plot_hpd_err(x['PEP'].values - 1.25, x_star[1] * np.exp(ppc.posterior_predictive['chi_obs'][:,:,:,1]),\n", - " axmatrix[2])\n", - "\n", - " plot_hpd_err(vn * v_star[0] + 2, ppc_vi.posterior_predictive['v_hat_obs'] * v_star[0], axmatrix[0], 'g')\n", - " plot_hpd_err(x['2PG'].values + 3, x_star[0] * np.exp(ppc_vi.posterior_predictive['chi_obs'][:,:,:,0]),\n", - " axmatrix[1], 'g')\n", - " l1 = plot_hpd_err(x['PEP'].values + 1.25, x_star[1] * np.exp(ppc_vi.posterior_predictive['chi_obs'][:,:,:,1]),\n", - " axmatrix[2], 'g')\n", - "\n", - " axmatrix[0].set_title('Flux')\n", - " axmatrix[1].set_title('2PG')\n", - " axmatrix[2].set_title('PEP')\n", + " err = np.abs(hpd - median).to_array()[0] # median[:, np.newaxis])\n", + "\n", + " return ax.errorbar(\n", + " truth, median, yerr=err.T, ls=\"\", marker=\".\", elinewidth=1.0, color=color\n", + " )\n", + "\n", + " plot_hpd_err(vn * v_star[0] - 2, ppc.posterior_predictive[\"v_hat_obs\"] * v_star[0], axmatrix[0])\n", + " plot_hpd_err(\n", + " x[\"2PG\"].values - 3,\n", + " x_star[0] * np.exp(ppc.posterior_predictive[\"chi_obs\"][:, :, :, 0]),\n", + " axmatrix[1],\n", + " )\n", + " l0 = plot_hpd_err(\n", + " x[\"PEP\"].values - 1.25,\n", + " x_star[1] * np.exp(ppc.posterior_predictive[\"chi_obs\"][:, :, :, 1]),\n", + " axmatrix[2],\n", + " )\n", + "\n", + " plot_hpd_err(\n", + " vn * v_star[0] + 2, ppc_vi.posterior_predictive[\"v_hat_obs\"] * v_star[0], axmatrix[0], \"g\"\n", + " )\n", + " plot_hpd_err(\n", + " x[\"2PG\"].values + 3,\n", + " x_star[0] * np.exp(ppc_vi.posterior_predictive[\"chi_obs\"][:, :, :, 0]),\n", + " axmatrix[1],\n", + " \"g\",\n", + " )\n", + " l1 = plot_hpd_err(\n", + " 
x[\"PEP\"].values + 1.25,\n", + " x_star[1] * np.exp(ppc_vi.posterior_predictive[\"chi_obs\"][:, :, :, 1]),\n", + " axmatrix[2],\n", + " \"g\",\n", + " )\n", + "\n", + " axmatrix[0].set_title(\"Flux\")\n", + " axmatrix[1].set_title(\"2PG\")\n", + " axmatrix[2].set_title(\"PEP\")\n", " # axmatrix[3].set_title('Enzymes')\n", "\n", - " axmatrix[1].set_xlabel('Measured')\n", - " axmatrix[0].set_ylabel('Predicted')\n", + " axmatrix[1].set_xlabel(\"Measured\")\n", + " axmatrix[0].set_ylabel(\"Predicted\")\n", "\n", " fig.tight_layout()\n", "\n", " ax0 = [75, 175]\n", " ax2 = [40, 125]\n", - " \n", + "\n", " axmatrix[0].set_xlim(*ax0)\n", " axmatrix[0].set_ylim(ax0)\n", - " axmatrix[0].plot(ax0, ax0, '--', color='.7', zorder=0)\n", + " axmatrix[0].plot(ax0, ax0, \"--\", color=\".7\", zorder=0)\n", "\n", " axmatrix[1].set_xlim([0, 300])\n", " axmatrix[1].set_ylim([0, 300])\n", - " axmatrix[1].plot([0, 300], [0, 300], '--', color='.7', zorder=0)\n", + " axmatrix[1].plot([0, 300], [0, 300], \"--\", color=\".7\", zorder=0)\n", "\n", " axmatrix[2].set_xlim(*ax2)\n", " axmatrix[2].set_ylim(*ax2)\n", - " axmatrix[2].plot(ax2, ax2, '--', color='.7', zorder=0)\n", + " axmatrix[2].plot(ax2, ax2, \"--\", color=\".7\", zorder=0)\n", "\n", " # axmatrix[3].plot([0, 60], [0, 60], '--', color='.7', zorder=0)\n", "\n", @@ -1900,8 +1938,7 @@ " axmatrix[2].set_yticks(np.arange(50, 150, 25))\n", " axmatrix[2].set_xticks(np.arange(50, 150, 25))\n", "\n", - " leg = plt.legend([l0, l1], ['NUTS', 'ADVI'],\n", - " loc='lower right', borderpad=0, borderaxespad=0)\n", + " leg = plt.legend([l0, l1], [\"NUTS\", \"ADVI\"], loc=\"lower right\", borderpad=0, borderaxespad=0)\n", "\n", " sns.despine(trim=False, offset=10)\n", " # fig.savefig('ppc_predictions.svg')" @@ -1913,8 +1950,8 @@ "metadata": {}, "outputs": [], "source": [ - "ex = trace.posterior['Ex'].to_numpy().reshape((2000, -1))\n", - "ey = trace.posterior['Ey'].to_numpy().reshape((2000, -1))\n", + "ex = trace.posterior[\"Ex\"].to_numpy().reshape((2000, -1))\n", + "ey = trace.posterior[\"Ey\"].to_numpy().reshape((2000, -1))\n", "e_all = np.hstack([ex, ey])" ] }, @@ -2047,9 +2084,13 @@ "metadata": {}, "outputs": [], "source": [ - "fcc = np.array([ll.flux_control_coefficient(Ex=ex) for ex in trace.posterior['Ex'][0].to_numpy()])\n", - "fcc_mb = np.array([ll.flux_control_coefficient(Ex=ex) for ex in trace_vi.posterior['Ex'][0].to_numpy()])\n", - "fcc_prior = np.array([ll.flux_control_coefficient(Ex=ex) for ex in trace_prior.prior['Ex'][0].to_numpy()])" + "fcc = np.array([ll.flux_control_coefficient(Ex=ex) for ex in trace.posterior[\"Ex\"][0].to_numpy()])\n", + "fcc_mb = np.array(\n", + " [ll.flux_control_coefficient(Ex=ex) for ex in trace_vi.posterior[\"Ex\"][0].to_numpy()]\n", + ")\n", + "fcc_prior = np.array(\n", + " [ll.flux_control_coefficient(Ex=ex) for ex in trace_prior.prior[\"Ex\"][0].to_numpy()]\n", + ")" ] }, { @@ -2061,7 +2102,7 @@ "# Control coefficients estimated from elasticities\n", "# Wu table 7\n", "\n", - "wu_ccs = np.array([-0.34 , 0.14 , 1.20])" + "wu_ccs = np.array([-0.34, 0.14, 1.20])" ] }, { @@ -2070,23 +2111,26 @@ "metadata": {}, "outputs": [], "source": [ - "df1 = pd.DataFrame(fcc[:, 0], columns=[r.id for r in model.reactions]\n", - " ).stack().reset_index(level=1)\n", - "df2 = pd.DataFrame(fcc_mb[:, 0], columns=[r.id for r in model.reactions]\n", - " ).stack().reset_index(level=1)\n", - "df3 = pd.DataFrame(fcc_prior[:, 0], columns=[r.id for r in model.reactions]\n", - " ).stack().reset_index(level=1)\n", + "df1 = pd.DataFrame(fcc[:, 0], 
columns=[r.id for r in model.reactions]).stack().reset_index(level=1)\n", + "df2 = (\n", + " pd.DataFrame(fcc_mb[:, 0], columns=[r.id for r in model.reactions]).stack().reset_index(level=1)\n", + ")\n", + "df3 = (\n", + " pd.DataFrame(fcc_prior[:, 0], columns=[r.id for r in model.reactions])\n", + " .stack()\n", + " .reset_index(level=1)\n", + ")\n", "\n", - "df1['type'] = 'NUTS'\n", - "df2['type'] = 'ADVI'\n", - "df3['type'] = 'Prior'\n", + "df1[\"type\"] = \"NUTS\"\n", + "df2[\"type\"] = \"ADVI\"\n", + "df3[\"type\"] = \"Prior\"\n", "\n", "\n", "fcc_df = pd.concat([df1, df2, df3])\n", - "fcc_df.columns = ['Reaction', 'FCC', 'Type']\n", + "fcc_df.columns = [\"Reaction\", \"FCC\", \"Type\"]\n", "\n", - "fcc_df.loc[fcc_df.FCC < -.5, 'FCC'] = np.nan\n", - "fcc_df.loc[fcc_df.FCC > 1.5, 'FCC'] = np.nan" + "fcc_df.loc[fcc_df.FCC < -0.5, \"FCC\"] = np.nan\n", + "fcc_df.loc[fcc_df.FCC > 1.5, \"FCC\"] = np.nan" ] }, { @@ -2126,8 +2170,8 @@ } ], "source": [ - "import seaborn as sns\n", "import matplotlib.pyplot as plt\n", + "import seaborn as sns\n", "\n", "# Reset index to avoid duplicate labels\n", "fcc_df_reset = fcc_df.reset_index()\n", @@ -2141,24 +2185,38 @@ "my_pal = {\"Prior\": \".8\", \"NUTS\": \"g\", \"ADVI\": \"b\"}\n", "\n", "# Filter data for specific type 'Prior'\n", - "data_prior = fcc_df_reset[fcc_df_reset.Type == 'Prior']\n", + "data_prior = fcc_df_reset[fcc_df_reset.Type == \"Prior\"]\n", "\n", "# Create violin plot for 'Prior' type on ax\n", "sns.violinplot(\n", - " x='Reaction', y='FCC', hue='Type', data=data_prior,\n", - " density_norm='width', width=0.5, palette=my_pal, saturation=1., alpha=0.1, ax=ax\n", + " x=\"Reaction\",\n", + " y=\"FCC\",\n", + " hue=\"Type\",\n", + " data=data_prior,\n", + " density_norm=\"width\",\n", + " width=0.5,\n", + " palette=my_pal,\n", + " saturation=1.0,\n", + " alpha=0.1,\n", + " ax=ax,\n", ")\n", "\n", "# Create violin plot for all types ('NUTS' and 'ADVI') on ax2 (twin axes)\n", "sns.violinplot(\n", - " x='Reaction', y='FCC', hue='Type', data=fcc_df_reset,\n", - " density_norm='width', width=0.8, hue_order=['NUTS', 'ADVI'],\n", - " palette=my_pal, ax=ax2\n", + " x=\"Reaction\",\n", + " y=\"FCC\",\n", + " hue=\"Type\",\n", + " data=fcc_df_reset,\n", + " density_norm=\"width\",\n", + " width=0.8,\n", + " hue_order=[\"NUTS\", \"ADVI\"],\n", + " palette=my_pal,\n", + " ax=ax2,\n", ")\n", "\n", "# Additional plot elements or customizations\n", "for i, cc in enumerate(wu_ccs):\n", - " ax2.plot([i - .4, i + .4], [cc, cc], '-', color=sns.color_palette('muted')[3])\n", + " ax2.plot([i - 0.4, i + 0.4], [cc, cc], \"-\", color=sns.color_palette(\"muted\")[3])\n", "\n", "# Get legend handles and labels for both axes\n", "phandles, plabels = ax.get_legend_handles_labels()\n", @@ -2169,19 +2227,24 @@ "ax2.legend().remove()\n", "\n", "# Combine legends for both axes and Wu 2004 into a single legend\n", - "legend = ax2.legend(phandles + handles + [plt.Line2D([0], [0], color=sns.color_palette('muted')[3])],\n", - " plabels + labels + ['Wu 2004'], loc='upper center', ncol=4, fontsize=13)\n", + "legend = ax2.legend(\n", + " phandles + handles + [plt.Line2D([0], [0], color=sns.color_palette(\"muted\")[3])],\n", + " plabels + labels + [\"Wu 2004\"],\n", + " loc=\"upper center\",\n", + " ncol=4,\n", + " fontsize=13,\n", + ")\n", "\n", "# Set y-axis limit for ax\n", "ax.set_ylim([-1, 2])\n", "\n", "# Add horizontal line at y=0 to ax\n", - "ax.axhline(0, ls='--', color='.7', zorder=0)\n", + "ax.axhline(0, ls=\"--\", color=\".7\", zorder=0)\n", "\n", "# Remove spines 
for aesthetics\n", "sns.despine(trim=True)\n", "# Save the figure\n", - "#fig.savefig('wu_fccs.svg', transparent=True)\n" + "# fig.savefig('wu_fccs.svg', transparent=True)" ] }, { @@ -2283,8 +2346,8 @@ } ], "source": [ - "ex_vi = trace_vi.posterior['Ex'].to_numpy().reshape((len(trace_vi.posterior['Ex'][0]), -1))\n", - "ey_vi = trace_vi.posterior['Ey'].to_numpy().reshape((len(trace_vi.posterior['Ey'][0]), -1))\n", + "ex_vi = trace_vi.posterior[\"Ex\"].to_numpy().reshape((len(trace_vi.posterior[\"Ex\"][0]), -1))\n", + "ey_vi = trace_vi.posterior[\"Ey\"].to_numpy().reshape((len(trace_vi.posterior[\"Ey\"][0]), -1))\n", "e_all_vi = np.hstack([ex_vi, ey_vi])\n", "\n", "e_df_vi = pd.DataFrame(e_all_vi, columns=e_labels)\n", @@ -2321,42 +2384,52 @@ "source": [ "import arviz as az\n", "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", "import numpy as np\n", + "import seaborn as sns\n", "\n", "# Assuming you have the data arrays ex, ex_vi, ey, ey_vi\n", "# Modify as needed based on your data setup\n", "\n", - "colors = sns.color_palette('husl', n_colors=12) # Assuming a total of 12 colors needed\n", + "colors = sns.color_palette(\"husl\", n_colors=12) # Assuming a total of 12 colors needed\n", "\n", "fig, axmatrix = plt.subplots(nrows=2, sharex=False, figsize=(6, 5))\n", "\n", "for i, color in enumerate(colors[:6]):\n", - " az.plot_kde(np.array(ex[:, i]), plot_kwargs={'color': color}, fill_kwargs={'color': color, 'alpha': 0.1}, ax=axmatrix[0])\n", - " az.plot_kde(np.array(ex_vi[:, i]), plot_kwargs={'color': color}, ax=axmatrix[0])\n", + " az.plot_kde(\n", + " np.array(ex[:, i]),\n", + " plot_kwargs={\"color\": color},\n", + " fill_kwargs={\"color\": color, \"alpha\": 0.1},\n", + " ax=axmatrix[0],\n", + " )\n", + " az.plot_kde(np.array(ex_vi[:, i]), plot_kwargs={\"color\": color}, ax=axmatrix[0])\n", " lines = axmatrix[0].get_lines() # Get the lines from the plot\n", - " lines[-1].set_linestyle('--') # Set linestyle for the last line\n", + " lines[-1].set_linestyle(\"--\") # Set linestyle for the last line\n", "\n", "for i, color in enumerate(colors[6:]):\n", - " az.plot_kde(np.array(ey[:, i]), plot_kwargs={'color': color}, fill_kwargs={'color': color, 'alpha': 0.1}, ax=axmatrix[1])\n", - " az.plot_kde(np.array(ey_vi[:, i]), plot_kwargs={'color': color}, ax=axmatrix[1])\n", + " az.plot_kde(\n", + " np.array(ey[:, i]),\n", + " plot_kwargs={\"color\": color},\n", + " fill_kwargs={\"color\": color, \"alpha\": 0.1},\n", + " ax=axmatrix[1],\n", + " )\n", + " az.plot_kde(np.array(ey_vi[:, i]), plot_kwargs={\"color\": color}, ax=axmatrix[1])\n", " lines = axmatrix[1].get_lines() # Get the lines from the plot\n", - " lines[-1].set_linestyle('--') # Set linestyle for the last line\n", + " lines[-1].set_linestyle(\"--\") # Set linestyle for the last line\n", "\n", - "hmc_line = plt.Line2D([], [], color='.6', ls='-', label='NUTS') # Create Line2D for legend\n", - "vi_line = plt.Line2D([], [], color='.6', ls='--', label='ADVI') # Create Line2D for legend\n", + "hmc_line = plt.Line2D([], [], color=\".6\", ls=\"-\", label=\"NUTS\") # Create Line2D for legend\n", + "vi_line = plt.Line2D([], [], color=\".6\", ls=\"--\", label=\"ADVI\") # Create Line2D for legend\n", "\n", - "axmatrix[0].set_ylabel('Frequency')\n", - "axmatrix[1].set_ylabel('Frequency')\n", + "axmatrix[0].set_ylabel(\"Frequency\")\n", + "axmatrix[1].set_ylabel(\"Frequency\")\n", "\n", - "axmatrix[0].set_title('$\\epsilon_x^*$')\n", - "axmatrix[1].set_title('$\\epsilon_y^*$')\n", + "axmatrix[0].set_title(\"$\\epsilon_x^*$\")\n", + 
"axmatrix[1].set_title(\"$\\epsilon_y^*$\")\n", "\n", - "axmatrix[0].legend(handles=[hmc_line, vi_line], loc='upper left')\n", + "axmatrix[0].legend(handles=[hmc_line, vi_line], loc=\"upper left\")\n", "plt.tight_layout()\n", "sns.despine(trim=True)\n", "plt.show() # Display the plot\n", - "# fig.savefig('advi_nuts_elasticity.svg', transparent=True)\n" + "# fig.savefig('advi_nuts_elasticity.svg', transparent=True)" ] }, { diff --git a/pyproject.toml b/pyproject.toml index d95d496..35ddaba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,4 +11,77 @@ profile = "black" multi_line_output = 3 line_length = 100 include_trailing_comma = true -reverse_relative = true \ No newline at end of file +reverse_relative = true + +[tool.ruff] +line-length = 100 +select = [ + # mccabe + "C90", + # flake-8 comprehensions + "C4", + # pycodestyle + "E", # Error + "W", # Warning + # PyFlakes + "F", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # type checking + "TCH", + # flake8-print + "T2", + # pep8-naming + "N", + # pydocstyle + "D", + # flake8-bandit + "S", + +] +show-fixes = true +exclude = [ + ".tox", + ".git", + "__pycache__", + "build", + "dist", + "tests/fixtures/*", + "*.pyc", + "*.egg-info", + ".cache", + ".eggs", + "data", +] + +[tool.ruff.lint] +# 1. Enable flake8-bugbear (`B`) rules, in addition to the defaults. +# select = ["E4", "E7", "E9", "F", "B"] + +# 2. Avoid enforcing line-length violations (`E501`) +ignore = ["E501","D203", "D213"] + +# 3. Avoid trying to fix flake8-bugbear (`B`) violations. +unfixable = ["B"] + +# 4. Ignore `E402` (import violations) in all `__init__.py` files, and in select subdirectories. +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["E402"] +"**/{tests,docs,tools}/*" = ["E402"] + +[tool.ruff.format] +# Like Black, use double quotes for non-triple-quoted strings. +quote-style = "double" +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false +# Like Black, automatically detect the appropriate line ending. +line-ending = "auto" + +[tool.ruff.mccabe] +max-complexity = 20 diff --git a/src/emll/__init__.py b/src/emll/__init__.py index 8fdfddb..179dd4a 100644 --- a/src/emll/__init__.py +++ b/src/emll/__init__.py @@ -1,4 +1,3 @@ +import emll.test_models from emll.linlog_model import * from emll.util import create_elasticity_matrix, create_Ey_matrix - -import emll.test_models diff --git a/src/emll/linlog_model.py b/src/emll/linlog_model.py index 556a67c..7dd3b87 100644 --- a/src/emll/linlog_model.py +++ b/src/emll/linlog_model.py @@ -1,171 +1,178 @@ -""" Model Definition File for LinLog algorithm """ +"""Model Definition File for LinLog algorithm.""" + import warnings import numpy as np -import scipy as sp - import pytensor import pytensor.tensor as at import pytensor.tensor.slinalg +import scipy as sp -from emll.pytensor_utils import RegularizedSolve, LeastSquaresSolve, lstsq_wrapper +from emll.pytensor_utils import LeastSquaresSolve, RegularizedSolve, lstsq_wrapper from emll.util import compute_smallbone_reduction, compute_waldherr_reduction - -_floatX = pytensor.config.floatX +_floatx = pytensor.config.floatX class LinLogBase: - def __init__(self, N, Ex, Ey, v_star, reduction_method="smallbone"): - """A class to perform the linear algebra underlying the - decomposition method. - + def __init__(self, stoich, epsilon_x, epsilon_y, v_star, reduction_method="smallbone"): + """Perform linear algebra underlying decomposition methods. 
Parameters
        ----------
-        N : np.array
-            The full stoichiometric matrix of the considered model. Must be of
-            dimensions MxN
-        Ex : np.array
-            An NxM array of the elasticity coefficients for the given linlog
-            model.
-        Ey : np.array
-            An NxP array of the elasticity coefficients for the external
-            species.
-        v_star : np.array
-            A length M vector specifying the original steady-state flux
-            solution of the model.
-        lam : float
-            The λ value to use for tikhonov regularization
-        reduction_method : 'waldherr', 'smallbone', or None
-            Type of stoichiometric decomposition to perform (default
-            'smallbone')
-
-
+        stoich : numpy.ndarray
+            The stoichiometric matrix of the model.
+        epsilon_x : numpy.ndarray
+            The elasticity coefficients for the internal species.
+        epsilon_y : numpy.ndarray
+            The elasticity coefficients for the external species.
+        v_star : numpy.ndarray
+            The reference steady-state flux.
+        reduction_method : str or None
+            The reduction method to use ('waldherr', 'smallbone', or None).
         """
-        self.nm, self.nr = N.shape
-        self.ny = Ey.shape[1]
+        self.n_m, self.n_r = stoich.shape
+        self.n_y = epsilon_y.shape[1]

-        self.N = N
+        self.stoich = stoich

         if reduction_method == "smallbone":
-            self.Nr, self.L, _ = compute_smallbone_reduction(N, Ex, v_star)
+            self.stoich_reduced, self.link_matrix, _ = compute_smallbone_reduction(
+                stoich, epsilon_x, v_star
+            )
         elif reduction_method == "waldherr":
-            self.Nr, _, _ = compute_waldherr_reduction(N)
+            self.stoich_reduced, _, _ = compute_waldherr_reduction(stoich)
         elif reduction_method is None:
-            self.Nr = N
+            self.stoich_reduced = stoich

-        self.Ex = Ex
-        self.Ey = Ey
+        self.epsilon_x = epsilon_x
+        self.epsilon_y = epsilon_y

-        assert np.all(v_star >= 0), "reference fluxes should be nonnegative"
+        if not np.all(v_star >= 0):
+            raise ValueError("reference fluxes should be nonnegative")

         if np.any(np.isclose(v_star, 0)):
-            warnings.warn("v_star contains zero entries, this will cause problems")
+            warnings.warn("v_star contains zero entries, this will cause problems", stacklevel=2)

         self.v_star = v_star

-        assert Ex.shape == (self.nr, self.nm), "Ex is the wrong shape"
-        assert Ey.shape == (self.nr, self.ny), "Ey is the wrong shape"
-        assert len(v_star) == self.nr, "v_star is the wrong length"
-        assert np.allclose(self.Nr @ v_star, 0), "reference not steady state"
+        if epsilon_x.shape != (self.n_r, self.n_m):
+            raise ValueError("epsilon_x is the wrong shape")
+        if epsilon_y.shape != (self.n_r, self.n_y):
+            raise ValueError("epsilon_y is the wrong shape")
+        if len(v_star) != self.n_r:
+            raise ValueError("v_star is the wrong length")
+        if not np.allclose(self.stoich_reduced @ v_star, 0):
+            raise ValueError("reference not steady state")

-    def _generate_default_inputs(self, Ex=None, Ey=None, en=None, yn=None):
+    def _generate_default_inputs(self, epsilon_x=None, epsilon_y=None, e_n=None, y_n=None):
         """Create matricies representing no perturbation is input is None."""
-        if Ex is None:
-            Ex = self.Ex
+        if epsilon_x is None:
+            epsilon_x = self.epsilon_x

-        if Ey is None:
-            Ey = self.Ey
+        if epsilon_y is None:
+            epsilon_y = self.epsilon_y

-        if en is None:
-            en = np.ones(self.nr)
+        if e_n is None:
+            e_n = np.ones(self.n_r)

-        if yn is None:
-            yn = np.zeros(self.ny)
+        if y_n is None:
+            y_n = np.zeros(self.n_y)

-        return Ex, Ey, en, yn
+        return epsilon_x, epsilon_y, e_n, y_n

     def steady_state_mat(
         self,
-        Ex=None,
-        Ey=None,
-        en=None,
-        yn=None,
+        epsilon_x=None,
+        epsilon_y=None,
+        e_n=None,
+        y_n=None,
     ):
-        """Calculate a
the steady-state transformed metabolite concentrations
+        """Calculate the steady-state matrix.
+
+        Calculate the steady-state transformed metabolite concentrations
         and fluxes using a matrix solve method.

-        en: np.ndarray
+        Parameters
+        ----------
+        e_n: np.ndarray
             a NR vector of perturbed normalized enzyme activities
-        yn: np.ndarray
+        y_n: np.ndarray
             a NY vector of normalized external metabolite concentrations
-        Ex, Ey: optional replacement elasticity matrices
+        epsilon_x, epsilon_y: optional replacement elasticity matrices
+
+        Returns
+        -------
+        x_n: the steady state concentration values
+        v_n: the steady state flux values
         """
-        Ex, Ey, en, yn = self._generate_default_inputs(Ex, Ey, en, yn)
+        epsilon_x, epsilon_y, e_n, y_n = self._generate_default_inputs(
+            epsilon_x, epsilon_y, e_n, y_n
+        )

         # Calculate steady-state concentrations using linear solve.
-        N_hat = self.Nr @ np.diag(self.v_star * en)
-        A = N_hat @ Ex
-        b = -N_hat @ (np.ones(self.nr) + Ey @ yn)
-        xn = self.solve(A, b)
+        n_hat = self.stoich_reduced @ np.diag(self.v_star * e_n)
+        a_matrix = n_hat @ epsilon_x
+        b_matrix = -n_hat @ (np.ones(self.n_r) + epsilon_y @ y_n)
+        x_n = self.solve(a_matrix, b_matrix)

         # Plug concentrations into the flux equation.
-        vn = en * (np.ones(self.nr) + Ex @ xn + Ey @ yn)
+        v_n = e_n * (np.ones(self.n_r) + epsilon_x @ x_n + epsilon_y @ y_n)

-        return xn, vn
+        return x_n, v_n

-    def steady_state_pytensor(self, Ex, Ey=None, en=None, yn=None, method="scan"):
+    def steady_state_pytensor(self, epsilon_x, epsilon_y=None, e_n=None, y_n=None, method="scan"):
         """Calculate a the steady-state transformed metabolite concentrations
         and fluxes using PyTensor.

-        Ex, Ey, en and yn should be pytensor matrices
+        epsilon_x, epsilon_y, e_n and y_n should be pytensor matrices

         solver: function
             A function to solve Ax = b for a (possibly) singular A. Should
-            accept pytensor matrices A and b, and return a symbolic x.
+            accept pytensor matrices a_matrix and b_matrix, and return a symbolic x.
""" + if epsilon_y is None: + epsilon_y = at.as_tensor_variable(epsilon_y) - if Ey is None: - Ey = at.as_tensor_variable(Ey) - - if isinstance(en, np.ndarray): - en = np.atleast_2d(en) - n_exp = en.shape[0] + if isinstance(e_n, np.ndarray): + e_n = np.atleast_2d(e_n) + n_exp = e_n.shape[0] else: - n_exp = en.shape.eval()[0] + n_exp = e_n.shape.eval()[0] - if isinstance(yn, np.ndarray): - yn = np.atleast_2d(yn) + if isinstance(y_n, np.ndarray): + y_n = np.atleast_2d(y_n) - en = at.as_tensor_variable(en) - yn = at.as_tensor_variable(yn) + e_n = at.as_tensor_variable(e_n) + y_n = at.as_tensor_variable(y_n) - e_diag = en.dimshuffle(0, 1, "x") * np.diag(self.v_star) - N_rep = self.Nr.reshape((-1, *self.Nr.shape)).repeat(n_exp, axis=0) - N_hat = at.batched_dot(N_rep, e_diag) + e_diag = e_n.dimshuffle(0, 1, "x") * np.diag(self.v_star) + n_rep = self.stoich_reduced.reshape((-1, *self.stoich_reduced.shape)).repeat(n_exp, axis=0) + n_hat = at.batched_dot(n_rep, e_diag) - inner_v = Ey.dot(yn.T).T + np.ones(self.nr, dtype=_floatX) - As = at.dot(N_hat, Ex) + inner_v = epsilon_y.dot(y_n.T).T + np.ones(self.n_r, dtype=_floatx) + a_matrix_s = at.dot(n_hat, epsilon_x) - bs = at.batched_dot(-N_hat, inner_v.dimshuffle(0, 1, "x")) + b_matrix_s = at.batched_dot(-n_hat, inner_v.dimshuffle(0, 1, "x")) if method == "scan": - xn, _ = pytensor.scan( - lambda A, b: self.solve_pytensor(A, b), sequences=[As, bs], strict=True + x_n, _ = pytensor.scan( + lambda a_matrix, b_matrix: self.solve_pytensor(a_matrix, b_matrix), + sequences=[a_matrix_s, b_matrix_s], + strict=True, ) else: - xn_list = [None] * n_exp + x_n_list = [None] * n_exp for i in range(n_exp): - xn_list[i] = self.solve_pytensor(As[i], bs[i]) - xn = at.stack(xn_list) + x_n_list[i] = self.solve_pytensor(a_matrix_s[i], b_matrix_s[i]) + x_n = at.stack(x_n_list) - vn = en * (np.ones(self.nr) + at.dot(Ex, xn.T).T + at.dot(Ey, yn.T).T) + v_n = e_n * (np.ones(self.n_r) + at.dot(epsilon_x, x_n.T).T + at.dot(epsilon_y, y_n.T).T) - return xn, vn + return x_n, v_n - def metabolite_control_coefficient(self, Ex=None, Ey=None, en=None, yn=None): + def metabolite_control_coefficient(self, epsilon_x=None, epsilon_y=None, e_n=None, y_n=None): """Calculate the metabolite control coefficient matrix at the desired perturbed state. @@ -173,177 +180,201 @@ def metabolite_control_coefficient(self, Ex=None, Ey=None, en=None, yn=None): link matrix), so maybe don't trust MCC's all that much. FCC's agree though. 
""" + epsilon_x, epsilon_y, e_n, y_n = self._generate_default_inputs( + epsilon_x, epsilon_y, e_n, y_n + ) - Ex, Ey, en, yn = self._generate_default_inputs(Ex, Ey, en, yn) - - xn, vn = self.steady_state_mat(Ex, Ey, en, yn) + x_n, v_n = self.steady_state_mat(epsilon_x, epsilon_y, e_n, y_n) # Calculate the elasticity matrix at the new steady-state - Ex_ss = np.diag(en / vn) @ Ex + epsilon_x_ss = np.diag(e_n / v_n) @ epsilon_x - Cx = -self.solve( - self.Nr @ np.diag(vn * self.v_star) @ Ex_ss, - self.Nr @ np.diag(vn * self.v_star), + c_x = -self.solve( + self.stoich_reduced @ np.diag(v_n * self.v_star) @ epsilon_x_ss, + self.stoich_reduced @ np.diag(v_n * self.v_star), ) - return Cx + return c_x - def flux_control_coefficient(self, Ex=None, Ey=None, en=None, yn=None): + def flux_control_coefficient(self, epsilon_x=None, epsilon_y=None, e_n=None, y_n=None): """Calculate the metabolite control coefficient matrix at the desired - perturbed state""" - - Ex, Ey, en, yn = self._generate_default_inputs(Ex, Ey, en, yn) + perturbed state + """ + epsilon_x, epsilon_y, e_n, y_n = self._generate_default_inputs( + epsilon_x, epsilon_y, e_n, y_n + ) - xn, vn = self.steady_state_mat(Ex, Ey, en, yn) + x_n, v_n = self.steady_state_mat(epsilon_x, epsilon_y, e_n, y_n) # Calculate the elasticity matrix at the new steady-state - Ex_ss = np.diag(en / vn) @ Ex + epsilon_x_ss = np.diag(e_n / v_n) @ epsilon_x - Cx = self.metabolite_control_coefficient(Ex, Ey, en, yn) - Cv = np.eye(self.nr) + Ex_ss @ Cx + c_x = self.metabolite_control_coefficient(epsilon_x, epsilon_y, e_n, y_n) + c_v = np.eye(self.n_r) + epsilon_x_ss @ c_x - return Cv + return c_v class LinLogSymbolic2x2(LinLogBase): """Class for handling special case of a 2x2 full rank A matrix""" - def solve(self, A, bi): - a = A[0, 0] - b = A[0, 1] - c = A[1, 0] - d = A[1, 1] + def solve(self, a_matrix, b_matrix_i): + """Solves the linear system Ax = b for a 2x2 matrix A and a 2x1 vector b. - A_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c) - return A_inv @ bi + Args: + ---- + a_matrix (numpy.ndarray): The 2x2 matrix A. + b_matrix_i (numpy.ndarray): The 2x1 vector b. - def solve_pytensor(self, A, bi): - a = A[0, 0] - b = A[0, 1] - c = A[1, 0] - d = A[1, 1] + Returns: + ------- + numpy.ndarray: The 2x1 solution vector x. + """ + a = a_matrix[0, 0] + b = a_matrix[0, 1] + c = a_matrix[1, 0] + d = a_matrix[1, 1] + + a_matrix_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c) + return a_matrix_inv @ b_matrix_i - A_inv = at.stacklists([[d, -b], [-c, a]]) / (a * d - b * c) - return at.dot(A_inv, bi).squeeze() + def solve_pytensor(self, a_matrix, b_matrix_i): + """Solves the linear system Ax = b for a 2x2 matrix A and a 2x1 vector b using PyTensor. + + Args: + ---- + a_matrix (pytensor.Tensor): The 2x2 matrix A. + b_matrix_i (pytensor.Tensor): The 2x1 vector b. + + Returns: + ------- + pytensor.Tensor: The 2x1 solution vector x. 
+        """
+        a = a_matrix[0, 0]
+        b = a_matrix[0, 1]
+        c = a_matrix[1, 0]
+        d = a_matrix[1, 1]
+
+        a_matrix_inv = at.stacklists([[d, -b], [-c, a]]) / (a * d - b * c)
+        return at.dot(a_matrix_inv, b_matrix_i).squeeze()


 class LinLogLinkMatrix(LinLogBase):
-    def solve(self, A, b):
-        A_linked = A @ self.L
-        z = sp.linalg.solve(A_linked, b)
-        return self.L @ z
+    def solve(self, a_matrix, b_matrix):
+        a_matrix_linked = a_matrix @ self.link_matrix
+        z = sp.linalg.solve(a_matrix_linked, b_matrix)
+        return self.link_matrix @ z

-    def solve_pytensor(self, A, b):
-        A_linked = at.dot(A, self.L)
-        z = pytensor.tensor.slinalg.solve(A_linked, b).squeeze()
-        return at.dot(self.L, z)
+    def solve_pytensor(self, a_matrix, b_matrix):
+        a_matrix_linked = at.dot(a_matrix, self.link_matrix)
+        z = pytensor.tensor.slinalg.solve(a_matrix_linked, b_matrix).squeeze()
+        return at.dot(self.link_matrix, z)


 class LinLogLeastNorm(LinLogBase):
     """Uses dgels to solve for the least-norm solution to the linear equation"""

-    def __init__(self, N, Ex, Ey, v_star, driver="gelsy", **kwargs):
+    def __init__(self, stoich, epsilon_x, epsilon_y, v_star, driver="gelsy", **kwargs):
         self.driver = driver
-        LinLogBase.__init__(self, N, Ex, Ey, v_star, **kwargs)
+        LinLogBase.__init__(self, stoich, epsilon_x, epsilon_y, v_star, **kwargs)

-    def solve(self, A, b):
-        return lstsq_wrapper(A, b, self.driver)
+    def solve(self, a_matrix, b_matrix):
+        return lstsq_wrapper(a_matrix, b_matrix, self.driver)

-    def solve_pytensor(self, A, b):
+    def solve_pytensor(self, a_matrix, b_matrix):
         rsolve_op = LeastSquaresSolve(driver=self.driver, b_ndim=2)
-        return rsolve_op(A, b).squeeze()
+        return rsolve_op(a_matrix, b_matrix).squeeze()


 class LinLogTikhonov(LinLogBase):
     """Adds regularization to the linear solve, assumes A matrix is positive semi-definite"""

-    def __init__(self, N, Ex, Ey, v_star, lambda_=None, **kwargs):
+    def __init__(self, stoich, epsilon_x, epsilon_y, v_star, lambda_=None, **kwargs):
         self.lambda_ = lambda_ if lambda_ else 0
-        assert self.lambda_ >= 0, "lambda must be positive"
+        if self.lambda_ < 0:
+            raise ValueError("lambda must be nonnegative")

-        LinLogBase.__init__(self, N, Ex, Ey, v_star, **kwargs)
+        LinLogBase.__init__(self, stoich, epsilon_x, epsilon_y, v_star, **kwargs)

-    def solve(self, A, b):
-        A_hat = A.T @ A + self.lambda_ * np.eye(A.shape[1])
-        b_hat = A.T @ b
+    def solve(self, a_matrix, b_matrix):
+        a_matrix_hat = a_matrix.T @ a_matrix + self.lambda_ * np.eye(a_matrix.shape[1])
+        b_matrix_hat = a_matrix.T @ b_matrix

-        cho = sp.linalg.cho_factor(A_hat)
-        return sp.linalg.cho_solve(cho, b_hat)
+        cho = sp.linalg.cho_factor(a_matrix_hat)
+        return sp.linalg.cho_solve(cho, b_matrix_hat)

-    def solve_pytensor(self, A, b):
+    def solve_pytensor(self, a_matrix, b_matrix):
         rsolve_op = RegularizedSolve(self.lambda_)
-        return rsolve_op(A, b).squeeze()
+        return rsolve_op(a_matrix, b_matrix).squeeze()


 class LinLogPinv(LinLogLeastNorm):
     def steady_state_pytensor(
         self,
-        Ex,
-        Ey=None,
-        en=None,
-        yn=None,
+        epsilon_x,
+        epsilon_y=None,
+        e_n=None,
+        y_n=None,
         solution_basis=None,
         method="scan",
-        driver="gelsy",
     ):
         """Calculate a the steady-state transformed metabolite concentrations
         and fluxes using pytensor.

-        Ex, Ey, en and yn should be pytensor matrices
+        epsilon_x, epsilon_y, e_n and y_n should be pytensor matrices

-        solution_basis is a (n_exp, nr) pytensor matrix of the current solution
+        solution_basis is a (n_exp, n_r) pytensor matrix of the current solution
         basis.

         solver: function
             A function to solve Ax = b for a (possibly) singular A.
Should
-            accept pytensor matrices A and b, and return a symbolic x.
+            accept pytensor matrices a_matrix and b_matrix, and return a symbolic x.
         """
+        if epsilon_y is None:
+            # Fall back to the reference external elasticities, matching
+            # the defaulting behavior of _generate_default_inputs.
+            epsilon_y = at.as_tensor_variable(self.epsilon_y)

-        if Ey is None:
-            Ey = at.as_tensor_variable(Ey)
-
-        if isinstance(en, np.ndarray):
-            en = np.atleast_2d(en)
-            n_exp = en.shape[0]
+        if isinstance(e_n, np.ndarray):
+            e_n = np.atleast_2d(e_n)
+            n_exp = e_n.shape[0]
         else:
-            n_exp = en.tag.test_value.shape[0]
+            n_exp = e_n.tag.test_value.shape[0]

-        if isinstance(yn, np.ndarray):
-            yn = np.atleast_2d(yn)
+        if isinstance(y_n, np.ndarray):
+            y_n = np.atleast_2d(y_n)

-        en = at.as_tensor_variable(en)
-        yn = at.as_tensor_variable(yn)
+        e_n = at.as_tensor_variable(e_n)
+        y_n = at.as_tensor_variable(y_n)

-        e_diag = en.dimshuffle(0, 1, "x") * np.diag(self.v_star)
-        N_rep = self.Nr.reshape((-1, *self.Nr.shape)).repeat(n_exp, axis=0)
-        N_hat = at.batched_dot(N_rep, e_diag)
+        e_diag = e_n.dimshuffle(0, 1, "x") * np.diag(self.v_star)
+        n_rep = self.stoich_reduced.reshape((-1, *self.stoich_reduced.shape)).repeat(n_exp, axis=0)
+        n_hat = at.batched_dot(n_rep, e_diag)

-        inner_v = Ey.dot(yn.T).T + np.ones(self.nr, dtype=_floatX)
-        As = at.dot(N_hat, Ex)
+        inner_v = epsilon_y.dot(y_n.T).T + np.ones(self.n_r, dtype=_floatx)
+        a_matrix_s = at.dot(n_hat, epsilon_x)

-        bs = at.batched_dot(-N_hat, inner_v.dimshuffle(0, 1, "x"))
+        b_matrix_s = at.batched_dot(-n_hat, inner_v.dimshuffle(0, 1, "x"))

         # Here we have to redefine the entire function, since we have to pass
         # an additional argument to solve.
-        def pinv_solution(A, b, basis=None):
-            A_pinv = at.nlinalg.pinv(A)
-            x_ln = at.dot(A_pinv, b).squeeze()
-            x = x_ln + at.dot((at.eye(self.nm) - at.dot(A_pinv, A)), basis)
+        def pinv_solution(a_matrix, b_matrix, basis=None):
+            a_matrix_pinv = at.nlinalg.pinv(a_matrix)
+            x_ln = at.dot(a_matrix_pinv, b_matrix).squeeze()
+            x = x_ln + at.dot((at.eye(self.n_m) - at.dot(a_matrix_pinv, a_matrix)), basis)
             return x

         if method == "scan":
-            xn, _ = pytensor.scan(
-                lambda A, b, w: pinv_solution(A, b, basis=w),
-                sequences=[As, bs, solution_basis],
+            x_n, _ = pytensor.scan(
+                lambda a_matrix, b_matrix, w: pinv_solution(a_matrix, b_matrix, basis=w),
+                sequences=[a_matrix_s, b_matrix_s, solution_basis],
                 strict=True,
             )
         else:
-            xn_list = [None] * n_exp
+            x_n_list = [None] * n_exp
             for i in range(n_exp):
-                xn_list[i] = pinv_solution(As[i], bs[i], solution_basis[i])
-            xn = at.stack(xn_list)
+                x_n_list[i] = pinv_solution(a_matrix_s[i], b_matrix_s[i], solution_basis[i])
+            x_n = at.stack(x_n_list)

-        vn = en * (np.ones(self.nr) + at.dot(Ex, xn.T).T + at.dot(Ey, yn.T).T)
+        v_n = e_n * (np.ones(self.n_r) + at.dot(epsilon_x, x_n.T).T + at.dot(epsilon_y, y_n.T).T)

-        return xn, vn
+        return x_n, v_n
diff --git a/src/emll/pytensor_utils.py b/src/emll/pytensor_utils.py
index 487929f..52dd7de 100644
--- a/src/emll/pytensor_utils.py
+++ b/src/emll/pytensor_utils.py
@@ -1,17 +1,14 @@
+"""Utility scripts for PyTensor."""
 import numpy as np
 import scipy as sp
-
-from scipy.linalg import LinAlgError
-from scipy.linalg.lapack import get_lapack_funcs
-
 from pytensor import tensor
 from pytensor.tensor.slinalg import Solve
+from scipy.linalg import LinAlgError
+from scipy.linalg.lapack import get_lapack_funcs


 class SymPosSolve(Solve):
-    """
-    Class to allow `solve` to accept a symmetric matrix
-    """
+    """Class to allow `solve` to accept a symmetric matrix."""

     def perform(self, node, inputs, output_storage):
         A, b = inputs
@@ -23,10 +20,9 @@ def perform(self, node, inputs, output_storage):


 class
RegularizedSolve(Solve):
-    """
-    Solve a system of linear equations, Ax = b, while minimizing the norm of x.
-    Applies tikhovov regularization.
+    """Apply Tikhonov regularization.
+
+    Solve a system of linear equations, Ax = b, while minimizing the norm of x.
     """

     __props__ = ("lambda_", *Solve.__props__)
@@ -46,11 +42,7 @@ def perform(self, node, inputs, output_storage):
         output_storage[0][0] = rval

     def L_op(self, inputs, outputs, output_gradients):
-        """
-        Reverse-mode gradient updates for matrix solve operation.
-
-        """
-
+        """Reverse-mode gradient updates for matrix solve operation."""
         A, b = inputs
         c = outputs[0]
         c_bar = output_gradients[0]
@@ -68,10 +60,7 @@ def force_outer(l, r):


 class LeastSquaresSolve(Solve):
-    """
-    Solve a system of linear equations, Ax = b, while minimizing the norm of x.
-
-    """
+    """Solve a system of linear equations, Ax = b, while minimizing the norm of x."""

     __props__ = ("driver", *Solve.__props__)  # contains b_ndim
@@ -85,11 +74,7 @@ def perform(self, node, inputs, output_storage):
         output_storage[0][0] = lstsq_wrapper(A, b, driver=self.driver)

     def L_op(self, inputs, outputs, output_gradients):
-        """
-        Reverse-mode gradient updates for matrix solve operation.
-
-        """
-
+        """Reverse-mode gradient updates for matrix solve operation."""
         A, b = inputs
         c = outputs[0]
         c_bar = output_gradients[0]
@@ -117,8 +102,7 @@ def sympos_solve_wrapper(A, b):


 def lstsq_wrapper(A, b, driver="gelsy"):
-    """Wrap sp.linalg.lstsq to also support the faster _gels solver"""
-
+    """Wrap sp.linalg.lstsq to also support the faster _gels solver."""
     # if driver == 'gels':
     #
     #     m, n = A.shape
diff --git a/src/emll/test_linlog_model.py b/src/emll/test_linlog_model.py
index 4343b14..b6bc29b 100644
--- a/src/emll/test_linlog_model.py
+++ b/src/emll/test_linlog_model.py
@@ -1,12 +1,11 @@
 """ Test File for linlog model methods """
 import numpy as np
-import pytest
-
 import pytensor
 import pytensor.tensor as at
+import pytest

-from emll.test_models import models
 from emll.linlog_model import LinLogLeastNorm, LinLogLinkMatrix, LinLogTikhonov
+from emll.test_models import models
 from emll.util import create_elasticity_matrix, create_Ey_matrix

 pytensor.config.compute_test_value = "ignore"
diff --git a/src/emll/test_models/__init__.py b/src/emll/test_models/__init__.py
index d1bd98d..de84ba7 100644
--- a/src/emll/test_models/__init__.py
+++ b/src/emll/test_models/__init__.py
@@ -1,15 +1,14 @@
 import os
+
 import cobra
 import numpy as np
-
 import pandas as pd
-
 from cobra.util.array import create_stoichiometric_matrix

 currdir = os.path.dirname(os.path.abspath(__file__))


-def get_N_v(model):
+def get_N_v(model):

     solution = model.optimize()

     N = create_stoichiometric_matrix(model)
@@ -26,17 +25,18 @@ def get_N_v(model):


 def load_contador():
-    model = cobra.io.load_json_model(currdir + '/contador.json')
+    model = cobra.io.load_json_model(currdir + "/contador.json")
     model.reactions.EX_glc.bounds = (-1.243, 1000)
-    model.reactions.EX_lys.lower_bound = .139
-    model.reactions.zwf.lower_bound = .778
+    model.reactions.EX_lys.lower_bound = 0.139
+    model.reactions.zwf.lower_bound = 0.778

     N, v_star = get_N_v(model)

     return model, N, v_star

+
 def load_teusink():
-    model = cobra.io.read_sbml_model(currdir + '/BIOMD0000000064.xml')
+    model = cobra.io.read_sbml_model(currdir + "/BIOMD0000000064.xml")
     model.reactions.vGLT.bounds = (-88.1, 88.1)
     for rxn in model.reactions:
         rxn.lower_bound = 0.1
@@ -47,32 +47,42 @@ def load_teusink():

     return model, N, v_star

+
 def load_mendes():
     from .mendes_model import model
+
     N =
create_stoichiometric_matrix(model) - v_star = np.array([0.0289273 , 0.0289273 , 0.01074245, 0.01074245, 0.01074245, - 0.01818485, 0.01818485, 0.01818485]) + v_star = np.array( + [ + 0.0289273, + 0.0289273, + 0.01074245, + 0.01074245, + 0.01074245, + 0.01818485, + 0.01818485, + 0.01818485, + ] + ) return model, N, v_star -def load_textbook(): - model = cobra.io.load_json_model(currdir + '/textbook_reduced.json') +def load_textbook(): + model = cobra.io.load_json_model(currdir + "/textbook_reduced.json") N, v_star = get_N_v(model) return model, N, v_star -def load_greene_small(): - N = np.array([ - [-1, 0, 0, 1, 0, 0], - [1, 1, -1, 0, 0, 0], - [1, -1, 0, 0, 0, -1], - [0, 0, 1, 0, -1, 0]]) +def load_greene_small(): + N = np.array( + [[-1, 0, 0, 1, 0, 0], [1, 1, -1, 0, 0, 0], [1, -1, 0, 0, 0, -1], [0, 0, 1, 0, -1, 0]] + ) v_star = np.array([1, 0.5, 1.5, 1, 1.5, 0.5]) - rxn_names = ['V1', 'V2', 'V3', 'Vin', 'Vout', 'Voutx3'] - met_names = ['x1', 'x2', 'x3', 'x4'] + rxn_names = ["V1", "V2", "V3", "Vin", "Vout", "Voutx3"] + met_names = ["x1", "x2", "x3", "x4"] assert np.allclose(N @ v_star, 0) @@ -80,35 +90,105 @@ def load_greene_small(): def load_greene_large(): - - N = np.array([ - [-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0], - [0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, -1, 0], - [-1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0], - [1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0]]) - - v_star = np.array([1, 0, 1, 0, 1, 0, 0.5, 0, 0.5, 0, 0.5, 0, 1.5, 0, 1.5, - 0, 1.5, 0, 1, 0, 1, 0, 1, 0, 1.5, 0.5]) - - rxn_names = ['v1_1', 'v1_2', 'v1_3', 'v1_4', 'v1_5', 'v1_6', 'v2_1', - 'v2_2', 'v2_3', 'v2_4', 'v2_5', 'v2_6', 'v3_1', 'v3_2', 'v3_3', - 'v3_4', 'v3_5', 'v3_6', 'vin_1', 'vin_2', 'vin_3', 'vin_4', - 'vin_5', 'vin_6', 'vout', 'voutx3'] - - met_names = ['x1', 'x2', 'x3', 'x4', 'E1', 'E2', 'E3', 'Ein', 'x1E1', - 'x3E1', 'x3E2', 'x2E2', 'x2E3', 'x4E3', 'xoutEin', 'xinEin'] + N = np.array( + [ + [-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0], + [0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, -1, 0], + [-1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 1, -1, 0, 0], + [1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, 1, 0, 0], + ] + ) + + v_star = np.array( + [ + 1, + 0, + 1, + 0, + 1, + 0, + 0.5, + 0, + 0.5, + 0, + 0.5, + 0, + 1.5, + 0, + 1.5, + 0, + 1.5, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1.5, + 0.5, + ] + ) + + rxn_names = [ + "v1_1", + "v1_2", + "v1_3", + "v1_4", + "v1_5", + "v1_6", + "v2_1", + "v2_2", + "v2_3", + "v2_4", + "v2_5", + "v2_6", + "v3_1", + "v3_2", + "v3_3", + "v3_4", + "v3_5", + "v3_6", + "vin_1", + "vin_2", + "vin_3", + "vin_4", + "vin_5", + "vin_6", + "vout", + "voutx3", + ] + + met_names = [ + "x1", + "x2", + "x3", + "x4", + "E1", + "E2", + "E3", + "Ein", + "x1E1", + "x3E1", + "x3E2", + "x2E2", + "x2E3", + "x4E3", + "xoutEin", + "xinEin", + ] assert np.allclose(N @ v_star, 0) @@ -116,9 +196,8 @@ def load_greene_large(): def load_jol2012_edit(): - - model = cobra.io.load_json_model(currdir + '/jol2012_trimmed.json') - v_star = pd.read_pickle(currdir + '/jol2012_vstar.p').values + model = cobra.io.load_json_model(currdir + "/jol2012_trimmed.json") + v_star = pd.read_pickle(currdir + "/jol2012_vstar.p").values N = cobra.util.create_stoichiometric_matrix(model) @@ -128,27 +207,26 @@ def load_jol2012_edit(): def construct_model_from_mat(N, rxn_names, met_names): - - model = cobra.Model('test_model') + model = cobra.Model("test_model") model.add_metabolites([cobra.Metabolite(id=met_name) for met_name in met_names]) for row, rxn_name in zip(N.T, rxn_names): reaction = cobra.Reaction(id=rxn_name) model.add_reactions([reaction]) - reaction.add_metabolites({ - met_id: float(stoich) for met_id, stoich in zip(met_names, row) - if abs(stoich) > 1E-6}) + reaction.add_metabolites( + {met_id: float(stoich) for met_id, stoich in zip(met_names, row) if abs(stoich) > 1e-6} + ) return model models = { - 'teusink': load_teusink, - 'mendes': load_mendes, - 'textbook': load_textbook, - 'greene_small': load_greene_small, - 'greene_large': load_greene_large, - 'contador': load_contador, - 'jol2012': load_jol2012_edit, + "teusink": load_teusink, + "mendes": load_mendes, + "textbook": load_textbook, + "greene_small": load_greene_small, + "greene_large": load_greene_large, + "contador": load_contador, + "jol2012": load_jol2012_edit, } diff --git a/src/emll/test_models/common_utils.py b/src/emll/test_models/common_utils.py index 7ada891..3cd0c72 100644 --- a/src/emll/test_models/common_utils.py +++ b/src/emll/test_models/common_utils.py @@ -1,10 +1,9 @@ +import casadi as cs import numpy as np import scipy as sp -import casadi as cs class StateCompressor(object): - def __init__(self, N, Ex, v_star, x_star): """A class to handle the stochiometric analysis and matrix reduction required to ensure the relevant matrices are invertable. 
@@ -37,7 +36,7 @@ def __init__(self, N, Ex, v_star, x_star):
         assert len(x_star) == self.nm, "x_star is the wrong length"

         q, r, p = sp.linalg.qr((N @ np.diag(v_star) @ Ex).T, pivoting=True)
-        
+
         # # Construct permutation matrix
         # self.P = np.zeros((len(p), len(p)), dtype=int)
         # for i, pi in enumerate(p):
@@ -51,11 +50,11 @@ def __init__(self, N, Ex, v_star, x_star):
         self.rank = (~(np.abs(r) < tol).all(1)).sum()

         # Permutation vector
-        self.p = np.sort(p[:self.rank])
+        self.p = np.sort(p[: self.rank])

         self.z_star = self.x_to_z(self.x_star)
         self.Nr = Nr = N[self.p]
-        self.L = np.diag(1/self.x_star) @ N @ np.linalg.pinv(Nr) @ np.diag(self.z_star)
+        self.L = np.diag(1 / self.x_star) @ N @ np.linalg.pinv(Nr) @ np.diag(self.z_star)

         # Sanity checks
         assert np.linalg.matrix_rank(Nr) == self.rank
@@ -63,45 +62,45 @@ def __init__(self, N, Ex, v_star, x_star):

     def x_to_z(self, x):
         return x[self.p]
-    
+
     def z_to_x(self, z):
-        chi = self.L @ np.log(z/self.z_star).T
+        chi = self.L @ np.log(z / self.z_star).T
         return np.exp(chi) * self.x_star

     def z_to_x_alt(self, z):
-        chi = self.L @ (z/self.z_star - 1).T
+        chi = self.L @ (z / self.z_star - 1).T
         return (chi + 1) * self.x_star


 def calc_xs_full_ode(Ex, Ey, e_hat, y_hat, state_compressor):
+    sc = state_compressor  # alias for the shorthand used throughout this function
+    ny = Ey.shape[1]  # number of external species
+
+    t = cs.SX.sym("t", 1)
+    x_sym = cs.SX.sym("x", sc.nm)
+    y_sym = cs.SX.sym("y", ny)
+    e_sym = cs.SX.sym("e", sc.nr)
+    Ex_sym = cs.SX.sym("Ex", sc.nr, sc.nm)
+    Ey_sym = cs.SX.sym("Ey", sc.nr, Ey.shape[1])

-    t = cs.SX.sym('t', 1)
-    x_sym = cs.SX.sym('x', sc.nm)
-    y_sym = cs.SX.sym('y', ny)
-    e_sym = cs.SX.sym('e', sc.nr)
-    Ex_sym = cs.SX.sym('Ex', sc.nr, sc.nm)
-    Ey_sym = cs.SX.sym('Ex', sc.nr, Ey.shape[1])
-
-
-    v = e_sym * v_star * (1 + Ex_sym @ cs.log(x_sym/sc.x_star) + Ey @ np.log(y_hat))
-    v_fun = cs.Function('v', [x_sym, e_sym, Ex_sym], [v])
+    v = e_sym * v_star * (1 + Ex_sym @ cs.log(x_sym / sc.x_star) + Ey @ np.log(y_hat))
+    v_fun = cs.Function("v", [x_sym, e_sym, Ex_sym], [v])

     ode = cs.mtimes(cs.DM(N), v_fun(x_sym, e_sym, Ex_sym))

     integrator = cs.integrator(
-        'full', 'cvodes',
+        "full",
+        "cvodes",
         {
-            't': t,
-            'x': x_sym,
-            'p': cs.vertcat(e_sym, Ex_sym.reshape((-1, 1))),
-            'ode': ode,
+            "t": t,
+            "x": x_sym,
+            "p": cs.vertcat(e_sym, Ex_sym.reshape((-1, 1))),
+            "ode": ode,
         },
         {
-            'tf': 2000,
-            'regularity_check': True,
-        })
+            "tf": 2000,
+            "regularity_check": True,
+        },
+    )

-    p = np.hstack([e_hat, Ex.reshape((-1, 1), order='F').squeeze()])
-    xs = np.array(integrator(x0=sc.x_star, p=p)['xf']).flatten()
+    p = np.hstack([e_hat, Ex.reshape((-1, 1), order="F").squeeze()])
+    xs = np.array(integrator(x0=sc.x_star, p=p)["xf"]).flatten()

     return xs
diff --git a/src/emll/test_models/mendes_model.py b/src/emll/test_models/mendes_model.py
index 47bf209..b9a8fed 100644
--- a/src/emll/test_models/mendes_model.py
+++ b/src/emll/test_models/mendes_model.py
@@ -1,81 +1,110 @@
 import cobra
 import numpy as np
-
 from cobra.util.array import create_stoichiometric_matrix

-model = cobra.Model('mendes')
+model = cobra.Model("mendes")

-M1 = cobra.Metabolite('M1')
-M2 = cobra.Metabolite('M2')
-M3 = cobra.Metabolite('M3')
-M4 = cobra.Metabolite('M4')
-M5 = cobra.Metabolite('M5')
-M6 = cobra.Metabolite('M6')
+M1 = cobra.Metabolite("M1")
+M2 = cobra.Metabolite("M2")
+M3 = cobra.Metabolite("M3")
+M4 = cobra.Metabolite("M4")
+M5 = cobra.Metabolite("M5")
+M6 = cobra.Metabolite("M6")

-A = cobra.Metabolite('A')
-AH = cobra.Metabolite('AH')
+A = cobra.Metabolite("A")
+AH = cobra.Metabolite("AH")

 model.add_metabolites([M1, M2, M3, M4, M5, M6, A, AH])

-R1 = cobra.Reaction('R1')
+R1 = cobra.Reaction("R1")
model.add_reactions([R1]) -R1.build_reaction_from_string(' --> M1') +R1.build_reaction_from_string(" --> M1") -R2 = cobra.Reaction('R2') +R2 = cobra.Reaction("R2") model.add_reactions([R2]) -R2.build_reaction_from_string('A + M1 --> AH + M2') +R2.build_reaction_from_string("A + M1 --> AH + M2") -R3 = cobra.Reaction('R3') +R3 = cobra.Reaction("R3") model.add_reactions([R3]) -R3.build_reaction_from_string('M2 --> M3') +R3.build_reaction_from_string("M2 --> M3") -R4 = cobra.Reaction('R4') +R4 = cobra.Reaction("R4") model.add_reactions([R4]) -R4.build_reaction_from_string('AH + M3 --> A + M4') +R4.build_reaction_from_string("AH + M3 --> A + M4") -R5 = cobra.Reaction('R5') +R5 = cobra.Reaction("R5") model.add_reactions([R5]) -R5.build_reaction_from_string('M4 -->') +R5.build_reaction_from_string("M4 -->") -R6 = cobra.Reaction('R6') +R6 = cobra.Reaction("R6") model.add_reactions([R6]) -R6.build_reaction_from_string('M2 --> M5') +R6.build_reaction_from_string("M2 --> M5") -R7 = cobra.Reaction('R7') +R7 = cobra.Reaction("R7") model.add_reactions([R7]) -R7.build_reaction_from_string('AH + M5 --> M6 + A') +R7.build_reaction_from_string("AH + M5 --> M6 + A") -R8 = cobra.Reaction('R8') +R8 = cobra.Reaction("R8") model.add_reactions([R8]) -R8.build_reaction_from_string('M6 -->') +R8.build_reaction_from_string("M6 -->") S = create_stoichiometric_matrix(model) def reversible_hill(substrate, product, Modifier, Keq, Vf, Shalve, Phalve, h, Mhalve, alpha): - return Vf*substrate/Shalve*(1-product/(substrate*Keq))*(substrate/Shalve+product/Phalve)**(h-1)/((1+(Modifier/Mhalve)**h)/(1+alpha*(Modifier/Mhalve)**h)+(substrate/Shalve+product/Phalve)**h) + return ( + Vf + * substrate + / Shalve + * (1 - product / (substrate * Keq)) + * (substrate / Shalve + product / Phalve) ** (h - 1) + / ( + (1 + (Modifier / Mhalve) ** h) / (1 + alpha * (Modifier / Mhalve) ** h) + + (substrate / Shalve + product / Phalve) ** h + ) + ) + + +def ordered_bi_bi( + substratea, substrateb, productp, productq, Keq, Vf, Vr, Kma, Kmb, Kmp, Kmq, Kia, Kib, Kip +): + return ( + Vf + * (substratea * substrateb - productp * productq / Keq) + / ( + substratea * substrateb * (1 + productp / Kip) + + Kma * substrateb + + Kmb * (substratea + Kia) + + Vf + / (Vr * Keq) + * ( + Kmq * productp * (1 + substratea / Kia) + + productq + * (Kmp * (1 + Kma * substrateb / (Kia * Kmb)) + productp * (1 + substrateb / Kib)) + ) + ) + ) -def ordered_bi_bi(substratea, substrateb, productp, productq, Keq, Vf, Vr, Kma, Kmb, Kmp, Kmq, Kia, Kib, Kip): - return Vf*(substratea*substrateb-productp*productq/Keq)/(substratea*substrateb*(1+productp/Kip)+Kma*substrateb+Kmb*(substratea+Kia)+Vf/(Vr*Keq)*(Kmq*productp*(1+substratea/Kia)+productq*(Kmp*(1+Kma*substrateb/(Kia*Kmb))+productp*(1+substrateb/Kib)))) def uni_uni(substrate, product, Kms, Kmp, Vf, Keq): - return Vf*(substrate-product/Keq)/(substrate+Kms*(1+product/Kmp)) - - - -A = np.array([ -[-31.4 , 4.41 , 0.135 , 0.313 , 0.31 , 0.135 , -0.424 , 0.97], -[-2.47 , 0.0608 , 0 , 0 , 0 , 0 , 0 , 0], -[-17.4 , 0.219 , 0.0831 , 0 , 0 , 0.0932 , 0 , 0], -[0 , 0 , 0.0147 , 0.0268 , 0 , 0 , 0 , 0], -[0 , 0 , 0 , 0.00278 , 0.848 , 0 , 0 , 0], -[0 , 0 , 0 , 0 , 0 , 0 , -0.486 , 2.16], -[0 , 0.351 , 0 , -0.0015 , 0 , 0 , -0.0389 , 0], -[0 , 0 , 0 , 0 , 0 , -0.00364 , 0.09 , 0], -[0 , 1.04 , -0.0288 , 0.0859 , 0 , -0.0167 , 0.0993 , 0], -[3.88 , 0 , 0 , 0 , 0 , 0 , 0 , 0], -[0 , 0 , 0 , 0 , -0.713 , 0 , 0 , 0], -[0 , 0 , 0 , 0 , 0 , 0 , 0 , -1.56] -]) - -x_ss = np.array([1., .996, .164, .0428, .101, .0726, .202, .0202, .0789, .1, 
.2]) + return Vf * (substrate - product / Keq) / (substrate + Kms * (1 + product / Kmp)) + + +A = np.array( + [ + [-31.4, 4.41, 0.135, 0.313, 0.31, 0.135, -0.424, 0.97], + [-2.47, 0.0608, 0, 0, 0, 0, 0, 0], + [-17.4, 0.219, 0.0831, 0, 0, 0.0932, 0, 0], + [0, 0, 0.0147, 0.0268, 0, 0, 0, 0], + [0, 0, 0, 0.00278, 0.848, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -0.486, 2.16], + [0, 0.351, 0, -0.0015, 0, 0, -0.0389, 0], + [0, 0, 0, 0, 0, -0.00364, 0.09, 0], + [0, 1.04, -0.0288, 0.0859, 0, -0.0167, 0.0993, 0], + [3.88, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, -0.713, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, -1.56], + ] +) + +x_ss = np.array([1.0, 0.996, 0.164, 0.0428, 0.101, 0.0726, 0.202, 0.0202, 0.0789, 0.1, 0.2]) diff --git a/src/emll/test_pytensor_utils.py b/src/emll/test_pytensor_utils.py index 950262b..5f59394 100644 --- a/src/emll/test_pytensor_utils.py +++ b/src/emll/test_pytensor_utils.py @@ -5,7 +5,7 @@ from tests import unittest_tools as utt # Problem Importing utt? this is correct syntax -from .pytensor_utils import RegularizedSolve, LeastSquaresSolve +from .pytensor_utils import LeastSquaresSolve, RegularizedSolve def test_regularized_solve(): diff --git a/src/emll/util.py b/src/emll/util.py index b729de9..3f6c5bd 100644 --- a/src/emll/util.py +++ b/src/emll/util.py @@ -1,7 +1,7 @@ import numpy as np -import scipy as sp -import pytensor.tensor as at import pymc as pm +import pytensor.tensor as at +import scipy as sp def create_elasticity_matrix(model): diff --git a/tests/unittest_tools.py b/tests/unittest_tools.py index d8c1bd0..97087f3 100644 --- a/tests/unittest_tools.py +++ b/tests/unittest_tools.py @@ -4,9 +4,8 @@ from functools import wraps import numpy as np -import pytest - import pytensor +import pytest from pytensor.compile.debugmode import str_diagnostic from pytensor.configdefaults import config from pytensor.gradient import verify_grad as orig_verify_grad @@ -14,7 +13,6 @@ from pytensor.tensor.math import _allclose from pytensor.tensor.math import add as at_add - _logger = logging.getLogger("tests.unittest_tools") @@ -42,10 +40,7 @@ def fetch_seed(pseed=None): seed = None except ValueError: print( - ( - "Error: config.unittests__rseed contains " - "invalid seed, using None instead" - ), + ("Error: config.unittests__rseed contains " "invalid seed, using None instead"), file=sys.stderr, ) seed = None @@ -224,11 +219,7 @@ def _compile_and_check( # remove broadcasted dims as it is sure they can't be # changed to prevent the same dim problem. if hasattr(var.type, "broadcastable"): - shp = [ - inp.shape[i] - for i in range(inp.ndim) - if not var.type.broadcastable[i] - ] + shp = [inp.shape[i] for i in range(inp.ndim) if not var.type.broadcastable[i]] else: shp = inp.shape if len(set(shp)) != len(shp): diff --git a/tox.ini b/tox.ini index e69de29..bba8a01 100644 --- a/tox.ini +++ b/tox.ini @@ -0,0 +1,208 @@ +[tox] +isolated_build = True + +# These environments are run in order if you just use `tox`: +envlist = + lint + manifest + pyroma + flake8 + mypy + # the actual tests + py + +[testenv] +# Runs on the "tests" directory by default, or passes the positional +# arguments from `tox -e py ... 
+commands =
+    coverage run -p -m pytest --durations=20 {posargs:tests}
+    coverage combine
+    coverage xml
+extras =
+    # See the [options.extras_require] entry in setup.cfg for "tests"
+    tests
+
+[testenv:doctests]
+description = Test that documentation examples run properly
+commands =
+    xdoctest -m src
+deps =
+    xdoctest
+    pygments
+
+# [testenv:treon]
+# description = Test that notebooks can run to completion
+# commands =
+#     treon notebooks/
+# deps =
+#     treon
+
+[testenv:coverage-clean]
+deps = coverage
+skip_install = true
+commands = coverage erase
+
+[testenv:lint]
+deps =
+    black[jupyter]
+    isort
+    nbqa
+skip_install = true
+commands =
+    black .
+    isort .
+    nbqa isort .
+description = Run linters.
+
+# [testenv:doclint]
+# deps =
+#     rstfmt
+# skip_install = true
+# commands =
+#     rstfmt docs/source/
+# description = Run documentation linters.
+
+[testenv:manifest]
+deps = check-manifest
+skip_install = true
+commands = check-manifest
+description = Check that the MANIFEST.in is written properly and give feedback on how to fix it.
+
+[testenv:flake8]
+skip_install = true
+deps =
+    darglint
+    flake8<5.0.0
+    flake8-black
+    flake8-bandit
+    flake8-bugbear
+    flake8-colors
+    flake8-docstrings
+    flake8-isort
+    flake8-print
+    pep8-naming
+    pydocstyle
+commands =
+    flake8 src/ tests/
+description = Run the flake8 tool with several plugins (bandit, docstrings, import order, pep8 naming) taken from https://cthoyt.com/2020/04/25/how-to-code-with-me-flake8.html
+
+[testenv:pyroma]
+deps =
+    pygments
+    pyroma
+skip_install = true
+commands = pyroma --min=10 .
+description = Run the pyroma tool to check the package friendliness of the project.
+
+[testenv:mypy]
+deps = mypy
+skip_install = true
+commands = mypy --install-types --non-interactive --ignore-missing-imports src/
+description = Run the mypy tool to check static typing on the project.
+
+# [testenv:doc8]
+# skip_install = true
+# deps =
+#     sphinx
+#     doc8
+# commands =
+#     doc8 docs/source/
+# description = Run the doc8 tool to check the style of the RST files in the project docs.
+
+# [testenv:docstr-coverage]
+# skip_install = true
+# deps =
+#     docstr-coverage
+# commands =
+#     docstr-coverage src/ tests/ --skip-private --skip-magic
+# description = Run the docstr-coverage tool to check documentation coverage
+#
+# [testenv:docs]
+# description = Build the documentation locally, allowing warnings.
+# extras =
+#     # See the [options.extras_require] entry in setup.cfg for "docs"
+#     docs
+#     # You might need to add additional extras if your documentation covers it
+# commands =
+#     python -m sphinx -b html -d docs/build/doctrees docs/source docs/build/html
+#
+# [testenv:docs-test]
+# description = Test building the documentation in an isolated environment. Warnings are considered as errors via -W.
+# changedir = docs +# extras = +# {[testenv:docs]extras} +# commands = +# mkdir -p {envtmpdir} +# cp -r source {envtmpdir}/source +# python -m sphinx -W -b html -d {envtmpdir}/build/doctrees {envtmpdir}/source {envtmpdir}/build/html +# python -m sphinx -W -b coverage -d {envtmpdir}/build/doctrees {envtmpdir}/source {envtmpdir}/build/coverage +# cat {envtmpdir}/build/coverage/c.txt +# cat {envtmpdir}/build/coverage/python.txt +# allowlist_externals = +# cp +# cat +# mkdir + +[testenv:coverage-report] +deps = coverage +skip_install = true +commands = + coverage combine + coverage report + +#################### +# Deployment tools # +#################### + +[testenv:bumpversion] +commands = bump2version {posargs} +skip_install = true +passenv = HOME +deps = + bump2version + +[testenv:build] +skip_install = true +deps = + wheel + build +commands = + python -m build --sdist --wheel --no-isolation + +[testenv:release] +description = Release the code to PyPI so users can pip install it +skip_install = true +deps = + {[testenv:build]deps} + twine >= 1.5.0 +commands = + {[testenv:build]commands} + twine upload --skip-existing dist/* + +[testenv:testrelease] +description = Release the code to the test PyPI site +skip_install = true +deps = + {[testenv:build]deps} + twine >= 1.5.0 +commands = + {[testenv:build]commands} + twine upload --skip-existing --repository-url https://test.pypi.org/simple/ dist/* + +[testenv:finish] +skip_install = true +passenv = + HOME + TWINE_USERNAME + TWINE_PASSWORD +deps = + {[testenv:release]deps} + bump2version +commands = + bump2version release --tag + {[testenv:release]commands} + git push --tags + bump2version patch + git push +allowlist_externals = + git \ No newline at end of file
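As a quick sanity check of the renamed public interface, the following sketch (not part of the patch) exercises LinLogLeastNorm end-to-end. The class, method, and argument names (stoich, epsilon_x, epsilon_y, v_star, e_n, y_n, reduction_method, steady_state_mat, flux_control_coefficient) are taken from src/emll/linlog_model.py as patched; the toy stoichiometry, reference fluxes, and elasticity values are illustrative assumptions only.

import numpy as np

from emll.linlog_model import LinLogLeastNorm

# Toy network: two internal metabolites, three reactions, one external species.
stoich = np.array([[1.0, -1.0, 0.0], [0.0, 1.0, -1.0]])  # (n_m, n_r)
v_star = np.array([1.0, 1.0, 1.0])  # reference fluxes; stoich @ v_star == 0

epsilon_x = np.array([[-0.5, 0.0], [0.5, -0.5], [0.0, 0.5]])  # (n_r, n_m)
epsilon_y = np.array([[0.5], [0.0], [0.0]])  # (n_r, n_y)

# reduction_method=None skips the Smallbone/Waldherr reduction for this
# already well-behaved toy system.
ll = LinLogLeastNorm(stoich, epsilon_x, epsilon_y, v_star, reduction_method=None)

# Steady state after doubling the enzyme of the first reaction, with the
# external species held at its reference concentration.
x_n, v_n = ll.steady_state_mat(e_n=np.array([2.0, 1.0, 1.0]), y_n=np.zeros(1))

# Flux control coefficients at the reference state.
fcc = ll.flux_control_coefficient()

The driver keyword of LinLogLeastNorm defaults to "gelsy", matching the driver="gelsy" usage shown in the notebook diffs above.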