Comparing changes

base repository: ahendriksen/tomosipo
base: v0.6.0
head repository: ahendriksen/tomosipo
compare: master
  • 6 commits
  • 8 files changed
  • 4 contributors

Commits on Oct 5, 2023

  1. 1971532
  2. 865511b
  3. 98835f3
  4. 9a3843a

Commits on Nov 15, 2023

  1. 6aa366e

Commits on Oct 24, 2024

  1. ecaf867
Showing with 389 additions and 221 deletions.
  1. +3 −1 README.md
  2. +17 −17 notebooks/learned_pd.ipynb
  3. +99 −0 notebooks/learned_pd.py
  4. +1 −85 notebooks/learned_pd_benchmark.py
  5. +1 −85 notebooks/learned_pd_lightning.py
  6. +67 −9 tests/test_torch_support.py
  7. +1 −1 tomosipo/geometry/volume.py
  8. +200 −23 tomosipo/torch_support.py
README.md: 4 changes (3 additions, 1 deletion)
@@ -77,10 +77,12 @@ conda create -n tomosipo cudatoolkit=<X.X> tomosipo -c astra-toolbox -c aahendri

An installation with PyTorch and [ts_algorithms](https://github.com/ahendriksen/ts_algorithms) can be created with the following snippet:
```
conda create -n tomosipo cudatoolkit=<X.X> tomosipo tqdm pytorch -c pytorch -c astra-toolbox -c aahendriksen -c defaults
conda create -n tomosipo tomosipo pytorch==2.0.1 pytorch-cuda=11.7 tqdm -c pytorch -c nvidia -c astra-toolbox/label/dev -c aahendriksen -c defaults
conda activate tomosipo
pip install git+https://github.com/ahendriksen/ts_algorithms.git
```
Since PyTorch version 2, the CUDA toolkit dependency has moved from the _cudatoolkit_ package to the _pytorch-cuda_ package. The development version of ASTRA uses the _cuda-cudart_ and _libcufft_ packages, which are automatically pulled in when installing _pytorch-cuda_.
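
As a quick sanity check of such an environment (a minimal sketch, not part of the upstream instructions; the geometry sizes are arbitrary), a small forward projection should run without errors:
```
import torch
import tomosipo as ts

# The ASTRA-backed projector needs a CUDA-capable GPU.
assert torch.cuda.is_available(), "CUDA-enabled PyTorch build not found"

# Tiny parallel-beam geometry used purely as a smoke test.
vg = ts.volume(shape=(1, 32, 32))           # single-slice volume
pg = ts.parallel(angles=16, shape=(1, 48))  # 16 angles, 48 detector pixels
A = ts.operator(vg, pg)

sino = A(torch.ones(1, 32, 32))
print(sino.shape)  # projection data, roughly (detector rows, angles, detector columns)
```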

More information about installation is provided in the [documentation](https://aahendriksen.gitlab.io/tomosipo/intro/install.html).

notebooks/learned_pd.ipynb: 34 changes (17 additions, 17 deletions)
@@ -140,20 +140,23 @@
" self.vg = vg\n",
" self.pg = pg\n",
" self.do_pingpong = do_pingpong\n",
" self.ts_op = ts.operator(self.vg[:1], self.pg.to_vec()[:, :1, :])\n",
" self.op = to_autograd(self.ts_op, is_2d=True, num_extra_dims=2)\n",
" self.opT = to_autograd(self.ts_op.T, is_2d=True, num_extra_dims=2)\n",
" \n",
" for i in range(n_iters): \n",
" # To ensure that the parameters of the primal and dual modules \n",
" for i in range(n_iters):\n",
" # To ensure that the parameters of the primal and dual modules\n",
" # are correctly distributed during parallel training, we register\n",
" # them as modules. \n",
" # them as modules.\n",
" self.add_module(f\"{i}_primal\", PrimalModule())\n",
" self.add_module(f\"{i}_dual\", DualModule())\n",
" \n",
"\n",
" def forward(self, g):\n",
" B, C, H, W = g.shape\n",
" assert C == 1, \"single channel support only for now\"\n",
" h = g.new_zeros(B, 5, H, W)\n",
" f_primal = g.new_zeros(B, 5, *self.vg.shape[1:])\n",
" \n",
"\n",
" def dual_step(g, h, f, module):\n",
" x = torch.cat((h, f, g), dim=1)\n",
" out = module(x)\n",
@@ -162,28 +165,25 @@
" x = torch.cat((f, update), dim=1)\n",
" out = module(x)\n",
" return f + out\n",
" \n",
" ts_op = ts.operator(self.vg[:1], self.pg.to_vec()[:, :1, :])\n",
" op = to_autograd(ts_op)\n",
" opT = to_autograd(ts_op.T)\n",
"\n",
" def fp(x):\n",
" if self.do_pingpong:\n",
" x = x.cpu()\n",
" return op(x).cuda()\n",
" return op(x).to(x.device)\n",
" def bp(x):\n",
" if self.do_pingpong:\n",
" x = x.cpu()\n",
" return opT(x).cuda()\n",
" \n",
" return opT(x).to(x.device)\n",
"\n",
" for i in range(self.n_iters):\n",
" primal_module = getattr(self, f\"{i}_primal\")\n",
" dual_module = getattr(self, f\"{i}_dual\")\n",
" \n",
"\n",
" f_dual = fp(f_primal[:, :1])\n",
" h = dual_step(g, h, f_dual, dual_module)\n",
" update = bp(h[:, :1])\n",
" f_primal = primal_step(f_primal, update, primal_module)\n",
" \n",
"\n",
" return f_primal[:, 0:1]"
]
},
@@ -408,9 +408,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "tomosipo",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "tomosipo"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -422,7 +422,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
"version": "3.11.5"
}
},
"nbformat": 4,
notebooks/learned_pd.py: 99 changes (99 additions, 0 deletions)
@@ -0,0 +1,99 @@
import torch
import tomosipo as ts
from tomosipo.torch_support import (
to_autograd,
)
import torch.nn as nn


# The network defined below should be almost the same as the primal-dual network described in:

# Adler, J., & Öktem, O., Learned Primal-Dual Reconstruction, IEEE Transactions on Medical Imaging, 37(6), 1322–1332 (2018)
# http://dx.doi.org/10.1109/tmi.2018.2799231

# Intended differences are the use of ReLU instead of PReLU, and the lack of biases.

# All other differences are unintentional.

class PrimalModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(6, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class DualModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(7, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class LearnedPD(nn.Module):
def __init__(self, vg, pg, n_iters, do_pingpong=False):
super().__init__()
self.n_iters = n_iters
self.vg = vg
self.pg = pg
self.do_pingpong = do_pingpong
self.ts_op = ts.operator(self.vg[:1], self.pg.to_vec()[:, :1, :])
self.op = to_autograd(self.ts_op, is_2d=True, num_extra_dims=2)
self.opT = to_autograd(self.ts_op.T, is_2d=True, num_extra_dims=2)

for i in range(n_iters):
# To ensure that the parameters of the primal and dual modules
# are correctly distributed during parallel training, we register
# them as modules.
self.add_module(f"{i}_primal", PrimalModule())
self.add_module(f"{i}_dual", DualModule())

def forward(self, g):
B, C, H, W = g.shape
assert C == 1, "single channel support only for now"
h = g.new_zeros(B, 5, H, W)
f_primal = g.new_zeros(B, 5, *self.vg.shape[1:])

def dual_step(g, h, f, module):
x = torch.cat((h, f, g), dim=1)
out = module(x)
return h + out
def primal_step(f, update, module):
x = torch.cat((f, update), dim=1)
out = module(x)
return f + out

        def fp(x):
            # Remember the original device so the result can be moved back
            # after an optional CPU round-trip (ping-pong mode).
            dev = x.device
            if self.do_pingpong:
                x = x.cpu()
            # Use the autograd-wrapped operators registered in __init__.
            return self.op(x).to(dev)

        def bp(x):
            dev = x.device
            if self.do_pingpong:
                x = x.cpu()
            return self.opT(x).to(dev)

for i in range(self.n_iters):
primal_module = getattr(self, f"{i}_primal")
dual_module = getattr(self, f"{i}_dual")

f_dual = fp(f_primal[:, :1])
h = dual_step(g, h, f_dual, dual_module)
update = bp(h[:, :1])
f_primal = primal_step(f_primal, update, primal_module)

return f_primal[:, 0:1]
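
For orientation, a hypothetical way to instantiate and train the module above; the geometry sizes, batch size, and loss are illustrative assumptions, not taken from this repository:
```
import torch
import tomosipo as ts
from learned_pd import LearnedPD

# Hypothetical 2D setup: 256x256 slices and a parallel-beam sinogram.
vg = ts.volume(shape=(1, 256, 256))
pg = ts.parallel(angles=192, shape=(1, 384))

model = LearnedPD(vg, pg, n_iters=10).cuda()

# g is a batch of single-channel sinograms: (B, 1, angles, detector width).
g = torch.randn(2, 1, 192, 384, device="cuda")
reconstruction = model(g)                      # expected shape (2, 1, 256, 256)

# One illustrative supervised training step.
target = torch.randn(2, 1, 256, 256, device="cuda")
loss = torch.nn.functional.mse_loss(reconstruction, target)
loss.backward()
```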
notebooks/learned_pd_benchmark.py: 86 changes (1 addition, 85 deletions)
@@ -59,91 +59,7 @@
from argparse import ArgumentParser
from tqdm import tqdm
import numpy as np


# Copy definition of learned primal-dual
class PrimalModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(6, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class DualModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(7, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class LearnedPD(nn.Module):
def __init__(self, vg, pg, n_iters, do_pingpong=False):
super().__init__()
self.n_iters = n_iters
self.vg = vg
self.pg = pg
self.do_pingpong = do_pingpong

for i in range(n_iters):
# To ensure that the parameters of the primal and dual modules
# are correctly distributed during parallel training, we register
# them as modules.
self.add_module(f"{i}_primal", PrimalModule())
self.add_module(f"{i}_dual", DualModule())

def forward(self, g):
B, C, H, W = g.shape
assert C == 1, "single channel support only for now"
h = g.new_zeros(B, 5, H, W)
f_primal = g.new_zeros(B, 5, *self.vg.shape[1:])

def dual_step(g, h, f, module):
x = torch.cat((h, f, g), dim=1)
out = module(x)
return h + out
def primal_step(f, update, module):
x = torch.cat((f, update), dim=1)
out = module(x)
return f + out

ts_op = ts.operator(self.vg[:1], self.pg.to_vec()[:, :1, :])
op = to_autograd(ts_op)
opT = to_autograd(ts_op.T)
def fp(x):
if self.do_pingpong:
x = x.cpu()
return op(x).cuda()
def bp(x):
if self.do_pingpong:
x = x.cpu()
return opT(x).cuda()

for i in range(self.n_iters):
primal_module = getattr(self, f"{i}_primal")
dual_module = getattr(self, f"{i}_dual")

f_dual = fp(f_primal[:, :1])
h = dual_step(g, h, f_dual, dual_module)
update = bp(h[:, :1])
f_primal = primal_step(f_primal, update, primal_module)

return f_primal[:, 0:1]
from learned_pd import LearnedPD


# Timing functions
notebooks/learned_pd_lightning.py: 86 changes (1 addition, 85 deletions)
@@ -65,94 +65,10 @@
import numpy as np
from tqdm import tqdm
from argparse import ArgumentParser
from learned_pd import LearnedPD

pl.seed_everything(123)

# Copy definition of learned primal-dual from previous notebook
class PrimalModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(6, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class DualModule(nn.Module):
def __init__(self):
super().__init__()
layers = [
nn.Conv2d(7, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 5, kernel_size=3, padding=1, bias=False),
]
self.block = nn.Sequential(*layers)

def forward(self, x):
return self.block(x)

class LearnedPD(nn.Module):
def __init__(self, vg, pg, n_iters, do_pingpong=False):
super().__init__()
self.n_iters = n_iters
self.vg = vg
self.pg = pg
self.do_pingpong = do_pingpong

for i in range(n_iters):
# To ensure that the parameters of the primal and dual modules
# are correctly distributed during parallel training, we register
# them as modules.
self.add_module(f"{i}_primal", PrimalModule())
self.add_module(f"{i}_dual", DualModule())

def forward(self, g):
B, C, H, W = g.shape
assert C == 1, "single channel support only for now"
h = g.new_zeros(B, 5, H, W)
f_primal = g.new_zeros(B, 5, *self.vg.shape[1:])

def dual_step(g, h, f, module):
x = torch.cat((h, f, g), dim=1)
out = module(x)
return h + out
def primal_step(f, update, module):
x = torch.cat((f, update), dim=1)
out = module(x)
return f + out

ts_op = ts.operator(self.vg[:1], self.pg.to_vec()[:, :1, :])
op = to_autograd(ts_op)
opT = to_autograd(ts_op.T)
def fp(x):
if self.do_pingpong:
x = x.cpu()
return op(x).to(x.device)
def bp(x):
if self.do_pingpong:
x = x.cpu()
return opT(x).to(x.device)

for i in range(self.n_iters):
primal_module = getattr(self, f"{i}_primal")
dual_module = getattr(self, f"{i}_dual")

f_dual = fp(f_primal[:, :1])
h = dual_step(g, h, f_dual, dual_module)
update = bp(h[:, :1])
f_primal = primal_step(f_primal, update, primal_module)

return f_primal[:, 0:1]


class LightningPD(pl.LightningModule):

def __init__(self, hparams):