Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Model 00023 #66

Merged
merged 18 commits into from
Nov 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 33 additions & 34 deletions README.md

Large diffs are not rendered by default.

146 changes: 142 additions & 4 deletions config/global.json
Original file line number Diff line number Diff line change
Expand Up @@ -952,7 +952,7 @@
"00007_INPAINT_BRAIN_MRI": {
"execution": {
"package_name": "00007_INPAINT_BRAIN_MRI",
"package_link": "https://zenodo.org/record/7042217/files/00007_INPAINT_BRAIN_MRI.zip?download=1",
"package_link": "https://zenodo.org/records/10214796/files/00007_INPAINT_BRAIN_MRI.zip?download=1",
"model_name": "inp_gen",
"extension": ".pth",
"image_size": [
Expand Down Expand Up @@ -3251,7 +3251,7 @@
"pytorch-lightning==1.4.7",
"pandas",
"comet-ml",
"monai",
"monai<=1.0.1",
"grad-cam",
"matplotlib",
"monai[skimage]",
Expand Down Expand Up @@ -3354,7 +3354,145 @@
"view: default=\"la\" help=",
"subcat: default=\"2ch\", help="
],
"comment": ""
"comment": "Conditional WGAN-GP Model for Cardiac Image generation with age offset (Trained on UK Biobank). A conditional wasserstein generative adversarial network with gradient penalty (WGAN_GP) that generates MRI cardiac images. The pixel dimension of the generated images is 256x256. The uploaded ZIP file contains the files model.ckpt (model weights), __init__.py (image generation method and utils), a requirements.txt, and the used GAN training config file. A sample_image.png is provided for example generation."
}
}
},
"00023_PIX2PIXHD_BREAST_DCEMRI": {
"execution": {
"package_name": "00023",
"package_link": "https://zenodo.org/records/10215478/files/00023.zip?download=1",
"model_name": "30_net_G",
"extension": ".pth",
"image_size": [
512, 512
],
"dependencies": [
"numpy",
"torch",
"torchvision",
"pillow"
],
"generate_method": {
"name": "generate",
"args": {
"base": [
"model_file",
"num_samples",
"output_path",
"save_images"
],
"custom": {
"input_path": "input/",
"image_size": "512",
"gpu_id": "0"
}
}
}
},
"selection": {
"performance": {
"SSIM": 0.726,
"MSE": 34.88,
"NSME": null,
"PSNR": 32.91,
"IS": null,
"FID": 28.71,
"turing_test": "",
"downstream_task": {
"CLF": {
"trained_on_fake": {
"accuracy": null,
"precision": null,
"recall": null,
"f1": null,
"specificity": null,
"AUROC": null,
"AUPRC": null
},
"trained_on_real_and_fake": {},
"trained_on_real": {}
},
"SEG": {
"trained_on_fake": {
"dice": 0.687,
"jaccard": null,
"accuracy": null,
"precision": null,
"recall": null,
"f1": null
},
"trained_on_real_and_fake": {
"dice": "0.797"
},
"trained_on_real": {
"dice": "0.790"
}
}
}
},
"use_cases": [
"segmentation",
"tumour localization",
"classification",
"simulation"
],
"organ": [
"breast"
],
"modality": [
"dce-mri",
"mri",
"t1",
"t1-weighted",
"fat-saturated"
],
"vendors": [],
"centres": [
"Duke Hospital"
],
"function": [],
"condition": [],
"dataset": [
"DUKE"
],
"augmentations": [],
"generates": [],
"height": 512,
"width": 512,
"depth": 1,
"type": "pix2pixHD",
"license": "BSD License",
"dataset_type": "DCE-MRI",
"privacy_preservation": "",
"tags": [
"dce-mri",
"postcontrast",
"synthesis",
"breast",
"mri",
"treatment",
"i2i",
"pix2pixHD",
"SPIE"
],
"year": 2023
},
"description": {
"title": "Pre- to Post-Contrast Breast MRI Synthesis for Enhanced Tumour Segmentation",
"provided_date": "11.2023",
"trained_date": "2023",
"provided_after_epoch": 30,
"version": "1.0",
"publication": "https://doi.org/10.48550/arXiv.2311.10879",
"doi": [
"https://doi.org/10.48550/arXiv.2311.10879"
],
"inputs": [
"input_path: default=input/, help=the path to .png breast DCE-MRI images that are translated from pre-contrast to the first DCE post-contrast sequence. ",
"image_size: default=[512, 512], help=list with image height and width. ",
"gpu_id: default=0, help=the gpu to run the model on."
],
"comment": "Pix2Pix model for DCE-MRI slice generation from pre-contrast image input (Trained on Duke Breast MRI Dataset). A pix2pixHD mmodel that generates DCE-MRI axial slices based on checkpoint after 30 training epochs. \nThe pixel dimension of the generated images is 512x512. Several generated 2d slices can be merged together to create a 3D MRI volume with tumour tissue highlighted by synthetic contrast. \nThe uploaded ZIP file contains the files 30_net_G.pth (model weights), __init__.py (image generation method and utils), a requirements.txt, and further code below the /src folder for handling of model, data, and training utils. Sample input images are provided as an example for image generation."
}
}
}
Binary file added docs/source/_static/samples/00023.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
32 changes: 32 additions & 0 deletions docs/source/model_documentation.md
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,7 @@ PGGAN Model for Patch Generation of Polyps with Corresponding Segmentation Masks

```python
# create samples with this model
from medigan import Generators
Generators().generate(
model_id="00009_PGGAN_POLYP_PATCHES_W_MASKS",
gpu_id=None,
Expand Down Expand Up @@ -638,4 +639,35 @@ inputs= [
"view: default=\"la\" help=",
"subcat: default=\"2ch\", help="
]
```

# 00023_PIX2PIXHD_BREAST_DCEMRI

Pix2Pix model for DCE-MRI slice generation from pre-contrast image input (Trained on Duke Breast MRI Dataset) \
<sub> **Note:** A pix2pixHD model that generates DCE-MRI axial slices based on a checkpoint after 30 training epochs.
The pixel dimension of the generated images is 512x512. Several generated 2D slices can be merged together to create a 3D MRI volume with tumour tissue highlighted by synthetic contrast.
The uploaded ZIP file contains the files 30_net_G.pth (model weights), __init__.py (image generation method and utils), a requirements.txt, and further code below the /src folder for handling of model, data, and training utils. Sample input images are provided as an example for image generation. </sub>


| Output type | Modality | Model type | Output size | Base dataset | Output examples | `model_id` | Hosted on | Reference |
|--------------------------------|:--------:|:----------:|:-----------:|:------------:|:------------------------------------:|:------:|:------:|:------:|
| DCE-MRI sequence 1 axial slice | DCE-MRI | pix2pixHD | 512x512 | [Duke Dataset](https://sites.duke.edu/mazurowski/resources/breast-cancer-mri-dataset/) | ![sample](_static/samples/00023.png) | `00023_PIX2PIXHD_BREAST_DCEMRI` | [Zenodo (10210944)](https://zenodo.org/doi/10.5281/zenodo.10210944) | [Osuala et al (2023)](https://doi.org/10.48550/arXiv.2311.10879) |


```python
# create samples with this model
from medigan import Generators
Generators().generate(
model_id="00023_PIX2PIXHD_BREAST_DCEMRI",
input_path= "input/",
image_size=[512, 512],
gpu_id=0,
)

# model specific parameters
inputs = [
"input_path: default=input/, help=the path to .png breast DCE-MRI images that are translated from pre-contrast to the first DCE post-contrast sequence. ",
"image_size: default=[512, 512], help=list with image height and width. ",
"gpu_id: default=0, help=the gpu to run the model on.",
]
```
5 changes: 5 additions & 0 deletions src/medigan/contribute_model/model_contributor.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,9 @@ def push_to_zenodo(
self.zenodo_model_uploader = ZenodoModelUploader(
model_id=self.model_id, access_token=access_token
)
# Update in case previous access token gave an error
self.zenodo_model_uploader.access_token = access_token

return self.zenodo_model_uploader.push(
metadata=self.metadata,
package_path=self.package_path,
Expand Down Expand Up @@ -301,6 +304,8 @@ def push_to_github(
self.github_model_uploader = GithubModelUploader(
model_id=self.model_id, access_token=access_token
)
# Update in case previous access token gave an error
self.github_model_uploader.access_token = access_token

return self.github_model_uploader.push(
metadata=self.metadata,
Expand Down
8 changes: 4 additions & 4 deletions src/medigan/contribute_model/zenodo_model_uploader.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,9 +203,9 @@ def upload(self, file_path: str, filename: str, bucket_url: str) -> dict:
params=self.params,
)

if not r.status_code == 200:
if not r.status_code == 200 and not r.status_code == 201:
raise Exception(
f"{self.model_id}: Error ({r.status_code}!=200) during Zenodo ('{bucket_url}') upload (step 2: uploading model as zip file): {r.json()}"
f"{self.model_id}: Error ({r.status_code}!= any of (200, 201) ) during Zenodo ('{bucket_url}') upload (step 2: uploading model as zip file): {r.json()}"
)
return r

Expand All @@ -231,9 +231,9 @@ def upload_descriptive_data(self, deposition_id: str, data: dict) -> dict:
data=json.dumps(data),
headers=ZENODO_HEADERS,
)
if not r.status_code == 200:
if not r.status_code == 200 and not r.status_code == 201:
raise Exception(
f"{self.model_id}: Error ({r.status_code}!=200) during Zenodo ('{deposition_url}') upload (step 3: updating metadata): {r.json()}"
f"{self.model_id}: Error ({r.status_code}!= any of (200, 201) ) during Zenodo ('{deposition_url}') upload (step 3: updating metadata): {r.json()}"
)
return r

Expand Down
5 changes: 4 additions & 1 deletion src/medigan/execute_model/model_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,10 @@ def _import_package_as_lib(self):
self.serialised_model_file_path = f"{MODEL_FOLDER}/{self.model_id}/{self.model_name}{self.model_extension}"
except Exception as e:
logging.error(
f"{self.model_id}: Error while importing {self.package_name} from /{self.model_id}: {e}"
f"{self.model_id}: Error occurred while trying to import "
f"'{MODEL_FOLDER}.{self.model_id}.{self.package_name}'."
f"Fallback import of '{MODEL_FOLDER}.{self.model_id}' also failed. "
f"Please make sure the module '{MODEL_FOLDER}' is not imported from elsewhere in your syspath: {e}"
)
raise e

Expand Down
25 changes: 22 additions & 3 deletions tests/model_integration_test_manual.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
import logging

MODEL_ID = "YOUR_MODEL_ID_HERE"
MODEL_ID = "00002_DCGAN_MMG_MASS_ROI" # "00007_BEZIERCURVE_TUMOUR_MASK"
NUM_SAMPLES = 10
MODEL_ID = 23 # "00023_PIX2PIXHD_BREAST_DCEMRI" #"00002_DCGAN_MMG_MASS_ROI" # "00007_BEZIERCURVE_TUMOUR_MASK"
NUM_SAMPLES = 2
OUTPUT_PATH = f"output/{MODEL_ID}/"
try:
from src.medigan.generators import Generators
Expand All @@ -21,11 +21,30 @@
model_id=MODEL_ID,
num_samples=NUM_SAMPLES,
output_path=OUTPUT_PATH,
input_path="input/",
gpu_id=0,
image_size=448,
install_dependencies=True,
)

generators.get_as_torch_dataloader(
data_loader = generators.get_as_torch_dataloader(
model_id=MODEL_ID,
num_samples=NUM_SAMPLES,
output_path=OUTPUT_PATH,
input_path="input/",
gpu_id=0,
image_size=448,
# prefetch_factor=2, # debugging with torch v2.0.0: This will raise an error for torch DataLoader if num_workers == None at the same time.
)

print(f"len(data_loader): {len(data_loader)}")

if len(data_loader) != NUM_SAMPLES:
logging.warning(
f"{MODEL_ID}: The number of samples in the dataloader (={len(data_loader)}) is not equal the number of samples requested (={NUM_SAMPLES})."
)

#### Get the object at index 0 from the dataloader
data_dict = next(iter(data_loader))

print(f"data_dict: {data_dict}")
20 changes: 18 additions & 2 deletions tests/test_model_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,15 @@
3,
),
("00022_WGAN_CARDIAC_AGING", {}, 3),
(
"00023_PIX2PIXHD_BREAST_DCEMRI",
{
"input_path": "input",
"gpu_id": 0,
"image_size": 448,
},
3,
),
]


Expand Down Expand Up @@ -252,7 +261,14 @@ def test_get_dataloader_method(self, model_id):
data_loader = self.generators.get_as_torch_dataloader(
model_id=model_id, num_samples=self.num_samples
)
self.logger.debug(f"len(data_loader): {len(data_loader)}")
self.logger.debug(f"{model_id}: len(data_loader): {len(data_loader)}")

if len(data_loader) != self.num_samples:
logging.warning(
f"{model_id}: The number of samples in the dataloader (={len(data_loader)}) is not equal the number of samples requested (={self.num_samples}). "
f"Hint: Revise if the model's internal generate() function returned tuples as required in get_as_torch_dataloader()."
)

#### Get the object at index 0 from the dataloader
data_dict = next(iter(data_loader))

Expand Down Expand Up @@ -296,7 +312,7 @@ def _check_if_samples_were_generated(
):
# check if the number of generated samples of model_id_1 is as expected.
file_list = glob.glob(self.test_output_path + "/*")
self.logger.debug(f"{len(file_list)} == {self.num_samples} ?")
self.logger.debug(f"{model_id}: {len(file_list)} == {self.num_samples} ?")
if num_samples is None:
num_samples = self.num_samples

Expand Down
Loading