From b3c589b2960158d3c02a1bc694817ddccdfec872 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Wed, 2 Apr 2025 22:16:00 +0000 Subject: [PATCH 1/9] fix Error in MAISI Signed-off-by: Can-Zhao --- generation/maisi/maisi_train_vae_tutorial.ipynb | 2 +- generation/maisi/scripts/augmentation.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/generation/maisi/maisi_train_vae_tutorial.ipynb b/generation/maisi/maisi_train_vae_tutorial.ipynb index 26343a6a9..780e215a5 100644 --- a/generation/maisi/maisi_train_vae_tutorial.ipynb +++ b/generation/maisi/maisi_train_vae_tutorial.ipynb @@ -326,7 +326,7 @@ } ], "source": [ - "config_file = \"./configs/config_maisi.json\"\n", + "config_file = \"./configs/config_maisi3d-rflow.json\"\n", "config_dict = json.load(open(config_file, \"r\"))\n", "for k, v in config_dict.items():\n", " setattr(args, k, v)\n", diff --git a/generation/maisi/scripts/augmentation.py b/generation/maisi/scripts/augmentation.py index 64469403a..c406c48ef 100644 --- a/generation/maisi/scripts/augmentation.py +++ b/generation/maisi/scripts/augmentation.py @@ -60,7 +60,7 @@ def dilate3d(input_tensor, erosion=3): return output.squeeze(0).squeeze(0) -def augmentation_tumor_bone(pt_nda, output_size, random_seed): +def augmentation_tumor_bone(pt_nda, output_size, random_seed = None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 128] = 1 @@ -113,7 +113,7 @@ def augmentation_tumor_bone(pt_nda, output_size, random_seed): return pt_nda -def augmentation_tumor_liver(pt_nda, output_size, random_seed): +def augmentation_tumor_liver(pt_nda, output_size, random_seed = None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 1] = 1 @@ -163,7 +163,7 @@ def augmentation_tumor_liver(pt_nda, output_size, random_seed): return pt_nda -def augmentation_tumor_lung(pt_nda, output_size, random_seed): +def augmentation_tumor_lung(pt_nda, output_size, random_seed = None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 23] = 1 @@ -227,7 +227,7 @@ def augmentation_tumor_lung(pt_nda, output_size, random_seed): return pt_nda -def augmentation_tumor_pancreas(pt_nda, output_size, random_seed): +def augmentation_tumor_pancreas(pt_nda, output_size, random_seed = None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 4] = 1 @@ -277,7 +277,7 @@ def augmentation_tumor_pancreas(pt_nda, output_size, random_seed): return pt_nda -def augmentation_tumor_colon(pt_nda, output_size, random_seed): +def augmentation_tumor_colon(pt_nda, output_size, random_seed = None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 27] = 1 @@ -335,7 +335,7 @@ def augmentation_tumor_colon(pt_nda, output_size, random_seed): return pt_nda -def augmentation_body(pt_nda, random_seed): +def augmentation_body(pt_nda, random_seed = None): volume = pt_nda.squeeze(0) zoom = RandZoom(min_zoom=0.99, max_zoom=1.01, mode="nearest", align_corners=None, prob=1.0) @@ -347,7 +347,7 @@ def augmentation_body(pt_nda, random_seed): return pt_nda -def augmentation(pt_nda, output_size, random_seed): +def augmentation(pt_nda, output_size, random_seed = None): label_list = torch.unique(pt_nda) label_list = list(label_list.cpu().numpy()) From b32390c4d973731078540d13d570f43f69653e1b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 22:18:20 
+0000 Subject: [PATCH 2/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- generation/maisi/scripts/augmentation.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/generation/maisi/scripts/augmentation.py b/generation/maisi/scripts/augmentation.py index c406c48ef..55f586594 100644 --- a/generation/maisi/scripts/augmentation.py +++ b/generation/maisi/scripts/augmentation.py @@ -60,7 +60,7 @@ def dilate3d(input_tensor, erosion=3): return output.squeeze(0).squeeze(0) -def augmentation_tumor_bone(pt_nda, output_size, random_seed = None): +def augmentation_tumor_bone(pt_nda, output_size, random_seed=None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 128] = 1 @@ -113,7 +113,7 @@ def augmentation_tumor_bone(pt_nda, output_size, random_seed = None): return pt_nda -def augmentation_tumor_liver(pt_nda, output_size, random_seed = None): +def augmentation_tumor_liver(pt_nda, output_size, random_seed=None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 1] = 1 @@ -163,7 +163,7 @@ def augmentation_tumor_liver(pt_nda, output_size, random_seed = None): return pt_nda -def augmentation_tumor_lung(pt_nda, output_size, random_seed = None): +def augmentation_tumor_lung(pt_nda, output_size, random_seed=None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 23] = 1 @@ -227,7 +227,7 @@ def augmentation_tumor_lung(pt_nda, output_size, random_seed = None): return pt_nda -def augmentation_tumor_pancreas(pt_nda, output_size, random_seed = None): +def augmentation_tumor_pancreas(pt_nda, output_size, random_seed=None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 4] = 1 @@ -277,7 +277,7 @@ def augmentation_tumor_pancreas(pt_nda, output_size, random_seed = None): return pt_nda -def augmentation_tumor_colon(pt_nda, output_size, random_seed = None): +def augmentation_tumor_colon(pt_nda, output_size, random_seed=None): volume = pt_nda.squeeze(0) real_l_volume_ = torch.zeros_like(volume) real_l_volume_[volume == 27] = 1 @@ -335,7 +335,7 @@ def augmentation_tumor_colon(pt_nda, output_size, random_seed = None): return pt_nda -def augmentation_body(pt_nda, random_seed = None): +def augmentation_body(pt_nda, random_seed=None): volume = pt_nda.squeeze(0) zoom = RandZoom(min_zoom=0.99, max_zoom=1.01, mode="nearest", align_corners=None, prob=1.0) @@ -347,7 +347,7 @@ def augmentation_body(pt_nda, random_seed = None): return pt_nda -def augmentation(pt_nda, output_size, random_seed = None): +def augmentation(pt_nda, output_size, random_seed=None): label_list = torch.unique(pt_nda) label_list = list(label_list.cpu().numpy()) From 3e815e987a54eadfdfde5e2ef1effeed3540b884 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 14:32:01 -0800 Subject: [PATCH 3/9] update new url Signed-off-by: Can-Zhao --- generation/maisi/README.md | 3 + .../maisi/maisi_inference_tutorial.ipynb | 70 +------ .../maisi/scripts/download_model_data.py | 186 ++++++++++++++++++ generation/maisi/scripts/inference.py | 74 +------ 4 files changed, 195 insertions(+), 138 deletions(-) create mode 100644 generation/maisi/scripts/download_model_data.py diff --git a/generation/maisi/README.md b/generation/maisi/README.md index c9f4c117e..ea9ea072d 100644 --- a/generation/maisi/README.md +++ b/generation/maisi/README.md @@ -1,3 +1,6 @@ +# 🚨🚨🚨 THIS FOLDER IS DEPRECATED 🚨🚨🚨 +# 👉 Please 
switch to: [https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main](https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main) + # Medical AI for Synthetic Imaging (MAISI) This example demonstrates the applications of training and validating NVIDIA MAISI, a 3D Latent Diffusion Model (LDM) capable of generating large CT images accompanied by corresponding segmentation masks. It supports variable volume size and voxel spacing and allows for the precise control of organ/tumor size. diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb index 69ce91b9b..a2ff3e612 100644 --- a/generation/maisi/maisi_inference_tutorial.ipynb +++ b/generation/maisi/maisi_inference_tutorial.ipynb @@ -53,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "67e2019e-1556-41a6-95e8-5d1a65f8b3a1", "metadata": { "scrolled": true @@ -112,6 +112,7 @@ "from scripts.utils import define_instance\n", "from scripts.utils_plot import find_label_center_loc, get_xyz_plot, show_image\n", "from scripts.diff_model_setting import setup_logging\n", + "from scripts.download_model_data import download_model_data\n", "\n", "print_config()\n", "\n", @@ -170,7 +171,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "e3c12dcc", "metadata": {}, "outputs": [ @@ -204,70 +205,7 @@ " os.makedirs(directory, exist_ok=True)\n", "root_dir = tempfile.mkdtemp() if directory is None else directory\n", "\n", - "# TODO: remove the `files` after the files are uploaded to the NGC\n", - "files = [\n", - " {\n", - " \"path\": \"models/autoencoder_epoch273.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials\"\n", - " \"/model_zoo/model_maisi_autoencoder_epoch273_alternative.pt\",\n", - " },\n", - " {\n", - " \"path\": \"models/mask_generation_autoencoder.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\" \"/tutorials/mask_generation_autoencoder.pt\",\n", - " },\n", - " {\n", - " \"path\": \"models/mask_generation_diffusion_unet.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n", - " \"/tutorials/model_zoo/model_maisi_mask_generation_diffusion_unet_v2.pt\",\n", - " },\n", - " {\n", - " \"path\": \"configs/all_anatomy_size_condtions.json\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/all_anatomy_size_condtions.json\",\n", - " },\n", - " {\n", - " \"path\": \"datasets/all_masks_flexible_size_and_spacing_4000.zip\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n", - " \"/tutorials/all_masks_flexible_size_and_spacing_4000.zip\",\n", - " },\n", - "]\n", - "\n", - "if maisi_version == \"maisi3d-ddpm\":\n", - " files += [\n", - " {\n", - " \"path\": \"models/diff_unet_3d_ddpm.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo\"\n", - " \"/model_maisi_input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1_alternative.pt\",\n", - " },\n", - " {\n", - " \"path\": \"models/controlnet_3d_ddpm.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo\"\n", - " \"/model_maisi_controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current_alternative.pt\",\n", - " },\n", - " {\n", - " \"path\": \"configs/candidate_masks_flexible_size_and_spacing_3000.json\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n", - " 
\"/tutorials/candidate_masks_flexible_size_and_spacing_3000.json\",\n", - " },\n", - " ]\n", - "elif maisi_version == \"maisi3d-rflow\":\n", - " files += [\n", - " {\n", - " \"path\": \"models/diff_unet_3d_rflow.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/\"\n", - " \"diff_unet_ckpt_rflow_epoch19350.pt\",\n", - " },\n", - " {\n", - " \"path\": \"models/controlnet_3d_rflow.pt\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/controlnet_rflow_epoch60.pt\",\n", - " },\n", - " {\n", - " \"path\": \"configs/candidate_masks_flexible_size_and_spacing_4000.json\",\n", - " \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n", - " \"/tutorials/candidate_masks_flexible_size_and_spacing_4000.json\",\n", - " },\n", - " ]\n", - "else:\n", - " raise ValueError(f\"maisi_version has to be chosen from ['maisi3d-ddpm', 'maisi3d-rflow'], yet got {maisi_version}.\")\n", + "download_model_data(maisi_version,root_dir)\n", "\n", "for file in files:\n", " file[\"path\"] = file[\"path\"] if \"datasets/\" not in file[\"path\"] else os.path.join(root_dir, file[\"path\"])\n", diff --git a/generation/maisi/scripts/download_model_data.py b/generation/maisi/scripts/download_model_data.py new file mode 100644 index 000000000..2218d8209 --- /dev/null +++ b/generation/maisi/scripts/download_model_data.py @@ -0,0 +1,186 @@ +import os, subprocess, shutil +import argparse +from tqdm.auto import tqdm +from monai.apps import download_url +from pathlib import Path +from huggingface_hub import snapshot_download +from typing import List, Dict, Optional + +def fetch_to_hf_path_cmd( + items: List[Dict[str, str]], + root_dir: str = "./", # staging dir for CLI output + revision: str = "main", + overwrite: bool = False, + token: Optional[str] = None, # or rely on env HUGGINGFACE_HUB_TOKEN +) -> list[str]: + """ + items: list of {"repo_id": "...", "filename": "path/in/repo.ext", "path": "local/target.ext"} + Returns list of saved local paths (in the same order as items). + """ + saved = [] + root = Path(root_dir) + root.mkdir(parents=True, exist_ok=True) + + # Env for subprocess; keep Rust fast-path off to avoid notebook progress quirks + env = os.environ.copy() + if token: + env["HUGGINGFACE_HUB_TOKEN"] = token + env.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "0") # safer in Jupyter + env.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "0") # show CLI progress in terminal + + for it in items: + repo_id = it["repo_id"] + repo_file = it["filename"] + dst = Path(it["path"]) + dst.parent.mkdir(parents=True, exist_ok=True) + + if dst.exists() and not overwrite: + saved.append(str(dst)) + continue + + # Build command (no shell=True; no quoting issues) + cmd = [ + "huggingface-cli", "download", + repo_id, + "--include", repo_file, + "--revision", revision, + "--local-dir", str(root), + ] + # Run + subprocess.run(cmd, check=True, env=env) + + # Source path where CLI placed the file + src = root / repo_file + if not src.exists(): + raise FileNotFoundError( + f"Expected downloaded file missing: {src}\n" + f"Tip: authenticate (`huggingface-cli login` or pass token=...)," + f" and avoid shared-IP 429s." 
+ ) + + # Move to desired target + if dst.exists() and overwrite: + dst.unlink() + if src.resolve() != dst.resolve(): + dst.parent.mkdir(parents=True, exist_ok=True) + shutil.move(str(src), str(dst)) + saved.append(str(dst)) + + return saved + + + +def download_model_data(generate_version,root_dir, model_only=False): + # TODO: remove the `files` after the files are uploaded to the NGC + if generate_version == "ddpm-ct" or generate_version == "rflow-ct": + files = [ + { + "path": "models/autoencoder_v1.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename":"models/autoencoder_v1.pt", + }, + { + "path": "models/mask_generation_autoencoder.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/mask_generation_autoencoder.pt", + }, + { + "path": "models/mask_generation_diffusion_unet.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/mask_generation_diffusion_unet.pt", + }] + if not model_only: + files += [ + { + "path": "datasets/all_anatomy_size_conditions.json", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "datasets/all_anatomy_size_conditions.json", + }, + { + "path": "datasets/all_masks_flexible_size_and_spacing_4000.zip", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "datasets/all_masks_flexible_size_and_spacing_4000.zip", + }, + ] + elif generate_version == "rflow-mr": + files = [ + { + "path": "models/autoencoder_v2.pt", + "repo_id": "nvidia/NV-Generate-MR", + "filename": "models/autoencoder_v2.pt", + }, + { + "path": "models/diff_unet_3d_rflow-mr.pt", + "repo_id": "nvidia/NV-Generate-MR", + "filename": "models/diff_unet_3d_rflow-mr.pt", + } + ] + else: + raise ValueError(f"generate_version has to be chosen from ['ddpm-ct', 'rflow-ct', 'rflow-mr'], yet got {generate_version}.") + if generate_version == "ddpm-ct": + files += [ + { + "path": "models/diff_unet_3d_ddpm-ct.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/diff_unet_3d_ddpm-ct.pt", + }, + { + "path": "models/controlnet_3d_ddpm-ct.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/controlnet_3d_ddpm-ct.pt", + }] + if not model_only: + files += [ + { + "path": "datasets/candidate_masks_flexible_size_and_spacing_3000.json", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "datasets/candidate_masks_flexible_size_and_spacing_3000.json", + }, + ] + elif generate_version == "rflow-ct": + files += [ + { + "path": "models/diff_unet_3d_rflow-ct.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/diff_unet_3d_rflow-ct.pt", + }, + { + "path": "models/controlnet_3d_rflow-ct.pt", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "models/controlnet_3d_rflow-ct.pt", + }] + if not model_only: + files += [ + { + "path": "datasets/candidate_masks_flexible_size_and_spacing_4000.json", + "repo_id": "nvidia/NV-Generate-CT", + "filename": "datasets/candidate_masks_flexible_size_and_spacing_4000.json", + }, + ] + + for file in files: + file["path"] = file["path"] if "datasets/" not in file["path"] else os.path.join(root_dir, file["path"]) + if "repo_id" in file.keys(): + path = fetch_to_hf_path_cmd([file],root_dir=root_dir, revision="main") + print("saved to:", path) + else: + download_url(url=file["url"], filepath=file["path"]) + return + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Model downloading") + parser.add_argument( + "--version", + type=str, + default="rflow-ct", + ) + parser.add_argument( + "--root_dir", + type=str, + default="./", + ) + parser.add_argument("--model_only", dest="model_only", action="store_true", 
help="Download model only, not any dataset") + + args = parser.parse_args() + download_model_data(args.version, args.root_dir, args.model_only) diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py index 3f81f9c49..162a8c1b7 100644 --- a/generation/maisi/scripts/inference.py +++ b/generation/maisi/scripts/inference.py @@ -27,6 +27,7 @@ from scripts.sample import LDMSampler, check_input from scripts.utils import define_instance from scripts.utils_plot import find_label_center_loc, get_xyz_plot, show_image +from scripts.download_model_data import download_model_data def main(): @@ -88,78 +89,7 @@ def main(): root_dir = tempfile.mkdtemp() if directory is None else directory print(root_dir) - # TODO: remove the `files` after the files are uploaded to the NGC - files = [ - { - "path": "models/autoencoder_epoch273.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials" - "/model_zoo/model_maisi_autoencoder_epoch273_alternative.pt", - }, - { - "path": "models/mask_generation_autoencoder.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai" - "/tutorials/mask_generation_autoencoder.pt", - }, - { - "path": "models/mask_generation_diffusion_unet.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai" - "/tutorials/model_zoo/model_maisi_mask_generation_diffusion_unet_v2.pt", - }, - { - "path": "configs/all_anatomy_size_condtions.json", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/all_anatomy_size_condtions.json", - }, - { - "path": "datasets/all_masks_flexible_size_and_spacing_4000.zip", - "url": "https://developer.download.nvidia.com/assets/Clara/monai" - "/tutorials/all_masks_flexible_size_and_spacing_4000.zip", - }, - ] - - if maisi_version == "maisi3d-ddpm": - files += [ - { - "path": "models/diff_unet_3d_ddpm.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo" - "/model_maisi_input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1_alternative.pt", - }, - { - "path": "models/controlnet_3d_ddpm.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo" - "/model_maisi_controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current_alternative.pt", - }, - { - "path": "configs/candidate_masks_flexible_size_and_spacing_3000.json", - "url": "https://developer.download.nvidia.com/assets/Clara/monai" - "/tutorials/candidate_masks_flexible_size_and_spacing_3000.json", - }, - ] - elif maisi_version == "maisi3d-rflow": - files += [ - { - "path": "models/diff_unet_3d_rflow.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/" - "diff_unet_ckpt_rflow_epoch19350.pt", - }, - { - "path": "models/controlnet_3d_rflow.pt", - "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/" - "controlnet_rflow_epoch60.pt", - }, - { - "path": "configs/candidate_masks_flexible_size_and_spacing_4000.json", - "url": "https://developer.download.nvidia.com/assets/Clara/monai" - "/tutorials/candidate_masks_flexible_size_and_spacing_4000.json", - }, - ] - else: - raise ValueError( - f"maisi_version has to be chosen from ['maisi3d-ddpm', 'maisi3d-rflow'], yet got {maisi_version}." 
- ) - - for file in files: - file["path"] = file["path"] if "datasets/" not in file["path"] else os.path.join(root_dir, file["path"]) - download_url(url=file["url"], filepath=file["path"]) + download_model_data(maisi_version,root_dir) # ## Read in environment setting, including data directory, model directory, and output directory # The information for data directory, model directory, and output directory are saved in ./configs/environment.json From 9b0a18f2927e6fdaabd62fe430a2e8f06ace1756 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 22:35:22 +0000 Subject: [PATCH 4/9] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- generation/maisi/README.md | 2 +- .../maisi/maisi_inference_tutorial.ipynb | 2 +- .../maisi/scripts/download_model_data.py | 51 +++++++++++-------- generation/maisi/scripts/inference.py | 2 +- 4 files changed, 34 insertions(+), 23 deletions(-) diff --git a/generation/maisi/README.md b/generation/maisi/README.md index ea9ea072d..7a24790b3 100644 --- a/generation/maisi/README.md +++ b/generation/maisi/README.md @@ -1,4 +1,4 @@ -# 🚨🚨🚨 THIS FOLDER IS DEPRECATED 🚨🚨🚨 +# 🚨🚨🚨 THIS FOLDER IS DEPRECATED 🚨🚨🚨 # 👉 Please switch to: [https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main](https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main) # Medical AI for Synthetic Imaging (MAISI) diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb index a2ff3e612..3a12879a7 100644 --- a/generation/maisi/maisi_inference_tutorial.ipynb +++ b/generation/maisi/maisi_inference_tutorial.ipynb @@ -205,7 +205,7 @@ " os.makedirs(directory, exist_ok=True)\n", "root_dir = tempfile.mkdtemp() if directory is None else directory\n", "\n", - "download_model_data(maisi_version,root_dir)\n", + "download_model_data(maisi_version, root_dir)\n", "\n", "for file in files:\n", " file[\"path\"] = file[\"path\"] if \"datasets/\" not in file[\"path\"] else os.path.join(root_dir, file[\"path\"])\n", diff --git a/generation/maisi/scripts/download_model_data.py b/generation/maisi/scripts/download_model_data.py index 2218d8209..3d96c5381 100644 --- a/generation/maisi/scripts/download_model_data.py +++ b/generation/maisi/scripts/download_model_data.py @@ -6,12 +6,13 @@ from huggingface_hub import snapshot_download from typing import List, Dict, Optional + def fetch_to_hf_path_cmd( items: List[Dict[str, str]], - root_dir: str = "./", # staging dir for CLI output + root_dir: str = "./", # staging dir for CLI output revision: str = "main", overwrite: bool = False, - token: Optional[str] = None, # or rely on env HUGGINGFACE_HUB_TOKEN + token: Optional[str] = None, # or rely on env HUGGINGFACE_HUB_TOKEN ) -> list[str]: """ items: list of {"repo_id": "...", "filename": "path/in/repo.ext", "path": "local/target.ext"} @@ -25,11 +26,11 @@ def fetch_to_hf_path_cmd( env = os.environ.copy() if token: env["HUGGINGFACE_HUB_TOKEN"] = token - env.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "0") # safer in Jupyter - env.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "0") # show CLI progress in terminal + env.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "0") # safer in Jupyter + env.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "0") # show CLI progress in terminal for it in items: - repo_id = it["repo_id"] + repo_id = it["repo_id"] repo_file = it["filename"] dst = Path(it["path"]) dst.parent.mkdir(parents=True, exist_ok=True) @@ -40,11 +41,15 @@ def 
fetch_to_hf_path_cmd( # Build command (no shell=True; no quoting issues) cmd = [ - "huggingface-cli", "download", + "huggingface-cli", + "download", repo_id, - "--include", repo_file, - "--revision", revision, - "--local-dir", str(root), + "--include", + repo_file, + "--revision", + revision, + "--local-dir", + str(root), ] # Run subprocess.run(cmd, check=True, env=env) @@ -69,15 +74,14 @@ def fetch_to_hf_path_cmd( return saved - -def download_model_data(generate_version,root_dir, model_only=False): +def download_model_data(generate_version, root_dir, model_only=False): # TODO: remove the `files` after the files are uploaded to the NGC if generate_version == "ddpm-ct" or generate_version == "rflow-ct": files = [ { "path": "models/autoencoder_v1.pt", "repo_id": "nvidia/NV-Generate-CT", - "filename":"models/autoencoder_v1.pt", + "filename": "models/autoencoder_v1.pt", }, { "path": "models/mask_generation_autoencoder.pt", @@ -88,7 +92,8 @@ def download_model_data(generate_version,root_dir, model_only=False): "path": "models/mask_generation_diffusion_unet.pt", "repo_id": "nvidia/NV-Generate-CT", "filename": "models/mask_generation_diffusion_unet.pt", - }] + }, + ] if not model_only: files += [ { @@ -113,10 +118,12 @@ def download_model_data(generate_version,root_dir, model_only=False): "path": "models/diff_unet_3d_rflow-mr.pt", "repo_id": "nvidia/NV-Generate-MR", "filename": "models/diff_unet_3d_rflow-mr.pt", - } + }, ] else: - raise ValueError(f"generate_version has to be chosen from ['ddpm-ct', 'rflow-ct', 'rflow-mr'], yet got {generate_version}.") + raise ValueError( + f"generate_version has to be chosen from ['ddpm-ct', 'rflow-ct', 'rflow-mr'], yet got {generate_version}." + ) if generate_version == "ddpm-ct": files += [ { @@ -128,7 +135,8 @@ def download_model_data(generate_version,root_dir, model_only=False): "path": "models/controlnet_3d_ddpm-ct.pt", "repo_id": "nvidia/NV-Generate-CT", "filename": "models/controlnet_3d_ddpm-ct.pt", - }] + }, + ] if not model_only: files += [ { @@ -148,7 +156,8 @@ def download_model_data(generate_version,root_dir, model_only=False): "path": "models/controlnet_3d_rflow-ct.pt", "repo_id": "nvidia/NV-Generate-CT", "filename": "models/controlnet_3d_rflow-ct.pt", - }] + }, + ] if not model_only: files += [ { @@ -157,11 +166,11 @@ def download_model_data(generate_version,root_dir, model_only=False): "filename": "datasets/candidate_masks_flexible_size_and_spacing_4000.json", }, ] - + for file in files: file["path"] = file["path"] if "datasets/" not in file["path"] else os.path.join(root_dir, file["path"]) if "repo_id" in file.keys(): - path = fetch_to_hf_path_cmd([file],root_dir=root_dir, revision="main") + path = fetch_to_hf_path_cmd([file], root_dir=root_dir, revision="main") print("saved to:", path) else: download_url(url=file["url"], filepath=file["path"]) @@ -180,7 +189,9 @@ def download_model_data(generate_version,root_dir, model_only=False): type=str, default="./", ) - parser.add_argument("--model_only", dest="model_only", action="store_true", help="Download model only, not any dataset") + parser.add_argument( + "--model_only", dest="model_only", action="store_true", help="Download model only, not any dataset" + ) args = parser.parse_args() download_model_data(args.version, args.root_dir, args.model_only) diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py index 162a8c1b7..38364f378 100644 --- a/generation/maisi/scripts/inference.py +++ b/generation/maisi/scripts/inference.py @@ -89,7 +89,7 @@ def main(): root_dir = 
tempfile.mkdtemp() if directory is None else directory print(root_dir) - download_model_data(maisi_version,root_dir) + download_model_data(maisi_version, root_dir) # ## Read in environment setting, including data directory, model directory, and output directory # The information for data directory, model directory, and output directory are saved in ./configs/environment.json From 7883209f5e04bfcb257e2e5ae6860bf5eeb50484 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 14:38:01 -0800 Subject: [PATCH 5/9] copyright Signed-off-by: Can-Zhao --- generation/maisi/scripts/download_model_data.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/generation/maisi/scripts/download_model_data.py b/generation/maisi/scripts/download_model_data.py index 2218d8209..2d0f29eab 100644 --- a/generation/maisi/scripts/download_model_data.py +++ b/generation/maisi/scripts/download_model_data.py @@ -1,3 +1,14 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import os, subprocess, shutil import argparse from tqdm.auto import tqdm From ee66f22d6a7d599c6237ea9c218574d875b78324 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 19:20:18 -0800 Subject: [PATCH 6/9] update inference code Signed-off-by: Can-Zhao --- generation/maisi/maisi_inference_tutorial.ipynb | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb index 3a12879a7..6ee9852a7 100644 --- a/generation/maisi/maisi_inference_tutorial.ipynb +++ b/generation/maisi/maisi_inference_tutorial.ipynb @@ -205,11 +205,7 @@ " os.makedirs(directory, exist_ok=True)\n", "root_dir = tempfile.mkdtemp() if directory is None else directory\n", "\n", - "download_model_data(maisi_version, root_dir)\n", - "\n", - "for file in files:\n", - " file[\"path\"] = file[\"path\"] if \"datasets/\" not in file[\"path\"] else os.path.join(root_dir, file[\"path\"])\n", - " download_url(url=file[\"url\"], filepath=file[\"path\"])" + "download_model_data(maisi_version, root_dir)" ] }, { From d26f3ace0f3c5d5e74a033b5523ed1d701e1c79b Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 19:26:56 -0800 Subject: [PATCH 7/9] update code Signed-off-by: Can-Zhao --- generation/maisi/maisi_inference_tutorial.ipynb | 1 - generation/maisi/scripts/inference.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb index 6ee9852a7..19283f2df 100644 --- a/generation/maisi/maisi_inference_tutorial.ipynb +++ b/generation/maisi/maisi_inference_tutorial.ipynb @@ -104,7 +104,6 @@ "\n", "import monai\n", "import torch\n", - "from monai.apps import download_url\n", "from monai.config import print_config\n", "from monai.transforms import LoadImage, Orientation\n", "from monai.utils import set_determinism\n", diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py index 38364f378..1eeef0c39 100644 --- 
a/generation/maisi/scripts/inference.py +++ b/generation/maisi/scripts/inference.py @@ -19,9 +19,6 @@ import monai import torch -from monai.apps import download_url -from monai.config import print_config -from monai.transforms import LoadImage, Orientation from monai.utils import set_determinism from scripts.sample import LDMSampler, check_input From 7307f3cf79f72650a60c47e2a626e0c78eb28d2c Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 19:28:43 -0800 Subject: [PATCH 8/9] readme Signed-off-by: Can-Zhao --- generation/maisi/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generation/maisi/README.md b/generation/maisi/README.md index 7a24790b3..e43597eb3 100644 --- a/generation/maisi/README.md +++ b/generation/maisi/README.md @@ -1,4 +1,4 @@ -# 🚨🚨🚨 THIS FOLDER IS DEPRECATED 🚨🚨🚨 +# 🚨🚨🚨 THIS FOLDER IS DEPRECATED (From Oct 2025) 🚨🚨🚨 # 👉 Please switch to: [https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main](https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main) # Medical AI for Synthetic Imaging (MAISI) From ae57ac3aa877cecbde3e97cbd293d552478411a0 Mon Sep 17 00:00:00 2001 From: Can-Zhao Date: Mon, 15 Dec 2025 19:39:27 -0800 Subject: [PATCH 9/9] readme Signed-off-by: Can-Zhao --- generation/maisi/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generation/maisi/README.md b/generation/maisi/README.md index e43597eb3..659b3c23a 100644 --- a/generation/maisi/README.md +++ b/generation/maisi/README.md @@ -1,4 +1,4 @@ -# 🚨🚨🚨 THIS FOLDER IS DEPRECATED (From Oct 2025) 🚨🚨🚨 +# 🚨🚨🚨 THIS FOLDER IS DEPRECATED (as of Oct 2025) 🚨🚨🚨 # 👉 Please switch to: [https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main](https://github.com/NVIDIA-Medtech/NV-Generate-CTMR/tree/main) # Medical AI for Synthetic Imaging (MAISI)
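For reference, a minimal usage sketch of the scripts/download_model_data.py helper introduced in PATCH 3/9, based only on the function signature and argparse flags added there. The "rflow-ct" version string is one of the values accepted by the script; the "./models_data" root directory and the generation/maisi/ working directory are illustrative assumptions, not part of the patches.

    # Sketch only: exercises the download helper added in PATCH 3/9. Assumes it is run
    # from generation/maisi/ (so `scripts` is importable, as in inference.py), that
    # monai, huggingface_hub and the `huggingface-cli` binary are available, and that
    # the nvidia/NV-Generate-CT Hugging Face repo referenced in the patch is reachable.
    from scripts.download_model_data import download_model_data

    # "rflow-ct" is one of the accepted versions ("ddpm-ct", "rflow-ct", "rflow-mr");
    # "./models_data" is an illustrative root/staging directory; model_only=True would
    # skip the dataset files, mirroring the --model_only CLI flag.
    download_model_data("rflow-ct", "./models_data", model_only=False)

    # Equivalent command-line invocation via the argparse flags defined in the script
    # (append --model_only to download checkpoints without the datasets):
    #   python scripts/download_model_data.py --version rflow-ct --root_dir ./models_data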