2 changes: 1 addition & 1 deletion .devcontainer/Dockerfile
@@ -1,4 +1,4 @@
FROM mcr.microsoft.com/devcontainers/python:3.8-bullseye
FROM mcr.microsoft.com/devcontainers/python:3.9-bullseye

# Copy environment.yml (if found) to a temp location so we update the environment. Also
# copy "noop.txt" so the COPY instruction does not fail if no environment.yml exists.
2 changes: 1 addition & 1 deletion .github/workflows/black.yml
@@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.8]
python-version: [3.9]
steps:
- uses: actions/checkout@v2
- uses: psf/black@stable
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -26,7 +26,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.8'
python-version: 3.9
# This second step is unnecessary but highly recommended because
# It will cache database and saves time re-downloading it if database isn't stale.
- name: Cache pip
4 changes: 2 additions & 2 deletions .github/workflows/mlcube-test.yml
@@ -60,11 +60,11 @@ jobs:
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Set up Python 3.8
- name: Set up Python 3.9
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: 3.9
- name: Install dependencies and package
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
run: |
4 changes: 2 additions & 2 deletions .github/workflows/openfl-test.yml
@@ -61,11 +61,11 @@ jobs:
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Set up Python 3.8
- name: Set up Python 3.9
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: 3.9
- name: Install dependencies and package
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
run: |
2 changes: 1 addition & 1 deletion .github/workflows/publish-nightly.yml
@@ -40,7 +40,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.8'
python-version: 3.9
- name: Check dev version
run: | # Get current canonical version, append current date as an identifier
currentVer=$(python -c "from GANDLF import version; print(version)")
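For reference, the nightly identifier this step builds follows the workflow comment: take the canonical version reported by `GANDLF.version` and append the current date. A rough sketch of that composition (the placeholder version string and date format are assumptions, not the workflow's exact output):

```python
from datetime import date

# Illustrative only: compose a nightly version from the canonical package
# version plus today's date, as the workflow comment describes.
current_ver = "0.0.17.dev0"  # placeholder; the workflow reads GANDLF.version
nightly_ver = f"{current_ver}{date.today().strftime('%Y%m%d')}"
print(nightly_ver)  # e.g. 0.0.17.dev020240601
```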
4 changes: 2 additions & 2 deletions .github/workflows/python-test.yml
@@ -61,11 +61,11 @@ jobs:
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Set up Python 3.8
- name: Set up Python 3.9
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
uses: actions/setup-python@v4
with:
python-version: 3.8
python-version: 3.9
- name: Install dependencies and package
if: steps.changed-files-specific.outputs.only_modified == 'false' # Run on any non-docs change
run: |
20 changes: 11 additions & 9 deletions Dockerfile-CPU
@@ -1,27 +1,29 @@
FROM ubuntu:18.04
FROM ubuntu:20.04
LABEL github="https://github.com/mlcommons/GaNDLF"
LABEL docs="https://mlcommons.github.io/GaNDLF/"
LABEL version=1.0

# Install fresh Python and dependencies for build-from-source
RUN apt-get update && apt-get install -y python3.8 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.8-dev libffi-dev libgl1
RUN python3.8 -m pip install --upgrade pip
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository ppa:deadsnakes/ppa
RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.8-dev libffi-dev libgl1
RUN python3.9 -m pip install --upgrade pip
# EXPLICITLY install cpu versions of torch/torchvision (not all versions have +cpu modes on PyPI...)
RUN python3.8 -m pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu
RUN python3.8 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker
RUN python3.9 -m pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu
RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

# Do some dependency installation separately here to make layer caching more efficient
COPY ./setup.py ./setup.py
RUN python3.8 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.8 -m pip install -r ./requirements.txt
RUN python3.9 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.9 -m pip install -r ./requirements.txt

COPY . /GaNDLF
WORKDIR /GaNDLF
RUN python3.8 -m pip install -e .
RUN python3.9 -m pip install -e .
# Entrypoint forces all commands given via "docker run" to go through python, CMD forces the default entrypoint script argument to be gandlf_run
# If a user calls "docker run gandlf:[tag] gandlf_anonymize", it will resolve to running "python gandlf_anonymize" instead.
# CMD is inherently overridden by args to "docker run", entrypoint is constant.
ENTRYPOINT python3.8
ENTRYPOINT python3.9
CMD gandlf_run

# The below force the container commands to run as a nonroot user with UID > 10000.
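The `python3.9 -c ...` step above (repeated in each Dockerfile) exists purely for layer caching: it imports the `requirements` list from the copied `setup.py` and writes it out as a plain `requirements.txt`, so the dependencies install in their own cached layer before the rest of the source tree is copied in. Expanded into readable form, the one-liner is roughly:

```python
# Expanded, commented equivalent of the Dockerfiles' requirements one-liner.
# Assumes it runs in the build context next to the copied setup.py.
from setup import requirements

with open("requirements.txt", "w") as f:
    f.writelines([req + "\n" for req in requirements])
```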
23 changes: 13 additions & 10 deletions Dockerfile-CUDA11.6
@@ -1,30 +1,33 @@
FROM pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime
FROM nvidia/cuda:11.6.2-devel-ubuntu20.04
LABEL github="https://github.com/mlcommons/GaNDLF"
LABEL docs="https://mlcommons.github.io/GaNDLF/"
LABEL version=1.0

# Install instructions for NVIDIA Container Toolkit allowing you to use the host's GPU: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html
# Note that to do this on a Windows host you need experimental feature "CUDA on WSL" -- not yet stable.
ENV DEBIAN_FRONTEND=noninteractive

# Explicitly install python3.8 (this uses 11.1 for now, as PyTorch LTS 1.8.2 is built against it)
RUN apt-get update && apt-get install -y python3.8 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.8-dev libffi-dev libgl1
RUN python3.8 -m pip install --upgrade pip
RUN python3.8 -m pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
RUN python3.8 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker
# Explicitly install python3.9 (this uses 11.1 for now, as PyTorch LTS 1.8.2 is built against it)
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository ppa:deadsnakes/ppa
RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.8-dev libffi-dev libgl1
RUN python3.9 -m pip install --upgrade pip
RUN python3.9 -m pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
RUN python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker

# Do some dependency installation separately here to make layer caching more efficient
COPY ./setup.py ./setup.py
RUN python3.8 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.8 -m pip install -r ./requirements.txt
RUN python3.9 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.9 -m pip install -r ./requirements.txt

COPY . /GaNDLF
WORKDIR /GaNDLF
RUN python3.8 -m pip install -e .
RUN python3.9 -m pip install -e .

# Entrypoint forces all commands given via "docker run" to go through python, CMD forces the default entrypoint script argument to be gandlf_run
# If a user calls "docker run gandlf:[tag] gandlf_anonymize", it will resolve to running "python gandlf_anonymize" instead.
# CMD is inherently overridden by args to "docker run", entrypoint is constant.
ENTRYPOINT python3.8
ENTRYPOINT python3.9
CMD gandlf_run

# The below force the container commands to run as a nonroot user with UID > 10000.
17 changes: 10 additions & 7 deletions Dockerfile-ROCm
@@ -6,24 +6,27 @@ LABEL version=1.0
# Quick start instructions on using Docker with ROCm: https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md

# The base image contains ROCm, python 3.8 and pytorch already, no need to install those
RUN python3 -m pip install --upgrade pip
RUN python3.8 -m pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/rocm5.2
RUN python3 -m pip install --upgrade pip && python3 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository ppa:deadsnakes/ppa
RUN apt-get update && apt-get install -y python3.9 python3-pip libjpeg8-dev zlib1g-dev python3-dev libpython3.8-dev libffi-dev libgl1
RUN python3.9 -m pip install --upgrade pip
RUN python3.9 -m pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/rocm5.2
RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install openvino-dev==2023.0.1 opencv-python-headless mlcube_docker
RUN apt-get update && apt-get install -y libgl1

# Do some dependency installation separately here to make layer caching more efficient
COPY ./setup.py ./setup.py
RUN python3.8 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.8 -m pip install -r ./requirements.txt
RUN python3.9 -c "from setup import requirements; file = open('requirements.txt', 'w'); file.writelines([req + '\n' for req in requirements]); file.close()" \
&& python3.9 -m pip install -r ./requirements.txt

COPY . /GaNDLF
WORKDIR /GaNDLF
RUN python3 -m pip install -e .
RUN python3.9 -m pip install -e .

# Entrypoint forces all commands given via "docker run" to go through python, CMD forces the default entrypoint script argument to be gandlf_run
# If a user calls "docker run gandlf:[tag] gandlf_anonymize", it will resolve to running "python gandlf_anonymize" instead.
# CMD is inherently overridden by args to "docker run", entrypoint is constant.
ENTRYPOINT python3
ENTRYPOINT python3.9
CMD gandlf_run


4 changes: 2 additions & 2 deletions GANDLF/cli/deploy.py
@@ -251,7 +251,7 @@ def get_metrics_mlcube_config(mlcube_config_file, entrypoint_script):
mlcube_config = yaml.safe_load(f)
if entrypoint_script:
# modify the entrypoint to run a custom script
mlcube_config["tasks"]["evaluate"]["entrypoint"] = "python3.8 /entrypoint.py"
mlcube_config["tasks"]["evaluate"]["entrypoint"] = "python3.9 /entrypoint.py"
mlcube_config["docker"]["build_strategy"] = "auto"
return mlcube_config

@@ -315,7 +315,7 @@ def get_model_mlcube_config(mlcube_config_file, requires_gpu, entrypoint_script)
device = "cuda" if requires_gpu else "cpu"
mlcube_config["tasks"]["infer"][
"entrypoint"
] = f"python3.8 /entrypoint.py --device {device}"
] = f"python3.9 /entrypoint.py --device {device}"

return mlcube_config
# Duplicate training task into one from reset (must be explicit) and one that resumes with new data
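Both helpers updated here follow the same load-modify-return pattern: read the MLCube YAML with `yaml.safe_load`, overwrite the relevant task's `entrypoint` string, and hand the dict back for serialization. A minimal sketch of that pattern (the function name and arguments are illustrative, not GaNDLF's actual API):

```python
import yaml

def override_entrypoint(mlcube_config_file, task, command):
    """Illustrative helper: point one MLCube task at a custom command."""
    with open(mlcube_config_file) as f:
        mlcube_config = yaml.safe_load(f)
    mlcube_config["tasks"][task]["entrypoint"] = command
    mlcube_config["docker"]["build_strategy"] = "auto"
    return mlcube_config

# e.g. override_entrypoint("mlcube.yaml", "evaluate", "python3.9 /entrypoint.py")
```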
4 changes: 2 additions & 2 deletions GANDLF/metrics/segmentation.py
@@ -93,8 +93,8 @@ def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
Returns:
float: The symmetric Hausdorff Distance between the object(s) in ```result``` and the object(s) in ```reference```. The distance unit is the same as for the spacing of elements along each dimension, which is usually given in mm.
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
result = np.atleast_1d(result.astype(bool))
reference = np.atleast_1d(reference.astype(bool))
if voxelspacing is not None:
voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
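This change tracks NumPy's deprecation policy: the `np.bool` alias was deprecated in NumPy 1.20 and removed in 1.24, so under the `numpy==1.25.0` pin introduced elsewhere in this PR the old `astype(np.bool)` call would raise an `AttributeError`. A minimal sketch of the replacement, which behaves the same on every supported NumPy version:

```python
import numpy as np

# The builtin `bool` is the documented replacement for the removed np.bool
# alias; the cast itself is unchanged.
mask = np.array([0, 1, 1, 0])
result = np.atleast_1d(mask.astype(bool))
print(result)        # [False  True  True False]
print(result.dtype)  # bool
```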
@@ -49,7 +49,7 @@ def run_gandlf(output_path, parameters_file):
parameters_file (str): The path to the parameters file
"""
exit_status = os.system(
f"python3.8 gandlf_generateMetrics -c {parameters_file} -i ./data.csv -o {output_path}"
f"python3.9 gandlf_generateMetrics -c {parameters_file} -i ./data.csv -o {output_path}"
)
exit_code = os.WEXITSTATUS(exit_status)
sys.exit(exit_code)
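A note on the pattern these entrypoint scripts share: on POSIX systems `os.system` returns the raw wait status rather than the exit code itself, so `os.WEXITSTATUS` is needed to extract the child's exit code before propagating it with `sys.exit`. A minimal sketch of that propagation, assuming a POSIX host (the wrapped command is a stand-in for the actual gandlf invocation):

```python
import os
import sys

# os.system returns a 16-bit wait status on POSIX; os.WEXITSTATUS extracts
# the child's actual exit code so the wrapper exits with the same code.
exit_status = os.system("python3.9 --version")  # stand-in command
exit_code = os.WEXITSTATUS(exit_status)
sys.exit(exit_code)
```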
@@ -24,7 +24,7 @@ def run_gandlf(output_path, parameters_file):
parameters_file (str): The path to the parameters file
"""
exit_status = os.system(
f"python3.8 gandlf_generateMetrics -c {parameters_file} -i ./data.csv -o {output_path}"
f"python3.9 gandlf_generateMetrics -c {parameters_file} -i ./data.csv -o {output_path}"
)
exit_code = os.WEXITSTATUS(exit_status)
sys.exit(exit_code)
2 changes: 1 addition & 1 deletion mlcube/metrics_mlcube/mlcube.yaml
@@ -42,7 +42,7 @@ singularity:
tasks:
evaluate:
# Runs metrics calculation on predictions
entrypoint: "python3.8 gandlf_generateMetrics"
entrypoint: "python3.9 gandlf_generateMetrics"
parameters:
inputs: {
data_path: data/,
2 changes: 1 addition & 1 deletion mlcube/metrics_mlcube/mlcube_medperf.yaml
@@ -42,7 +42,7 @@ singularity:
tasks:
evaluate:
# Runs metrics calculation on predictions
entrypoint: "python3.8 /entrypoint.py"
entrypoint: "python3.9 /entrypoint.py"
parameters:
inputs: {
predictions: predictions/,
@@ -34,7 +34,7 @@ def run_gandlf(output_path, device):
device (str): device to run on (i.e. CPU or GPU)
"""
exit_status = os.system(
"python3.8 gandlf_run --train False "
"python3.9 gandlf_run --train False "
f"--device {device} --config /embedded_config.yml "
f"--modeldir /embedded_model/ -i ./data.csv -o {output_path}"
)
@@ -25,7 +25,7 @@ def run_gandlf(output_path, device):
parameters_file (str): The path to the parameters file
"""
exit_status = os.system(
"python3.8 gandlf_run --train False "
"python3.9 gandlf_run --train False "
f"--device {device} --config /embedded_config.yml "
f"--modeldir /embedded_model/ -i ./data.csv -o {output_path}"
)
8 changes: 4 additions & 4 deletions mlcube/model_mlcube/mlcube.yaml
@@ -51,7 +51,7 @@ singularity:
tasks:
train:
# Trains a new model, creating a model directory, or resumes training on an existing model.
entrypoint: "python3.8 gandlf_run --train True --device cpu"
entrypoint: "python3.9 gandlf_run --train True --device cpu"
parameters:
inputs: {
# Path to a data csv such as that constructed by the "construct_csv" task.
@@ -67,7 +67,7 @@ tasks:

infer:
# Runs inference on some existing model given new data
entrypoint: "python3.8 gandlf_run --train False --device cpu"
entrypoint: "python3.9 gandlf_run --train False --device cpu"
parameters:
inputs: {
# Path to a data csv such as that constructed by the "construct_csv" task.
@@ -84,7 +84,7 @@ tasks:

construct_csv:
# Constructs a data csv from a data directory that can be passed to future steps, to prevent issues with path translation between host and container.
entrypoint: "python3.8 gandlf_constructCSV --relativizePaths True"
entrypoint: "python3.9 gandlf_constructCSV --relativizePaths True"
parameters:
inputs: {
# Do NOT change the position of the inputDir parameter! It is relevant due to MLCube mounting rules.
@@ -99,7 +99,7 @@ tasks:

recover_config:
# Extracts the config file from the embedded model (if any) in the MLCube.
entrypoint: "python3.8 gandlf_recoverConfig --mlcube internal"
entrypoint: "python3.9 gandlf_recoverConfig --mlcube internal"
parameters:
outputs: {
outputFile: {type: "file", default: "recovered_config.yml"},
8 changes: 4 additions & 4 deletions mlcube/model_mlcube/mlcube_medperf.yaml
@@ -51,7 +51,7 @@ singularity:
tasks:
train:
# Trains a new model, creating a model directory, or resumes training on an existing model.
entrypoint: "python3.8 gandlf_run --train True --device cpu"
entrypoint: "python3.9 gandlf_run --train True --device cpu"
parameters:
inputs: {
# Path to a data csv such as that constructed by the "construct_csv" task.
@@ -67,7 +67,7 @@ tasks:

infer:
# Runs inference on some existing model given new data
entrypoint: "python3.8 gandlf_run --train False --device cpu"
entrypoint: "python3.9 gandlf_run --train False --device cpu"
parameters:
inputs: {
# Path to a data csv such as that constructed by the "construct_csv" task.
@@ -85,7 +85,7 @@ tasks:

construct_csv:
# Constructs a data csv from a data directory that can be passed to future steps, to prevent issues with path translation between host and container.
entrypoint: "python3.8 gandlf_constructCSV --relativizePaths True"
entrypoint: "python3.9 gandlf_constructCSV --relativizePaths True"
parameters:
inputs: {
# Do NOT change the position of the inputDir parameter! It is relevant due to MLCube mounting rules.
@@ -100,7 +100,7 @@ tasks:

recover_config:
# Extracts the config file from the embedded model (if any) in the MLCube.
entrypoint: "python3.8 gandlf_recoverConfig --mlcube internal"
entrypoint: "python3.9 gandlf_recoverConfig --mlcube internal"
parameters:
outputs: {
outputFile: {type: "file", default: "recovered_config.yml"},
5 changes: 2 additions & 3 deletions setup.py
@@ -77,7 +77,7 @@ def run(self):
requirements = [
"torch==1.13.1",
"black",
"numpy==1.22.0",
"numpy==1.25.0",
"scipy",
"SimpleITK!=2.0.*",
"SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536
@@ -119,7 +119,7 @@ def run(self):
version=__version__,
author="MLCommons",
author_email="gandlf@mlcommons.org",
python_requires=">=3.8",
python_requires=">=3.9, <=3.10",
packages=find_packages(
where=os.path.dirname(os.path.abspath(__file__)),
exclude=toplevel_package_excludes,
@@ -149,7 +149,6 @@ def run(self):
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Medical Science Apps.",