diff --git a/.conda/bin/build b/.conda/bin/build
index b40f4f5bd..7bbc59057 100755
--- a/.conda/bin/build
+++ b/.conda/bin/build
@@ -14,11 +14,6 @@ then
     out_dir=conda_build_out_dir/
 fi
 
-export BRAINIAK_HOME=$DIR/../../
-
-# See run-tests.sh
-export MKL_THREADING_LAYER=GNU
-
 # See https://github.com/brainiak/brainiak/issues/377
 export KMP_DUPLICATE_LIB_OK=TRUE
diff --git a/.conda/build.sh b/.conda/build.sh
deleted file mode 100755
index 378ec7baa..000000000
--- a/.conda/build.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-# Install from PyPI because there is no current conda package for the
-# following. Explicitly install dependencies with no conda package as well
-# because otherwise conda-build does not include them in the output package.
-PIP_NO_INDEX=False $PYTHON -m pip install "pymanopt<=0.2.5"
-
-# NOTE: This is the recommended way to install packages
-$PYTHON setup.py install --single-version-externally-managed --record=record.txt
diff --git a/.conda/meta.yaml b/.conda/meta.yaml
index 212234b2f..938704b19 100644
--- a/.conda/meta.yaml
+++ b/.conda/meta.yaml
@@ -1,18 +1,22 @@
-{% set conda_package_nonexistent = (
-    "pymanopt<=0.2.5",
-) %}
-{% set data = load_setup_py_data() %}
-
 package:
   # Repeating name because of the following issue:
   # https://github.com/conda/conda-build/issues/2475
   name: brainiak
-  version: {{ environ.get('GIT_DESCRIBE_TAG', 'v0.1.dev1')[1:] }}
+
+  # Can't find a good way to get this from setuptools_scm. This needs to be defined before running conda-build
+  version: {{ environ.get('BRAINIAK_VERSION') }}
 
 about:
-  home: {{ data.get('url') }}
-  license: {{ data.get('license') }}
-  summary: {{ data.get('description') }}
+  home: http://brainiak.org
+  license: Apache-2.0
+  license_family: Apache
+  license_file: LICENSE
+  summary: |
+    The Brain Imaging Analysis Kit is a package of Python modules
+    useful for neuroscience, primarily focused on functional
+    Magnetic Resonance Imaging (fMRI) analysis. The package was originally
+    created by a collaboration between Intel and the
+    Princeton Neuroscience Institute (PNI).
 
 source:
   path: ../
@@ -20,53 +24,56 @@ source:
 
 build:
   number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }}
   script_env:
-
-    # We need the source directory because conda removes source / working
-    # directories by the time we reach the test phase. We can optionally pass in
-    # the --keep-old-work flag later on to use the $PREFIX/work directory
-    - BRAINIAK_HOME
-    - MKL_THREADING_LAYER
     - KMP_DUPLICATE_LIB_OK
+  script:
+    - PIP_NO_INDEX=False {{ PYTHON }} -m pip install pymanopt
+    - {{ PYTHON }} -m pip install . -v
 
 requirements:
   build:
+    - python
     - {{ compiler('cxx') }}
   host:
     - python
     - pip
-    - mpich
+    - mpich  # [not win]
     - llvm-openmp
-    - setuptools>=42
-    - wheel
+    - scikit-build-core
+    - cmake
+    - setuptools_scm>=8.0
     - pybind11>=2.9.0
     - scipy!=1.0.0
     - cython
-    - numpy<=1.23.1
-    - setuptools_scm
+    - numpy
   run:
     - python
-    - numpy<=1.23.1
-    - mpich
-    - llvm-openmp
-    - tensorflow
-    - tensorflow-probability
-    {% for req in data.get('install_requires', [])
-       if req not in conda_package_nonexistent -%}
-    - {{req}}
-    {% endfor %}
+    - {{ pin_compatible('numpy') }}
+    - mpi4py>=3
+    - nitime
+    - scikit-learn>=0.18
+    - scipy!=1.0.0
+    - statsmodels
+    - psutil
+    - nibabel
+    - joblib
+    - wheel
+    - pydicom
+    - tensorflow  # [not win]
+    - tensorflow-probability  # [not win]
 
 test:
+  source_files:
+    - tests
+    - pyproject.toml
+  imports:
+    - brainiak
   commands:
-    - find $BRAINIAK_HOME/tests | grep pycache | xargs rm -rf
-    - mpiexec -n 2 pytest $BRAINIAK_HOME
-
-    # Known issue: https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959
-    - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
-    - conda inspect linkages -p $PREFIX brainiak  # [not win]
-    - conda inspect objects -p $PREFIX brainiak  # [osx]
+    - pytest
   requires:
     - pytest
     - testbook
     - numdifftools
+    - pytest-reportlog
diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..8c4a573a9
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,10 @@
+[flake8]
+max-complexity = 10
+extend-ignore =
+    # Docstrings
+    D,
+    E721,
+    E231
+
+per-file-ignores =
+    src/brainiak/__init__.py:F401
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
new file mode 100644
index 000000000..39adf53b5
--- /dev/null
+++ b/.github/workflows/cd.yml
@@ -0,0 +1,200 @@
+name: wheels and conda
+
+on:
+  workflow_dispatch:
+  release:
+    types:
+      - published
+  pull_request:
+    paths:
+      - .github/workflows/cd.yml
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  # Many color libraries just need this to be set to any value, but at least
+  # one distinguishes color depth, where "3" -> "256-bit color".
+  FORCE_COLOR: 3
+
+jobs:
+  make_sdist:
+    name: Make SDist
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Build SDist
+        run: pipx run build --sdist
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: cibw-sdist
+          path: dist/*.tar.gz
+
+  build_wheels:
+    name: Wheel on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-13, macos-14]
+      fail-fast: true
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      # Use MS-MPI on Windows
+      - uses: mpi4py/setup-mpi@v1
+        if: ${{ contains(matrix.os, 'windows') }}
+        with:
+          mpi: msmpi
+
+      # Else, use the default for the OS and the setup-mpi action
+      - uses: mpi4py/setup-mpi@v1
+        if: ${{ !contains(matrix.os, 'windows') }}
+
+      - name: Checkout LLVM on macOS
+        if: runner.os == 'macOS'
+        uses: actions/checkout@v4
+        with:
+          repository: llvm/llvm-project
+          ref: release/18.x
+          path: llvm-project
+
+      - name: Build OpenMP on macOS
+        if: runner.os == 'macOS'
+        env:
+          MACOSX_DEPLOYMENT_TARGET: "10.9"
+        working-directory: llvm-project
+        run: |
+          cmake \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DCMAKE_INSTALL_PREFIX=$(brew --prefix) \
+            -DCMAKE_INSTALL_NAME_DIR=$(brew --prefix)/lib \
+            -DCMAKE_C_COMPILER=clang \
+            -DCMAKE_CXX_COMPILER=clang++ \
+            -DLIBOMP_INSTALL_ALIASES=OFF \
+            -S openmp \
+            -B build
+          cmake --build build --parallel
+          cmake --install build
+
+      - uses: actions/setup-python@v5
+
+      - name: Install cibuildwheel
+        run: python -m pip install cibuildwheel
+
+      - name: Build wheels
+        run: python -m cibuildwheel --output-dir wheelhouse
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
+          path: ./wheelhouse/*.whl
+
+  build_conda:
+    name: Conda on ${{ matrix.os }} with Python ${{ matrix.python-version }}
+    runs-on: ${{ matrix.os }}
+    defaults:
+      run:
+        shell: bash -leo pipefail {0}
+    strategy:
+      matrix:
+        os: [ ubuntu-latest, macos-13, macos-latest ]
+        python-version: [ '3.9', '3.10', '3.11', '3.12' ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      # Can't figure out a way to get the package version from setuptools_scm
+      # inside the conda build. We need to install setuptools_scm, call it as
+      # a module, and store the version in an environment variable.
+      - name: Run setuptools_scm to get the package version and store it in environment variable BRAINIAK_VERSION (Linux/Mac)
+        if: ${{ !contains(matrix.os, 'windows') }}
+        run: |
+          python -m pip install setuptools_scm
+          export BRAINIAK_VERSION=$(python -m setuptools_scm)
+          echo "BRAINIAK_VERSION=${BRAINIAK_VERSION}" >> "$GITHUB_ENV"
+
+      - name: Run setuptools_scm to get the package version and store it in environment variable BRAINIAK_VERSION (Windows)
+        if: ${{ contains(matrix.os, 'windows') }}
+        run: |
+          python -m pip install setuptools_scm
+          export BRAINIAK_VERSION=$(python -m setuptools_scm)
+          echo "BRAINIAK_VERSION=${BRAINIAK_VERSION}" >> "$GITHUB_ENV"
+
+      - name: Setup micromamba and boa
+        uses: mamba-org/setup-micromamba@v1
+        with:
+          environment-name: test-env
+          create-args: >-
+            python=${{ matrix.python-version }}
+            conda-forge::conda-build
+            boa
+          init-shell: >-
+            bash
+            powershell
+
+      - name: Build and test package
+        id: build-package
+        run: |
+          conda config --add channels conda-forge
+          conda config --set channel_priority strict
+          conda mambabuild --output-folder=conda-package .conda/
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: conda-package-${{ matrix.os }}-${{ matrix.python-version }}
+          path: conda-package
+
+  publish_pypi:
+    name: Publish to PyPI
+    needs: [ build_wheels, build_conda, make_sdist ]
+    environment:
+      name: pypi
+      url: https://pypi.org/p/brainiak
+    permissions:
+      id-token: write
+    runs-on: ubuntu-latest
+    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/v')
+    steps:
+      - uses: actions/download-artifact@v4
+        with:
+          pattern: cibw-*
+          path: dist
+          merge-multiple: true
+
+      - name: List files in artifact(s)
+        run: ls -l dist
+
+      - uses: pypa/gh-action-pypi-publish@release/v1
+
+  publish_conda:
+    name: Publish to Anaconda
+    needs: [ build_conda, build_wheels, make_sdist ]
+    environment: anaconda
+    permissions:
+      id-token: write
+    runs-on: ubuntu-latest
+    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/v')
+
+    steps:
+      - uses: actions/download-artifact@v4
+        with:
+          pattern: conda-package-*
+          path: conda-packages
+          merge-multiple: false
+
+      - name: List files in artifact
+        run: find conda-packages -type f -name "*.tar.bz2"
+
+      - name: Upload to Anaconda
+        run: |
+          export ANACONDA_API_TOKEN=${{ secrets.ANACONDA_TOKEN }}
+          find conda-packages -type f -name "*.tar.bz2" -exec echo "anaconda upload {}" \;
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..08f71004b
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,76 @@
+name: CI
+
+on:
+  pull_request:
+  push:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  # Many color libraries just need this to be set to any value, but at least
+  # one distinguishes color depth, where "3" -> "256-bit color".
+  FORCE_COLOR: 3
+
+jobs:
+  checks:
+    name: Check Python ${{ matrix.python-version }} on ${{ matrix.os }}
+    env:
+      IGNORE_CONDA: true
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ ubuntu-20.04, macos-latest, windows-latest ]
+        python-version: [ "3.9", "3.10", "3.11", "3.12" ]
+      fail-fast: true
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      # Use MS-MPI on Windows
+      - uses: mpi4py/setup-mpi@v1
+        if: ${{ contains(matrix.os, 'windows') }}
+        with:
+          mpi: msmpi
+
+      # Else, use the default for the OS and the setup-mpi action
+      - uses: mpi4py/setup-mpi@v1
+        if: ${{ !contains(matrix.os, 'windows') }}
+
+      # Set up OpenMP on macOS
+      - name: Install OpenMP if on macOS
+        if: ${{ contains(matrix.os, 'macos') }}
+        run: |
+          brew install libomp llvm
+
+      - name: Upgrade pip
+        run: |
+          python3 -m pip install -U pip
+
+      - name: Run tests and other checks
+        if: runner.os == 'Linux'
+        run: |
+          ./pr-check.sh
+
+      - name: Run tests and other checks
+        if: runner.os == 'macOS'
+        run: |
+          export CLANG_PREFIX=$(brew --prefix llvm)
+          export CC=$CLANG_PREFIX/bin/clang
+          export CXX=$CLANG_PREFIX/bin/clang++
+          ./pr-check.sh
+
+      # On Windows, just install the package and run the tests for now; we
+      # still need to port the pr-check script to Windows.
+      - name: Run tests and other checks
+        if: runner.os == 'Windows'
+        run: |
+          python -m pip install .[all] -v
+          python -m pytest -v
+
+      # - uses: codecov/codecov-action@v1
diff --git a/.github/workflows/della_notebooks.yml b/.github/workflows/della_notebooks.yml
index 50953aaab..5002b400d 100644
--- a/.github/workflows/della_notebooks.yml
+++ b/.github/workflows/della_notebooks.yml
@@ -14,7 +14,7 @@ jobs:
   notebook_tests:
     runs-on: self-hosted
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - run: |
           chmod a+x pr-check.sh
           ./pr-check.sh
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
deleted file mode 100644
index 810aa58c8..000000000
--- a/.github/workflows/main.yml
+++ /dev/null
@@ -1,118 +0,0 @@
-on:
-  pull_request:
-  push:
-
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  pypi:
-    env:
-      IGNORE_CONDA: true
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-20.04, macos-latest]
-        python-version: ["3.8", "3.9", "3.10"]
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-      - uses: mpi4py/setup-mpi@v1
-      - run: |
-          python3 -m pip install -U pip
-      - if: ${{ contains(matrix.os, 'ubuntu') }}
-        run: |
-          ./pr-check.sh
-      - if: ${{ contains(matrix.os, 'macos') }}
-        run: |
-          export CLANG_PREFIX=$(brew --prefix llvm@15)
-          export CC=$CLANG_PREFIX/bin/clang
-          export CXX=$CLANG_PREFIX/bin/clang++
-          export LDFLAGS="-L$CLANG_PREFIX/lib
-            -Wl,-rpath,$CLANG_PREFIX/lib $LDFLAGS -L/usr/local/opt/libomp/lib"
-          export CPPFLAGS="-I$CLANG_PREFIX/include $CPPFLAGS -I/usr/local/opt/libomp/include"
-          ./pr-check.sh
-      - uses: codecov/codecov-action@v1
-  conda:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest]
-        python-version: ['3.8', '3.9', '3.10']
-    steps:
-      - uses: actions/checkout@v2
-      - name: Setup micromamba and boa
-        uses: mamba-org/setup-micromamba@v1
-        with:
-          environment-name: test-env
-          create-args: >-
-            python=${{ matrix.python-version }}
-            boa
-      - name: Build and test package
-        id: build-package
-        run: |
-          export CONDA_HOME=$CONDA
-          conda install conda-build
-          out_dir="${{ matrix.os }}-build"
-          .conda/bin/build ${{ matrix.python-version }} $out_dir
-          echo "PACKAGE_PATH=${out_dir}" >> "$GITHUB_OUTPUT"
-
-      - uses: actions/upload-artifact@v3
-        with:
-          path: ${{ steps.build-package.outputs.PACKAGE_PATH }}
-
-  publish_conda:
-    needs: [conda, pypi]
-    name: Publish to Anaconda
-    environment: anaconda
-    permissions:
-      id-token: write
-    runs-on: ubuntu-latest
-    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/v')
-
-    steps:
-      - uses: actions/download-artifact@v3
-        with:
-          name: artifact
-          path: ${{ steps.build-package.outputs.PACKAGE_PATH }}
-
-      - name: List files in artifact
-        run: find ${{ steps.build-package.outputs.PACKAGE_PATH }} -type f -name "*.tar.bz2"
-
-      - name: Upload to Anaconda
-        run: |
-          export ANACONDA_API_TOKEN=${{ secrets.ANACONDA_TOKEN }}
-          find ${{ steps.build-package.outputs.PACKAGE_PATH }} -type f -name "*.tar.bz2" -exec echo "anaconda upload {}" \;
-
-  dist:
-    name: Distribution build
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - uses: hynek/build-and-inspect-python-package@v1
-
-  publish_pypi:
-    name: Publish to PyPI
-    needs: [dist, pypi, conda]
-    environment:
-      name: pypi
-      url: https://pypi.org/p/brainiak
-    permissions:
-      id-token: write
-    runs-on: ubuntu-latest
-    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/v')
-    steps:
-      - uses: actions/download-artifact@v3
-        with:
-          name: Packages
-          path: dist
-
-      - uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.gitignore b/.gitignore
index 6a62af127..ccb79555e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+src/brainiak/_version.py
+
+venv/
+
 *.iml
 *.eggs
 *.mat
@@ -11,10 +15,14 @@
 *.nii
 *.nii.gz
 *.png
-*.txt
 *.zip
 *.pkl
 
+temp.txt
+docs/examples/fmrisim/Condition_A.txt
+docs/examples/fmrisim/Condition_B.txt
+
+
 data
 tutorials
@@ -35,8 +43,8 @@ __pycache__
 
 .DS_Store
 __MACOSX
-brainiak/fcma/cython_blas.c
-brainiak/eventseg/_utils.c
+src/brainiak/fcma/cython_blas.c
+src/brainiak/eventseg/_utils.c
 
 examples/fcma/face_scene/
@@ -44,3 +52,5 @@ docs/examples/fmrisim/Corr_MVPA_archive.tar.gz
 docs/examples/iem/RademakerEtAl2019_WM_S05_avgTime.npz
 docs/examples/isc/brainiak-aperture-isc-data.tgz
 docs/examples/srm/brainiak-aperture-srm-data.tgz
+
+wheelhouse
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 000000000..3b7477554
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 3.15...3.26)
+
+project(${SKBUILD_PROJECT_NAME} VERSION ${SKBUILD_PROJECT_VERSION} LANGUAGES C CXX)
+
+find_package(
+  Python
+  COMPONENTS Interpreter Development.Module NumPy
+  REQUIRED)
+
+set(PYBIND11_NEWPYTHON ON)
+find_package(pybind11 CONFIG REQUIRED)
+
+add_subdirectory(src/brainiak/fcma)
+add_subdirectory(src/brainiak/factoranalysis)
+add_subdirectory(src/brainiak/eventseg)
\ No newline at end of file
diff --git a/docs/Makefile b/docs/Makefile
index ffc44f531..e97c452dc 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -196,4 +196,4 @@ pseudoxml:
 	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
 
 apidoc:
-	sphinx-apidoc -f -M -o . ../brainiak ../brainiak/fcma/cython_blas.*
+	sphinx-apidoc -f -M -o . ../src/brainiak ../src/brainiak/fcma/cython_blas.*
diff --git a/docs/conf.py b/docs/conf.py
index d85c79376..ca1b7a079 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -16,8 +16,7 @@
 import sys
 import os
 import shlex
-
-from pkg_resources import get_distribution
+import brainiak
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -66,7 +65,8 @@
 copyright = '2016, Princeton Neuroscience Institute and Intel Corporation'
 author = 'Princeton Neuroscience Institute and Intel Corporation'
 
-version = get_distribution(project).version
+# Get the package version
+version = brainiak.__version__
 release = version
diff --git a/docs/examples/fcma/FCMA_script/fcma_voxel_selection_cv.py b/docs/examples/fcma/FCMA_script/fcma_voxel_selection_cv.py
index 53c2215e1..edeb2b5eb 100644
--- a/docs/examples/fcma/FCMA_script/fcma_voxel_selection_cv.py
+++ b/docs/examples/fcma/FCMA_script/fcma_voxel_selection_cv.py
@@ -89,7 +89,7 @@
 
     # Load in the mask with nibabel
     mask_img = nib.load(mask_file)
-    mask = mask_img.get_fdata().astype(np.bool)
+    mask = mask_img.get_fdata().astype(bool)
 
     # Preset the volumes
     score_volume = np.zeros(mask.shape, dtype=np.float32)
diff --git a/docs/examples/htfa/htfa.ipynb b/docs/examples/htfa/htfa.ipynb
index d5f0bfce6..6397836da 100644
--- a/docs/examples/htfa/htfa.ipynb
+++ b/docs/examples/htfa/htfa.ipynb
@@ -1679,7 +1679,6 @@
     "import pandas as pd\n",
     "import nibabel as nb\n",
     "import nilearn as nl\n",
-    "import nltools as nlt\n",
     "import timecorr as tc\n",
     "import seaborn as sns\n",
     "\n",
diff --git a/docs/examples/requirements-examples.txt b/docs/examples/requirements-examples.txt
index 403764cb2..6001164e5 100644
--- a/docs/examples/requirements-examples.txt
+++ b/docs/examples/requirements-examples.txt
@@ -2,7 +2,6 @@ testbook
 brainiak
 nilearn
 nxviz<=0.6.3
-nltools
 timecorr
 seaborn
 holoviews
diff --git a/examples/fcma/voxel_selection.py b/examples/fcma/voxel_selection.py
index eca68ead7..8c33871cc 100644
--- a/examples/fcma/voxel_selection.py
+++ b/examples/fcma/voxel_selection.py
@@ -86,7 +86,7 @@
             )
     #print(results[0:100])
     mask_img = nib.load(mask_file)
-    mask = mask_img.get_fdata().astype(np.bool)
+    mask = mask_img.get_fdata().astype(bool)
     score_volume = np.zeros(mask.shape, dtype=np.float32)
     score = np.zeros(len(results), dtype=np.float32)
     seq_volume = np.zeros(mask.shape, dtype=int)
diff --git a/examples/funcalign/searchlight_srm_example.py b/examples/funcalign/searchlight_srm_example.py
index 10f004204..a62df8fa5 100644
--- a/examples/funcalign/searchlight_srm_example.py
+++ b/examples/funcalign/searchlight_srm_example.py
@@ -93,7 +93,7 @@
 # Generate mask: mask is a 3D binary array, with active voxels being 1. I simply set
 # all voxels to be active in this example, but you should set the mask to fit your ROI
 # in practice.
-mask = np.ones((dim1,dim2,dim3), dtype=np.bool)
+mask = np.ones((dim1,dim2,dim3), dtype=bool)
 
 # Create searchlight object
 sl = Searchlight(sl_rad=sl_rad)
diff --git a/examples/funcalign/sssrm_image_prediction_example.py b/examples/funcalign/sssrm_image_prediction_example.py
index d5e99db84..9890e292f 100644
--- a/examples/funcalign/sssrm_image_prediction_example.py
+++ b/examples/funcalign/sssrm_image_prediction_example.py
@@ -15,14 +15,9 @@
 from scipy.stats import stats
 import numpy as np
 
-# Define the Theano flags to use cpu and float64 before theano is imported in brainiak
-import os
-os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
-
 import brainiak.funcalign.sssrm
 
-
 # Load the input data that contains the movie stimuli for unsupervised training with SS-SRM
 movie_file = scipy.io.loadmat('data/movie_data.mat')
 movie_data_left = movie_file['movie_data_lh']
diff --git a/examples/searchlight/example_searchlight.py b/examples/searchlight/example_searchlight.py
index c10e8ac4f..e2e754644 100644
--- a/examples/searchlight/example_searchlight.py
+++ b/examples/searchlight/example_searchlight.py
@@ -39,7 +39,7 @@
 
 # Generate data
 data = np.random.random((dim,dim,dim,ntr)) if rank == 0 else None
-mask = np.zeros((dim,dim,dim), dtype=np.bool)
+mask = np.zeros((dim,dim,dim), dtype=bool)
 for i in range(dim):
     for j in range(dim):
         for k in range(dim):
diff --git a/pr-check.sh b/pr-check.sh
index 64a767a1d..308727137 100755
--- a/pr-check.sh
+++ b/pr-check.sh
@@ -46,7 +46,7 @@ if [[ "$is_della" == true ]]; then
 fi
 
 
-if [ ! -f brainiak/__init__.py ]
+if [ ! -f src/brainiak/__init__.py ]
 then
     echo "Run "$(basename "$0")" from the root of the BrainIAK hierarchy."
     exit 1
@@ -179,7 +179,9 @@ fi
 
 # install brainiak in editable mode (required for testing)
 # Install with all dependencies (testing, documentation, examples, etc.)
-python3 -m pip install $ignore_installed -U -e .[all] || \
+python3 -m pip install $ignore_installed -U \
+    -v --config-settings=cmake.verbose=true --config-settings=logging.level=INFO \
+    -e .[all] || \
     exit_with_error_and_venv "Failed to install BrainIAK."
 
@@ -206,7 +208,6 @@ if [[ "$is_della" == true ]]; then
     echo "Skipping docs build on della"
 else
     cd docs
-    export THEANO_FLAGS='device=cpu,floatX=float64,blas.ldflags=-lblas'
     if [ ! -z $SLURM_NODELIST ]
     then
diff --git a/pyproject.toml b/pyproject.toml
index a16e7fa6b..86cc16b75 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,182 @@
+[build-system]
+requires = [
+    "scikit-build-core",
+    "setuptools_scm>=8.0",
+    "pybind11>=2.9.0",
+    "scipy!=1.0.0",
+    "cython",
+    "numpy>=2.0.0rc1",
+]
+build-backend = "scikit_build_core.build"
+
+[project]
+name = "brainiak"
+description = "Brain Imaging Analysis Kit"
+requires-python = ">=3.9"
+readme = "README.rst"
+license = { file = "LICENSE" }
+authors = [
+    { name = "Princeton Neuroscience Institute and Intel Corporation", email = "mihai.capota@intel.com" }
+]
+maintainers = [
+    { name = "Mihai Capota", email = "mihai.capota@intel.com" }
+]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Information Technology",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: MacOS",
+    "Operating System :: Microsoft :: Windows",
+    "Operating System :: POSIX",
+    "Operating System :: Unix",
+    "Programming Language :: C++",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3 :: Only",
+    "Programming Language :: Python :: 3.5",
+    "Programming Language :: Python :: 3.6",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: Implementation :: CPython",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Information Analysis",
+    "Topic :: Scientific/Engineering :: Mathematics",
+    "Topic :: Scientific/Engineering :: Medical Science Apps.",
+    "Topic :: Scientific/Engineering :: Bio-Informatics",
+    "Topic :: Software Development",
+    "Topic :: Utilities",
+    "Typing :: Typed"
+]
+keywords = ["neuroscience", "algorithm", "fMRI", "distributed", "scalable"]
+
+dynamic = ["version"]
+
+dependencies = [
+    "numpy>=1.23.5",
+    "mpi4py>=3",  # https://travis-ci.org/brainiak/brainiak/jobs/545838666
+    "nitime",
+    "scikit-learn>=0.18",
+    "scipy!=1.0.0",
+    "statsmodels",
+    "pymanopt",
+    "psutil",
+    "nibabel",
+    "joblib",
+    "wheel",  # See https://github.com/astropy/astropy-helpers/issues/501
+    "pydicom",
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest",
+    "coverage",
+    "flake8",
+    "flake8-print",
+    "mypy",
+    "myst-nb",
+    "restructuredtext-lint",
+    "setuptools_scm",
+    "sphinx",
+    "sphinx_rtd_theme",
+    "towncrier",
+    "numdifftools",
+    "testbook",
+    "pytest-reportlog",
+]
+
+matnormal = [
+    'tensorflow>=2.16',
+    'tensorflow_probability[tf]<=0.24.0',  # Issues with TF 2.18 on Intel Macs (required by TFP 0.25)
+]
+
+examples = [
+    "nilearn",
+    "nxviz<=0.6.3",
+    "timecorr",
+    "seaborn",
+    "holoviews",
+    "pyOpenSSL",
+    "awscli",
+    "bcrypt",
+    "indexed_gzip",
+    "inflect",
+    "ipython",
+    "jupyter",
+    "mypy",
+    "nibabel",
+    "nilearn",
+    "nodejs",
+    "numpy",
+    "pydicom",
+    "requests",
+    "rpyc",
+    "scikit-learn",
+    "scipy>=1.6.0",
+    "toml",
+    "tornado",
+    "websocket-client",
+    "wsaccel",
+    "inotify",
+    "pybids",
+    "watchdog"
+]
+
+all = ['brainiak[dev,matnormal,examples]']
+
+[project.urls]
+Homepage = "http://brainiak.org"
+Documentation = "https://brainiak.org/docs/"
+"Bug Tracker" = "https://github.com/brainiak/brainiak/issues"
+Changelog = "https://brainiak.org/docs/release_notes.html"
+Chat = "https://gitter.im/brainiak/brainiak"
+
+[tool.setuptools_scm]
+write_to = "src/brainiak/_version.py"
+
+[tool.scikit-build]
+minimum-version = "0.4"
+build-dir = "build/{wheel_tag}"
+metadata.version.provider = "scikit_build_core.metadata.setuptools_scm"
+sdist.include = ["src/brainiak/_version.py", "src/brainiak/utils/sim_parameters"]
+cmake.build-type = "Release"
+
+[tool.cibuildwheel]
+test-extras = ["matnormal", "dev"]
+test-command = "python -m pytest {project}/tests"
+archs = ["auto64"]
+skip = [
+    "pp*",
+    "cp313*", "cp314*",  # Tensorflow wheels are not available for Python 3.13 and 3.14
+    "*musllinux*",  # Tensorflow wheels are not available for musllinux
+]
+
+[tool.cibuildwheel.linux]
+before-all = [
+    "yum --disablerepo=epel install -y mpich mpich-devel",
+]
+
+[tool.cibuildwheel.linux.environment]
+PATH = "/usr/lib64/mpich/bin:$PATH"
+
+[tool.coverage.run]
+source = ["brainiak"]
+branch = true
+concurrency = ["multiprocessing"]
+parallel = true
+
+[tool.coverage.report]
+fail_under = 90
+
+[tool.pytest.ini_options]
+addopts = "-s --durations=0"
+markers = [
+    "notebook: marks example notebook tests",
+]
+
 [tool.towncrier]
 directory = "docs/newsfragments"
 package = "brainiak"
@@ -23,20 +202,3 @@ title_format = "BrainIAK {version} ({project_date})"
 directory = "removal"
 name = "Deprecations and removals"
 showcontent = true
-
-[tool.pytest.ini_options]
-markers = [
-    "notebook: marks example notebook tests",
-]
-
-[build-system]
-requires = [
-    "setuptools>=42",
-    "wheel",
-    "pybind11>=2.9.0",
-    "scipy!=1.0.0",
-    "cython",
-    "oldest-supported-numpy",
-    "setuptools_scm",
-]
-build-backend = "setuptools.build_meta"
diff --git a/run-checks.sh b/run-checks.sh
index 6df6cfe9f..dceabaadd 100755
--- a/run-checks.sh
+++ b/run-checks.sh
@@ -17,9 +17,9 @@
 set -e
 set -o pipefail
 
-flake8 --config setup.cfg brainiak
+flake8 --config .flake8 src/brainiak
 flake8 --config tests/.flake8 tests
-mypy --ignore-missing-imports brainiak tests/[!_]*
+mypy --ignore-missing-imports src/brainiak tests/[!_]*
 rst-lint ./*.rst | { grep -v "is clean.$" || true; }
 towncrier --version=100 --draft > /dev/null 2>&1 \
     || echo "Error assembling news fragments using towncrier."
diff --git a/run-tests.sh b/run-tests.sh
index e0ea42fb8..15946fe1e 100755
--- a/run-tests.sh
+++ b/run-tests.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Copyright 2016 Intel Corporation
 #
@@ -28,24 +28,14 @@ python3 -m pip freeze | grep -qi /brainiak \
         exit 1
     }
 
-# Define MKL env variable that is required by Theano to run with MKL 2018
-# If removing, also remove from .conda/*
-export MKL_THREADING_LAYER=GNU
-
-mpi_command=$BRAINIAKDEV_MPI_COMMAND
-if [ -z $mpi_command ]
-then
-    mpi_command=mpiexec
-fi
-echo "Using mpi command: ${mpi_command}"
-$mpi_command -n 2 coverage run -m mpi4py -m pytest
+coverage run -m pytest
 
 # Check whether we are running on Princeton's della compute cluster.
 # If so, run the notebook tests separately
 if [[ $(hostname -s) == della* ]]; then
     echo "Running non-MPI notebook tests on della"
-    $mpi_command -n 1 pytest -s --durations=0 tests/test_notebooks.py --enable_notebook_tests
+    pytest -s --durations=0 tests/test_notebooks.py --enable_notebook_tests
 fi
 
 # Coverage produces empty files which trigger warnings on combine
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 15c732f50..000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,103 +0,0 @@
-[metadata]
-name = brainiak
-description = Brain Imaging Analysis Kit
-long_description = file: README.rst
-long_description_content_type = text/x-rst
-url = http://brainiak.org
-author = Princeton Neuroscience Institute and Intel Corporation
-author_email = mihai.capota@intel.com
-maintainer = Mihai Capota
-maintainer_email = mihai.capota@intel.com
-license = Apache 2
-license_files = LICENSE
-classifiers =
-    Development Status :: 5 - Production/Stable
-    Intended Audience :: Developers
-    Intended Audience :: Information Technology
-    Intended Audience :: Science/Research
-    License :: OSI Approved :: Apache Software License
-    Operating System :: MacOS
-    Operating System :: Microsoft :: Windows
-    Operating System :: POSIX
-    Operating System :: Unix
-    Programming Language :: C++
-    Programming Language :: Python
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3 :: Only
-    Programming Language :: Python :: 3.5
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
-    Programming Language :: Python :: 3.8
-    Programming Language :: Python :: 3.9
-    Programming Language :: Python :: 3.10
-    Programming Language :: Python :: Implementation :: CPython
-    Topic :: Scientific/Engineering
-    Topic :: Scientific/Engineering :: Information Analysis
-    Topic :: Scientific/Engineering :: Mathematics
-    Topic :: Scientific/Engineering :: Medical Science Apps.
-    Topic :: Scientific/Engineering :: Bio-Informatics
-    Topic :: Software Development
-    Topic :: Utilities
-    Typing :: Typed
-keywords =
-    neuroscience
-    algorithm
-    fMRI
-    distributed
-    scalable
-project_urls =
-    Documentation = https://brainiak.org/docs/
-    Bug Tracker = https://github.com/brainiak/brainiak/issues
-    Changelog = https://brainiak.org/docs/release_notes.html
-    Chat = https://gitter.im/brainiak/brainiak
-
-[options]
-packages = find:
-install_requires =
-    numpy<=1.23.1
-
-    # https://travis-ci.org/brainiak/brainiak/jobs/545838666
-    mpi4py>=3
-    nitime
-    scikit-learn>=0.18
-
-    # See https://github.com/scipy/scipy/pull/8082
-    scipy!=1.0.0
-    statsmodels
-    pymanopt<=0.2.5
-    theano>=1.0.4  # See https://github.com/Theano/Theano/pull/6671
-    psutil
-    nibabel
-    joblib
-    wheel  # See https://github.com/astropy/astropy-helpers/issues/501
-    pydicom
-
-python_requires = >=3.5
-include_package_data = True
-zip_safe = False
-use_scm_version=True
-
-[options.packages.find]
-where = .
-
-[flake8]
-max-complexity = 10
-extend-ignore =
-    # Docstrings
-    D,
-    E721,
-    E231
-
-[coverage:run]
-source = brainiak
-branch = True
-concurrency = multiprocessing
-parallel = True
-
-[coverage:report]
-fail_under = 90
-
-[tool:pytest]
-addopts =
-    -s
-    --durations=0
diff --git a/setup.py b/setup.py
deleted file mode 100644
index b8dd77ad2..000000000
--- a/setup.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from distutils import sysconfig
-
-from setuptools import setup, Extension, find_packages
-from setuptools.command.build_ext import build_ext
-import os
-import site
-import sys
-import setuptools
-from copy import deepcopy
-
-assert sys.version_info >= (3, 5), (
-    "Please use Python version 3.5 or higher, "
-    "lower versions are not supported"
-)
-
-# https://github.com/pypa/pip/issues/7953#issuecomment-645133255
-site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
-
-ext_modules = [
-    Extension(
-        'brainiak.factoranalysis.tfa_extension',
-        ['brainiak/factoranalysis/tfa_extension.cpp'],
-    ),
-    Extension(
-        'brainiak.fcma.fcma_extension',
-        ['brainiak/fcma/src/fcma_extension.cc'],
-    ),
-    Extension(
-        'brainiak.fcma.cython_blas',
-        ['brainiak/fcma/cython_blas.pyx'],
-    ),
-    Extension(
-        'brainiak.eventseg._utils',
-        ['brainiak/eventseg/_utils.pyx'],
-    ),
-]
-
-
-class BuildExt(build_ext):
-    """A custom build extension for adding compiler-specific options."""
-    c_opts = {
-        'unix': ['-g0', '-fopenmp'],
-    }
-
-    # FIXME Workaround for using the Intel compiler by setting the CC env var
-    # Other uses of ICC (e.g., cc binary linked to icc) are not supported
-    if (('CC' in os.environ and 'icc' in os.environ['CC'])
-            or (sysconfig.get_config_var('CC')
-                and 'icc' in sysconfig.get_config_var('CC'))):
-        c_opts['unix'] += ['-lirc', '-lintlc']
-
-    if sys.platform == 'darwin':
-        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.9',
-                           '-ftemplate-depth-1024']
-
-    def build_extensions(self):
-        ct = self.compiler.compiler_type
-        opts = self.c_opts.get(ct, [])
-        if ct == 'unix':
-            opts.append('-DVERSION_INFO="%s"' %
-                        self.distribution.get_version())
-        for ext in self.extensions:
-            ext.extra_compile_args = deepcopy(opts)
-            ext.extra_link_args = deepcopy(opts)
-            lang = ext.language or self.compiler.detect_language(ext.sources)
-            if lang == 'c++':
-                ext.extra_compile_args.append("-std=c++11")
-                ext.extra_link_args.append("-std=c++11")
-        build_ext.build_extensions(self)
-
-    def finalize_options(self):
-        super().finalize_options()
-        import numpy
-        import pybind11
-        self.include_dirs.extend([
-            numpy.get_include(),
-            pybind11.get_include(user=True),
-            pybind11.get_include(),
-        ])
-
-
-extras = {
-    "dev": [
-        "pytest",
-        "coverage",
-        "flake8",
-        "flake8-print",
-        "mypy",
-        "myst-nb",
-        "restructuredtext-lint",
-        "setuptools_scm",
-        "sphinx",
-        "sphinx_rtd_theme",
-        "towncrier",
-        "numdifftools",
-        "testbook"
-    ],
-
-    'matnormal': [
-        'tensorflow<=2.12.0',
-        'tensorflow_probability<=0.15.0',
-    ],
-
-    # All requirements for notebook examples in docs/examples
-    "examples": [
-        "nilearn",
-        "nxviz<=0.6.3",
-        "nltools",
-        "timecorr",
-        "seaborn",
-        "holoviews",
-        "pyOpenSSL",
-        "awscli",
-        "bcrypt",
-        "indexed_gzip",
-        "inflect",
-        "ipython",
-        "jupyter",
-        "mypy",
-        "nibabel",
-        "nilearn",
-        "nodejs",
-        "numpy",
-        "pydicom",
-        "requests",
-        "rpyc",
-        "scikit-learn",
-        "scipy",
-        "toml",
-        "tornado",
-        "websocket-client",
-        "wsaccel",
-        "inotify",
-        "pybids",
-        "watchdog"
-    ],
-}
-extras["all"] = sum(extras.values(), [])
-
-
-setup(
-    extras_require=extras,
-    ext_modules=ext_modules,
-    cmdclass={'build_ext': BuildExt},
-)
diff --git a/brainiak/__init__.py b/src/brainiak/__init__.py
similarity index 94%
rename from brainiak/__init__.py
rename to src/brainiak/__init__.py
index 93278b43f..3222a6ae5 100644
--- a/brainiak/__init__.py
+++ b/src/brainiak/__init__.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 """Brain Imaging Analysis Kit."""
+from ._version import version as __version__
+
 import sys
 
 if sys.version_info < (3, 5):
diff --git a/src/brainiak/eventseg/CMakeLists.txt b/src/brainiak/eventseg/CMakeLists.txt
new file mode 100644
index 000000000..85f0ba1f6
--- /dev/null
+++ b/src/brainiak/eventseg/CMakeLists.txt
@@ -0,0 +1,13 @@
+add_custom_command(
+  OUTPUT _utils.c
+  COMMENT
+    "Making ${CMAKE_CURRENT_BINARY_DIR}/_utils.c from ${CMAKE_CURRENT_SOURCE_DIR}/_utils.pyx"
+  COMMAND Python::Interpreter -m cython
+          "${CMAKE_CURRENT_SOURCE_DIR}/_utils.pyx" --output-file _utils.c
+  DEPENDS _utils.pyx
+  VERBATIM)
+
+python_add_library(_utils MODULE _utils.c WITH_SOABI)
+target_link_libraries(_utils PUBLIC Python::NumPy)
+
+install(TARGETS _utils DESTINATION ${SKBUILD_PROJECT_NAME}/eventseg)
diff --git a/brainiak/eventseg/__init__.py b/src/brainiak/eventseg/__init__.py
similarity index 100%
rename from brainiak/eventseg/__init__.py
rename to src/brainiak/eventseg/__init__.py
diff --git a/brainiak/eventseg/_utils.pyx b/src/brainiak/eventseg/_utils.pyx
similarity index 98%
rename from brainiak/eventseg/_utils.pyx
rename to src/brainiak/eventseg/_utils.pyx
index cbf4b997b..521153327 100644
--- a/brainiak/eventseg/_utils.pyx
+++ b/src/brainiak/eventseg/_utils.pyx
@@ -20,6 +20,8 @@ from typing import TypeVar, Union
 import numpy as np
 cimport numpy as np
 
+np.import_array()
+
 T = TypeVar("T", bound=Real)
 
 def masked_log(x):
diff --git a/brainiak/eventseg/event.py b/src/brainiak/eventseg/event.py
similarity index 100%
rename from brainiak/eventseg/event.py
rename to src/brainiak/eventseg/event.py
diff --git a/src/brainiak/factoranalysis/CMakeLists.txt b/src/brainiak/factoranalysis/CMakeLists.txt
new file mode 100644
index 000000000..d640cf22e
--- /dev/null
+++ b/src/brainiak/factoranalysis/CMakeLists.txt
@@ -0,0 +1,8 @@
+pybind11_add_module(tfa_extension tfa_extension.cpp)
+
+find_package(OpenMP)
+if(OpenMP_CXX_FOUND)
+  target_link_libraries(tfa_extension PUBLIC OpenMP::OpenMP_CXX)
+endif()
+
+install(TARGETS tfa_extension LIBRARY DESTINATION ${SKBUILD_PROJECT_NAME}/factoranalysis)
\ No newline at end of file
diff --git a/brainiak/factoranalysis/__init__.py b/src/brainiak/factoranalysis/__init__.py
similarity index 100%
rename from brainiak/factoranalysis/__init__.py
rename to src/brainiak/factoranalysis/__init__.py
diff --git a/brainiak/factoranalysis/htfa.py b/src/brainiak/factoranalysis/htfa.py
similarity index 99%
rename from brainiak/factoranalysis/htfa.py
rename to src/brainiak/factoranalysis/htfa.py
index 10c543e8f..a177cc482 100644
--- a/brainiak/factoranalysis/htfa.py
+++ b/src/brainiak/factoranalysis/htfa.py
@@ -332,10 +332,10 @@ def _map_update_posterior(self):
                 (prior_widths_mean_var[k] + self.global_widths_var_scaled)
             observation_mean = np.mean(next_widths)
             tmp = common * self.global_widths_var_scaled
-            self.global_posterior_[self.map_offset[1] + k] = \
+            self.global_posterior_[self.map_offset[1].item() + k] = \
                 prior_widths_mean_var[k] * common * observation_mean +\
                 tmp * prior_widths[k]
-            self.global_posterior_[self.map_offset[3] + k] = \
+            self.global_posterior_[self.map_offset[3].item() + k] = \
                 prior_widths_mean_var[k] * tmp
 
         return self
diff --git a/brainiak/factoranalysis/tfa.py b/src/brainiak/factoranalysis/tfa.py
similarity index 100%
rename from brainiak/factoranalysis/tfa.py
rename to src/brainiak/factoranalysis/tfa.py
diff --git a/brainiak/factoranalysis/tfa_extension.cpp b/src/brainiak/factoranalysis/tfa_extension.cpp
similarity index 98%
rename from brainiak/factoranalysis/tfa_extension.cpp
rename to src/brainiak/factoranalysis/tfa_extension.cpp
index a73c3f928..cec91080b 100644
--- a/brainiak/factoranalysis/tfa_extension.cpp
+++ b/src/brainiak/factoranalysis/tfa_extension.cpp
@@ -15,12 +15,11 @@
 // */
 //
 #include "Python.h"
-#include
 #include
-#include
 #include
 #include
 #include
+#include <vector>
 #include
 #include
 
@@ -125,7 +124,7 @@ factor_native(py::array_t F_a
     int64_t *Iy = (int64_t*) Iy_buf.ptr;
     int64_t *Iz = (int64_t*) Iz_buf.ptr;
 
-    double invW[K];
+    std::vector<double> invW(K);
     double tmp1 = 0.0;
     double tmp2 = 0.0;
     double tmp3 = 0.0;
@@ -133,9 +132,10 @@
     #pragma omp parallel for
     for(int k = 0 ; k < K; k++)
     {
-        double Tx[nx];
-        double Ty[ny];
-        double Tz[nz];
+        std::vector<double> Tx(nx);
+        std::vector<double> Ty(ny);
+        std::vector<double> Tz(nz);
+
         invW[k] = 1.0/W[k];
         for(int x = 0 ; x < nx; x++)
         {
diff --git a/src/brainiak/fcma/CMakeLists.txt b/src/brainiak/fcma/CMakeLists.txt
new file mode 100644
index 000000000..a7fee1298
--- /dev/null
+++ b/src/brainiak/fcma/CMakeLists.txt
@@ -0,0 +1,19 @@
+add_custom_command(
+  OUTPUT cython_blas.c
+  COMMENT
+    "Making ${CMAKE_CURRENT_BINARY_DIR}/cython_blas.c from ${CMAKE_CURRENT_SOURCE_DIR}/cython_blas.pyx"
+  COMMAND Python::Interpreter -m cython
+          "${CMAKE_CURRENT_SOURCE_DIR}/cython_blas.pyx" --output-file cython_blas.c
+  DEPENDS cython_blas.pyx
+  VERBATIM)
+
+python_add_library(cython_blas MODULE cython_blas.c WITH_SOABI)
+pybind11_add_module(fcma_extension MODULE src/fcma_extension.cc)
+
+find_package(OpenMP)
+if(OpenMP_CXX_FOUND)
+  target_link_libraries(fcma_extension PUBLIC OpenMP::OpenMP_CXX)
+endif()
+
+install(TARGETS cython_blas DESTINATION ${SKBUILD_PROJECT_NAME}/fcma)
+install(TARGETS fcma_extension LIBRARY DESTINATION ${SKBUILD_PROJECT_NAME}/fcma)
\ No newline at end of file
diff --git a/brainiak/fcma/__init__.py b/src/brainiak/fcma/__init__.py
similarity index 100%
rename from brainiak/fcma/__init__.py
rename to src/brainiak/fcma/__init__.py
diff --git a/brainiak/fcma/classifier.py b/src/brainiak/fcma/classifier.py
similarity index 100%
rename from brainiak/fcma/classifier.py
rename to src/brainiak/fcma/classifier.py
diff --git a/brainiak/fcma/cython_blas.pyx b/src/brainiak/fcma/cython_blas.pyx
similarity index 100%
rename from brainiak/fcma/cython_blas.pyx
rename to src/brainiak/fcma/cython_blas.pyx
diff --git a/brainiak/fcma/mvpa_voxelselector.py b/src/brainiak/fcma/mvpa_voxelselector.py
similarity index 99%
rename from brainiak/fcma/mvpa_voxelselector.py
rename to src/brainiak/fcma/mvpa_voxelselector.py
index 8b97cd2fa..6e61cee63 100644
--- a/brainiak/fcma/mvpa_voxelselector.py
+++ b/src/brainiak/fcma/mvpa_voxelselector.py
@@ -79,7 +79,7 @@ def __init__(self,
                  sl
                  ):
         self.data = data
-        self.mask = mask.astype(np.bool)
+        self.mask = mask.astype(bool)
         self.labels = labels
         self.num_folds = num_folds
         self.sl = sl
diff --git a/brainiak/fcma/preprocessing.py b/src/brainiak/fcma/preprocessing.py
similarity index 100%
rename from brainiak/fcma/preprocessing.py
rename to src/brainiak/fcma/preprocessing.py
diff --git a/brainiak/fcma/src/fcma_extension.cc b/src/brainiak/fcma/src/fcma_extension.cc
similarity index 98%
rename from brainiak/fcma/src/fcma_extension.cc
rename to src/brainiak/fcma/src/fcma_extension.cc
index 260b71e78..99f4453c5 100644
--- a/brainiak/fcma/src/fcma_extension.cc
+++ b/src/brainiak/fcma/src/fcma_extension.cc
@@ -15,14 +15,15 @@
 // */
 //
 #include "Python.h"
-#include
 #include
-#include
-#include
+#include
 #include
+#include
 #include
 #include
 
+using namespace std;
+
 namespace py = pybind11;
 
 void within_subject_norm_native(py::array_t
diff --git a/brainiak/searchlight/searchlight.py b/src/brainiak/searchlight/searchlight.py
rename from brainiak/searchlight/searchlight.py
rename to src/brainiak/searchlight/searchlight.py
@@ ... @@
         assert sl_rad >= 0, 'sl_rad should not be negative'
         assert max_blk_edge > 0, 'max_blk_edge should be positive'
         self.sl_rad = sl_rad
@@ -163,6 +171,7 @@
         self.comm = MPI.COMM_WORLD
         self.shape = shape(sl_rad).mask_
         self.bcast_var = None
+        self.pool_size = pool_size
 
     def _get_ownership(self, data):
         """Determine on which rank each subject currently resides
@@ -422,6 +431,11 @@ def run_block_function(self, block_fn, extra_block_fn_params=None,
         results = []
         usable_cpus = usable_cpu_count()
+
+        # The user specified a pool_size on the searchlight object, so use that
+        if pool_size is None and self.pool_size is not None:
+            pool_size = self.pool_size
+
         if pool_size is None:
             processes = usable_cpus
         else:
@@ -513,6 +527,9 @@ def run_searchlight(self, voxel_fn, pool_size=None):
         and None elsewhere.
 
         """
+        # The user specified a pool_size on the searchlight object, so use that
+        if pool_size is None and self.pool_size is not None:
+            pool_size = self.pool_size
         extra_block_fn_params = (voxel_fn, self.shape,
                                  self.min_active_voxels_proportion)
diff --git a/brainiak/utils/__init__.py b/src/brainiak/utils/__init__.py
similarity index 100%
rename from brainiak/utils/__init__.py
rename to src/brainiak/utils/__init__.py
diff --git a/brainiak/utils/fmrisim.py b/src/brainiak/utils/fmrisim.py
similarity index 99%
rename from brainiak/utils/fmrisim.py
rename to src/brainiak/utils/fmrisim.py
index d425f28f6..bd4d96035 100644
--- a/brainiak/utils/fmrisim.py
+++ b/src/brainiak/utils/fmrisim.py
@@ -84,13 +84,15 @@
 # See pyflakes issue #248
 # https://github.com/PyCQA/pyflakes/issues/248
 from numpy.linalg import LinAlgError
-from pkg_resources import resource_stream  # type: ignore
+
 from scipy import stats
 from scipy import signal
 import scipy.ndimage as ndimage
 import copy
 from scipy import optimize
 
+from importlib.resources import files
+
 __all__ = [
     "apply_signal",
     "calc_noise",
@@ -1094,7 +1096,7 @@ def _calc_sfnr(volume,
 
     Returns
     -------
 
-    snr : float 
+    snr : float
 
         The SFNR of the volume
 
     """
@@ -1163,7 +1165,7 @@ def _calc_snr(volume,
 
     Returns
     -------
 
-    snr : float 
+    snr : float
 
         The SNR of the volume
 
     """
@@ -2283,8 +2285,10 @@ def mask_brain(volume,
     if mask_self is True:
         mask_raw = volume
     elif template_name is None:
-        mfn = resource_stream(__name__, "sim_parameters/grey_matter_mask.npy")
-        mask_raw = np.load(mfn)
+        rf = files('brainiak').joinpath(
+            'utils/sim_parameters/grey_matter_mask.npy')
+        with rf.open('rb') as f:
+            mask_raw = np.load(f)
     else:
         mask_raw = np.load(template_name)
 
@@ -3320,7 +3324,7 @@ def generate_1d_gaussian_rfs(n_voxels, feature_resolution, feature_range,
     voxel_tuning = np.linspace(range_start, range_stop, n_voxels + 1)
     voxel_tuning = voxel_tuning[0:-1]
     voxel_tuning = np.floor(voxel_tuning).astype(int)
-    gaussian = signal.gaussian(feature_resolution, rf_size)
+    gaussian = signal.windows.gaussian(feature_resolution, rf_size)
     voxel_rfs = np.zeros((n_voxels, feature_resolution))
     for i in range(0, n_voxels):
         voxel_rfs[i, :] = np.roll(gaussian, voxel_tuning[i] -
diff --git a/brainiak/utils/fmrisim_real_time_generator.py b/src/brainiak/utils/fmrisim_real_time_generator.py
similarity index 96%
rename from brainiak/utils/fmrisim_real_time_generator.py
rename to src/brainiak/utils/fmrisim_real_time_generator.py
index 1cca5e08c..a5ec92db9 100644
--- a/brainiak/utils/fmrisim_real_time_generator.py
+++ b/src/brainiak/utils/fmrisim_real_time_generator.py
@@ -33,14 +33,17 @@
 import nibabel  # type: ignore
 import numpy as np  # type: ignore
 import pydicom as dicom
-from brainiak.utils import fmrisim as sim  # type: ignore
+from brainiak.utils import fmrisim as sim
 import logging
-from pkg_resources import resource_stream  # type: ignore
 from nibabel.nifti1 import Nifti1Image
 import gzip
 
+from pathlib import Path
+
 __all__ = ["generate_data"]
 
+from importlib.resources import files
+
 logger = logging.getLogger(__name__)
 
 script_datetime = datetime.datetime.now()
@@ -304,21 +307,27 @@ def _get_input_names(data_dict):
 
     # Load in the ROIs
     if data_dict.get('ROI_A_file') is None:
-        vol = resource_stream(__name__, "sim_parameters/ROI_A.nii.gz").read()
+        rf = files('brainiak').joinpath('utils/sim_parameters/ROI_A.nii.gz')
+        with rf.open("rb") as f:
+            vol = f.read()
         ROI_A_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_fdata()
     else:
         ROI_A_file = data_dict['ROI_A_file']
 
     if data_dict.get('ROI_B_file') is None:
-        vol = resource_stream(__name__, "sim_parameters/ROI_B.nii.gz").read()
+        rf = files('brainiak').joinpath('utils/sim_parameters/ROI_B.nii.gz')
+        with rf.open("rb") as f:
+            vol = f.read()
         ROI_B_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_fdata()
     else:
         ROI_B_file = data_dict['ROI_B_file']
 
     # Get the path to the template
     if data_dict.get('template_path') is None:
-        vol = resource_stream(__name__,
-                              "sim_parameters/sub_template.nii.gz").read()
+        rf = files('brainiak').joinpath(
+            'utils/sim_parameters/sub_template.nii.gz')
+        with rf.open("rb") as f:
+            vol = f.read()
         template_path = Nifti1Image.from_bytes(
             gzip.decompress(vol)).get_fdata()
     else:
@@ -326,9 +335,10 @@ def _get_input_names(data_dict):
 
     # Load in the noise dict if supplied
     if data_dict.get('noise_dict_file') is None:
-        file = resource_stream(__name__,
-                               'sim_parameters/sub_noise_dict.txt').read()
-        noise_dict_file = file
+        rf = files('brainiak').joinpath(
+            'utils/sim_parameters/sub_noise_dict.txt')
+        with rf.open("rb") as f:
+            noise_dict_file = f.read()
     else:
         noise_dict_file = data_dict['noise_dict_file']
 
@@ -371,7 +381,7 @@ def generate_data(outputDir,
     data_dict.update(user_settings)
 
     # If the folder doesn't exist then make it
-    os.system('mkdir -p %s' % outputDir)
+    Path(outputDir).mkdir(parents=True, exist_ok=True)
 
     logger.info('Load template of average voxel value')
diff --git a/brainiak/utils/kronecker_solvers.py b/src/brainiak/utils/kronecker_solvers.py
similarity index 100%
rename from brainiak/utils/kronecker_solvers.py
rename to src/brainiak/utils/kronecker_solvers.py
diff --git a/brainiak/utils/sim_parameters/ROI_A.nii.gz b/src/brainiak/utils/sim_parameters/ROI_A.nii.gz
similarity index 100%
rename from brainiak/utils/sim_parameters/ROI_A.nii.gz
rename to src/brainiak/utils/sim_parameters/ROI_A.nii.gz
diff --git a/brainiak/utils/sim_parameters/ROI_B.nii.gz b/src/brainiak/utils/sim_parameters/ROI_B.nii.gz
similarity index 100%
rename from brainiak/utils/sim_parameters/ROI_B.nii.gz
rename to src/brainiak/utils/sim_parameters/ROI_B.nii.gz
diff --git a/brainiak/utils/sim_parameters/grey_matter_mask.npy b/src/brainiak/utils/sim_parameters/grey_matter_mask.npy
similarity index 100%
rename from brainiak/utils/sim_parameters/grey_matter_mask.npy
rename to src/brainiak/utils/sim_parameters/grey_matter_mask.npy
diff --git a/brainiak/utils/sim_parameters/sub_noise_dict.txt b/src/brainiak/utils/sim_parameters/sub_noise_dict.txt
similarity index 100%
rename from brainiak/utils/sim_parameters/sub_noise_dict.txt
rename to src/brainiak/utils/sim_parameters/sub_noise_dict.txt
diff --git a/brainiak/utils/sim_parameters/sub_template.nii.gz b/src/brainiak/utils/sim_parameters/sub_template.nii.gz
similarity index 100%
rename from brainiak/utils/sim_parameters/sub_template.nii.gz
rename to src/brainiak/utils/sim_parameters/sub_template.nii.gz
diff --git a/brainiak/utils/utils.py b/src/brainiak/utils/utils.py
similarity index 99%
rename from brainiak/utils/utils.py
rename to src/brainiak/utils/utils.py
index cda83babd..bc0116c69 100644
--- a/brainiak/utils/utils.py
+++ b/src/brainiak/utils/utils.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import math
 import numpy as np
 import re
 import warnings
@@ -281,11 +282,11 @@ def __init__(self, fname=None, include_orth=True, include_pols=True):
         self.n_TR = np.size(self.design_task, axis=0)
         self.cols_nuisance = np.array([])
         if self.include_orth:
-            self.cols_nuisance = np.int0(
+            self.cols_nuisance = np.intp(
                 np.sort(np.append(self.cols_nuisance,
                                   np.where(self.column_types == 0)[0])))
         if self.include_pols:
-            self.cols_nuisance = np.int0(
+            self.cols_nuisance = np.intp(
                 np.sort(np.append(self.cols_nuisance,
                                   np.where(self.column_types == -1)[0])))
         if np.size(self.cols_nuisance) > 0:
@@ -468,7 +469,7 @@ def gen_design(stimtime_files, scan_duration, TR, style='FSL',
         design = [np.empty([int(np.round(duration / TR)), n_C])
                   for duration in scan_duration]
     else:
-        design = [np.empty([int(np.round(scan_duration / TR)), n_C])]
+        design = [np.empty([int(np.round(scan_duration.item() / TR)), n_C])]
     scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0)
     if style == 'FSL':
         design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff)
@@ -777,10 +778,10 @@ def phase_randomize(data, voxelwise=False, random_state=None):
     if not voxelwise:
         phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
-                        * 2 * np.math.pi)
+                        * 2 * math.pi)
     else:
         phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
-                        * 2 * np.math.pi)
+                        * 2 * math.pi)
 
     # Fast Fourier transform along time dimension of data
     fft_data = fft(data, axis=0)
diff --git a/tests/.flake8 b/tests/.flake8
index 1f6c9b76c..9e9cdca95 100644
--- a/tests/.flake8
+++ b/tests/.flake8
@@ -9,3 +9,4 @@ extend-ignore =
     E231,
     # Docstrings
     D,
+    C901
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/conftest.py b/tests/conftest.py
index 23c990747..2d55013bc 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,12 +1,17 @@
 import multiprocessing
+import sys
 
 from mpi4py import MPI
+
 import pytest
 import numpy
 import random
 import tensorflow
 
+pytest_plugins = ["tests.pytest_mpiexec_plugin"]
+
+
 def pytest_configure(config):
     config.option.xmlpath = "junit-{}.xml".format(MPI.COMM_WORLD.Get_rank())
@@ -42,8 +47,29 @@ def seeded_rng():
     tensorflow.random.set_seed(0)
 
 
-skip_non_fork = pytest.mark.skipif(
-    multiprocessing.get_start_method() != "fork"
-    and MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is not None,
-    reason="MPI only works with multiprocessing fork start method.",
-)
+@pytest.fixture(scope="module", autouse=True)
+def pool_size():
+    """
+    Set the pool_size to 1 for MPI tests when the start method for
+    multiprocessing is not fork.
+
+    This replaces the old skip_non_fork fixture. We don't need to skip these
+    tests completely, but we need to ensure that the pool_size is set to 1 so
+    they don't launch any multiprocessing pools within the MPI environment.
+    On Windows, it seems like Intel MPI and MS-MPI both have issues with
+    fork, so we need to set the pool_size to 1 there as well.
+    """
+    if (multiprocessing.get_start_method() != "fork" and
+            MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is not None):
+        return 1
+
+    # Open MPI has issues with fork, so we need to set the pool_size to 1
+    if "Open MPI" in MPI.get_vendor()[0]:
+        return 1
+
+    # On Windows, we need to set the pool_size to 1 for Intel MPI and MS-MPI
+    elif sys.platform == "win32":
+        return 1
+
+    else:
+        return 2
diff --git a/tests/factoranalysis/test_htfa.py b/tests/factoranalysis/test_htfa.py
index 58591b130..d58f60f5b 100644
--- a/tests/factoranalysis/test_htfa.py
+++ b/tests/factoranalysis/test_htfa.py
@@ -48,29 +48,26 @@ def test_X():
     X = np.random.rand(n_voxel, n_tr)
 
     # Check that does NOT run with wrong data type
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(TypeError, match="Input data should be a list"):
         htfa.fit(X, R=R)
-    assert "Input data should be a list" in str(excinfo.value)
 
     X = []
     # Check that does NOT run with wrong array dimension
-    with pytest.raises(ValueError) as excinfo:
+    with pytest.raises(ValueError,
+                       match="Need at leat one subject to train the model"):
         htfa.fit(X, R=R)
-    assert "Need at leat one subject to train the model" in str(excinfo.value)
 
     X = []
    X.append([1, 2, 3])
     # Check that does NOT run with wrong array dimension
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(TypeError, match="data should be an array"):
         htfa.fit(X, R=R)
-    assert "data should be an array" in str(excinfo.value)
 
     X = []
     X.append(np.random.rand(n_voxel))
     # Check that does NOT run with wrong array dimension
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(TypeError, match="subject data should be 2D array"):
         htfa.fit(X, R=R)
-    assert "subject data should be 2D array" in str(excinfo.value)
 
     X = []
     for s in np.arange(n_subj):
@@ -78,36 +75,36 @@
         X.append(np.random.rand(n_voxel, n_tr))
     R = np.random.randint(2, high=102, size=(n_voxel, 3))
 
     # Check that does NOT run with wrong data type
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(TypeError, match="Coordinates should be a list"):
         htfa.fit(X, R=R)
-    assert "Coordinates should be a list" in str(excinfo.value)
 
     R = []
     R.append([1, 2, 3])
     # Check that does NOT run with wrong data type
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(
+            TypeError,
+            match="Each scanner coordinate matrix should be an array"):
         htfa.fit(X, R=R)
-    assert ("Each scanner coordinate matrix should be an array"
-            in str(excinfo.value))
 
     R = []
     R.append(np.random.rand(n_voxel))
     # Check that does NOT run with wrong array dimension
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(
+            TypeError,
+            match="Each scanner coordinate matrix should be 2D array"):
         htfa.fit(X, R=R)
-    assert ("Each scanner coordinate matrix should be 2D array"
-            in str(excinfo.value))
 
     R = []
     for s in np.arange(n_subj):
         R.append(np.random.rand(n_voxel - 1, 3))
     # Check that does NOT run with wrong array dimension
-    with pytest.raises(TypeError) as excinfo:
+    with pytest.raises(
+            TypeError,
+            match=r"n_voxel should be the same in X\[idx\] and R\[idx\]"):
         htfa.fit(X, R=R)
-    assert ("n_voxel should be the same in X[idx] and R[idx]"
-            in str(excinfo.value))
 
 
+@pytest.mark.mpiexec(n=2, timeout=100)
 def test_can_run():
     import numpy as np
     from brainiak.factoranalysis.htfa import HTFA
@@ -166,3 +163,15 @@ def test_can_run():
         "Invalid result of HTFA! (wrong # element in local_weights)"
     assert htfa.local_posterior_.shape[0] == htfa.prior_size,\
         "Invalid result of HTFA! (wrong # element in local_posterior)"
+
+
+def test_dummy():
+    """
+    This is a dummy test to work around the issue of pytest and
+    pytest-mpiexec. See the discussion of the same issue in pytest-forked:
+
+    https://github.com/pytest-dev/pytest-forked/issues/67#issuecomment-1964718720
+
+    """
+    pass
diff --git a/tests/fcma/test_mvpa_voxel_selection.py b/tests/fcma/test_mvpa_voxel_selection.py
index 2258e234a..d607d7b49 100644
--- a/tests/fcma/test_mvpa_voxel_selection.py
+++ b/tests/fcma/test_mvpa_voxel_selection.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import conftest
+import pytest
 
 from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector
 from brainiak.searchlight.searchlight import Searchlight
@@ -21,19 +21,20 @@
 from mpi4py import MPI
 from numpy.random import RandomState
 
+
 # specify the random state to fix the random numbers
 prng = RandomState(1234567890)
 
 
-@conftest.skip_non_fork
-def test_mvpa_voxel_selection():
+@pytest.mark.mpiexec(n=2)
+def test_mvpa_voxel_selection(pool_size):
     data = prng.rand(5, 5, 5, 8).astype(np.float32)
     # all MPI processes read the mask; the mask file is small
     mask = np.ones([5, 5, 5], dtype=bool)
     mask[0, 0, :] = False
     labels = [0, 1, 0, 1, 0, 1, 0, 1]  # 2 subjects, 4 epochs per subject
-    sl = Searchlight(sl_rad=1)
+    sl = Searchlight(sl_rad=1, pool_size=pool_size)
     mvs = MVPAVoxelSelector(data, mask, labels, 2, sl)
     # for cross validation, use SVM with precomputed kernel
diff --git a/tests/fcma/test_voxel_selection.py b/tests/fcma/test_voxel_selection.py
index ed32d2b62..dc206ba9e 100644
--- a/tests/fcma/test_voxel_selection.py
+++ b/tests/fcma/test_voxel_selection.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import conftest
+import pytest
 
 from brainiak.fcma.voxelselector import VoxelSelector
 from scipy.stats.mstats import zscore
@@ -23,11 +23,8 @@
 from mpi4py import MPI
 from numpy.random import RandomState
 
-# specify the random state to fix the random numbers
-prng = RandomState(1234567890)
-
 
-def create_epoch():
+def create_epoch(prng):
     row = 12
     col = 5
     mat = prng.rand(row, col).astype(np.float32)
@@ -39,12 +36,21 @@
     return mat
 
 
-@conftest.skip_non_fork
-def test_voxel_selection():
-    fake_raw_data = [create_epoch() for i in range(8)]
+@pytest.mark.mpiexec(n=2)
+def test_voxel_selection(pool_size):
+
+    # For VoxelSelector, process_num=0 means no multiprocessing
+    if pool_size == 1:
+        process_num = 0
+    else:
+        process_num = pool_size
+
+    prng = RandomState(1234567890)
+    fake_raw_data = [create_epoch(prng) for i in range(8)]
     labels = [0, 1, 0, 1, 0, 1, 0, 1]  # 2 subjects, 4 epochs per subject
-    vs = VoxelSelector(labels, 4, 2, fake_raw_data, voxel_unit=1)
+    vs = VoxelSelector(labels, 4, 2, fake_raw_data, voxel_unit=1,
+                       process_num=process_num)
     # test scipy normalization
     fake_corr = prng.rand(1, 4, 5).astype(np.float32)
     fake_corr = vs._correlation_normalization(fake_corr)
@@ -83,14 +89,23 @@
         "results")
 
 
-@conftest.skip_non_fork
-def test_voxel_selection_with_two_masks():
-    fake_raw_data1 = [create_epoch() for i in range(8)]
-    fake_raw_data2 = [create_epoch() for i in range(8)]
+@pytest.mark.mpiexec(n=2)
+def test_voxel_selection_with_two_masks(pool_size):
+
+    # For VoxelSelector, process_num=0 means no multiprocessing
+    if pool_size == 1:
+        process_num = 0
+    else:
+        process_num = pool_size
+
+    prng = RandomState(1234567890)
+    fake_raw_data1 = [create_epoch(prng) for i in range(8)]
+    fake_raw_data2 = [create_epoch(prng) for i in range(8)]
     labels = [0, 1, 0, 1, 0, 1, 0, 1]  # 2 subjects, 4 epochs per subject
     vs = VoxelSelector(labels, 4, 2, fake_raw_data1,
-                       raw_data2=fake_raw_data2, voxel_unit=1)
+                       raw_data2=fake_raw_data2, voxel_unit=1,
+                       process_num=process_num)
     # for cross validation, use SVM with precomputed kernel
     # no shrinking, set C=1
     clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
@@ -99,7 +114,7 @@
     output = [None] * len(results)
     for tuple in results:
         output[tuple[0]] = int(8*tuple[1])
-    expected_output = [3, 3, 3, 6, 6]
+    expected_output = [3, 3, 7, 5, 7]
     assert np.allclose(output, expected_output, atol=1), \
         'voxel selection via SVM does not provide correct results'
     # for cross validation, use logistic regression
@@ -109,12 +124,23 @@
     output = [None] * len(results)
     for tuple in results:
         output[tuple[0]] = int(8*tuple[1])
-    expected_output = [3, 4, 4, 6, 6]
+    expected_output = [4, 3, 7, 4, 6]
     assert np.allclose(output, expected_output, atol=1), (
         "voxel selection via logistic regression does not provide correct "
         "results")
 
 
+def test_dummy():
+    """
+    This is a dummy test to work around an issue between pytest and
+    pytest-mpiexec. See the discussion of the same issue in
+    pytest-forked:
+
+    https://github.com/pytest-dev/pytest-forked/issues/67#issuecomment-1964718720
+    """
+    pass
+
+
 if __name__ == '__main__':
     test_voxel_selection()
     test_voxel_selection_with_two_masks()
diff --git a/tests/funcalign/test_fastsrm.py b/tests/funcalign/test_fastsrm.py
index 66ceab231..70ac02d28 100644
--- a/tests/funcalign/test_fastsrm.py
+++ b/tests/funcalign/test_fastsrm.py
@@ -1,5 +1,6 @@
 import os
 import tempfile
+import re
 
 import numpy as np
 import pytest
@@ -593,12 +594,13 @@
     srm.transform(paths)
     srm.fit(paths)
 
+    # An error can occur if the temporary directory already exists
     with pytest.raises(ValueError,
                        match=("Path %s already exists. When a model "
                               "is used, filesystem should be "
                               r"cleaned by using the .clean\(\) "
-                              "method" % srm.temp_dir)):
+                              "method" % re.escape(srm.temp_dir))):
         # Error can occur if the filesystem is uncleaned
         create_temp_dir(srm.temp_dir)
         create_temp_dir(srm.temp_dir)
diff --git a/tests/funcalign/test_srm_distributed.py b/tests/funcalign/test_srm_distributed.py
index 7a5b69051..dab24ba2a 100644
--- a/tests/funcalign/test_srm_distributed.py
+++ b/tests/funcalign/test_srm_distributed.py
@@ -16,6 +16,7 @@
 from mpi4py import MPI
 
 
+@pytest.mark.mpiexec(n=2)
 def test_distributed_srm():  # noqa: C901
     import brainiak.funcalign.srm
     s = brainiak.funcalign.srm.SRM()
@@ -150,6 +151,3 @@
     s.fit(X)
     if rank == 0:
         print("Test: different number of samples per subject")
-
-
-test_distributed_srm()
diff --git a/tests/funcalign/test_sssrm.py b/tests/funcalign/test_sssrm.py
index a0a9ab864..592752a1b 100644
--- a/tests/funcalign/test_sssrm.py
+++ b/tests/funcalign/test_sssrm.py
@@ -13,23 +13,17 @@
 # limitations under the License.
 
 import pytest
+import brainiak.funcalign.sssrm
 
 
-def test_instance():
-    import os
-    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
-    import brainiak.funcalign.sssrm
+def test_instance():
     model = brainiak.funcalign.sssrm.SSSRM()
     assert model, "Invalid SSSRM instance!"
 
 
 def test_wrong_input():
-    import os
-    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
-
     from sklearn.utils.validation import NotFittedError
     import numpy as np
-    import brainiak.funcalign.sssrm
 
     voxels = 100
     align_samples = 400
@@ -124,11 +118,7 @@
 
 
 def test_sssrm():
-    import os
-    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
-
     import numpy as np
-    import brainiak.funcalign.sssrm
 
     voxels = 100
     align_samples = 400
diff --git a/tests/isc/test_isc.py b/tests/isc/test_isc.py
index 0f0256b81..cce4f0d6b 100644
--- a/tests/isc/test_isc.py
+++ b/tests/isc/test_isc.py
@@ -16,12 +16,14 @@ def simulated_timeseries(n_subjects, n_TRs, n_voxels=30,
     prng = np.random.RandomState(random_state)
     if n_voxels:
         signal = prng.randn(n_TRs, n_voxels)
-        prng = np.random.RandomState(prng.randint(0, 2**32 - 1))
+        prng = np.random.RandomState(prng.randint(0, 2**32 - 1,
+                                                  dtype=np.int64))
         data = [signal + prng.randn(n_TRs, n_voxels) * noise
                 for subject in np.arange(n_subjects)]
     elif not n_voxels:
         signal = prng.randn(n_TRs)
-        prng = np.random.RandomState(prng.randint(0, 2**32 - 1))
+        prng = np.random.RandomState(prng.randint(0, 2**32 - 1,
+                                                  dtype=np.int64))
         data = [signal + prng.randn(n_TRs) * noise
                 for subject in np.arange(n_subjects)]
     if data_type == 'array':
diff --git a/tests/pytest_mpiexec_plugin.py b/tests/pytest_mpiexec_plugin.py
new file mode 100644
index 000000000..6ee275411
--- /dev/null
+++ b/tests/pytest_mpiexec_plugin.py
@@ -0,0 +1,316 @@
+"""
+A pytest plugin that uses mpiexec to run tests marked with the `mpiexec`
+marker.
+
+Taken from: https://github.com/minrk/pytest-mpiexec
+
+Slightly modified to work with BrainIAK's pytest configuration.
+
+"""
+
+import json
+import os
+import shlex
+import subprocess
+import sys
+from enum import Enum
+from functools import partial
+from itertools import chain
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+import pytest
+from pytest_reportlog.plugin import ReportLogPlugin
+
+MPI_SUBPROCESS_ENV = "TEST_MPI_SUBTEST"
+TEST_REPORT_DIR_ENV = "TEST_MPI_REPORT_DIR"
+
+MPI_MARKER_NAME = "mpiexec"
+
+MPIEXEC = "mpiexec"
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("mpiexec")
+    group.addoption(
+        "--mpiexec",
+        action="store",
+        dest="mpiexec",
+        default=MPIEXEC,
+        help="Executable for running MPI, default=mpiexec",
+    )
+    group.addoption(
+        "--mpiexec-report",
+        action="store",
+        dest="mpiexec_report",
+        choices=[r.value for r in ReportStyle],
+        default=ReportStyle.first_failure,
+        help="""style of mpi error reporting.
+
+        Since each mpi test represents one test run per rank,
+        there are lots of ways to represent a failed parallel run:
+
+        Options:
+
+        - first_failure (default): report only one result per test,
+          PASSED or FAILED, where FAILED will be the failure of the first
+          rank that failed.
+        - all_failures: report failures from all ranks that failed
+        - all: report all results, including all passes
+        - concise: like first_failure, but try to report all _unique_
+          failures (experimental)
+        """,
+    )
+
+
+def pytest_configure(config):
+    global MPIEXEC
+    global REPORT_STYLE
+    mpiexec = config.getoption("mpiexec")
+    if mpiexec:
+        MPIEXEC = mpiexec
+
+    REPORT_STYLE = config.getoption("mpiexec_report")
+
+    config.addinivalue_line("markers",
+                            f"{MPI_MARKER_NAME}: Run this test with mpiexec")
+    if os.getenv(MPI_SUBPROCESS_ENV):
+        from mpi4py import MPI
+
+        rank = MPI.COMM_WORLD.rank
+        reportlog_dir = Path(os.getenv(TEST_REPORT_DIR_ENV, ""))
+        report_path = reportlog_dir / f"reportlog-{rank}.jsonl"
+        config._mpiexec_reporter = reporter = (
+            ReportLogPlugin(config, report_path))
+        config.pluginmanager.register(reporter)
+
+
+def pytest_unconfigure(config):
+    reporter = getattr(config, "_mpiexec_reporter", None)
+    if reporter:
+        reporter.close()
+
+
+def mpi_runtest_protocol(item):
+    """The runtest protocol for mpi tests
+
+    Runs the test in an mpiexec subprocess instead of the current process.
+    """
+    hook = item.config.hook
+    hook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+    call = pytest.CallInfo.from_call(
+        partial(mpi_runtest, item), "setup")
+    if call.excinfo:
+        report = hook.pytest_runtest_makereport(item=item, call=call)
+        hook.pytest_runtest_logreport(report=report)
+    hook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
+
+
+def pytest_runtest_protocol(item, nextitem):
+    """Run the MPI protocol for mpi tests
+
+    Otherwise, do nothing.
+    """
+    if os.getenv(MPI_SUBPROCESS_ENV):
+        return
+    mpi_mark = item.get_closest_marker(MPI_MARKER_NAME)
+    if not mpi_mark:
+        return
+    mpi_runtest_protocol(item)
+    return True
+
+
+class ReportStyle(Enum):
+    all = "all"
+    all_failures = "all_failures"
+    first_failure = "first_failure"
+    concise = "concise"
+
+
+def _report_key(report):
+    """Build the key used to decide whether a report has been 'seen'"""
+    # use reprcrash for 'same' error message
+
+    message_key = None
+    if report["outcome"] != "passed":
+        # for failures, use the first line of reprcrash
+        # (i.e. only the first line of the error message is compared)
+        longrepr = report["longrepr"]
+        if longrepr:
+            reprcrash = longrepr["reprcrash"]
+            if reprcrash:
+                message_key_items = []
+                for key, value in sorted(
+                        report["longrepr"]["reprcrash"].items()):
+                    if key == "message":
+                        value = value.splitlines()[0]
+                    message_key_items.append((key, value))
+                message_key = tuple(message_key_items)
+
+    if not message_key and report["outcome"] != "passed":
+        # warn about missing message key?
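+        # (a reprcrash entry is normally present for failing reports,
+        # which is why the warning below is left disabled)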
+ # warnings.warn("Expected reprcrash...", RuntimeWarning, stacklevel=2) + pass + return (report["when"], report["outcome"], message_key) + + +def consolidate_reports(nodeid, reports, style=ReportStyle.first_failure): + """Consolidate a collection of TestReports + + - collapses to single success if all succeed + - all_failures reports all failures + - first_failure reports only the first failure + """ + style = ReportStyle(style) + + all_ranks = {report["_mpi_rank"] for report in reports} + if len(all_ranks) == 1: + # only one rank, nothing to consolidate + return reports + + if (style != ReportStyle.all and + all(r["outcome"] == "passed" for r in reports)): + # report from rank 0 if everything passed + return [report for report in reports if report["_mpi_rank"] == 0] + + failed_ranks = set() + for report in reports: + rank = report["_mpi_rank"] + # add rank to labels for ranks after 0, unless reporting all failures + if rank > 0 or style in {ReportStyle.all, ReportStyle.all_failures}: + report["nodeid"] = f"{nodeid} [rank={rank}]" + report["location"][-1] = report["location"][-1] + f" [rank={rank}]" + if report["outcome"] != "passed": + failed_ranks.add(report["_mpi_rank"]) + failed_ranks = sorted(failed_ranks) + + if style == ReportStyle.all: + return reports + + elif style == ReportStyle.all_failures: + # select all reports on failed ranks + return [r for r in reports if r["_mpi_rank"] in failed_ranks] + + elif style == ReportStyle.first_failure: + # return just the first error + first_failed_rank = failed_ranks[0] + + return [r for r in reports if r["_mpi_rank"] == first_failed_rank] + elif style == ReportStyle.concise: + # group by 'unique' reports + reports_by_rank = {} + for report in reports: + reports_by_rank.setdefault(report["_mpi_rank"], []).append(report) + _seen_keys = {} + collected_reports = [] + for rank, rank_reports in reports_by_rank.items(): + rank_key = tuple(_report_key(report) for report in rank_reports) + if rank_key in _seen_keys: + _seen_keys[rank_key].append(rank) + else: + _seen_keys[rank_key] = [rank] + collected_reports.extend(rank_reports) + return collected_reports + else: + raise ValueError(f"Unhandled ReportStyle: {style}") + + return reports + + +def mpi_runtest(item): + """Replacement for runtest + + Runs a single test with mpiexec + """ + mpi_mark = item.get_closest_marker(MPI_MARKER_NAME) + # allow parametrization + if getattr(item, "callspec", None) and "mpiexec_n" in item.callspec.params: + n = item.callspec.params["mpiexec_n"] + else: + n = mpi_mark.kwargs.get("n", 2) + timeout = mpi_mark.kwargs.get("timeout", 120) + exe = [ + MPIEXEC, + "-n", + str(n), + sys.executable, + "-m", + "pytest", + "--quiet", + "--no-header", + "--no-summary", + f"{item.fspath}::{item.name}", + ] + env = dict(os.environ) + env[MPI_SUBPROCESS_ENV] = "1" + # add the mpiexec command for easy re-run + item.add_report_section( + "setup", "mpiexec command", f"{MPI_SUBPROCESS_ENV}=1 {shlex.join(exe)}" + ) + + with TemporaryDirectory() as reportlog_dir: + env[TEST_REPORT_DIR_ENV] = reportlog_dir + try: + p = subprocess.run( + exe, + capture_output=True, + text=True, + env=env, + timeout=timeout, + ) + except subprocess.TimeoutExpired as e: + if e.stdout: + item.add_report_section( + "mpiexec pytest", "stdout", + e.stdout.decode("utf8", "replace") + ) + if e.stderr: + item.add_report_section( + "mpiexec pytest", "stderr", + e.stderr.decode("utf8", "replace") + ) + pytest.fail( + f"mpi test did not complete in {timeout} seconds", + pytrace=False, + ) + + # Collect logs from all 
ranks + reports = {} + for rank in range(n): + reportlog_file = os.path.join(reportlog_dir, + f"reportlog-{rank}.jsonl") + if os.path.exists(reportlog_file): + with open(reportlog_file) as f: + for line in f: + report = json.loads(line) + if report["$report_type"] != "TestReport": + continue + report["_mpi_rank"] = rank + nodeid = report["nodeid"] + reports.setdefault(nodeid, []).append(report) + + for nodeid, report_list in reports.items(): + # consolidate reports according to config + reports[nodeid] = consolidate_reports( + nodeid, report_list, REPORT_STYLE) + + # collect report items for the test + for report in chain(*reports.values()): + if report["$report_type"] == "TestReport": + # reconstruct and redisplay the report + r = item.config.hook.pytest_report_from_serializable( + config=item.config, data=report + ) + item.config.hook.pytest_runtest_logreport( + config=item.config, report=r) + + if p.returncode or not reports: + if p.stdout: + item.add_report_section("mpiexec pytest", "stdout", p.stdout) + if p.stderr: + item.add_report_section("mpiexec pytest", "stderr", p.stderr) + if not reports: + pytest.fail("No test reports captured from mpi subprocess!", + pytrace=False) diff --git a/tests/reprsimil/test_gbrsa.py b/tests/reprsimil/test_gbrsa.py index 8eb98994e..8c45c3df6 100644 --- a/tests/reprsimil/test_gbrsa.py +++ b/tests/reprsimil/test_gbrsa.py @@ -581,7 +581,7 @@ def setup_for_test(): n_C, n_T, n_V, n_X0, n_grid, rank) result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw) - scipy_sum = scipy.integrate.simps(y=result_exp, axis=0) + scipy_sum = scipy.integrate.simpson(y=result_exp, axis=0) LL_total_scipy = np.sum(np.log(scipy_sum) + max_value) tol = 1e-3 @@ -622,7 +622,7 @@ def setup_for_test(): n_C, n_T, n_V, n_X0, n_grid, rank) result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw) - scipy_sum = scipy.integrate.simps(y=result_exp, axis=0) + scipy_sum = scipy.integrate.simpson(y=result_exp, axis=0) LL_total_scipy = np.sum(np.log(scipy_sum) + max_value) tol = 1e-3 diff --git a/tests/searchlight/test_searchlight.py b/tests/searchlight/test_searchlight.py index ee1bf1a01..39daae14f 100644 --- a/tests/searchlight/test_searchlight.py +++ b/tests/searchlight/test_searchlight.py @@ -16,11 +16,13 @@ import numpy as np from mpi4py import MPI -import conftest + +import pytest from brainiak.searchlight.searchlight import Searchlight from brainiak.searchlight.searchlight import Diamond, Ball + """Distributed Searchlight Test """ @@ -31,8 +33,8 @@ def cube_sfn(data, msk, myrad, bcast_var): return None -@conftest.skip_non_fork -def test_searchlight_with_cube(): +@pytest.mark.mpiexec(n=2) +def test_searchlight_with_cube(pool_size): sl = Searchlight(sl_rad=3) comm = MPI.COMM_WORLD rank = comm.rank @@ -50,7 +52,7 @@ def test_searchlight_with_cube(): mask[10:17, 10:17, 10:17] = True sl.distribute(data, mask) - global_outputs = sl.run_searchlight(cube_sfn) + global_outputs = sl.run_searchlight(cube_sfn, pool_size=pool_size) if rank == 0: assert global_outputs[13, 13, 13] == 1.0 @@ -62,6 +64,7 @@ def test_searchlight_with_cube(): assert global_outputs[i, j, k] is None +@pytest.mark.mpiexec(n=2) def test_searchlight_with_cube_poolsize_1(): sl = Searchlight(sl_rad=3) comm = MPI.COMM_WORLD @@ -99,8 +102,8 @@ def diamond_sfn(data, msk, myrad, bcast_var): return None -@conftest.skip_non_fork -def test_searchlight_with_diamond(): +@pytest.mark.mpiexec(n=2) +def test_searchlight_with_diamond(pool_size): sl = Searchlight(sl_rad=3, shape=Diamond) comm = MPI.COMM_WORLD rank = comm.rank 
@@ -118,7 +121,7 @@ def test_searchlight_with_diamond(): mask[10:17, 10:17, 10:17] = Diamond(3).mask_ sl.distribute(data, mask) - global_outputs = sl.run_searchlight(diamond_sfn) + global_outputs = sl.run_searchlight(diamond_sfn, pool_size=pool_size) if rank == 0: assert global_outputs[13, 13, 13] == 1.0 @@ -139,8 +142,8 @@ def ball_sfn(data, msk, myrad, bcast_var): return None -@conftest.skip_non_fork -def test_searchlight_with_ball(): +@pytest.mark.mpiexec(n=2) +def test_searchlight_with_ball(pool_size): sl = Searchlight(sl_rad=3, shape=Ball) comm = MPI.COMM_WORLD rank = comm.rank @@ -158,7 +161,7 @@ def test_searchlight_with_ball(): mask[10:17, 10:17, 10:17] = Ball(3).mask_ sl.distribute(data, mask) - global_outputs = sl.run_searchlight(ball_sfn) + global_outputs = sl.run_searchlight(ball_sfn, pool_size=pool_size) if rank == 0: assert global_outputs[13, 13, 13] == 1.0 @@ -216,8 +219,8 @@ def block_test_sfn(data, msk, myrad, bcast_var, extra_params): return outmat[myrad:-myrad, myrad:-myrad, myrad:-myrad] -@conftest.skip_non_fork -def test_correctness(): # noqa: C901 +@pytest.mark.mpiexec(n=2, timeout=120) +def test_correctness(pool_size): # noqa: C901 def voxel_test(data, mask, max_blk_edge, rad): comm = MPI.COMM_WORLD @@ -238,7 +241,8 @@ def voxel_test(data, mask, max_blk_edge, rad): sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge) sl.distribute(data, mask) sl.broadcast(MaskRadBcast(mask, rad)) - global_outputs = sl.run_searchlight(voxel_test_sfn) + global_outputs = sl.run_searchlight(voxel_test_sfn, + pool_size=pool_size) if rank == 0: for d0 in range(rad, global_outputs.shape[0]-rad): @@ -269,7 +273,8 @@ def block_test(data, mask, max_blk_edge, rad): sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge) sl.distribute(data, mask) sl.broadcast(mask) - global_outputs = sl.run_block_function(block_test_sfn) + global_outputs = sl.run_block_function(block_test_sfn, + pool_size=pool_size) if rank == 0: for d0 in range(rad, global_outputs.shape[0]-rad): diff --git a/tests/utils/test_fmrisim_real_time.py b/tests/utils/test_fmrisim_real_time.py index 3f0918fab..caf9f45a2 100644 --- a/tests/utils/test_fmrisim_real_time.py +++ b/tests/utils/test_fmrisim_real_time.py @@ -18,11 +18,12 @@ """ import numpy as np from brainiak.utils import fmrisim_real_time_generator as gen +from importlib.resources import files import pytest import os import time import glob -from pkg_resources import resource_stream # type: ignore + from typing import Dict from nibabel.nifti1 import Nifti1Image import gzip @@ -32,21 +33,31 @@ gen.generate_data() # type: ignore data_dict: Dict = {} -vol = resource_stream(gen.__name__, "sim_parameters/ROI_A.nii.gz").read() + +rf = files('brainiak').joinpath('utils/sim_parameters/ROI_A.nii.gz') +with rf.open("rb") as f: + vol = f.read() data_dict["ROI_A_file"] = np.asanyarray( Nifti1Image.from_bytes(gzip.decompress(vol)).dataobj ) -vol = resource_stream(gen.__name__, "sim_parameters/ROI_B.nii.gz").read() + +rf = files('brainiak').joinpath('utils/sim_parameters/ROI_B.nii.gz') +with rf.open("rb") as f: + vol = f.read() data_dict["ROI_B_file"] = np.asanyarray( Nifti1Image.from_bytes(gzip.decompress(vol)).dataobj ) -vol = resource_stream(gen.__name__, - "sim_parameters/sub_template.nii.gz").read() +rf = files('brainiak').joinpath('utils/sim_parameters/sub_template.nii.gz') +with rf.open("rb") as f: + vol = f.read() data_dict["template_path"] = np.asanyarray( Nifti1Image.from_bytes(gzip.decompress(vol)).dataobj ) -noise_dict_file = resource_stream(gen.__name__, - 
"sim_parameters/sub_noise_dict.txt").read() + +rf = files('brainiak').joinpath('utils/sim_parameters/sub_noise_dict.txt') +with rf.open("rb") as f: + noise_dict_file = f.read() + data_dict['noise_dict_file'] = noise_dict_file data_dict['numTRs'] = 30 data_dict['event_duration'] = 2