Merged
23 commits
d4415fe
Numpy ports
matthew-brett Jun 17, 2024
527408e
Finish up Numpy 2.0 port
matthew-brett Jun 17, 2024
b4237d7
Refactor to prevent warnings from test_numpy
matthew-brett Jun 18, 2024
e0bf273
Run style checker
matthew-brett Jun 18, 2024
dcb9769
Update some workflows
matthew-brett Jun 18, 2024
4e10e2f
Adapt to Numpy 2.0 casting rules
matthew-brett Jun 18, 2024
0779a6b
Another couple of np.complex_ examples
matthew-brett Jun 18, 2024
dddec91
Fix a deprecated import
matthew-brett Jun 18, 2024
657ccb1
Add spin gdb, lldb, and install
stefanv Jun 19, 2024
3d0821c
Note return type of import_array as void *
matthew-brett Jun 20, 2024
ad09805
Hack fix for segfaults in multi object
matthew-brett Oct 3, 2024
4815127
Allow for floating point error in affine tests
matthew-brett Oct 3, 2024
3ea60ee
Probably correct fix for multiiter size
matthew-brett Oct 3, 2024
7034758
Fixes to Numpy 2.0 doctesting
matthew-brett Oct 3, 2024
df6926a
Fix unstable import of fromrecords
matthew-brett Oct 3, 2024
39fef66
Another fix for imprecision in coordinate_map
matthew-brett Oct 3, 2024
6915943
Style fix
matthew-brett Oct 3, 2024
934515f
Try fix for random registration seeding
matthew-brett Oct 3, 2024
2de7140
Trying again with int fix for registration
matthew-brett Oct 3, 2024
7eebf47
Drop Python 3.8
matthew-brett Oct 4, 2024
7f9a5d0
Try fixing Numpy datatype
matthew-brett Oct 4, 2024
a419dbc
Revert "Drop Python 3.8"
matthew-brett Oct 4, 2024
d8989ab
Convert all np.int_ to np.intp in graph.py
matthew-brett Oct 4, 2024
6 changes: 4 additions & 2 deletions .github/workflows/coverage.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
@@ -41,6 +41,8 @@ jobs:
pwd
ls -lR ..
- name: Upload to codecov
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v4
with:
files: tmp/coverage.xml
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
2 changes: 1 addition & 1 deletion .github/workflows/doc-build.yml
@@ -24,7 +24,7 @@ jobs:
sudo apt install -y graphviz
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -17,7 +17,7 @@ jobs:
- uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

4 changes: 2 additions & 2 deletions .github/workflows/test.yml
@@ -28,7 +28,7 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python_version }}
allow-prereleases: true
@@ -47,7 +47,7 @@
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install
6 changes: 4 additions & 2 deletions doc/devel/code_discussions/coordmap_notes.rst
@@ -495,8 +495,10 @@ AffineTransform(
[ 0., 0., 1.]])
)

>>> bounding_box(y70, (x_spec[1], z_spec[1]))
((-92.0, 92.0), (70.0, 70.0), (-70.0, 100.0))
>>> x_lims, y_lims, z_lims = bounding_box(y70, (x_spec[1], z_spec[1]))
>>> assert np.all(x_lims == (-92, 92))
>>> assert np.all(y_lims == (70, 70))
>>> assert np.all(z_lims == (-70, 100))

Maybe these aren't things that "normal human beings" (to steal a quote from
Gael) can use, but they're explicit and they are tied to precise mathematical
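The doctest change above swaps exact printed output for assertions on the returned values, so the example no longer depends on how NumPy formats floats (scalar reprs changed in NumPy 2.0) and can tolerate small rounding differences. A minimal sketch of the same pattern, using a hypothetical stand-in for `bounding_box`:

    >>> import numpy as np
    >>> def fake_bounding_box():
    ...     # Hypothetical stand-in for bounding_box(); returns (x_lims, y_lims, z_lims).
    ...     return (-92.0, 92.0), (70.0, 70.0), (-70.0, 100.0)
    >>> x_lims, y_lims, z_lims = fake_bounding_box()
    >>> assert np.all(x_lims == (-92, 92))      # check values, not the printed repr
    >>> assert np.allclose(z_lims, (-70, 100))  # tolerant check when rounding error is possible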
4 changes: 2 additions & 2 deletions doc/users/coordinate_map.rst
@@ -170,8 +170,8 @@ We resample the 'subject' image to the 'atlas image
True
>>> normalized_subject_im.coordmap == atlas_im.coordmap
True
>>> np.all(normalized_subject_im.affine == atlas_im.affine)
True
>>> # Normalized image now has atlas affine.
>>> assert np.all(normalized_subject_im.affine == atlas_im.affine)

***********************
Mathematical definition
16 changes: 8 additions & 8 deletions lib/fff_python_wrapper/fffpy.c
@@ -9,11 +9,7 @@
because PyArray_API is defined static, in order not to share that symbol
within the dso. (import_array() asks the pointer value to the python process)
*/
/*
* deal with differences in macro return result between Python 2 and 3
* http://mail.scipy.org/pipermail/numpy-discussion/2010-December/054350.html
*/
IMP_OUT fffpy_import_array(void) {
void* fffpy_import_array(void) {
import_array();
}

@@ -47,7 +43,7 @@ void fff_vector_fetch_using_NumPy(fff_vector* y, const char* x, npy_intp stride,
PyArrayObject* X = (PyArrayObject*) PyArray_New(&PyArray_Type, 1, dim, type, strides,
(void*)x, itemsize, NPY_BEHAVED, NULL);
PyArrayObject* Y = (PyArrayObject*) PyArray_SimpleNewFromData(1, dim, NPY_DOUBLE, (void*)y->data);
PyArray_CastTo(Y, X);
PyArray_CopyInto(Y, X);
Py_XDECREF(Y);
Py_XDECREF(X);
return;
@@ -215,7 +211,7 @@ fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x)
dim[1] = dim1;

xd = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_DOUBLE, (void*)y->data);
PyArray_CastTo(xd, (PyArrayObject*)x);
PyArray_CopyInto(xd, (PyArrayObject*)x);
Py_XDECREF(xd);
}

@@ -527,7 +523,11 @@ fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...)

/* Create new instance */
thisone = (fffpy_multi_iterator*)malloc(sizeof(fffpy_multi_iterator));
multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
/* Static size of PyArrayMultiIterObject.
*
* https://github.com/numpy/numpy/issues/26765#issuecomment-2391737671
*/
multi = PyArray_malloc(PyArrayMultiIter_Type.tp_basicsize);
vector = (fff_vector**)malloc(narr*sizeof(fff_vector*));

/* Initialize the PyArrayMultiIterObject instance from the variadic arguments */
11 changes: 1 addition & 10 deletions lib/fff_python_wrapper/fffpy.h
@@ -31,17 +31,8 @@
static, in order not to share that symbol within the
dso. (import_array() asks the pointer value to the python process)
*/
/*
* deal with differences in macro return result between Python 2 and 3
* http://mail.scipy.org/pipermail/numpy-discussion/2010-December/054350.html
*/
#if PY_MAJOR_VERSION >= 3
typedef int IMP_OUT;
#else
typedef void IMP_OUT;
#endif

extern IMP_OUT fffpy_import_array(void);
extern void* fffpy_import_array(void);

/*!
\brief Convert \c PyArrayObject to \c fff_vector
3 changes: 2 additions & 1 deletion nipy/algorithms/diagnostics/tsdiffplot.py
@@ -6,6 +6,7 @@

import nipy

from ...utils import deprecate_with_doc
from .timediff import time_slice_diffs


@@ -76,7 +77,7 @@ def xmax_labels(ax, val, xlabel, ylabel):
return axes


@np.deprecate_with_doc('Please see docstring for alternative code')
@deprecate_with_doc('please see docstring for alternative code')
def plot_tsdiffs_image(img, axes=None, show=True):
''' Plot time series diagnostics for image

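`np.deprecate_with_doc` was removed in NumPy 2.0, which is why the decorator is now imported from `nipy.utils` instead. The following is only a hypothetical sketch of what such a replacement decorator could look like; it is not the actual `nipy.utils` implementation:

import functools
import warnings

def deprecate_with_doc(msg):
    """Hypothetical stand-in for the removed np.deprecate_with_doc."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Warn at call time, pointing at the caller's frame.
            warnings.warn(f"`{func.__name__}` is deprecated; {msg}",
                          DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        # Record the deprecation in the docstring as well.
        wrapper.__doc__ = f"Deprecated: {msg}\n\n{func.__doc__ or ''}"
        return wrapper
    return decorator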
32 changes: 16 additions & 16 deletions nipy/algorithms/graph/graph.py
@@ -291,15 +291,15 @@
the corresponding WeightedGraph instance
"""
n = X.shape[0]
label = np.arange(n).astype(np.int_)
label = np.arange(n).astype(np.intp)

edges = np.zeros((0, 2)).astype(np.int_)
edges = np.zeros((0, 2)).astype(np.intp)
# upper bound on maxdist**2
maxdist = 4 * np.sum((X - X[0]) ** 2, 1).max()
nbcc = n
while nbcc > 1:
mindist = maxdist * np.ones(nbcc)
link = - np.ones((nbcc, 2)).astype(np.int_)
link = - np.ones((nbcc, 2)).astype(np.intp)

# find nearest neighbors
for n1 in range(n):
@@ -427,8 +427,8 @@
Dramatically slow for non-sparse graphs
"""
n = len(lil)
visited = np.zeros(n).astype(np.int_)
label = - np.ones(n).astype(np.int_)
visited = np.zeros(n).astype(np.intp)
label = - np.ones(n).astype(np.intp)
k = 0
while (visited == 0).any():
front = [np.argmin(visited)]
@@ -500,7 +500,7 @@
i, j, d = create_edges(lxyz, n18, 2, i, j, d)
if k == 26:
i, j, d = create_edges(lxyz, n26, 3, i, j, d)
i, j = i.astype(np.int_), j.astype(np.int_)
i, j = i.astype(np.intp), j.astype(np.intp)

# reorder the edges to have a more standard order
order = np.argsort(i + j * (len(i) + 1))
@@ -515,7 +515,7 @@

Parameters
----------
xyz: array of shape (nsamples, 3) and type np.int_,
xyz: array of shape (nsamples, 3) and type np.intp,
k = 18: the number of neighbours considered. (6, 18 or 26)

Returns
@@ -617,7 +617,7 @@

Parameters
----------
xyz: array of shape (self.V, 3) and type np.int_,
xyz: array of shape (self.V, 3) and type np.intp,
k = 18: the number of neighbours considered. (6, 18 or 26)

Returns
@@ -637,7 +637,7 @@
raise TypeError('Creating graph from grid failed. '\
'Maybe the grid is too big')
self.E = np.size(i)
self.edges = np.zeros((self.E, 2), np.int_)
self.edges = np.zeros((self.E, 2), np.intp)

self.edges[:, 0] = i
self.edges[:, 1] = j
self.weights = np.array(d)
@@ -719,18 +719,18 @@
weights: array of shape(self.E), concatenated list of weights
"""
order = np.argsort(self.edges[:, 0] * float(self.V) + self.edges[:, 1])
neighb = self.edges[order, 1].astype(np.int_)
neighb = self.edges[order, 1].astype(np.intp)
weights = self.weights[order]
degree, _ = self.degrees()
idx = np.hstack((0, np.cumsum(degree))).astype(np.int_)
idx = np.hstack((0, np.cumsum(degree))).astype(np.intp)
return idx, neighb, weights

def floyd(self, seed=None):
""" Compute all the geodesic distances starting from seeds

Parameters
----------
seed= None: array of shape (nbseed), type np.int_
seed: None or array of shape (nbseed), type np.intp
vertex indexes from which the distances are computed
if seed==None, then every edge is a seed point

@@ -880,7 +880,7 @@

Parameters
----------
seed: array of shape (nseeds), type (np.int_),
seed: array of shape (nseeds), type (np.intp),
vertices from which the cells are built

Returns
@@ -896,7 +896,7 @@
except:
raise ValueError('undefined weights')
dist, active = np.inf * np.ones(self.V), np.ones(self.V)
label = - np.ones(self.V, np.int_)
label = - np.ones(self.V, np.intp)
idx, neighb, weight = self.compact_neighb()
dist[seed] = 0
label[seed] = np.arange(len(seed))
@@ -930,7 +930,7 @@

Returns
-------
cliques: array of shape (self.V), type (np.int_)
cliques: array of shape (self.V), type (np.intp)
labelling of the vertices according to the clique they belong to
"""
if (self.weights < 0).any():
@@ -1034,7 +1034,7 @@
k = self.cc().max() + 1
E = 2 * self.V - 2
V = self.V
Kedges = np.zeros((E, 2)).astype(np.int_)
Kedges = np.zeros((E, 2)).astype(np.intp)
Kweights = np.zeros(E)
iw = np.argsort(self.weights)
label = np.arange(V)
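The `np.int_` to `np.intp` conversions above standardize the graph's index arrays on NumPy's own indexing integer: `np.intp` is always pointer sized, whereas `np.int_` has historically been the C `long` (only 32 bits on 64-bit Windows), a mismatch that NumPy 2.0's stricter casting rules make more visible. A small illustrative sketch, not taken from graph.py:

    >>> import numpy as np
    >>> edges = np.zeros((0, 2), dtype=np.intp)   # index arrays use the pointer-sized type
    >>> label = np.arange(5, dtype=np.intp)
    >>> values = np.arange(10.0)
    >>> values[label]                             # intp arrays index directly, no casting needed
    array([0., 1., 2., 3., 4.])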
2 changes: 1 addition & 1 deletion nipy/algorithms/registration/cubic_spline.c
@@ -199,7 +199,7 @@ void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src)
unsigned int axis, aux=0, dimmax=0;

/* Copy src into res */
PyArray_CastTo(res, (PyArrayObject*)src);
PyArray_CopyInto(res, (PyArrayObject*)src);

/* Compute the maximum array dimension over axes */
for(axis=0; axis<PyArray_NDIM(res); axis++) {
10 changes: 7 additions & 3 deletions nipy/algorithms/registration/histogram_registration.py
@@ -14,7 +14,7 @@
from .optimizer import configure_optimizer
from .similarity_measures import similarity_measures as _sms

MAX_INT = np.iinfo(np.int_).max
MAX_INTC = np.iinfo(np.intc).max

# Module globals
VERBOSE = True # enables online print statements
@@ -41,7 +41,8 @@ def __init__(self, from_img, to_img,
from_bins=256, to_bins=None,
from_mask=None, to_mask=None,
similarity='crl1', interp='pv',
smooth=0, renormalize=False, dist=None):
smooth=0, renormalize=False, dist=None,
rng=None):
"""
Creates a new histogram registration object.

@@ -77,6 +78,8 @@ def __init__(self, from_img, to_img,
Standard deviation in millimeters of an isotropic Gaussian
kernel used to smooth the `To` image. If 0, no smoothing is
applied.
rng : None or :class:`numpy.random.Generator`
Random number generator.
"""
# Function assumes xyx_affine for inputs
from_img = as_xyz_image(from_img)
@@ -125,6 +128,7 @@ def __init__(self, from_img, to_img,
# Set default registration parameters
self._set_interp(interp)
self._set_similarity(similarity, renormalize=renormalize, dist=dist)
self.rng = np.random.default_rng() if rng is None else rng

def _get_interp(self):
return list(interp_methods.keys())[\
@@ -306,7 +310,7 @@ def _eval(self, Tv):
trans_vox_coords = Tv.apply(self._vox_coords)
interp = self._interp
if self._interp < 0:
interp = - np.random.randint(MAX_INT)
interp = -self.rng.integers(MAX_INTC)
_joint_histogram(self._joint_hist,
self._from_data.flat, # array iterator
self._to_data,
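The new `rng` argument threads a `numpy.random.Generator` through to the randomized-interpolation path (the old code drew from the global `np.random` state), so callers can now seed registration runs. A hedged usage sketch, assuming the class defined in this module is `HistogramRegistration`, that `'rand'` selects the randomized interpolation mode, and that `from_img` and `to_img` are already-loaded nipy images:

    >>> import numpy as np
    >>> from nipy.algorithms.registration.histogram_registration import HistogramRegistration
    >>> rng = np.random.default_rng(42)            # seeded generator for reproducible draws
    >>> # reg = HistogramRegistration(from_img, to_img, interp='rand', rng=rng)
    >>> # transform = reg.optimize('rigid')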
4 changes: 2 additions & 2 deletions nipy/algorithms/statistics/formula/formulae.py
@@ -123,14 +123,14 @@
from nipy.algorithms.utils.matrices import full_rank, matrix_rank

# Legacy repr printing from numpy.
from nipy.utils import VisibleDeprecationWarning, _NoValue
from nipy.utils import VisibleDeprecationWarning, _NoValue, deprecate_with_doc


def _to_str(s):
return s.decode('latin1') if isinstance(s, bytes) else str(s)


@np.deprecate(message = "Please use sympy.Dummy instead of this function")
@deprecate_with_doc("please use sympy.Dummy instead")
def make_dummy(name):
""" Make dummy variable of given name

2 changes: 1 addition & 1 deletion nipy/algorithms/statistics/formula/tests/test_formula.py
@@ -9,7 +9,6 @@
import numpy as np
import pytest
import sympy
from numpy.core.records import fromrecords
from numpy.testing import assert_almost_equal, assert_array_equal
from sympy.utilities.lambdify import implemented_function

@@ -219,6 +218,7 @@ def test_make_recarray():
# Test make_array
# From list / sequence
# 2D case
fromrecords = np.rec.fromrecords
data_2d = [(3, 4), (4, 6), (7, 9)]
m = F.make_recarray(data_2d, 'wv', [np.float64, np.int_])
assert_starr_equal(m, fromrecords(
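Importing `fromrecords` from `numpy.core.records` is unreliable under NumPy 2.0, where `numpy.core` was renamed to the private `numpy._core`; the stable public spelling is `np.rec.fromrecords`, as used above. A quick sketch of the replacement call:

    >>> import numpy as np
    >>> fromrecords = np.rec.fromrecords           # public, NumPy 2.0-safe spelling
    >>> rec = fromrecords([(3, 4), (4, 6), (7, 9)], names=['w', 'v'])
    >>> rec['w']
    array([3, 4, 7])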