Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions numpy/_core/arrayprint.py
Original file line number Diff line number Diff line change
Expand Up @@ -851,7 +851,7 @@ def recurser(index, hanging_indent, curr_width):
axes_left = a.ndim - axis

if axes_left == 0:
return format_function(a[index])
return format_function(a[index + (...,)].to_scalar())

# when recursing, add a space to align with the [ added, and reduce the
# length of the line by 1
Expand Down Expand Up @@ -1709,7 +1709,7 @@ def _array_str_implementation(
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
return _guarded_repr_or_str(np.ndarray.to_scalar(a))

return array2string(a, max_line_width, precision, suppress_small, ' ', "")

Expand Down
8 changes: 3 additions & 5 deletions numpy/_core/getlimits.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,15 @@
def _fr0(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0:
a = a.copy()
a.shape = (1,)
return a.reshape(1)
return a


def _fr1(a):
"""fix rank > 0 --> rank-0"""
if a.size == 1:
a = a.copy()
a.shape = ()
return a
return a.reshape(())
return a[()]


class MachArLike:
Expand Down
8 changes: 4 additions & 4 deletions numpy/_core/numeric.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
ndarray, nditer, nested_iters, promote_types, putmask, result_type,
shares_memory, vdot, where, zeros, normalize_axis_index, vecdot
)
from ._multiarray_umath import _array_converter

from . import overrides
from . import umath
Expand Down Expand Up @@ -2426,9 +2427,8 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):

"""
# Turn all but python scalars into arrays.
x, y, atol, rtol = (
a if isinstance(a, (int, float, complex)) else asanyarray(a)
for a in (a, b, atol, rtol))
conv = _array_converter(a, b, atol, rtol)
x, y, atol, rtol = conv.as_arrays(pyscalars="preserve")

# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
Expand All @@ -2450,7 +2450,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
if equal_nan:
result |= isnan(x) & isnan(y)

return result[()] # Flatten 0d arrays to scalars
return conv.wrap(result)


def _array_equal_dispatcher(a1, a2, equal_nan=None):
Expand Down
3 changes: 3 additions & 0 deletions numpy/_core/src/multiarray/arraywrap.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

#include "arraywrap.h"
#include "npy_static_data.h"
#include "multiarraymodule.h"

/*
* Find the array wrap or array prepare method that applies to the inputs.
Expand Down Expand Up @@ -139,6 +140,8 @@ npy_apply_wrap(
PyArrayObject *arr = NULL;
PyObject *err_type, *err_value, *traceback;

return_scalar = (return_scalar && !npy_thread_unsafe_state.dislike_scalars);

/* If provided, we prefer the actual out objects wrap: */
if (original_out != NULL && original_out != Py_None) {
/*
Expand Down
5 changes: 3 additions & 2 deletions numpy/_core/src/multiarray/mapping.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
/* TODO: Only for `NpyIter_GetTransferFlags` until it is public */
#define NPY_ITERATOR_IMPLEMENTATION_CODE
#include "nditer_impl.h"
#include "multiarraymodule.h"

#include "umathmodule.h"

Expand Down Expand Up @@ -1302,7 +1303,7 @@ array_item_asarray(PyArrayObject *self, npy_intp i)
NPY_NO_EXPORT PyObject *
array_item(PyArrayObject *self, Py_ssize_t i)
{
if (PyArray_NDIM(self) == 1) {
if (PyArray_NDIM(self) == 1 && !npy_thread_unsafe_state.dislike_scalars) {
char *item;
npy_index_info index;

Expand Down Expand Up @@ -1485,7 +1486,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
}

/* Full integer index */
else if (index_type == HAS_INTEGER) {
else if (index_type == HAS_INTEGER && !npy_thread_unsafe_state.dislike_scalars) {
char *item;
if (get_item_pointer(self, &item, indices, index_num) < 0) {
goto finish;
Expand Down
24 changes: 24 additions & 0 deletions numpy/_core/src/multiarray/methods.c
Original file line number Diff line number Diff line change
Expand Up @@ -2797,6 +2797,27 @@ array_class_getitem(PyObject *cls, PyObject *args)
return Py_GenericAlias(cls, args);
}

/*
 * Implementation of ndarray.to_scalar(): for a 0-d array, return the
 * corresponding NumPy scalar; for any other array, return the array itself
 * unchanged (with a new reference).
 *
 * TODO: near-duplicate of PyArray_Return, which is bypassed when the
 * dislike_scalars mode is enabled (the original comment's "PyArray_Result"
 * presumably means PyArray_Return).
 */
static PyObject *
array_to_scalar(PyArrayObject *mp, PyObject *NPY_UNUSED(args))
{
    /* Own a reference so both the early-return and else branches can hand it out. */
    Py_INCREF(mp);
    /* Defensive check inherited from PyArray_Return; for a bound ndarray
     * method `mp` should always be an array, so this is normally dead. */
    if (!PyArray_Check(mp)) {
        return (PyObject *)mp;
    }
    if (PyArray_NDIM(mp) == 0) {
        /* 0-d: build the scalar from the array's single element. */
        PyObject *ret;
        ret = PyArray_ToScalar(PyArray_DATA(mp), mp);
        /* Drop the reference taken above; `ret` is independent of `mp`. */
        Py_DECREF(mp);
        return ret;
    }
    else {
        /* ndim > 0: no scalar to extract, return the array (reference
         * transferred from the Py_INCREF above). */
        return (PyObject *)mp;
    }
}



NPY_NO_EXPORT PyMethodDef array_methods[] = {

/* for subtypes */
Expand Down Expand Up @@ -3025,6 +3046,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"to_device",
(PyCFunction)array_to_device,
METH_VARARGS | METH_KEYWORDS, NULL},
{"to_scalar",
(PyCFunction)array_to_scalar,
METH_NOARGS, NULL},

{NULL, NULL, 0, NULL} /* sentinel */
};
8 changes: 8 additions & 0 deletions numpy/_core/src/multiarray/multiarraymodule.c
Original file line number Diff line number Diff line change
Expand Up @@ -4770,6 +4770,14 @@ initialize_thread_unsafe_state(void) {
npy_thread_unsafe_state.warn_if_no_mem_policy = 0;
}

env = getenv("NUMPY_DISLIKE_SCALARS");
if ((env != NULL) && (strncmp(env, "1", 1) == 0)) {
npy_thread_unsafe_state.dislike_scalars = 1;
}
else {
npy_thread_unsafe_state.dislike_scalars = 0;
}

return 0;
}

Expand Down
1 change: 1 addition & 0 deletions numpy/_core/src/multiarray/multiarraymodule.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ typedef struct npy_thread_unsafe_state_struct {
* if there is no memory policy set
*/
int warn_if_no_mem_policy;
int dislike_scalars;

} npy_thread_unsafe_state_struct;

Expand Down
5 changes: 4 additions & 1 deletion numpy/_core/src/multiarray/scalarapi.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "ctors.h"
#include "descriptor.h"
#include "dtypemeta.h"
#include "multiarraymodule.h"
#include "scalartypes.h"

#include "common.h"
Expand Down Expand Up @@ -631,7 +632,9 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
NPY_NO_EXPORT PyObject *
PyArray_Return(PyArrayObject *mp)
{

if (npy_thread_unsafe_state.dislike_scalars) {
return (PyObject *)mp;
}
if (mp == NULL) {
return NULL;
}
Expand Down
20 changes: 16 additions & 4 deletions numpy/_core/src/multiarray/scalartypes.c.src
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,8 @@ gentype_@name@@suff@(PyObject *m1, PyObject *m2)
res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.@ufunc@);
}
Py_DECREF(other_op);
return res;
/* Bug convert back to scalar (TODO: It would be nice to signal this) */
return PyArray_Return((PyArrayObject *)res);
}
else {
assert(other_op == NULL);
Expand Down Expand Up @@ -452,7 +453,8 @@ gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo)
res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.power);
}
Py_DECREF(other_op);
return res;
/* Bug convert back to scalar (TODO: It would be nice to signal this) */
return PyArray_Return(res);
}
else {
assert(other_op == NULL);
Expand Down Expand Up @@ -497,7 +499,7 @@ gentype_@name@(PyObject *m1)
}
ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr);
Py_DECREF(arr);
return ret;
return PyArray_Return((PyArrayObject *)ret);
}
/**end repeat**/

Expand Down Expand Up @@ -1617,7 +1619,8 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op)
PyObject *res = PyObject_RichCompare(self_op, other_op, cmp_op);
Py_DECREF(self_op);
Py_DECREF(other_op);
return res;
/* assume if other was an array-like, we deferred above. */
return PyArray_Return(res);
}
else if (self_op != NULL) {
/* Try again, since other is an object scalar and this one mutated */
Expand Down Expand Up @@ -2588,6 +2591,12 @@ integer_is_integer(PyObject *self, PyObject *NPY_UNUSED(args)) {
Py_RETURN_TRUE;
}

/*
 * Implementation of to_scalar() on NumPy scalar types (generic scalars):
 * a scalar is already a scalar, so simply return self with a new reference.
 * Mirrors ndarray.to_scalar so code can call to_scalar() uniformly on
 * either arrays or scalars.
 */
static PyObject *
gentype_to_scalar(PyObject *self, PyObject *NPY_UNUSED(args)) {
    Py_INCREF(self);
    return self;
}

/*
* need to fill in doc-strings for these methods on import -- copy from
* array docstrings
Expand Down Expand Up @@ -2786,6 +2795,9 @@ static PyMethodDef gentype_methods[] = {
{"to_device",
(PyCFunction)array_to_device,
METH_VARARGS | METH_KEYWORDS, NULL},
{"to_scalar",
(PyCFunction)gentype_to_scalar,
METH_NOARGS, NULL},

{NULL, NULL, 0, NULL} /* sentinel */
};
Expand Down
48 changes: 41 additions & 7 deletions numpy/_core/src/umath/ufunc_object.c
Original file line number Diff line number Diff line change
Expand Up @@ -613,6 +613,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
PyArray_DTypeMeta *out_op_DTypes[],
npy_bool *force_legacy_promotion,
npy_bool *promoting_pyscalars,
npy_bool *all_inputs_were_scalars,
PyObject *order_obj, NPY_ORDER *out_order,
PyObject *casting_obj, NPY_CASTING *out_casting,
PyObject *subok_obj, npy_bool *out_subok,
Expand All @@ -626,26 +627,42 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,

/* Convert and fill in input arguments */
npy_bool all_scalar = NPY_TRUE;
npy_bool any_scalar = NPY_FALSE;
npy_bool any_scalar = NPY_FALSE; /* any 0-D object, legacy scalar */
*force_legacy_promotion = NPY_FALSE;
*promoting_pyscalars = NPY_FALSE;
*all_inputs_were_scalars = NPY_TRUE;
for (int i = 0; i < nin; i++) {
obj = PyTuple_GET_ITEM(full_args.in, i);
int was_pyscalar = NPY_FALSE;

if (PyArray_Check(obj)) {
out_op[i] = (PyArrayObject *)obj;
Py_INCREF(out_op[i]);
}
else {
/* Convert the input to an array and check for special cases */
out_op[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, NULL);
/*
* Convert the input to an array. Note that we figure out if the
* input was a scalar here. This is useful below, because we need
* to check for NumPy 2.0 style promotion and pass the information
* further through.
* Also we want to know if the object was anything NumPy considers
* a true scalar (not 0-D array). This is used to decide if a
* 0-D result should be unpacked.
*/
out_op[i] = (PyArrayObject *)PyArray_FromAny_int(
obj, NULL, NULL, 0, 0, 0, NULL,
&was_pyscalar);
if (out_op[i] == NULL) {
goto fail;
}
}
out_op_DTypes[i] = NPY_DTYPE(PyArray_DESCR(out_op[i]));
Py_INCREF(out_op_DTypes[i]);

if (!was_pyscalar) {
*all_inputs_were_scalars = NPY_FALSE;
}

if (nin == 1) {
/*
* TODO: If nin == 1 we don't promote! This has exactly the effect
Expand Down Expand Up @@ -677,7 +694,8 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
* `np.can_cast(operand, dtype)`. The flag is local to this use, but
* necessary to propagate the information to the legacy type resolution.
*/
if (npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) {
if (was_pyscalar &&
npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) {
if (PyArray_FLAGS(out_op[i]) & NPY_ARRAY_WAS_PYTHON_INT
&& PyArray_TYPE(out_op[i]) != NPY_LONG) {
/*
Expand Down Expand Up @@ -3746,7 +3764,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
/* TODO: Data is mutated, so force_wrap like a normal ufunc call does */
PyObject *wrapped_result = npy_apply_wrap(
(PyObject *)ret, out_obj, wrap, wrap_type, NULL,
PyArray_NDIM(ret) == 0 && return_scalar, NPY_FALSE);
axes_obj == Py_None && return_scalar, NPY_FALSE);

Py_DECREF(ret);
Py_DECREF(wrap);
Py_DECREF(wrap_type);
Expand Down Expand Up @@ -4244,7 +4263,7 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc,
PyObject *ret_i = npy_apply_wrap(
(PyObject *)result_arrays[out_i], original_out, wrap, wrap_type,
/* Always try to return a scalar right now: */
&context, PyArray_NDIM(result_arrays[out_i]) == 0 && return_scalar, NPY_TRUE);
&context, return_scalar, NPY_TRUE);
Py_CLEAR(result_arrays[out_i]);
if (ret_i == NULL) {
goto fail;
Expand Down Expand Up @@ -4496,12 +4515,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
int keepdims = -1; /* We need to know if it was passed */
npy_bool force_legacy_promotion;
npy_bool promoting_pyscalars;
npy_bool all_inputs_were_scalars;
if (convert_ufunc_arguments(ufunc,
/* extract operand related information: */
full_args, operands,
operand_DTypes,
&force_legacy_promotion,
&promoting_pyscalars,
&all_inputs_were_scalars,
/* extract general information: */
order_obj, &order,
casting_obj, &casting,
Expand Down Expand Up @@ -4564,9 +4585,22 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
Py_DECREF(operands[i]);
}
}

/*
* all_inputs_were_scalars is used to decide if 0-D results should be
* unpacked to a scalar. But, `np.matmul(vector, vector)` should do this.
* So for now we assume that if gufuncs return a 0-D result, we should
* unpack. (unless keepdims=True, since that behave like a normal ufunc)
* So we pretend inputs were scalars...
*
* TODO: We may need a way to customize that at some point.
*/
if (ufunc->core_enabled && keepdims != 1) {
all_inputs_were_scalars = NPY_TRUE;
}
/* The following steals the references to the outputs: */
PyObject *result = replace_with_wrapped_result_and_return(ufunc,
full_args, subok, operands+nin, return_scalar);
full_args, subok, operands+nin, all_inputs_were_scalars && return_scalar);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);

Expand Down
3 changes: 3 additions & 0 deletions numpy/_core/tests/test_array_coercion.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,9 @@ def __init__(self, a):
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
# Hard-coded list of scalar instances.
# Floats:
if type(np.array(1)[()]) is np.ndarray:
return # whooops doesn't work at all

yield param(np.sqrt(np.float16(5)), id="float16")
yield param(np.sqrt(np.float32(5)), id="float32")
yield param(np.sqrt(np.float64(5)), id="float64")
Expand Down
2 changes: 1 addition & 1 deletion numpy/_core/tests/test_custom_dtypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ def test_possible_and_impossible_reduce(self):
a = self._get_array(2.)
# Addition reduction works (as of writing requires to pass initial
# because setting a scaled-float from the default `0` fails).
res = np.add.reduce(a, initial=0.)
res = np.add.reduce(a, initial=0., axis=None)
assert res == a.astype(np.float64).sum()

# But each multiplication changes the factor, so a reduction is not
Expand Down
Loading
Loading