diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 3c1c2f98f6b2..bba68b578665 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -851,7 +851,7 @@ def recurser(index, hanging_indent, curr_width): axes_left = a.ndim - axis if axes_left == 0: - return format_function(a[index]) + return format_function(a[index + (...,)].to_scalar()) # when recursing, add a space to align with the [ added, and reduce the # length of the line by 1 @@ -1709,7 +1709,7 @@ def _array_str_implementation( # obtain a scalar and call str on it, avoiding problems for subclasses # for which indexing with () returns a 0d instead of a scalar by using # ndarray's getindex. Also guard against recursive 0d object arrays. - return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + return _guarded_repr_or_str(np.ndarray.to_scalar(a)) return array2string(a, max_line_width, precision, suppress_small, ' ', "") diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 2dc6d1e7fad2..eef830a990db 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -17,17 +17,15 @@ def _fr0(a): """fix rank-0 --> rank-1""" if a.ndim == 0: - a = a.copy() - a.shape = (1,) + return a.reshape(1) return a def _fr1(a): """fix rank > 0 --> rank-0""" if a.size == 1: - a = a.copy() - a.shape = () - return a + return a.reshape(()) + return a[()] class MachArLike: diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 7adeaeddda54..c16bfac91da4 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -19,6 +19,7 @@ ndarray, nditer, nested_iters, promote_types, putmask, result_type, shares_memory, vdot, where, zeros, normalize_axis_index, vecdot ) +from ._multiarray_umath import _array_converter from . import overrides from . import umath @@ -2426,9 +2427,8 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ # Turn all but python scalars into arrays. 
- x, y, atol, rtol = ( - a if isinstance(a, (int, float, complex)) else asanyarray(a) - for a in (a, b, atol, rtol)) + conv = _array_converter(a, b, atol, rtol) + x, y, atol, rtol = conv.as_arrays(pyscalars="preserve") # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). # This will cause casting of x later. Also, make sure to allow subclasses @@ -2450,7 +2450,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): if equal_nan: result |= isnan(x) & isnan(y) - return result[()] # Flatten 0d arrays to scalars + return conv.wrap(result) def _array_equal_dispatcher(a1, a2, equal_nan=None): diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 09e46bd4d3e7..e8bb20b024b8 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -14,6 +14,7 @@ #include "arraywrap.h" #include "npy_static_data.h" +#include "multiarraymodule.h" /* * Find the array wrap or array prepare method that applies to the inputs. 
@@ -139,6 +140,8 @@ npy_apply_wrap( PyArrayObject *arr = NULL; PyObject *err_type, *err_value, *traceback; + return_scalar = (return_scalar && !npy_thread_unsafe_state.dislike_scalars); + /* If provided, we prefer the actual out objects wrap: */ if (original_out != NULL && original_out != Py_None) { /* diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index fedeb7d04cd3..d0b9ddfb69ef 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -29,6 +29,7 @@ /* TODO: Only for `NpyIter_GetTransferFlags` until it is public */ #define NPY_ITERATOR_IMPLEMENTATION_CODE #include "nditer_impl.h" +#include "multiarraymodule.h" #include "umathmodule.h" @@ -1302,7 +1303,7 @@ array_item_asarray(PyArrayObject *self, npy_intp i) NPY_NO_EXPORT PyObject * array_item(PyArrayObject *self, Py_ssize_t i) { - if (PyArray_NDIM(self) == 1) { + if (PyArray_NDIM(self) == 1 && !npy_thread_unsafe_state.dislike_scalars) { char *item; npy_index_info index; @@ -1485,7 +1486,7 @@ array_subscript(PyArrayObject *self, PyObject *op) } /* Full integer index */ - else if (index_type == HAS_INTEGER) { + else if (index_type == HAS_INTEGER && !npy_thread_unsafe_state.dislike_scalars) { char *item; if (get_item_pointer(self, &item, indices, index_num) < 0) { goto finish; diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index baa680f3a74a..7233fd0e83d9 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2797,6 +2797,27 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } +static PyObject * +array_to_scalar(PyArrayObject *mp, PyObject *NPY_UNUSED(args)) +{ + /* TODO, just a silly copy of PyArray_Return, as I disabled that! 
*/ + Py_INCREF(mp); + if (!PyArray_Check(mp)) { + return (PyObject *)mp; + } + if (PyArray_NDIM(mp) == 0) { + PyObject *ret; + ret = PyArray_ToScalar(PyArray_DATA(mp), mp); + Py_DECREF(mp); + return ret; + } + else { + return (PyObject *)mp; + } +} + + + NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ @@ -3025,6 +3046,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"to_device", (PyCFunction)array_to_device, METH_VARARGS | METH_KEYWORDS, NULL}, + {"to_scalar", + (PyCFunction)array_to_scalar, + METH_NOARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..3926731614bf 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4770,6 +4770,14 @@ initialize_thread_unsafe_state(void) { npy_thread_unsafe_state.warn_if_no_mem_policy = 0; } + env = getenv("NUMPY_DISLIKE_SCALARS"); + if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { + npy_thread_unsafe_state.dislike_scalars = 1; + } + else { + npy_thread_unsafe_state.dislike_scalars = 0; + } + return 0; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index de234a8495d3..7572fa794f31 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -76,6 +76,7 @@ typedef struct npy_thread_unsafe_state_struct { * if there is no memory policy set */ int warn_if_no_mem_policy; + int dislike_scalars; } npy_thread_unsafe_state_struct; diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index e133b46d008a..765afda9ede8 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -18,6 +18,7 @@ #include "ctors.h" #include "descriptor.h" #include "dtypemeta.h" +#include "multiarraymodule.h" #include "scalartypes.h" #include "common.h" @@ -631,7 
+632,9 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) NPY_NO_EXPORT PyObject * PyArray_Return(PyArrayObject *mp) { - + if (npy_thread_unsafe_state.dislike_scalars) { + return (PyObject *)mp; + } if (mp == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 1f683851f585..50d25234aa96 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -337,7 +337,8 @@ gentype_@name@@suff@(PyObject *m1, PyObject *m2) res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.@ufunc@); } Py_DECREF(other_op); - return res; + /* But convert back to scalar (TODO: It would be nice to signal this) */ + return PyArray_Return((PyArrayObject *)res); } else { assert(other_op == NULL); @@ -452,7 +453,8 @@ gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.power); } Py_DECREF(other_op); - return res; + /* But convert back to scalar (TODO: It would be nice to signal this) */ + return PyArray_Return(res); } else { assert(other_op == NULL); @@ -497,7 +499,7 @@ gentype_@name@(PyObject *m1) } ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr); Py_DECREF(arr); - return ret; + return PyArray_Return((PyArrayObject *)ret); } /**end repeat**/ @@ -1617,7 +1619,8 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) PyObject *res = PyObject_RichCompare(self_op, other_op, cmp_op); Py_DECREF(self_op); Py_DECREF(other_op); - return res; + /* assume if other was an array-like, we deferred above. 
*/ + return PyArray_Return(res); } else if (self_op != NULL) { /* Try again, since other is an object scalar and this one mutated */ @@ -2588,6 +2591,12 @@ integer_is_integer(PyObject *self, PyObject *NPY_UNUSED(args)) { Py_RETURN_TRUE; } +static PyObject * +gentype_to_scalar(PyObject *self, PyObject *NPY_UNUSED(args)) { + Py_INCREF(self); + return self; +} + /* * need to fill in doc-strings for these methods on import -- copy from * array docstrings @@ -2786,6 +2795,9 @@ static PyMethodDef gentype_methods[] = { {"to_device", (PyCFunction)array_to_device, METH_VARARGS | METH_KEYWORDS, NULL}, + {"to_scalar", + (PyCFunction)gentype_to_scalar, + METH_NOARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4cdde8d3d77d..8071655c4af7 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -613,6 +613,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, PyArray_DTypeMeta *out_op_DTypes[], npy_bool *force_legacy_promotion, npy_bool *promoting_pyscalars, + npy_bool *all_inputs_were_scalars, PyObject *order_obj, NPY_ORDER *out_order, PyObject *casting_obj, NPY_CASTING *out_casting, PyObject *subok_obj, npy_bool *out_subok, @@ -626,19 +627,31 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, /* Convert and fill in input arguments */ npy_bool all_scalar = NPY_TRUE; - npy_bool any_scalar = NPY_FALSE; + npy_bool any_scalar = NPY_FALSE; /* any 0-D object, legacy scalar */ *force_legacy_promotion = NPY_FALSE; *promoting_pyscalars = NPY_FALSE; + *all_inputs_were_scalars = NPY_TRUE; for (int i = 0; i < nin; i++) { obj = PyTuple_GET_ITEM(full_args.in, i); + int was_pyscalar = NPY_FALSE; if (PyArray_Check(obj)) { out_op[i] = (PyArrayObject *)obj; Py_INCREF(out_op[i]); } else { - /* Convert the input to an array and check for special cases */ - out_op[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, NULL); + /* + * Convert the input to an array. 
Note that we figure out if the + * input was a scalar here. This is useful below, because we need + * to check for NumPy 2.0 style promotion and pass the information + * further through. + * Also we want to know if the object was anything NumPy considers + * a true scalar (not 0-D array). This is used to decide if a + * 0-D result should be unpacked. + */ + out_op[i] = (PyArrayObject *)PyArray_FromAny_int( + obj, NULL, NULL, 0, 0, 0, NULL, + &was_pyscalar); if (out_op[i] == NULL) { goto fail; } @@ -646,6 +659,10 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, out_op_DTypes[i] = NPY_DTYPE(PyArray_DESCR(out_op[i])); Py_INCREF(out_op_DTypes[i]); + if (!was_pyscalar) { + *all_inputs_were_scalars = NPY_FALSE; + } + if (nin == 1) { /* * TODO: If nin == 1 we don't promote! This has exactly the effect @@ -677,7 +694,8 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, * `np.can_cast(operand, dtype)`. The flag is local to this use, but * necessary to propagate the information to the legacy type resolution. 
*/ - if (npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) { + if (was_pyscalar && + npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) { if (PyArray_FLAGS(out_op[i]) & NPY_ARRAY_WAS_PYTHON_INT && PyArray_TYPE(out_op[i]) != NPY_LONG) { /* @@ -3746,7 +3764,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, /* TODO: Data is mutated, so force_wrap like a normal ufunc call does */ PyObject *wrapped_result = npy_apply_wrap( (PyObject *)ret, out_obj, wrap, wrap_type, NULL, - PyArray_NDIM(ret) == 0 && return_scalar, NPY_FALSE); + axes_obj == Py_None && return_scalar, NPY_FALSE); + Py_DECREF(ret); Py_DECREF(wrap); Py_DECREF(wrap_type); @@ -4244,7 +4263,7 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, PyObject *ret_i = npy_apply_wrap( (PyObject *)result_arrays[out_i], original_out, wrap, wrap_type, /* Always try to return a scalar right now: */ - &context, PyArray_NDIM(result_arrays[out_i]) == 0 && return_scalar, NPY_TRUE); + &context, return_scalar, NPY_TRUE); Py_CLEAR(result_arrays[out_i]); if (ret_i == NULL) { goto fail; @@ -4496,12 +4515,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int keepdims = -1; /* We need to know if it was passed */ npy_bool force_legacy_promotion; npy_bool promoting_pyscalars; + npy_bool all_inputs_were_scalars; if (convert_ufunc_arguments(ufunc, /* extract operand related information: */ full_args, operands, operand_DTypes, &force_legacy_promotion, &promoting_pyscalars, + &all_inputs_were_scalars, /* extract general information: */ order_obj, &order, casting_obj, &casting, @@ -4564,9 +4585,22 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_DECREF(operands[i]); } } + + /* + * all_inputs_were_scalars is used to decide if 0-D results should be + * unpacked to a scalar. But, `np.matmul(vector, vector)` should do this. + * So for now we assume that if gufuncs return a 0-D result, we should + * unpack. 
(unless keepdims=True, since that behaves like a normal ufunc) + * So we pretend inputs were scalars... + * + * TODO: We may need a way to customize that at some point. + */ + if (ufunc->core_enabled && keepdims != 1) { + all_inputs_were_scalars = NPY_TRUE; + } /* The following steals the references to the outputs: */ PyObject *result = replace_with_wrapped_result_and_return(ufunc, - full_args, subok, operands+nin, return_scalar); + full_args, subok, operands+nin, all_inputs_were_scalars && return_scalar); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 9301f3fd92c8..6fa782419541 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -84,6 +84,9 @@ def __init__(self, a): def scalar_instances(times=True, extended_precision=True, user_dtype=True): # Hard-coded list of scalar instances. # Floats: + if type(np.array(1)[()]) is np.ndarray: + return # whooops doesn't work at all + yield param(np.sqrt(np.float16(5)), id="float16") yield param(np.sqrt(np.float32(5)), id="float32") yield param(np.sqrt(np.float64(5)), id="float64") diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index ee3a02b612bf..26db2f9c3f11 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -121,7 +121,7 @@ def test_possible_and_impossible_reduce(self): a = self._get_array(2.) # Addition reduction works (as of writing requires to pass initial # because setting a scaled-float from the default `0` fails). - res = np.add.reduce(a, initial=0.) 
+ res = np.add.reduce(a, initial=0., axis=None) assert res == a.astype(np.float64).sum() # But each multiplication changes the factor, so a reduction is not diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0e92dfc4140b..e3045db8d8dc 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7008,7 +7008,7 @@ def test_result_types(self): # vector vector returns scalars if dt != "O": res = self.matmul(v, v) - assert_(type(res) is np.dtype(dt).type) + assert type(res) is np.dtype(dt).type def test_scalar_output(self): vec1 = np.array([2]) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 9f60b67ba5b1..0f859af69fc4 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1059,7 +1059,7 @@ def test_iter_object_arrays_basic(): assert_equal(sys.getrefcount(obj), rc) i = nditer(a, ['refs_ok'], ['readonly']) - vals = [x_[()] for x_ in i] + vals = [x_.to_scalar() for x_ in i] assert_equal(np.array(vals, dtype='O'), a) vals, i, x = [None] * 3 if HAS_REFCOUNT: @@ -1068,7 +1068,7 @@ def test_iter_object_arrays_basic(): i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readonly'], order='C') assert_(i.iterationneedsapi) - vals = [x_[()] for x_ in i] + vals = [x_.to_scalar() for x_ in i] assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) vals, i, x = [None] * 3 if HAS_REFCOUNT: @@ -1120,7 +1120,7 @@ def test_iter_object_arrays_conversions(): i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') with i: - ob = i[0][()] + ob = i[0, ...].to_scalar() if HAS_REFCOUNT: rc = sys.getrefcount(ob) for x in i: @@ -1356,42 +1356,42 @@ def test_iter_copy(): # Simple iterator i = nditer(a) j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) i.iterindex = 3 j = i.copy() - assert_equal([x[()] for x 
in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) # Buffered iterator i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) i.iterindex = 3 j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) i.iterrange = (3, 9) j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) i.iterrange = (2, 18) next(i) next(i) j = i.copy() - assert_equal([x[()] for x in i], [x[()] for x in j]) + assert_equal([x.to_scalar() for x in i], [x.to_scalar() for x in j]) # Casting iterator with nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='f8', buffersize=5) as i: j = i.copy() - assert_equal([x[()] for x in j], a.ravel(order='F')) + assert_equal([x.to_scalar() for x in j], a.ravel(order='F')) a = arange(24, dtype=' simple doesn't work sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 26844fabd437..c62a1c155a78 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -656,7 +656,7 @@ def test_true_divide(self): tgt = float(x) / float(y) res = np.true_divide(x, y) - rtol = max(np.finfo(res).resolution, 1e-15) + rtol = max(np.finfo(res.dtype).resolution, 1e-15) assert_allclose(res, tgt, rtol=rtol) if tc in 'bhilqBHILQ': @@ -1664,20 +1664,25 @@ def test_scalar_reduction(self): # Check scalar behaviour for ufuncs without an identity assert_equal(np.power.reduce(3), 3) - # Make sure that scalars are coming out from this operation - assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) - assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) 
- assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + # If axis=None, scalars should be produced + assert type(np.prod(np.float32(2.5), axis=None)) is np.float32 + assert type(np.sum(np.float32(2.5), axis=None)) is np.float32 + assert type(np.max(np.float32(2.5), axis=None)) is np.float32 + assert type(np.min(np.float32(2.5), axis=None)) is np.float32 + # TODO: In a sense should return an array, but this is axis=0 is + # arguably invalid. + assert type(np.prod(np.float32(2.5), axis=0)) is np.float32 - # check if scalars/0-d arrays get cast - assert_(type(np.any(0, axis=0)) is np.bool) + # axis=None indicates a scalar return + assert type(np.any(0, axis=None)) is np.bool_ + # Not a NumPy scalar (no odd __array_wrap__) and not axis=None: + assert type(np.any(0, axis=0)) is np.ndarray # assert that 0-d arrays get wrapped class MyArray(np.ndarray): pass a = np.array(1).view(MyArray) - assert_(type(np.any(a)) is MyArray) + assert type(np.any(a)) is MyArray def test_casting_out_param(self): # Test that it's possible to do casts on output @@ -1801,7 +1806,7 @@ def test_reduce_identity_depends_on_loop(self): necessarily the output (only relevant for object arrays). """ # For an object loop, the default value 0 with type int is used: - assert type(np.add.reduce([], dtype=object)) is int + assert type(np.add.reduce([], dtype=object, axis=None)) is int out = np.array(None, dtype=object) # When the loop is float64 but `out` is object this does not happen, # the result is float64 cast to object (which gives Python `float`). 
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 22ad1b8ac302..c8e140445a3d 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -2798,9 +2798,9 @@ def test_reduction(self): # Non-empty object arrays do not use the identity for f in binary_funcs: - msg = f"dt: '{f}'" btype = np.array([True], dtype=object) - assert_(type(f.reduce(btype)) is bool, msg) + assert f.reduce(btype).dtype == btype.dtype + assert type(f.reduce(btype, axis=None)) is bool @pytest.mark.parametrize("input_dtype_obj, bitsize", zip(bitwise_types, bitwise_bits)) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index a284a9204112..1f27c3b6f91e 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -909,8 +909,8 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Ravel both arrays, behavior for the first array could be different - ar1 = np.asarray(ar1).ravel() - ar2 = np.asarray(ar2).ravel() + ar1 = ar1.ravel() + ar2 = ar2.ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: @@ -1175,10 +1175,18 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, array([[False, True], [ True, False]]) """ - element = np.asarray(element) - return _in1d(element, test_elements, assume_unique=assume_unique, - invert=invert, kind=kind).reshape(element.shape) + conv = _array_converter(element, test_elements) + element, test_elements = conv.as_arrays(subok=False, pyscalars="convert") + dt = conv.result_type() + # TODO(seberg): I had added this cast once, but it seems like that was a bad idea? 
+ # test_elements = test_elements.astype(dt, copy=False) + # element = element.astype(dt, copy=False) + + result = _in1d(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + return conv.wrap(result, to_scalar=conv.scalar_input[0]) def _union1d_dispatcher(ar1, ar2): return (ar1, ar2) @@ -1256,6 +1264,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): """ if assume_unique: ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2) else: ar1 = unique(ar1) ar2 = unique(ar2) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e44b27a68adb..edf31aee74f6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -747,7 +747,8 @@ def piecewise(x, condlist, funclist, *args, **kw): n2 = len(funclist) # undocumented: single condition is promoted to a list of one condition - if isscalar(condlist) or ( + # TODO(seberg): Made this even worse as a work-around, this needs improvement! + if isscalar(condlist) or (np.ndim(condlist) == 0) or ( not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): condlist = [condlist] @@ -4034,15 +4035,17 @@ def _median(a, axis=None, out=None, overwrite_input=False): # make 0-D arrays work return part.item() if axis is None: - axis = 0 + paxis = 0 + else: + paxis = axis indexer = [slice(None)] * part.ndim - index = part.shape[axis] // 2 - if part.shape[axis] % 2 == 1: + index = part.shape[paxis] // 2 + if part.shape[paxis] % 2 == 1: # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index + 1) + indexer[paxis] = slice(index, index + 1) else: - indexer[axis] = slice(index - 1, index + 1) + indexer[paxis] = slice(index - 1, index + 1) indexer = tuple(indexer) # Use mean in both odd and even case to coerce data type, @@ -4050,7 +4053,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): rout = mean(part[indexer], axis=axis, out=out) if supports_nans and sz > 0: # If nans are possible, warn and 
replace by nans like mean would. - rout = np.lib._utils_impl._median_nancheck(part, rout, axis) + rout = np.lib._utils_impl._median_nancheck(part, rout, paxis) return rout @@ -4245,13 +4248,22 @@ def percentile(a, method = _check_interpolation_as_method( method, interpolation, "percentile") - a = np.asanyarray(a) + conv = _array_converter(a, q) + a, q_arr = conv.as_arrays(pyscalars="convert") + if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") # Use dtype of array if possible (e.g., if q is a python int or float) # by making the divisor have the dtype of the data array. q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + + # TODO(seberg): from rebase, maybe the following was an improvement? + # if isinstance(q, (int, float)) and a.dtype.kind == "f": + # q = np.true_divide(q, np.array(100, dtype=a.dtype)) + # else: + # q = q_arr / 100 + if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4266,8 +4278,10 @@ def percentile(a, if np.any(weights < 0): raise ValueError("Weights must be non-negative.") - return _quantile_unchecked( + result = _quantile_unchecked( a, q, axis, out, overwrite_input, method, keepdims, weights) + # If no broadcasting happened, and q was a scalar, return a scalar: + return conv.wrap(result, to_scalar=conv.scalar_input[1]) def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -4506,15 +4520,16 @@ def quantile(a, method = _check_interpolation_as_method( method, interpolation, "quantile") - a = np.asanyarray(a) + conv = _array_converter(a, q) + a, q_arr = conv.as_arrays(pyscalars="convert") if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") # Use dtype of array if possible (e.g., if q is a python int or float). 
if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) + q = np.asarray(q, dtype=a.dtype) else: - q = np.asanyarray(q) + q = q_arr if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -4530,8 +4545,10 @@ def quantile(a, if np.any(weights < 0): raise ValueError("Weights must be non-negative.") - return _quantile_unchecked( + res = _quantile_unchecked( a, q, axis, out, overwrite_input, method, keepdims, weights) + # If no broadcasting happened, and q was a scalar, return a scalar: + return conv.wrap(res, to_scalar=conv.scalar_input[1]) def _quantile_unchecked(a, @@ -4636,23 +4653,21 @@ def _lerp(a, b, t, out=None): """ Compute the linear interpolation weighted by gamma on each point of two same shape array. + Function is meant to be used with arrays not scalars. - a : array_like + a : array Left bound. - b : array_like + b : array Right bound. - t : array_like + t : array The interpolation weight. out : array_like Output array. """ diff_b_a = subtract(b, a) - # asanyarray is a stop-gap until gh-13105 - lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) - subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, - casting='unsafe', dtype=type(lerp_interpolation.dtype)) - if lerp_interpolation.ndim == 0 and out is None: - lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + lerp_interpolation = add(a, diff_b_a * t, out=out) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5) + return lerp_interpolation @@ -4845,9 +4860,9 @@ def _quantile( slices_having_nans = np.isnan(arr[-1, ...]) else: slices_having_nans = None - # --- Get values from indexes - previous = arr[previous_indexes] - next = arr[next_indexes] + # --- Get values from indexes (ensure array result) + previous = arr[previous_indexes, ...] + next = arr[next_indexes, ...] 
# --- Linear interpolation gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 563a8574cd13..39877fa83387 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -27,6 +27,7 @@ from numpy.lib import _function_base_impl as fnb from numpy.lib._function_base_impl import _weights_are_valid from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter array_function_dispatch = functools.partial( @@ -1385,11 +1386,22 @@ def nanpercentile( method = fnb._check_interpolation_as_method( method, interpolation, "nanpercentile") - a = np.asanyarray(a) + conv = _array_converter(a, q) + a, q_arr = conv.as_arrays(pyscalars="convert") + if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + + # TODO(seberg): Again this change might have made sense but should be meaningless for now + # # Use dtype of array if possible (e.g., if q is a python int or float) + # # by making the divisor have the dtype of the data array. 
+ # if isinstance(q, (int, float)) and a.dtype.kind == "f": + # q = np.true_divide(q, np.array(100, dtype=a.dtype)) + # else: + # q = q_arr / 100 + if not fnb._quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -1404,8 +1416,10 @@ def nanpercentile( if np.any(weights < 0): raise ValueError("Weights must be non-negative.") - return _nanquantile_unchecked( + result = _nanquantile_unchecked( a, q, axis, out, overwrite_input, method, keepdims, weights) + # If no broadcasting happened, and q was a scalar, return a scalar: + return conv.wrap(result, to_scalar=conv.scalar_input[1]) def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -1572,7 +1586,9 @@ def nanquantile( method = fnb._check_interpolation_as_method( method, interpolation, "nanquantile") - a = np.asanyarray(a) + conv = _array_converter(a, q) + a, q_arr = conv.as_arrays(pyscalars="convert") + if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") @@ -1580,7 +1596,7 @@ def nanquantile( if isinstance(q, (int, float)) and a.dtype.kind == "f": q = np.asanyarray(q, dtype=a.dtype) else: - q = np.asanyarray(q) + q = q_arr if not fnb._quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -1596,8 +1612,10 @@ def nanquantile( if np.any(weights < 0): raise ValueError("Weights must be non-negative.") - return _nanquantile_unchecked( + result = _nanquantile_unchecked( a, q, axis, out, overwrite_input, method, keepdims, weights) + # If no broadcasting happened, and q was a scalar, return a scalar: + return conv.wrap(result, to_scalar=conv.scalar_input[1]) def _nanquantile_unchecked( diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 904ae10fb19c..4cd5e709f9df 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -26,6 +26,7 @@ from numpy._core.multiarray import dragon4_positional, dragon4_scientific from numpy.exceptions import RankWarning +from 
._polybase import ABCPolyBase __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', @@ -352,8 +353,10 @@ def mapdomain(x, old, new): array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary """ + # TODO(seberg): is there a better way now with new scalar handling?! if type(x) not in (int, float, complex) and not isinstance(x, np.generic): x = np.asanyarray(x) + off, scl = mapparms(old, new) return off + scl * x diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 0cfd50cb2124..ce86e5589e2d 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1162,7 +1162,8 @@ def compare(x, y): if not issubdtype(z.dtype, number): z = z.astype(np.float64) # handle object arrays - return z < 1.5 * 10.0**(-decimal) + # the float64 ensures at least double precision for the comparison. + return z < np.float64(1.5) * 10.0**(-decimal) assert_array_compare(compare, actual, desired, err_msg=err_msg, verbose=verbose,