Lines Matching defs:strides
48 /* Default: NumPy style (strides), read-only, no var-export, C-style layout */
156 base->strides = NULL;
171 PyMem_XFree(base->strides);
229 PyMem_XFree(base->strides);
551 dest->strides[dest->ndim-1] != dest->itemsize ||
552 src->strides[src->ndim-1] != src->itemsize) {
561 dest->buf, dest->strides, dest->suboffsets,
562 src->buf, src->strides, src->suboffsets,
611 const Py_ssize_t *shape, const Py_ssize_t *strides,
619 assert(strides != NULL);
639 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
643 shape+1, strides+1, suboffsets ? suboffsets+1 : NULL,
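
The copy_rec() fragment above (source lines 611-643) shows the standard recursive walk over a strided array: the pointer advances by strides[0] within the current dimension, and shape+1/strides+1 narrow the view to the remaining dimensions. A minimal standalone sketch of that pattern, with hypothetical names and suboffsets left out:

    #include <stddef.h>
    #include <string.h>

    /* Recursively copy an ndim-dimensional strided array (ndim >= 1,
       no suboffsets). ptrdiff_t stands in for Py_ssize_t. */
    static void
    copy_rec_sketch(const ptrdiff_t *shape, const ptrdiff_t *dstrides,
                    const ptrdiff_t *sstrides, size_t ndim, size_t itemsize,
                    char *dptr, const char *sptr)
    {
        ptrdiff_t i;
        if (ndim == 1) {
            for (i = 0; i < shape[0]; i++)
                memcpy(dptr + i*dstrides[0], sptr + i*sstrides[0], itemsize);
            return;
        }
        for (i = 0; i < shape[0]; dptr += dstrides[0], sptr += sstrides[0], i++)
            copy_rec_sketch(shape+1, dstrides+1, sstrides+1,
                            ndim-1, itemsize, dptr, sptr);
    }

The contiguity test at source lines 551-552 exists because a last-dimension stride equal to itemsize lets the innermost level be copied with one memcpy per row instead of one per element.
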
664 Py_ssize_t *strides = base->strides;
681 assert(base->strides == NULL);
685 strides = simple_strides;
686 strides[0] = base->itemsize;
688 else if (strides == NULL) {
690 strides = strides_from_shape(nd->head, 0);
691 if (strides == NULL)
719 shape, strides, base->suboffsets,
727 if (strides != base->strides && strides != simple_strides)
728 PyMem_XFree(strides);
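
Throughout the module, base->strides == NULL means "contiguous; derive the strides from the shape", and strides_from_shape() does the derivation (here and at source lines 1146 and 2180). A plausible standalone version of that computation, with hypothetical names; the real helper takes the ndbuf and its flags, sketched here as a fortran argument:

    #include <stddef.h>
    #include <stdlib.h>

    /* Compute contiguous strides from a shape (ndim >= 1). If fortran is
       nonzero, use column-major order, otherwise row-major (C) order. */
    static ptrdiff_t *
    strides_from_shape_sketch(const ptrdiff_t *shape, size_t ndim,
                              size_t itemsize, int fortran)
    {
        ptrdiff_t *strides = malloc(ndim * sizeof *strides);
        size_t i;
        if (strides == NULL)
            return NULL;
        if (fortran) {
            strides[0] = (ptrdiff_t)itemsize;
            for (i = 1; i < ndim; i++)
                strides[i] = strides[i-1] * shape[i-1];
        }
        else {
            strides[ndim-1] = (ptrdiff_t)itemsize;
            for (i = ndim-1; i > 0; i--)
                strides[i-1] = strides[i] * shape[i];
        }
        return strides;
    }

For shape {2, 2, 3} and itemsize 1 the C-order branch yields {6, 3, 1}, matching the layout of the example at source line 981.
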
776 | base.strides | NULL | NULL | OK |
865 is_shape ? "shape" : "strides");
919 i = indices[0] * strides[0] + indices[1] * strides[1] + ...
921 imin is reached when all indices[n] combined with positive strides are 0
922 and all indices combined with negative strides are shape[n]-1, which is
925 imax is reached when all indices[n] combined with negative strides are 0
926 and all indices combined with positive strides are shape[n]-1.
930 const Py_ssize_t *shape, const Py_ssize_t *strides,
942 if (strides[n] % itemsize) {
944 "strides must be a multiple of itemsize");
954 if (strides[n] <= 0)
955 imin += (shape[n]-1) * strides[n];
957 imax += (shape[n]-1) * strides[n];
967 "invalid combination of buffer, shape and strides");
981 strides = {6, 3, 1};
989 strides = {sizeof(char *), 3, 1};
1006 Input (with offset and negative strides):
1009 strides = {-6, 3, -1};
1017 strides = {-sizeof(char *), 3, -1};
1068 if (base->strides[n] <= 0) {
1069 Py_ssize_t x = (base->shape[n]-1) * base->strides[n];
1077 step = base->strides[0] < 0 ? -base->strides[0] : base->strides[0];
1092 /* Adjust strides for the first (zeroth) dimension. */
1093 if (base->strides[0] >= 0) {
1094 base->strides[0] = sizeof(char *);
1098 base->strides[0] = -(Py_ssize_t)sizeof(char *);
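
init_suboffsets() rewrites a NumPy-style array into the PIL-style form shown in the examples above: dimension 0 is strided through a table of shape[0] row pointers, so strides[0] becomes sizeof(char *), with the sign of the original stride preserved (source lines 1093-1098). A simplified sketch of building that table, hypothetical names, with the suboffset bookkeeping for interior negative strides omitted:

    #include <stddef.h>
    #include <stdlib.h>

    /* Build the row-pointer table that replaces direct striding in
       dimension 0. ptrs[i] is the start of row i in the old buffer;
       the caller then sets strides[0] = ±sizeof(char *). */
    static char **
    make_row_pointers_sketch(char *buf, ptrdiff_t shape0, ptrdiff_t stride0)
    {
        char **ptrs = malloc(shape0 * sizeof *ptrs);
        ptrdiff_t i;
        if (ptrs == NULL)
            return NULL;
        for (i = 0; i < shape0; i++)
            ptrs[i] = buf + i * stride0;
        return ptrs;
    }

An element lookup now dereferences ptrs[i] first and applies the remaining strides afterwards, which is why {6, 3, 1} turns into {sizeof(char *), 3, 1} while the inner strides stay unchanged.
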
1120 init_structure(ndbuf_t *ndbuf, PyObject *shape, PyObject *strides,
1141 /* strides */
1142 if (strides) {
1143 base->strides = seq_as_ssize_array(strides, ndim, 0);
1146 base->strides = strides_from_shape(ndbuf, ndbuf->flags);
1148 if (base->strides == NULL)
1151 base->shape, base->strides, ndim) < 0)
1169  /* modifies base->buf, base->strides and base->suboffsets */
1177 init_ndbuf(PyObject *items, PyObject *shape, PyObject *strides,
1194 /* len(strides) = len(shape) */
1195 if (strides) {
1196 CHECK_LIST_OR_TUPLE(strides)
1197 if (PySequence_Fast_GET_SIZE(strides) == 0)
1198 strides = NULL;
1201 "ND_FORTRAN cannot be used together with strides");
1204 else if (PySequence_Fast_GET_SIZE(strides) != ndim) {
1206 "len(shape) != len(strides)");
1250 if (init_structure(ndbuf, shape, strides, ndim) < 0)
1265 PyObject *shape, PyObject *strides,
1270 ndbuf = init_ndbuf(items, shape, strides, offset, format, flags);
1284 "obj", "shape", "strides", "offset", "format", "flags", "getbuf", NULL
1288 PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
1297 &v, &shape, &strides, &offset, &format, &flags, &getbuf))
1302 if (strides || offset || format != simple_format ||
1341 return ndarray_push_base(nd, v, shape, strides, offset, format, flags);
1350 "items", "shape", "strides", "offset", "format", "flags", NULL
1354 PyObject *strides = NULL; /* number of bytes to the next elt in each dim */
1360 &items, &shape, &strides, &offset, &format, &flags))
1380 if (ndarray_push_base(nd, items, shape, strides,
1438 (REQ_STRIDES(flags) && base->strides == NULL))) {
1443 "re-exporter does not provide format, shape or strides");
1495 view->strides = NULL;
1577 if (base->strides == NULL)
1580 ptr += base->strides[0] * index;
1626 if (subview->strides) subview->strides++;
1644 ptr = (char *)buf + indices[0] * strides[0] +
1646 indices[ndim-1] * strides[ndim-1]
1652 c = start[0] * strides[0] + ... + start[ndim-1] * strides[ndim-1]
1660 Adjust strides:
1663 by adding strides[n] to the pointer. In the sliced array, elements
1666 strides[n] * step[n]
1692 c = start[1] * strides[1] + ... + start[ndim-1] * strides[ndim-1]
1697 Adjust shape and strides:
1699 Shape and strides are not influenced by the dereferencing step, so
1708 suboffsets[0] = start[1] * strides[1]
1709 suboffsets[1] = start[2] * strides[2] + ...
1724 base->buf = (char *)base->buf + base->strides[dim] * start;
1732 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
1735 base->strides[dim] = base->strides[dim] * step;
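
init_slice() applies exactly the adjustments derived in the comment above: the origin (base->buf for NumPy-style arrays, the suboffsets for PIL-style ones) moves by strides[dim] * start, and strides[dim] is scaled by step. A standalone sketch for one dimension of a NumPy-style array, hypothetical names, assuming start/stop/step are already normalized the way PySlice_AdjustIndices() leaves them:

    #include <stddef.h>

    typedef struct {
        char *buf;
        ptrdiff_t *shape;
        ptrdiff_t *strides;
    } view_sketch;

    /* Apply v[start:stop:step] in dimension 'dim', in place. */
    static void
    slice_dim_sketch(view_sketch *v, size_t dim,
                     ptrdiff_t start, ptrdiff_t stop, ptrdiff_t step)
    {
        /* number of selected elements, for normalized indices */
        ptrdiff_t slicelen =
            step > 0 ? (stop - start + step - 1) / step
                     : (stop - start + step + 1) / step;

        v->buf += v->strides[dim] * start;  /* origin -> first element */
        v->shape[dim] = slicelen < 0 ? 0 : slicelen;
        v->strides[dim] *= step;            /* one step skips 'step' elements */
    }

Scaling the stride rather than moving data is what makes slicing O(1): the sliced view shares the exporter's memory and only its geometry changes.
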
1743 Py_ssize_t *shape = NULL, *strides = NULL, *suboffsets = NULL;
1747 strides = PyMem_Malloc(base->ndim * (sizeof *strides));
1748 if (shape == NULL || strides == NULL)
1760 strides[i] = base->strides[i];
1766 base->strides = strides;
1774 PyMem_XFree(strides);
1816 /* copy shape, strides and suboffsets */
2075 return ssize_array_as_tuple(base->strides, base->ndim);
2141 { "strides", (getter)ndarray_get_strides, NULL, NULL, NULL},
2168 assert(src->strides != NULL);
2180 dest.strides = strides_from_shape(ndbuf, 0);
2181 if (dest.strides == NULL)
2189 PyMem_XFree(dest.strides);
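
Source lines 2168-2189 show the flattening pattern used when a contiguous copy is needed: derive fresh C-order strides for the destination with strides_from_shape(), run the copy, then release the temporary strides with PyMem_XFree(). A sketch that chains the helpers sketched earlier, hypothetical names, ndim >= 1 assumed:

    #include <stddef.h>
    #include <stdlib.h>

    /* Copy a strided source into a newly malloc'd C-contiguous buffer.
       Uses copy_rec_sketch() and strides_from_shape_sketch() from above. */
    static char *
    flatten_sketch(const char *src, const ptrdiff_t *shape,
                   const ptrdiff_t *sstrides, size_t ndim, size_t itemsize)
    {
        size_t i, size = itemsize;
        for (i = 0; i < ndim; i++)
            size *= (size_t)shape[i];

        char *dest = malloc(size ? size : 1);
        ptrdiff_t *dstrides = strides_from_shape_sketch(shape, ndim,
                                                        itemsize, 0);
        if (dest == NULL || dstrides == NULL) {
            free(dest);
            free(dstrides);
            return NULL;
        }
        if (size > 0)
            copy_rec_sketch(shape, dstrides, sstrides, ndim, itemsize,
                            dest, src);
        free(dstrides);    /* temporary, like PyMem_XFree(dest.strides) */
        return dest;
    }
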
2207 if (base->strides == NULL) {
2209 "cannot add suboffsets to array without strides");
2239 static Py_ssize_t strides[ND_MAX_NDIM];
2290 if (view->strides) {
2291 memcpy(strides, view->strides, view->ndim * sizeof(Py_ssize_t));
2292 info.strides = strides;
2490 /* strides can differ if the dimension is less than 2 */
2539 !!v1.strides != !!v2.strides ||
2545 (v1.strides && !arraycmp(v1.strides, v2.strides, v1.shape, v1.ndim)) ||
2707 static_strides, /* strides */