1 /*
2 * Memoryview object implementation
3 * --------------------------------
4 *
5 * This implementation is a complete rewrite contributed by Stefan Krah in
6 * Python 3.3. Substantial credit goes to Antoine Pitrou (who had already
7 * fortified and rewritten the previous implementation) and Nick Coghlan
8 * (who came up with the idea of the ManagedBuffer) for analyzing the complex
9 * ownership rules.
10 *
11 */
12
13 #include "Python.h"
14 #include "pycore_abstract.h" // _PyIndex_Check()
15 #include "pycore_object.h" // _PyObject_GC_UNTRACK()
16 #include "pycore_strhex.h" // _Py_strhex_with_sep()
17 #include <stddef.h> // offsetof()
18
19 /*[clinic input]
20 class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
21 [clinic start generated code]*/
22 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/
23
24 #include "clinic/memoryobject.c.h"
25
26 /****************************************************************************/
27 /* ManagedBuffer Object */
28 /****************************************************************************/
29
30 /*
31 ManagedBuffer Object:
32 ---------------------
33
34 The purpose of this object is to facilitate the handling of chained
35 memoryviews that have the same underlying exporting object. PEP-3118
36 allows the underlying object to change while a view is exported. This
37 could lead to unexpected results when constructing a new memoryview
38 from an existing memoryview.
39
40 Rather than repeatedly redirecting buffer requests to the original base
41 object, all chained memoryviews use a single buffer snapshot. This
42 snapshot is generated by the constructor _PyManagedBuffer_FromObject().
43
44 Ownership rules:
45 ----------------
46
47 The master buffer inside a managed buffer is filled in by the original
48 base object. shape, strides, suboffsets and format are read-only for
49 all consumers.
50
51 A memoryview's buffer is a private copy of the exporter's buffer. shape,
52 strides and suboffsets belong to the memoryview and are thus writable.
53
54 If a memoryview itself exports several buffers via memory_getbuf(), all
55 buffer copies share shape, strides and suboffsets. In this case, the
56 arrays are NOT writable.
57
58 Reference count assumptions:
59 ----------------------------
60
61 The 'obj' member of a Py_buffer must either be NULL or refer to the
62 exporting base object. In the Python codebase, all getbufferprocs
63 return a new reference to view.obj (example: bytes_buffer_getbuffer()).
64
65 PyBuffer_Release() decrements view.obj (if non-NULL), so the
66 releasebufferprocs must NOT decrement view.obj.
67 */
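
/* Illustration (a sketch of the intended object graph, not code compiled
   here): constructing a view of a view registers the new memoryview with the
   existing managed buffer instead of asking the base object for a second
   buffer:

       PyObject *mv1 = PyMemoryView_FromObject(base);   <- creates a ManagedBuffer
       PyObject *mv2 = PyMemoryView_FromObject(mv1);    <- reuses mv1's ManagedBuffer

   Both views work from the same master buffer snapshot; 'base' is asked for
   a buffer exactly once. */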
68
69
70 static inline _PyManagedBufferObject *
71 mbuf_alloc(void)
72 {
73 _PyManagedBufferObject *mbuf;
74
75 mbuf = (_PyManagedBufferObject *)
76 PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
77 if (mbuf == NULL)
78 return NULL;
79 mbuf->flags = 0;
80 mbuf->exports = 0;
81 mbuf->master.obj = NULL;
82 _PyObject_GC_TRACK(mbuf);
83
84 return mbuf;
85 }
86
87 static PyObject *
88 _PyManagedBuffer_FromObject(PyObject *base, int flags)
89 {
90 _PyManagedBufferObject *mbuf;
91
92 mbuf = mbuf_alloc();
93 if (mbuf == NULL)
94 return NULL;
95
96 if (PyObject_GetBuffer(base, &mbuf->master, flags) < 0) {
97 mbuf->master.obj = NULL;
98 Py_DECREF(mbuf);
99 return NULL;
100 }
101
102 return (PyObject *)mbuf;
103 }
104
105 static void
106 mbuf_release(_PyManagedBufferObject *self)
107 {
108 if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
109 return;
110
111 /* NOTE: at this point self->exports can still be > 0 if this function
112 is called from mbuf_clear() to break up a reference cycle. */
113 self->flags |= _Py_MANAGED_BUFFER_RELEASED;
114
115 /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
116 _PyObject_GC_UNTRACK(self);
117 PyBuffer_Release(&self->master);
118 }
119
120 static void
121 mbuf_dealloc(_PyManagedBufferObject *self)
122 {
123 assert(self->exports == 0);
124 mbuf_release(self);
125 if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
126 PyMem_Free(self->master.format);
127 PyObject_GC_Del(self);
128 }
129
130 static int
131 mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
132 {
133 Py_VISIT(self->master.obj);
134 return 0;
135 }
136
137 static int
138 mbuf_clear(_PyManagedBufferObject *self)
139 {
140 assert(self->exports >= 0);
141 mbuf_release(self);
142 return 0;
143 }
144
145 PyTypeObject _PyManagedBuffer_Type = {
146 PyVarObject_HEAD_INIT(&PyType_Type, 0)
147 "managedbuffer",
148 sizeof(_PyManagedBufferObject),
149 0,
150 (destructor)mbuf_dealloc, /* tp_dealloc */
151 0, /* tp_vectorcall_offset */
152 0, /* tp_getattr */
153 0, /* tp_setattr */
154 0, /* tp_as_async */
155 0, /* tp_repr */
156 0, /* tp_as_number */
157 0, /* tp_as_sequence */
158 0, /* tp_as_mapping */
159 0, /* tp_hash */
160 0, /* tp_call */
161 0, /* tp_str */
162 PyObject_GenericGetAttr, /* tp_getattro */
163 0, /* tp_setattro */
164 0, /* tp_as_buffer */
165 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
166 0, /* tp_doc */
167 (traverseproc)mbuf_traverse, /* tp_traverse */
168 (inquiry)mbuf_clear /* tp_clear */
169 };
170
171
172 /****************************************************************************/
173 /* MemoryView Object */
174 /****************************************************************************/
175
176 /* In the process of breaking reference cycles mbuf_release() can be
177 called before memory_release(). */
178 #define BASE_INACCESSIBLE(mv) \
179 (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
180 ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
181
182 #define CHECK_RELEASED(mv) \
183 if (BASE_INACCESSIBLE(mv)) { \
184 PyErr_SetString(PyExc_ValueError, \
185 "operation forbidden on released memoryview object"); \
186 return NULL; \
187 }
188
189 #define CHECK_RELEASED_INT(mv) \
190 if (BASE_INACCESSIBLE(mv)) { \
191 PyErr_SetString(PyExc_ValueError, \
192 "operation forbidden on released memoryview object"); \
193 return -1; \
194 }
195
196 #define CHECK_RESTRICTED(mv) \
197 if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
198 PyErr_SetString(PyExc_ValueError, \
199 "cannot create new view on restricted memoryview"); \
200 return NULL; \
201 }
202
203 #define CHECK_RESTRICTED_INT(mv) \
204 if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
205 PyErr_SetString(PyExc_ValueError, \
206 "cannot create new view on restricted memoryview"); \
207 return -1; \
208 }
209
/* See gh-92888. These macros signal that the memoryview must be re-checked
   before its buffer is accessed: conversions such as __index__() can run
   arbitrary code that releases the buffer, causing a read after free. */
212 #define CHECK_RELEASED_AGAIN(mv) CHECK_RELEASED(mv)
213 #define CHECK_RELEASED_INT_AGAIN(mv) CHECK_RELEASED_INT(mv)
214
215 #define CHECK_LIST_OR_TUPLE(v) \
216 if (!PyList_Check(v) && !PyTuple_Check(v)) { \
217 PyErr_SetString(PyExc_TypeError, \
218 #v " must be a list or a tuple"); \
219 return NULL; \
220 }
221
222 #define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
223
224 /* Check for the presence of suboffsets in the first dimension. */
225 #define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
226 /* Adjust ptr if suboffsets are present. */
227 #define ADJUST_PTR(ptr, suboffsets, dim) \
228 (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
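
/* Illustration (a sketch, not used directly below): for a PIL-style array,
   'ptr' points at a slot that itself stores a pointer once the stride for
   dimension 'dim' has been added.  ADJUST_PTR(ptr, suboffsets, dim) then
   amounts to

       *(char **)ptr + suboffsets[dim]

   i.e. the stored pointer is followed first and the suboffset is added to it,
   as specified by PEP 3118 for arrays that use suboffsets. */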
229
230 /* Memoryview buffer properties */
231 #define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
232 #define MV_F_CONTIGUOUS(flags) \
233 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
234 #define MV_ANY_CONTIGUOUS(flags) \
235 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
236
237 /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
238 #define MV_CONTIGUOUS_NDIM1(view) \
239 ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
240
241 /* getbuffer() requests */
242 #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
243 #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
244 #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
245 #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
246 #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
247 #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
248 #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
249 #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
250
251
252 /**************************************************************************/
253 /* Copy memoryview buffers */
254 /**************************************************************************/
255
256 /* The functions in this section take a source and a destination buffer
257 with the same logical structure: format, itemsize, ndim and shape
258 are identical, with ndim > 0.
259
260 NOTE: All buffers are assumed to have PyBUF_FULL information, which
261 is the case for memoryviews! */
262
263
264 /* Assumptions: ndim >= 1. The macro tests for a corner case that should
265 perhaps be explicitly forbidden in the PEP. */
266 #define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
267 (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
268
269 static inline int
270 last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
271 {
272 assert(dest->ndim > 0 && src->ndim > 0);
273 return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
274 !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
275 dest->strides[dest->ndim-1] == dest->itemsize &&
276 src->strides[src->ndim-1] == src->itemsize);
277 }
278
279 /* This is not a general function for determining format equivalence.
280 It is used in copy_single() and copy_buffer() to weed out non-matching
281 formats. Skipping the '@' character is specifically used in slice
282 assignments, where the lvalue is already known to have a single character
283 format. This is a performance hack that could be rewritten (if properly
284 benchmarked). */
285 static inline int
286 equiv_format(const Py_buffer *dest, const Py_buffer *src)
287 {
288 const char *dfmt, *sfmt;
289
290 assert(dest->format && src->format);
291 dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
292 sfmt = src->format[0] == '@' ? src->format+1 : src->format;
293
294 if (strcmp(dfmt, sfmt) != 0 ||
295 dest->itemsize != src->itemsize) {
296 return 0;
297 }
298
299 return 1;
300 }
301
302 /* Two shapes are equivalent if they are either equal or identical up
303 to a zero element at the same position. For example, in NumPy arrays
304 the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
305 static inline int
306 equiv_shape(const Py_buffer *dest, const Py_buffer *src)
307 {
308 int i;
309
310 if (dest->ndim != src->ndim)
311 return 0;
312
313 for (i = 0; i < dest->ndim; i++) {
314 if (dest->shape[i] != src->shape[i])
315 return 0;
316 if (dest->shape[i] == 0)
317 break;
318 }
319
320 return 1;
321 }
322
323 /* Check that the logical structure of the destination and source buffers
324 is identical. */
325 static int
326 equiv_structure(const Py_buffer *dest, const Py_buffer *src)
327 {
328 if (!equiv_format(dest, src) ||
329 !equiv_shape(dest, src)) {
330 PyErr_SetString(PyExc_ValueError,
331 "memoryview assignment: lvalue and rvalue have different "
332 "structures");
333 return 0;
334 }
335
336 return 1;
337 }
338
/* Base case for recursive multi-dimensional copying. Contiguous arrays are
   copied with very little overhead. Assumptions: ndim == 1; mem is either
   NULL or points to a scratch buffer of at least shape[0] * itemsize bytes. */
342 static void
343 copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
344 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
345 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
346 char *mem)
347 {
348 if (mem == NULL) { /* contiguous */
349 Py_ssize_t size = shape[0] * itemsize;
350 if (dptr + size < sptr || sptr + size < dptr)
351 memcpy(dptr, sptr, size); /* no overlapping */
352 else
353 memmove(dptr, sptr, size);
354 }
355 else {
356 char *p;
357 Py_ssize_t i;
358 for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
359 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
360 memcpy(p, xsptr, itemsize);
361 }
362 for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
363 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
364 memcpy(xdptr, p, itemsize);
365 }
366 }
367
368 }
369
370 /* Recursively copy a source buffer to a destination buffer. The two buffers
371 have the same ndim, shape and itemsize. */
372 static void
373 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
374 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
375 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
376 char *mem)
377 {
378 Py_ssize_t i;
379
380 assert(ndim >= 1);
381
382 if (ndim == 1) {
383 copy_base(shape, itemsize,
384 dptr, dstrides, dsuboffsets,
385 sptr, sstrides, ssuboffsets,
386 mem);
387 return;
388 }
389
390 for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
391 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
392 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
393
394 copy_rec(shape+1, ndim-1, itemsize,
395 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
396 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
397 mem);
398 }
399 }
400
401 /* Faster copying of one-dimensional arrays. */
402 static int
403 copy_single(PyMemoryViewObject *self, const Py_buffer *dest, const Py_buffer *src)
404 {
405 CHECK_RELEASED_INT_AGAIN(self);
406 char *mem = NULL;
407
408 assert(dest->ndim == 1);
409
410 if (!equiv_structure(dest, src))
411 return -1;
412
413 if (!last_dim_is_contiguous(dest, src)) {
414 mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
415 if (mem == NULL) {
416 PyErr_NoMemory();
417 return -1;
418 }
419 }
420
421 copy_base(dest->shape, dest->itemsize,
422 dest->buf, dest->strides, dest->suboffsets,
423 src->buf, src->strides, src->suboffsets,
424 mem);
425
426 if (mem)
427 PyMem_Free(mem);
428
429 return 0;
430 }
431
432 /* Recursively copy src to dest. Both buffers must have the same basic
433 structure. Copying is atomic, the function never fails with a partial
434 copy. */
435 static int
436 copy_buffer(const Py_buffer *dest, const Py_buffer *src)
437 {
438 char *mem = NULL;
439
440 assert(dest->ndim > 0);
441
442 if (!equiv_structure(dest, src))
443 return -1;
444
445 if (!last_dim_is_contiguous(dest, src)) {
446 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
447 if (mem == NULL) {
448 PyErr_NoMemory();
449 return -1;
450 }
451 }
452
453 copy_rec(dest->shape, dest->ndim, dest->itemsize,
454 dest->buf, dest->strides, dest->suboffsets,
455 src->buf, src->strides, src->suboffsets,
456 mem);
457
458 if (mem)
459 PyMem_Free(mem);
460
461 return 0;
462 }
463
464 /* Initialize strides for a C-contiguous array. */
465 static inline void
466 init_strides_from_shape(Py_buffer *view)
467 {
468 Py_ssize_t i;
469
470 assert(view->ndim > 0);
471
472 view->strides[view->ndim-1] = view->itemsize;
473 for (i = view->ndim-2; i >= 0; i--)
474 view->strides[i] = view->strides[i+1] * view->shape[i+1];
475 }
476
477 /* Initialize strides for a Fortran-contiguous array. */
478 static inline void
479 init_fortran_strides_from_shape(Py_buffer *view)
480 {
481 Py_ssize_t i;
482
483 assert(view->ndim > 0);
484
485 view->strides[0] = view->itemsize;
486 for (i = 1; i < view->ndim; i++)
487 view->strides[i] = view->strides[i-1] * view->shape[i-1];
488 }
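
/* Worked example for the two stride initializers above: with itemsize == 8
   and shape == {2, 3, 4},

       C order (init_strides_from_shape):           strides == {96, 32, 8}
       Fortran order (init_fortran_strides_...):    strides == {8, 16, 48}

   In both cases the innermost dimension of the respective order steps by
   itemsize bytes. */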
489
/* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
   or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
   and mem points to a buffer of exactly src->len bytes. */
493 static int
494 buffer_to_contiguous(char *mem, const Py_buffer *src, char order)
495 {
496 Py_buffer dest;
497 Py_ssize_t *strides;
498 int ret;
499
500 assert(src->ndim >= 1);
501 assert(src->shape != NULL);
502 assert(src->strides != NULL);
503
504 strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
505 if (strides == NULL) {
506 PyErr_NoMemory();
507 return -1;
508 }
509
510 /* initialize dest */
511 dest = *src;
512 dest.buf = mem;
513 /* shape is constant and shared: the logical representation of the
514 array is unaltered. */
515
516 /* The physical representation determined by strides (and possibly
517 suboffsets) may change. */
518 dest.strides = strides;
519 if (order == 'C' || order == 'A') {
520 init_strides_from_shape(&dest);
521 }
522 else {
523 init_fortran_strides_from_shape(&dest);
524 }
525
526 dest.suboffsets = NULL;
527
528 ret = copy_buffer(&dest, src);
529
530 PyMem_Free(strides);
531 return ret;
532 }
533
534
535 /****************************************************************************/
536 /* Constructors */
537 /****************************************************************************/
538
539 /* Initialize values that are shared with the managed buffer. */
540 static inline void
541 init_shared_values(Py_buffer *dest, const Py_buffer *src)
542 {
543 dest->obj = src->obj;
544 dest->buf = src->buf;
545 dest->len = src->len;
546 dest->itemsize = src->itemsize;
547 dest->readonly = src->readonly;
548 dest->format = src->format ? src->format : "B";
549 dest->internal = src->internal;
550 }
551
552 /* Copy shape and strides. Reconstruct missing values. */
553 static void
554 init_shape_strides(Py_buffer *dest, const Py_buffer *src)
555 {
556 Py_ssize_t i;
557
558 if (src->ndim == 0) {
559 dest->shape = NULL;
560 dest->strides = NULL;
561 return;
562 }
563 if (src->ndim == 1) {
564 dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
565 dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
566 return;
567 }
568
569 for (i = 0; i < src->ndim; i++)
570 dest->shape[i] = src->shape[i];
571 if (src->strides) {
572 for (i = 0; i < src->ndim; i++)
573 dest->strides[i] = src->strides[i];
574 }
575 else {
576 init_strides_from_shape(dest);
577 }
578 }
579
580 static inline void
581 init_suboffsets(Py_buffer *dest, const Py_buffer *src)
582 {
583 Py_ssize_t i;
584
585 if (src->suboffsets == NULL) {
586 dest->suboffsets = NULL;
587 return;
588 }
589 for (i = 0; i < src->ndim; i++)
590 dest->suboffsets[i] = src->suboffsets[i];
591 }
592
593 /* len = product(shape) * itemsize */
594 static inline void
595 init_len(Py_buffer *view)
596 {
597 Py_ssize_t i, len;
598
599 len = 1;
600 for (i = 0; i < view->ndim; i++)
601 len *= view->shape[i];
602 len *= view->itemsize;
603
604 view->len = len;
605 }
606
607 /* Initialize memoryview buffer properties. */
608 static void
609 init_flags(PyMemoryViewObject *mv)
610 {
611 const Py_buffer *view = &mv->view;
612 int flags = 0;
613
614 switch (view->ndim) {
615 case 0:
616 flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
617 _Py_MEMORYVIEW_FORTRAN);
618 break;
619 case 1:
620 if (MV_CONTIGUOUS_NDIM1(view))
621 flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
622 break;
623 default:
624 if (PyBuffer_IsContiguous(view, 'C'))
625 flags |= _Py_MEMORYVIEW_C;
626 if (PyBuffer_IsContiguous(view, 'F'))
627 flags |= _Py_MEMORYVIEW_FORTRAN;
628 break;
629 }
630
631 if (view->suboffsets) {
632 flags |= _Py_MEMORYVIEW_PIL;
633 flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
634 }
635
636 mv->flags = flags;
637 }
638
639 /* Allocate a new memoryview and perform basic initialization. New memoryviews
640 are exclusively created through the mbuf_add functions. */
641 static inline PyMemoryViewObject *
642 memory_alloc(int ndim)
643 {
644 PyMemoryViewObject *mv;
645
646 mv = (PyMemoryViewObject *)
647 PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
648 if (mv == NULL)
649 return NULL;
650
651 mv->mbuf = NULL;
652 mv->hash = -1;
653 mv->flags = 0;
654 mv->exports = 0;
655 mv->view.ndim = ndim;
656 mv->view.shape = mv->ob_array;
657 mv->view.strides = mv->ob_array + ndim;
658 mv->view.suboffsets = mv->ob_array + 2 * ndim;
659 mv->weakreflist = NULL;
660
661 _PyObject_GC_TRACK(mv);
662 return mv;
663 }
664
665 /*
666 Return a new memoryview that is registered with mbuf. If src is NULL,
667 use mbuf->master as the underlying buffer. Otherwise, use src.
668
669 The new memoryview has full buffer information: shape and strides
670 are always present, suboffsets as needed. Arrays are copied to
671 the memoryview's ob_array field.
672 */
673 static PyObject *
674 mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
675 {
676 PyMemoryViewObject *mv;
677 Py_buffer *dest;
678
679 if (src == NULL)
680 src = &mbuf->master;
681
682 if (src->ndim > PyBUF_MAX_NDIM) {
683 PyErr_SetString(PyExc_ValueError,
684 "memoryview: number of dimensions must not exceed "
685 Py_STRINGIFY(PyBUF_MAX_NDIM));
686 return NULL;
687 }
688
689 mv = memory_alloc(src->ndim);
690 if (mv == NULL)
691 return NULL;
692
693 dest = &mv->view;
694 init_shared_values(dest, src);
695 init_shape_strides(dest, src);
696 init_suboffsets(dest, src);
697 init_flags(mv);
698
699 mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
700 mbuf->exports++;
701
702 return (PyObject *)mv;
703 }
704
705 /* Register an incomplete view: shape, strides, suboffsets and flags still
706 need to be initialized. Use 'ndim' instead of src->ndim to determine the
707 size of the memoryview's ob_array.
708
709 Assumption: ndim <= PyBUF_MAX_NDIM. */
710 static PyObject *
711 mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
712 int ndim)
713 {
714 PyMemoryViewObject *mv;
715 Py_buffer *dest;
716
717 if (src == NULL)
718 src = &mbuf->master;
719
720 assert(ndim <= PyBUF_MAX_NDIM);
721
722 mv = memory_alloc(ndim);
723 if (mv == NULL)
724 return NULL;
725
726 dest = &mv->view;
727 init_shared_values(dest, src);
728
729 mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
730 mbuf->exports++;
731
732 return (PyObject *)mv;
733 }
734
735 /* Expose a raw memory area as a view of contiguous bytes. flags can be
736 PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
737 The memoryview has complete buffer information. */
738 PyObject *
739 PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
740 {
741 _PyManagedBufferObject *mbuf;
742 PyObject *mv;
743 int readonly;
744
745 assert(mem != NULL);
746 assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
747
748 mbuf = mbuf_alloc();
749 if (mbuf == NULL)
750 return NULL;
751
752 readonly = (flags == PyBUF_WRITE) ? 0 : 1;
753 (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
754 PyBUF_FULL_RO);
755
756 mv = mbuf_add_view(mbuf, NULL);
757 Py_DECREF(mbuf);
758
759 return mv;
760 }
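
/* Usage sketch (hypothetical caller, not part of this file): exposing a
   caller-owned C buffer as a writable view.

       static char scratch[256];
       PyObject *mv = PyMemoryView_FromMemory(scratch, sizeof(scratch),
                                              PyBUF_WRITE);
       if (mv == NULL)
           return NULL;

   Since no exporting object is recorded (master.obj stays NULL), the caller
   must keep 'scratch' alive for the lifetime of the view. */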
761
762 /* Create a memoryview from a given Py_buffer. For simple byte views,
763 PyMemoryView_FromMemory() should be used instead.
   This function is the only entry point that can create a master buffer
   without full information, so init_shape_strides() must be able to
   reconstruct any missing values. */
767 PyObject *
768 PyMemoryView_FromBuffer(const Py_buffer *info)
769 {
770 _PyManagedBufferObject *mbuf;
771 PyObject *mv;
772
773 if (info->buf == NULL) {
774 PyErr_SetString(PyExc_ValueError,
775 "PyMemoryView_FromBuffer(): info->buf must not be NULL");
776 return NULL;
777 }
778
779 mbuf = mbuf_alloc();
780 if (mbuf == NULL)
781 return NULL;
782
783 /* info->obj is either NULL or a borrowed reference. This reference
784 should not be decremented in PyBuffer_Release(). */
785 mbuf->master = *info;
786 mbuf->master.obj = NULL;
787
788 mv = mbuf_add_view(mbuf, NULL);
789 Py_DECREF(mbuf);
790
791 return mv;
792 }
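
/* Usage sketch (hypothetical caller): wrapping a raw region described by a
   caller-filled Py_buffer; 'data' and 'n' are placeholders.

       Py_buffer info;
       if (PyBuffer_FillInfo(&info, NULL, data, n, 1, PyBUF_FULL_RO) < 0)
           return NULL;
       PyObject *mv = PyMemoryView_FromBuffer(&info);

   Because master.obj is cleared above, releasing the view does not decref an
   exporter; ownership of the memory remains with the caller. */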
793
794 /* Create a memoryview from an object that implements the buffer protocol,
795 using the given flags.
796 If the object is a memoryview, the new memoryview must be registered
797 with the same managed buffer. Otherwise, a new managed buffer is created. */
798 static PyObject *
799 PyMemoryView_FromObjectAndFlags(PyObject *v, int flags)
800 {
801 _PyManagedBufferObject *mbuf;
802
803 if (PyMemoryView_Check(v)) {
804 PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
805 CHECK_RELEASED(mv);
806 CHECK_RESTRICTED(mv);
807 return mbuf_add_view(mv->mbuf, &mv->view);
808 }
809 else if (PyObject_CheckBuffer(v)) {
810 PyObject *ret;
811 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v, flags);
812 if (mbuf == NULL)
813 return NULL;
814 ret = mbuf_add_view(mbuf, NULL);
815 Py_DECREF(mbuf);
816 return ret;
817 }
818
819 PyErr_Format(PyExc_TypeError,
820 "memoryview: a bytes-like object is required, not '%.200s'",
821 Py_TYPE(v)->tp_name);
822 return NULL;
823 }
824
825 /* Create a memoryview from an object that implements the buffer protocol,
826 using the given flags.
827 If the object is a memoryview, the new memoryview must be registered
828 with the same managed buffer. Otherwise, a new managed buffer is created. */
829 PyObject *
830 _PyMemoryView_FromBufferProc(PyObject *v, int flags, getbufferproc bufferproc)
831 {
832 _PyManagedBufferObject *mbuf = mbuf_alloc();
833 if (mbuf == NULL)
834 return NULL;
835
836 int res = bufferproc(v, &mbuf->master, flags);
837 if (res < 0) {
838 mbuf->master.obj = NULL;
839 Py_DECREF(mbuf);
840 return NULL;
841 }
842
843 PyObject *ret = mbuf_add_view(mbuf, NULL);
844 Py_DECREF(mbuf);
845 return ret;
846 }
847
848 /* Create a memoryview from an object that implements the buffer protocol.
849 If the object is a memoryview, the new memoryview must be registered
850 with the same managed buffer. Otherwise, a new managed buffer is created. */
851 PyObject *
852 PyMemoryView_FromObject(PyObject *v)
853 {
854 return PyMemoryView_FromObjectAndFlags(v, PyBUF_FULL_RO);
855 }
856
857 /* Copy the format string from a base object that might vanish. */
858 static int
859 mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
860 {
861 if (fmt != NULL) {
862 char *cp = PyMem_Malloc(strlen(fmt)+1);
863 if (cp == NULL) {
864 PyErr_NoMemory();
865 return -1;
866 }
867 mbuf->master.format = strcpy(cp, fmt);
868 mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
869 }
870
871 return 0;
872 }
873
874 /*
875 Return a memoryview that is based on a contiguous copy of src.
876 Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
877
878 Ownership rules:
879 1) As usual, the returned memoryview has a private copy
880 of src->shape, src->strides and src->suboffsets.
881 2) src->format is copied to the master buffer and released
882 in mbuf_dealloc(). The releasebufferproc of the bytes
883 object is NULL, so it does not matter that mbuf_release()
884 passes the altered format pointer to PyBuffer_Release().
885 */
886 static PyObject *
887 memory_from_contiguous_copy(const Py_buffer *src, char order)
888 {
889 _PyManagedBufferObject *mbuf;
890 PyMemoryViewObject *mv;
891 PyObject *bytes;
892 Py_buffer *dest;
893 int i;
894
895 assert(src->ndim > 0);
896 assert(src->shape != NULL);
897
898 bytes = PyBytes_FromStringAndSize(NULL, src->len);
899 if (bytes == NULL)
900 return NULL;
901
902 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes, PyBUF_FULL_RO);
903 Py_DECREF(bytes);
904 if (mbuf == NULL)
905 return NULL;
906
907 if (mbuf_copy_format(mbuf, src->format) < 0) {
908 Py_DECREF(mbuf);
909 return NULL;
910 }
911
912 mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
913 Py_DECREF(mbuf);
914 if (mv == NULL)
915 return NULL;
916
917 dest = &mv->view;
918
919 /* shared values are initialized correctly except for itemsize */
920 dest->itemsize = src->itemsize;
921
922 /* shape and strides */
923 for (i = 0; i < src->ndim; i++) {
924 dest->shape[i] = src->shape[i];
925 }
926 if (order == 'C' || order == 'A') {
927 init_strides_from_shape(dest);
928 }
929 else {
930 init_fortran_strides_from_shape(dest);
931 }
932 /* suboffsets */
933 dest->suboffsets = NULL;
934
935 /* flags */
936 init_flags(mv);
937
938 if (copy_buffer(dest, src) < 0) {
939 Py_DECREF(mv);
940 return NULL;
941 }
942
943 return (PyObject *)mv;
944 }
945
946 /*
947 Return a new memoryview object based on a contiguous exporter with
948 buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
949 The logical structure of the input and output buffers is the same
950 (i.e. tolist(input) == tolist(output)), but the physical layout in
951 memory can be explicitly chosen.
952
953 As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
954 otherwise it may be writable or read-only.
955
956 If the exporter is already contiguous with the desired target order,
957 the memoryview will be directly based on the exporter.
958
959 Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
960 based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
961 'F'ortran order otherwise.
962 */
963 PyObject *
964 PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
965 {
966 PyMemoryViewObject *mv;
967 PyObject *ret;
968 Py_buffer *view;
969
970 assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
971 assert(order == 'C' || order == 'F' || order == 'A');
972
973 mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
974 if (mv == NULL)
975 return NULL;
976
977 view = &mv->view;
978 if (buffertype == PyBUF_WRITE && view->readonly) {
979 PyErr_SetString(PyExc_BufferError,
980 "underlying buffer is not writable");
981 Py_DECREF(mv);
982 return NULL;
983 }
984
985 if (PyBuffer_IsContiguous(view, order))
986 return (PyObject *)mv;
987
988 if (buffertype == PyBUF_WRITE) {
989 PyErr_SetString(PyExc_BufferError,
990 "writable contiguous buffer requested "
991 "for a non-contiguous object.");
992 Py_DECREF(mv);
993 return NULL;
994 }
995
996 ret = memory_from_contiguous_copy(view, order);
997 Py_DECREF(mv);
998 return ret;
999 }
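
/* Usage sketch (hypothetical caller): obtain a C-contiguous, read-only view
   of an arbitrary exporter 'obj', copying only when necessary.

       PyObject *mv = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
       if (mv == NULL)
           return NULL;

   If 'obj' is already C-contiguous the result aliases its buffer; otherwise
   it is backed by a private bytes copy (see memory_from_contiguous_copy()). */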
1000
1001
1002 /*[clinic input]
1003 @classmethod
1004 memoryview.__new__
1005
1006 object: object
1007
1008 Create a new memoryview object which references the given object.
1009 [clinic start generated code]*/
1010
1011 static PyObject *
1012 memoryview_impl(PyTypeObject *type, PyObject *object)
1013 /*[clinic end generated code: output=7de78e184ed66db8 input=f04429eb0bdf8c6e]*/
1014 {
1015 return PyMemoryView_FromObject(object);
1016 }
1017
1018
1019 /*[clinic input]
1020 @classmethod
1021 memoryview._from_flags
1022
1023 object: object
1024 flags: int
1025
1026 Create a new memoryview object which references the given object.
1027 [clinic start generated code]*/
1028
1029 static PyObject *
1030 memoryview__from_flags_impl(PyTypeObject *type, PyObject *object, int flags)
1031 /*[clinic end generated code: output=bf71f9906c266ee2 input=f5f82fd0e744356b]*/
1032 {
1033 return PyMemoryView_FromObjectAndFlags(object, flags);
1034 }
1035
1036
1037 /****************************************************************************/
1038 /* Previously in abstract.c */
1039 /****************************************************************************/
1040
1041 typedef struct {
1042 Py_buffer view;
1043 Py_ssize_t array[1];
1044 } Py_buffer_full;
1045
1046 int
1047 PyBuffer_ToContiguous(void *buf, const Py_buffer *src, Py_ssize_t len, char order)
1048 {
1049 Py_buffer_full *fb = NULL;
1050 int ret;
1051
1052 assert(order == 'C' || order == 'F' || order == 'A');
1053
1054 if (len != src->len) {
1055 PyErr_SetString(PyExc_ValueError,
1056 "PyBuffer_ToContiguous: len != view->len");
1057 return -1;
1058 }
1059
1060 if (PyBuffer_IsContiguous(src, order)) {
1061 memcpy((char *)buf, src->buf, len);
1062 return 0;
1063 }
1064
1065 /* buffer_to_contiguous() assumes PyBUF_FULL */
1066 fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
1067 if (fb == NULL) {
1068 PyErr_NoMemory();
1069 return -1;
1070 }
1071 fb->view.ndim = src->ndim;
1072 fb->view.shape = fb->array;
1073 fb->view.strides = fb->array + src->ndim;
1074 fb->view.suboffsets = fb->array + 2 * src->ndim;
1075
1076 init_shared_values(&fb->view, src);
1077 init_shape_strides(&fb->view, src);
1078 init_suboffsets(&fb->view, src);
1079
1080 src = &fb->view;
1081
1082 ret = buffer_to_contiguous(buf, src, order);
1083 PyMem_Free(fb);
1084 return ret;
1085 }
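
/* Usage sketch (hypothetical caller): flatten a possibly non-contiguous
   exported buffer 'view' into caller-owned memory in C order.

       char *buf = PyMem_Malloc(view->len);
       if (buf == NULL) {
           PyErr_NoMemory();
           return NULL;
       }
       if (PyBuffer_ToContiguous(buf, view, view->len, 'C') < 0) {
           PyMem_Free(buf);
           return NULL;
       }
*/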
1086
1087
1088 /****************************************************************************/
1089 /* Release/GC management */
1090 /****************************************************************************/
1091
1092 /* Inform the managed buffer that this particular memoryview will not access
1093 the underlying buffer again. If no other memoryviews are registered with
1094 the managed buffer, the underlying buffer is released instantly and
1095 marked as inaccessible for both the memoryview and the managed buffer.
1096
1097 This function fails if the memoryview itself has exported buffers. */
1098 static int
1099 _memory_release(PyMemoryViewObject *self)
1100 {
1101 if (self->flags & _Py_MEMORYVIEW_RELEASED)
1102 return 0;
1103
1104 if (self->exports == 0) {
1105 self->flags |= _Py_MEMORYVIEW_RELEASED;
1106 assert(self->mbuf->exports > 0);
1107 if (--self->mbuf->exports == 0)
1108 mbuf_release(self->mbuf);
1109 return 0;
1110 }
1111 if (self->exports > 0) {
1112 PyErr_Format(PyExc_BufferError,
1113 "memoryview has %zd exported buffer%s", self->exports,
1114 self->exports==1 ? "" : "s");
1115 return -1;
1116 }
1117
1118 PyErr_SetString(PyExc_SystemError,
1119 "_memory_release(): negative export count");
1120 return -1;
1121 }
1122
1123 /*[clinic input]
1124 memoryview.release
1125
1126 Release the underlying buffer exposed by the memoryview object.
1127 [clinic start generated code]*/
1128
1129 static PyObject *
1130 memoryview_release_impl(PyMemoryViewObject *self)
1131 /*[clinic end generated code: output=d0b7e3ba95b7fcb9 input=bc71d1d51f4a52f0]*/
1132 {
1133 if (_memory_release(self) < 0)
1134 return NULL;
1135 Py_RETURN_NONE;
1136 }
1137
1138 static void
1139 memory_dealloc(PyMemoryViewObject *self)
1140 {
1141 assert(self->exports == 0);
1142 _PyObject_GC_UNTRACK(self);
1143 (void)_memory_release(self);
1144 Py_CLEAR(self->mbuf);
1145 if (self->weakreflist != NULL)
1146 PyObject_ClearWeakRefs((PyObject *) self);
1147 PyObject_GC_Del(self);
1148 }
1149
1150 static int
1151 memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
1152 {
1153 Py_VISIT(self->mbuf);
1154 return 0;
1155 }
1156
1157 static int
1158 memory_clear(PyMemoryViewObject *self)
1159 {
1160 (void)_memory_release(self);
1161 Py_CLEAR(self->mbuf);
1162 return 0;
1163 }
1164
1165 static PyObject *
1166 memory_enter(PyObject *self, PyObject *args)
1167 {
1168 CHECK_RELEASED(self);
1169 return Py_NewRef(self);
1170 }
1171
1172 static PyObject *
1173 memory_exit(PyObject *self, PyObject *args)
1174 {
1175 return memoryview_release_impl((PyMemoryViewObject *)self);
1176 }
1177
1178
1179 /****************************************************************************/
1180 /* Casting format and shape */
1181 /****************************************************************************/
1182
1183 #define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
1184
1185 static inline Py_ssize_t
1186 get_native_fmtchar(char *result, const char *fmt)
1187 {
1188 Py_ssize_t size = -1;
1189
1190 if (fmt[0] == '@') fmt++;
1191
1192 switch (fmt[0]) {
1193 case 'c': case 'b': case 'B': size = sizeof(char); break;
1194 case 'h': case 'H': size = sizeof(short); break;
1195 case 'i': case 'I': size = sizeof(int); break;
1196 case 'l': case 'L': size = sizeof(long); break;
1197 case 'q': case 'Q': size = sizeof(long long); break;
1198 case 'n': case 'N': size = sizeof(Py_ssize_t); break;
1199 case 'f': size = sizeof(float); break;
1200 case 'd': size = sizeof(double); break;
    case 'e': size = sizeof(float) / 2; break;     /* IEEE 754 half precision */
1202 case '?': size = sizeof(_Bool); break;
1203 case 'P': size = sizeof(void *); break;
1204 }
1205
1206 if (size > 0 && fmt[1] == '\0') {
1207 *result = fmt[0];
1208 return size;
1209 }
1210
1211 return -1;
1212 }
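
/* For example, get_native_fmtchar(&c, "@l") sets c to 'l' and returns
   sizeof(long), while get_native_fmtchar(&c, "<l") and
   get_native_fmtchar(&c, "ll") both return -1: only single-character native
   formats, optionally prefixed with '@', are accepted. */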
1213
1214 static inline const char *
1215 get_native_fmtstr(const char *fmt)
1216 {
1217 int at = 0;
1218
1219 if (fmt[0] == '@') {
1220 at = 1;
1221 fmt++;
1222 }
1223 if (fmt[0] == '\0' || fmt[1] != '\0') {
1224 return NULL;
1225 }
1226
1227 #define RETURN(s) do { return at ? "@" s : s; } while (0)
1228
1229 switch (fmt[0]) {
1230 case 'c': RETURN("c");
1231 case 'b': RETURN("b");
1232 case 'B': RETURN("B");
1233 case 'h': RETURN("h");
1234 case 'H': RETURN("H");
1235 case 'i': RETURN("i");
1236 case 'I': RETURN("I");
1237 case 'l': RETURN("l");
1238 case 'L': RETURN("L");
1239 case 'q': RETURN("q");
1240 case 'Q': RETURN("Q");
1241 case 'n': RETURN("n");
1242 case 'N': RETURN("N");
1243 case 'f': RETURN("f");
1244 case 'd': RETURN("d");
1245 case 'e': RETURN("e");
1246 case '?': RETURN("?");
1247 case 'P': RETURN("P");
1248 }
1249
1250 return NULL;
1251 }
1252
1253
1254 /* Cast a memoryview's data type to 'format'. The input array must be
1255 C-contiguous. At least one of input-format, output-format must have
1256 byte size. The output array is 1-D, with the same byte length as the
1257 input array. Thus, view->len must be a multiple of the new itemsize. */
1258 static int
1259 cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
1260 {
1261 Py_buffer *view = &mv->view;
1262 PyObject *asciifmt;
1263 char srcchar, destchar;
1264 Py_ssize_t itemsize;
1265 int ret = -1;
1266
1267 assert(view->ndim >= 1);
1268 assert(Py_SIZE(mv) == 3*view->ndim);
1269 assert(view->shape == mv->ob_array);
1270 assert(view->strides == mv->ob_array + view->ndim);
1271 assert(view->suboffsets == mv->ob_array + 2*view->ndim);
1272
1273 asciifmt = PyUnicode_AsASCIIString(format);
1274 if (asciifmt == NULL)
1275 return ret;
1276
1277 itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
1278 if (itemsize < 0) {
1279 PyErr_SetString(PyExc_ValueError,
1280 "memoryview: destination format must be a native single "
1281 "character format prefixed with an optional '@'");
1282 goto out;
1283 }
1284
1285 if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
1286 !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
1287 PyErr_SetString(PyExc_TypeError,
1288 "memoryview: cannot cast between two non-byte formats");
1289 goto out;
1290 }
1291 if (view->len % itemsize) {
1292 PyErr_SetString(PyExc_TypeError,
1293 "memoryview: length is not a multiple of itemsize");
1294 goto out;
1295 }
1296
1297 view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
1298 if (view->format == NULL) {
1299 /* NOT_REACHED: get_native_fmtchar() already validates the format. */
1300 PyErr_SetString(PyExc_RuntimeError,
1301 "memoryview: internal error");
1302 goto out;
1303 }
1304 view->itemsize = itemsize;
1305
1306 view->ndim = 1;
1307 view->shape[0] = view->len / view->itemsize;
1308 view->strides[0] = view->itemsize;
1309 view->suboffsets = NULL;
1310
1311 init_flags(mv);
1312
1313 ret = 0;
1314
1315 out:
1316 Py_DECREF(asciifmt);
1317 return ret;
1318 }
1319
1320 /* The memoryview must have space for 3*len(seq) elements. */
1321 static Py_ssize_t
1322 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1323 Py_ssize_t itemsize)
1324 {
1325 Py_ssize_t x, i;
1326 Py_ssize_t len = itemsize;
1327
1328 for (i = 0; i < ndim; i++) {
1329 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1330 if (!PyLong_Check(tmp)) {
1331 PyErr_SetString(PyExc_TypeError,
1332 "memoryview.cast(): elements of shape must be integers");
1333 return -1;
1334 }
1335 x = PyLong_AsSsize_t(tmp);
1336 if (x == -1 && PyErr_Occurred()) {
1337 return -1;
1338 }
1339 if (x <= 0) {
1340 /* In general elements of shape may be 0, but not for casting. */
1341 PyErr_Format(PyExc_ValueError,
1342 "memoryview.cast(): elements of shape must be integers > 0");
1343 return -1;
1344 }
1345 if (x > PY_SSIZE_T_MAX / len) {
1346 PyErr_Format(PyExc_ValueError,
1347 "memoryview.cast(): product(shape) > SSIZE_MAX");
1348 return -1;
1349 }
1350 len *= x;
1351 shape[i] = x;
1352 }
1353
1354 return len;
1355 }
1356
1357 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1358 If the result array does not have exactly the same byte length as the
1359 input array, raise ValueError. */
1360 static int
1361 cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
1362 {
1363 Py_buffer *view = &mv->view;
1364 Py_ssize_t len;
1365
1366 assert(view->ndim == 1); /* ndim from cast_to_1D() */
1367 assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
1368 assert(view->shape == mv->ob_array);
1369 assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
1370 assert(view->suboffsets == NULL);
1371
1372 view->ndim = ndim;
1373 if (view->ndim == 0) {
1374 view->shape = NULL;
1375 view->strides = NULL;
1376 len = view->itemsize;
1377 }
1378 else {
1379 len = copy_shape(view->shape, shape, ndim, view->itemsize);
1380 if (len < 0)
1381 return -1;
1382 init_strides_from_shape(view);
1383 }
1384
1385 if (view->len != len) {
1386 PyErr_SetString(PyExc_TypeError,
1387 "memoryview: product(shape) * itemsize != buffer size");
1388 return -1;
1389 }
1390
1391 init_flags(mv);
1392
1393 return 0;
1394 }
1395
1396 static int
1397 zero_in_shape(PyMemoryViewObject *mv)
1398 {
1399 Py_buffer *view = &mv->view;
1400 Py_ssize_t i;
1401
1402 for (i = 0; i < view->ndim; i++)
1403 if (view->shape[i] == 0)
1404 return 1;
1405
1406 return 0;
1407 }
1408
1409 /*
1410 Cast a copy of 'self' to a different view. The input view must
1411 be C-contiguous. The function always casts the input view to a
1412 1-D output according to 'format'. At least one of input-format,
1413 output-format must have byte size.
1414
1415 If 'shape' is given, the 1-D view from the previous step will
1416 be cast to a C-contiguous view with new shape and strides.
1417
1418 All casts must result in views that will have the exact byte
1419 size of the original input. Otherwise, an error is raised.
1420 */
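
/* Worked example (assuming sizeof(int) == 4): a C-contiguous 12-byte view
   with format 'B' and shape [12] can be cast to format 'i', which yields
   shape [3]; from there shape=[3, 1] or shape=[1, 3] are also valid, since
   product(shape) * itemsize == 12 in every case.  Requesting shape=[5]
   instead fails, because 5 * 4 != 12. */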
1421 /*[clinic input]
1422 memoryview.cast
1423
1424 format: unicode
1425 shape: object = NULL
1426
1427 Cast a memoryview to a new format or shape.
1428 [clinic start generated code]*/
1429
1430 static PyObject *
1431 memoryview_cast_impl(PyMemoryViewObject *self, PyObject *format,
1432 PyObject *shape)
1433 /*[clinic end generated code: output=bae520b3a389cbab input=138936cc9041b1a3]*/
1434 {
1435 PyMemoryViewObject *mv = NULL;
1436 Py_ssize_t ndim = 1;
1437
1438 CHECK_RELEASED(self);
1439 CHECK_RESTRICTED(self);
1440
1441 if (!MV_C_CONTIGUOUS(self->flags)) {
1442 PyErr_SetString(PyExc_TypeError,
1443 "memoryview: casts are restricted to C-contiguous views");
1444 return NULL;
1445 }
1446 if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
1447 PyErr_SetString(PyExc_TypeError,
1448 "memoryview: cannot cast view with zeros in shape or strides");
1449 return NULL;
1450 }
1451 if (shape) {
1452 CHECK_LIST_OR_TUPLE(shape)
1453 ndim = PySequence_Fast_GET_SIZE(shape);
1454 if (ndim > PyBUF_MAX_NDIM) {
1455 PyErr_SetString(PyExc_ValueError,
1456 "memoryview: number of dimensions must not exceed "
1457 Py_STRINGIFY(PyBUF_MAX_NDIM));
1458 return NULL;
1459 }
1460 if (self->view.ndim != 1 && ndim != 1) {
1461 PyErr_SetString(PyExc_TypeError,
1462 "memoryview: cast must be 1D -> ND or ND -> 1D");
1463 return NULL;
1464 }
1465 }
1466
1467 mv = (PyMemoryViewObject *)
1468 mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
1469 if (mv == NULL)
1470 return NULL;
1471
1472 if (cast_to_1D(mv, format) < 0)
1473 goto error;
1474 if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
1475 goto error;
1476
1477 return (PyObject *)mv;
1478
1479 error:
1480 Py_DECREF(mv);
1481 return NULL;
1482 }
1483
1484 /*[clinic input]
1485 memoryview.toreadonly
1486
1487 Return a readonly version of the memoryview.
1488 [clinic start generated code]*/
1489
1490 static PyObject *
1491 memoryview_toreadonly_impl(PyMemoryViewObject *self)
1492 /*[clinic end generated code: output=2c7e056f04c99e62 input=dc06d20f19ba236f]*/
1493 {
1494 CHECK_RELEASED(self);
1495 CHECK_RESTRICTED(self);
1496 /* Even if self is already readonly, we still need to create a new
1497 * object for .release() to work correctly.
1498 */
1499 self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
1500 if (self != NULL) {
1501 self->view.readonly = 1;
    }
1503 return (PyObject *) self;
1504 }
1505
1506
1507 /**************************************************************************/
1508 /* getbuffer */
1509 /**************************************************************************/
1510
1511 static int
1512 memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
1513 {
1514 Py_buffer *base = &self->view;
1515 int baseflags = self->flags;
1516
1517 CHECK_RELEASED_INT(self);
1518 CHECK_RESTRICTED_INT(self);
1519
1520 /* start with complete information */
1521 *view = *base;
1522 view->obj = NULL;
1523
1524 if (REQ_WRITABLE(flags) && base->readonly) {
1525 PyErr_SetString(PyExc_BufferError,
1526 "memoryview: underlying buffer is not writable");
1527 return -1;
1528 }
1529 if (!REQ_FORMAT(flags)) {
1530 /* NULL indicates that the buffer's data type has been cast to 'B'.
1531 view->itemsize is the _previous_ itemsize. If shape is present,
1532 the equality product(shape) * itemsize = len still holds at this
1533 point. The equality calcsize(format) = itemsize does _not_ hold
1534 from here on! */
1535 view->format = NULL;
1536 }
1537
1538 if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
1539 PyErr_SetString(PyExc_BufferError,
1540 "memoryview: underlying buffer is not C-contiguous");
1541 return -1;
1542 }
1543 if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
1544 PyErr_SetString(PyExc_BufferError,
1545 "memoryview: underlying buffer is not Fortran contiguous");
1546 return -1;
1547 }
1548 if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
1549 PyErr_SetString(PyExc_BufferError,
1550 "memoryview: underlying buffer is not contiguous");
1551 return -1;
1552 }
1553 if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
1554 PyErr_SetString(PyExc_BufferError,
1555 "memoryview: underlying buffer requires suboffsets");
1556 return -1;
1557 }
1558 if (!REQ_STRIDES(flags)) {
1559 if (!MV_C_CONTIGUOUS(baseflags)) {
1560 PyErr_SetString(PyExc_BufferError,
1561 "memoryview: underlying buffer is not C-contiguous");
1562 return -1;
1563 }
1564 view->strides = NULL;
1565 }
1566 if (!REQ_SHAPE(flags)) {
        /* PyBUF_SIMPLE or PyBUF_WRITABLE: the buffer is C-contiguous at this
           point, so it can be exposed as a flat block without shape information. */
1569 if (view->format != NULL) {
1570 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1571 not make sense. */
1572 PyErr_Format(PyExc_BufferError,
1573 "memoryview: cannot cast to unsigned bytes if the format flag "
1574 "is present");
1575 return -1;
1576 }
1577 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1578 do _not_ hold from here on! */
1579 view->ndim = 1;
1580 view->shape = NULL;
1581 }
1582
1583
1584 view->obj = Py_NewRef(self);
1585 self->exports++;
1586
1587 return 0;
1588 }
1589
1590 static void
1591 memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
1592 {
1593 self->exports--;
    /* PyBuffer_Release() decrements view->obj after this function returns. */
    return;
1596 }
1597
1598 /* Buffer methods */
1599 static PyBufferProcs memory_as_buffer = {
1600 (getbufferproc)memory_getbuf, /* bf_getbuffer */
1601 (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
1602 };
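
/* Consumer-side sketch (hypothetical caller): requesting a C-contiguous,
   read-only buffer from a memoryview 'mv' goes through memory_getbuf() above.

       Py_buffer view;
       if (PyObject_GetBuffer(mv, &view, PyBUF_CONTIG_RO) < 0)
           return NULL;
       ... read view.len bytes starting at view.buf ...
       PyBuffer_Release(&view);

   The request fails with BufferError if the underlying buffer is not
   C-contiguous, and memory_releasebuf() drops the export when the consumer
   calls PyBuffer_Release(). */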
1603
1604
1605 /****************************************************************************/
1606 /* Optimized pack/unpack for all native format specifiers */
1607 /****************************************************************************/
1608
1609 /*
1610 Fix exceptions:
1611 1) Include format string in the error message.
1612 2) OverflowError -> ValueError.
1613 3) The error message from PyNumber_Index() is not ideal.
1614 */
1615 static int
1616 type_error_int(const char *fmt)
1617 {
1618 PyErr_Format(PyExc_TypeError,
1619 "memoryview: invalid type for format '%s'", fmt);
1620 return -1;
1621 }
1622
1623 static int
1624 value_error_int(const char *fmt)
1625 {
1626 PyErr_Format(PyExc_ValueError,
1627 "memoryview: invalid value for format '%s'", fmt);
1628 return -1;
1629 }
1630
1631 static int
1632 fix_error_int(const char *fmt)
1633 {
1634 assert(PyErr_Occurred());
1635 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1636 PyErr_Clear();
1637 return type_error_int(fmt);
1638 }
1639 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1640 PyErr_ExceptionMatches(PyExc_ValueError)) {
1641 PyErr_Clear();
1642 return value_error_int(fmt);
1643 }
1644
1645 return -1;
1646 }
1647
1648 /* Accept integer objects or objects with an __index__() method. */
1649 static long
1650 pylong_as_ld(PyObject *item)
1651 {
1652 PyObject *tmp;
1653 long ld;
1654
1655 tmp = _PyNumber_Index(item);
1656 if (tmp == NULL)
1657 return -1;
1658
1659 ld = PyLong_AsLong(tmp);
1660 Py_DECREF(tmp);
1661 return ld;
1662 }
1663
1664 static unsigned long
1665 pylong_as_lu(PyObject *item)
1666 {
1667 PyObject *tmp;
1668 unsigned long lu;
1669
1670 tmp = _PyNumber_Index(item);
1671 if (tmp == NULL)
1672 return (unsigned long)-1;
1673
1674 lu = PyLong_AsUnsignedLong(tmp);
1675 Py_DECREF(tmp);
1676 return lu;
1677 }
1678
1679 static long long
1680 pylong_as_lld(PyObject *item)
1681 {
1682 PyObject *tmp;
1683 long long lld;
1684
1685 tmp = _PyNumber_Index(item);
1686 if (tmp == NULL)
1687 return -1;
1688
1689 lld = PyLong_AsLongLong(tmp);
1690 Py_DECREF(tmp);
1691 return lld;
1692 }
1693
1694 static unsigned long long
1695 pylong_as_llu(PyObject *item)
1696 {
1697 PyObject *tmp;
1698 unsigned long long llu;
1699
1700 tmp = _PyNumber_Index(item);
1701 if (tmp == NULL)
1702 return (unsigned long long)-1;
1703
1704 llu = PyLong_AsUnsignedLongLong(tmp);
1705 Py_DECREF(tmp);
1706 return llu;
1707 }
1708
1709 static Py_ssize_t
1710 pylong_as_zd(PyObject *item)
1711 {
1712 PyObject *tmp;
1713 Py_ssize_t zd;
1714
1715 tmp = _PyNumber_Index(item);
1716 if (tmp == NULL)
1717 return -1;
1718
1719 zd = PyLong_AsSsize_t(tmp);
1720 Py_DECREF(tmp);
1721 return zd;
1722 }
1723
1724 static size_t
1725 pylong_as_zu(PyObject *item)
1726 {
1727 PyObject *tmp;
1728 size_t zu;
1729
1730 tmp = _PyNumber_Index(item);
1731 if (tmp == NULL)
1732 return (size_t)-1;
1733
1734 zu = PyLong_AsSize_t(tmp);
1735 Py_DECREF(tmp);
1736 return zu;
1737 }
1738
1739 /* Timings with the ndarray from _testbuffer.c indicate that using the
1740 struct module is around 15x slower than the two functions below. */
1741
1742 #define UNPACK_SINGLE(dest, ptr, type) \
1743 do { \
1744 type x; \
1745 memcpy((char *)&x, ptr, sizeof x); \
1746 dest = x; \
1747 } while (0)
1748
1749 /* Unpack a single item. 'fmt' can be any native format character in struct
1750 module syntax. This function is very sensitive to small changes. With this
1751 layout gcc automatically generates a fast jump table. */
1752 static inline PyObject *
1753 unpack_single(PyMemoryViewObject *self, const char *ptr, const char *fmt)
1754 {
1755 unsigned long long llu;
1756 unsigned long lu;
1757 size_t zu;
1758 long long lld;
1759 long ld;
1760 Py_ssize_t zd;
1761 double d;
1762 unsigned char uc;
1763 void *p;
1764
1765 CHECK_RELEASED_AGAIN(self);
1766
1767 #if PY_LITTLE_ENDIAN
1768 int endian = 1;
1769 #else
1770 int endian = 0;
1771 #endif
1772
1773 switch (fmt[0]) {
1774
1775 /* signed integers and fast path for 'B' */
1776 case 'B': uc = *((const unsigned char *)ptr); goto convert_uc;
1777 case 'b': ld = *((const signed char *)ptr); goto convert_ld;
1778 case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
1779 case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
1780 case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
1781
1782 /* boolean */
1783 case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
1784
1785 /* unsigned integers */
1786 case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
1787 case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
1788 case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
1789
1790 /* native 64-bit */
1791 case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
1792 case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;
1793
1794 /* ssize_t and size_t */
1795 case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
1796 case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
1797
1798 /* floats */
1799 case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
1800 case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
1801 case 'e': d = PyFloat_Unpack2(ptr, endian); goto convert_double;
1802
1803 /* bytes object */
1804 case 'c': goto convert_bytes;
1805
1806 /* pointer */
1807 case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
1808
1809 /* default */
1810 default: goto err_format;
1811 }
1812
1813 convert_uc:
1814 /* PyLong_FromUnsignedLong() is slower */
1815 return PyLong_FromLong(uc);
1816 convert_ld:
1817 return PyLong_FromLong(ld);
1818 convert_lu:
1819 return PyLong_FromUnsignedLong(lu);
1820 convert_lld:
1821 return PyLong_FromLongLong(lld);
1822 convert_llu:
1823 return PyLong_FromUnsignedLongLong(llu);
1824 convert_zd:
1825 return PyLong_FromSsize_t(zd);
1826 convert_zu:
1827 return PyLong_FromSize_t(zu);
1828 convert_double:
1829 return PyFloat_FromDouble(d);
1830 convert_bool:
1831 return PyBool_FromLong(ld);
1832 convert_bytes:
1833 return PyBytes_FromStringAndSize(ptr, 1);
1834 convert_pointer:
1835 return PyLong_FromVoidPtr(p);
1836 err_format:
1837 PyErr_Format(PyExc_NotImplementedError,
1838 "memoryview: format %s not supported", fmt);
1839 return NULL;
1840 }
1841
1842 #define PACK_SINGLE(ptr, src, type) \
1843 do { \
1844 type x; \
1845 x = (type)src; \
1846 memcpy(ptr, (char *)&x, sizeof x); \
1847 } while (0)
1848
1849 /* Pack a single item. 'fmt' can be any native format character in
1850 struct module syntax. */
1851 static int
1852 pack_single(PyMemoryViewObject *self, char *ptr, PyObject *item, const char *fmt)
1853 {
1854 unsigned long long llu;
1855 unsigned long lu;
1856 size_t zu;
1857 long long lld;
1858 long ld;
1859 Py_ssize_t zd;
1860 double d;
1861 void *p;
1862
1863 #if PY_LITTLE_ENDIAN
1864 int endian = 1;
1865 #else
1866 int endian = 0;
1867 #endif
1868 switch (fmt[0]) {
1869 /* signed integers */
1870 case 'b': case 'h': case 'i': case 'l':
1871 ld = pylong_as_ld(item);
1872 if (ld == -1 && PyErr_Occurred())
1873 goto err_occurred;
1874 CHECK_RELEASED_INT_AGAIN(self);
1875 switch (fmt[0]) {
1876 case 'b':
1877 if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
1878 *((signed char *)ptr) = (signed char)ld; break;
1879 case 'h':
1880 if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
1881 PACK_SINGLE(ptr, ld, short); break;
1882 case 'i':
1883 if (ld < INT_MIN || ld > INT_MAX) goto err_range;
1884 PACK_SINGLE(ptr, ld, int); break;
1885 default: /* 'l' */
1886 PACK_SINGLE(ptr, ld, long); break;
1887 }
1888 break;
1889
1890 /* unsigned integers */
1891 case 'B': case 'H': case 'I': case 'L':
1892 lu = pylong_as_lu(item);
1893 if (lu == (unsigned long)-1 && PyErr_Occurred())
1894 goto err_occurred;
1895 CHECK_RELEASED_INT_AGAIN(self);
1896 switch (fmt[0]) {
1897 case 'B':
1898 if (lu > UCHAR_MAX) goto err_range;
1899 *((unsigned char *)ptr) = (unsigned char)lu; break;
1900 case 'H':
1901 if (lu > USHRT_MAX) goto err_range;
1902 PACK_SINGLE(ptr, lu, unsigned short); break;
1903 case 'I':
1904 if (lu > UINT_MAX) goto err_range;
1905 PACK_SINGLE(ptr, lu, unsigned int); break;
1906 default: /* 'L' */
1907 PACK_SINGLE(ptr, lu, unsigned long); break;
1908 }
1909 break;
1910
1911 /* native 64-bit */
1912 case 'q':
1913 lld = pylong_as_lld(item);
1914 if (lld == -1 && PyErr_Occurred())
1915 goto err_occurred;
1916 CHECK_RELEASED_INT_AGAIN(self);
1917 PACK_SINGLE(ptr, lld, long long);
1918 break;
1919 case 'Q':
1920 llu = pylong_as_llu(item);
1921 if (llu == (unsigned long long)-1 && PyErr_Occurred())
1922 goto err_occurred;
1923 CHECK_RELEASED_INT_AGAIN(self);
1924 PACK_SINGLE(ptr, llu, unsigned long long);
1925 break;
1926
1927 /* ssize_t and size_t */
1928 case 'n':
1929 zd = pylong_as_zd(item);
1930 if (zd == -1 && PyErr_Occurred())
1931 goto err_occurred;
1932 CHECK_RELEASED_INT_AGAIN(self);
1933 PACK_SINGLE(ptr, zd, Py_ssize_t);
1934 break;
1935 case 'N':
1936 zu = pylong_as_zu(item);
1937 if (zu == (size_t)-1 && PyErr_Occurred())
1938 goto err_occurred;
1939 CHECK_RELEASED_INT_AGAIN(self);
1940 PACK_SINGLE(ptr, zu, size_t);
1941 break;
1942
1943 /* floats */
1944 case 'f': case 'd': case 'e':
1945 d = PyFloat_AsDouble(item);
1946 if (d == -1.0 && PyErr_Occurred())
1947 goto err_occurred;
1948 CHECK_RELEASED_INT_AGAIN(self);
1949 if (fmt[0] == 'f') {
1950 PACK_SINGLE(ptr, d, float);
1951 }
1952 else if (fmt[0] == 'd') {
1953 PACK_SINGLE(ptr, d, double);
1954 }
1955 else {
1956 if (PyFloat_Pack2(d, ptr, endian) < 0) {
1957 goto err_occurred;
1958 }
1959 }
1960 break;
1961
1962 /* bool */
1963 case '?':
1964 ld = PyObject_IsTrue(item);
1965 if (ld < 0)
1966 return -1; /* preserve original error */
1967 CHECK_RELEASED_INT_AGAIN(self);
1968 PACK_SINGLE(ptr, ld, _Bool);
1969 break;
1970
1971 /* bytes object */
1972 case 'c':
1973 if (!PyBytes_Check(item))
1974 return type_error_int(fmt);
1975 if (PyBytes_GET_SIZE(item) != 1)
1976 return value_error_int(fmt);
1977 *ptr = PyBytes_AS_STRING(item)[0];
1978 break;
1979
1980 /* pointer */
1981 case 'P':
1982 p = PyLong_AsVoidPtr(item);
1983 if (p == NULL && PyErr_Occurred())
1984 goto err_occurred;
1985 CHECK_RELEASED_INT_AGAIN(self);
1986 PACK_SINGLE(ptr, p, void *);
1987 break;
1988
1989 /* default */
1990 default: goto err_format;
1991 }
1992
1993 return 0;
1994
1995 err_occurred:
1996 return fix_error_int(fmt);
1997 err_range:
1998 return value_error_int(fmt);
1999 err_format:
2000 PyErr_Format(PyExc_NotImplementedError,
2001 "memoryview: format %s not supported", fmt);
2002 return -1;
2003 }
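/* Example (Python level, illustrative): item assignment on a writable view
 * goes through pack_single() with the view's native format:
 *
 *     >>> m = memoryview(bytearray(8)).cast('q')
 *     >>> m[0] = 7            # pack_single(..., fmt == "q")
 *     >>> m[0] = 2**70        # out of range for 'q' -> ValueError
 *                             # (converted by fix_error_int(), defined above)
 */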
2004
2005
2006 /****************************************************************************/
2007 /* unpack using the struct module */
2008 /****************************************************************************/
2009
2010 /* For reasonable performance it is necessary to cache all objects required
2011 for unpacking. An unpacker can handle the format passed to unpack_from().
2012 Invariant: All pointer fields of the struct should either be NULL or valid
2013 pointers. */
2014 struct unpacker {
2015 PyObject *unpack_from; /* Struct.unpack_from(format) */
2016 PyObject *mview; /* cached memoryview */
2017 char *item; /* buffer for mview */
2018 Py_ssize_t itemsize; /* len(item) */
2019 };
2020
2021 static struct unpacker *
2022 unpacker_new(void)
2023 {
2024 struct unpacker *x = PyMem_Malloc(sizeof *x);
2025
2026 if (x == NULL) {
2027 PyErr_NoMemory();
2028 return NULL;
2029 }
2030
2031 x->unpack_from = NULL;
2032 x->mview = NULL;
2033 x->item = NULL;
2034 x->itemsize = 0;
2035
2036 return x;
2037 }
2038
2039 static void
2040 unpacker_free(struct unpacker *x)
2041 {
2042 if (x) {
2043 Py_XDECREF(x->unpack_from);
2044 Py_XDECREF(x->mview);
2045 PyMem_Free(x->item);
2046 PyMem_Free(x);
2047 }
2048 }
2049
2050 /* Return a new unpacker for the given format. */
2051 static struct unpacker *
2052 struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
2053 {
2054 PyObject *Struct = NULL; /* XXX cache it in globals? */
2055 PyObject *structobj = NULL;
2056 PyObject *format = NULL;
2057 struct unpacker *x = NULL;
2058
2059 Struct = _PyImport_GetModuleAttrString("struct", "Struct");
2060 if (Struct == NULL)
2061 return NULL;
2062
2063 x = unpacker_new();
2064 if (x == NULL)
2065 goto error;
2066
2067 format = PyBytes_FromString(fmt);
2068 if (format == NULL)
2069 goto error;
2070
2071 structobj = PyObject_CallOneArg(Struct, format);
2072 if (structobj == NULL)
2073 goto error;
2074
2075 x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
2076 if (x->unpack_from == NULL)
2077 goto error;
2078
2079 x->item = PyMem_Malloc(itemsize);
2080 if (x->item == NULL) {
2081 PyErr_NoMemory();
2082 goto error;
2083 }
2084 x->itemsize = itemsize;
2085
2086 x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
2087 if (x->mview == NULL)
2088 goto error;
2089
2090
2091 out:
2092 Py_XDECREF(Struct);
2093 Py_XDECREF(format);
2094 Py_XDECREF(structobj);
2095 return x;
2096
2097 error:
2098 unpacker_free(x);
2099 x = NULL;
2100 goto out;
2101 }
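/* Rough Python sketch of what struct_get_unpacker() caches (illustrative
 * only; the C code passes the format as a bytes object):
 *
 *     import struct
 *     s = struct.Struct(fmt)              # structobj
 *     item = bytearray(itemsize)          # x->item
 *     mview = memoryview(item)            # x->mview (writable)
 *     unpack_from = s.unpack_from         # x->unpack_from
 */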
2102
2103 /* unpack a single item */
2104 static PyObject *
2105 struct_unpack_single(const char *ptr, struct unpacker *x)
2106 {
2107 PyObject *v;
2108
2109 memcpy(x->item, ptr, x->itemsize);
2110 v = PyObject_CallOneArg(x->unpack_from, x->mview);
2111 if (v == NULL)
2112 return NULL;
2113
2114 if (PyTuple_GET_SIZE(v) == 1) {
2115 PyObject *res = Py_NewRef(PyTuple_GET_ITEM(v, 0));
2116 Py_DECREF(v);
2117 return res;
2118 }
2119
2120 return v;
2121 }
2122
2123
2124 /****************************************************************************/
2125 /* Representations */
2126 /****************************************************************************/
2127
2128 /* allow explicit form of native format */
2129 static inline const char *
2130 adjust_fmt(const Py_buffer *view)
2131 {
2132 const char *fmt;
2133
2134 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2135 if (fmt[0] && fmt[1] == '\0')
2136 return fmt;
2137
2138 PyErr_Format(PyExc_NotImplementedError,
2139 "memoryview: unsupported format %s", view->format);
2140 return NULL;
2141 }
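/* For illustration: "@L" is reduced to "L"; any multi-character format such
 * as "ii" or "2h" is rejected here with NotImplementedError. */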
2142
2143 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2144 static PyObject *
2145 tolist_base(PyMemoryViewObject *self, const char *ptr, const Py_ssize_t *shape,
2146 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2147 const char *fmt)
2148 {
2149 PyObject *lst, *item;
2150 Py_ssize_t i;
2151
2152 lst = PyList_New(shape[0]);
2153 if (lst == NULL)
2154 return NULL;
2155
2156 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2157 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2158 item = unpack_single(self, xptr, fmt);
2159 if (item == NULL) {
2160 Py_DECREF(lst);
2161 return NULL;
2162 }
2163 PyList_SET_ITEM(lst, i, item);
2164 }
2165
2166 return lst;
2167 }
2168
2169 /* Unpack a multi-dimensional array into a nested list.
2170 Assumption: ndim >= 1. */
2171 static PyObject *
2172 tolist_rec(PyMemoryViewObject *self, const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2173 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2174 const char *fmt)
2175 {
2176 PyObject *lst, *item;
2177 Py_ssize_t i;
2178
2179 assert(ndim >= 1);
2180 assert(shape != NULL);
2181 assert(strides != NULL);
2182
2183 if (ndim == 1)
2184 return tolist_base(self, ptr, shape, strides, suboffsets, fmt);
2185
2186 lst = PyList_New(shape[0]);
2187 if (lst == NULL)
2188 return NULL;
2189
2190 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2191 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2192 item = tolist_rec(self, xptr, ndim-1, shape+1,
2193 strides+1, suboffsets ? suboffsets+1 : NULL,
2194 fmt);
2195 if (item == NULL) {
2196 Py_DECREF(lst);
2197 return NULL;
2198 }
2199 PyList_SET_ITEM(lst, i, item);
2200 }
2201
2202 return lst;
2203 }
2204
2205 /* Return a list representation of the memoryview. Currently only buffers
2206 with native format strings are supported. */
2207 /*[clinic input]
2208 memoryview.tolist
2209
2210 Return the data in the buffer as a list of elements.
2211 [clinic start generated code]*/
2212
2213 static PyObject *
2214 memoryview_tolist_impl(PyMemoryViewObject *self)
2215 /*[clinic end generated code: output=a6cda89214fd5a1b input=21e7d0c1860b211a]*/
2216 {
2217 const Py_buffer *view = &self->view;
2218 const char *fmt;
2219
2220 CHECK_RELEASED(self);
2221
2222 fmt = adjust_fmt(view);
2223 if (fmt == NULL)
2224 return NULL;
2225 if (view->ndim == 0) {
2226 return unpack_single(self, view->buf, fmt);
2227 }
2228 else if (view->ndim == 1) {
2229 return tolist_base(self, view->buf, view->shape,
2230 view->strides, view->suboffsets,
2231 fmt);
2232 }
2233 else {
2234 return tolist_rec(self, view->buf, view->ndim, view->shape,
2235 view->strides, view->suboffsets,
2236 fmt);
2237 }
2238 }
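/* Examples (illustrative):
 *
 *     >>> memoryview(b'ab').tolist()
 *     [97, 98]
 *     >>> import struct
 *     >>> memoryview(struct.pack('3h', 1, 2, 3)).cast('h').tolist()
 *     [1, 2, 3]
 */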
2239
2240 /*[clinic input]
2241 memoryview.tobytes
2242
2243 order: str(accept={str, NoneType}, c_default="NULL") = 'C'
2244
2245 Return the data in the buffer as a byte string.
2246
2247 Order can be {'C', 'F', 'A'}. When order is 'C' or 'F', the data of the
2248 original array is converted to C or Fortran order. For contiguous views,
2249 'A' returns an exact copy of the physical memory. In particular, in-memory
2250 Fortran order is preserved. For non-contiguous views, the data is converted
2251 to C first. order=None is the same as order='C'.
2252 [clinic start generated code]*/
2253
2254 static PyObject *
2255 memoryview_tobytes_impl(PyMemoryViewObject *self, const char *order)
2256 /*[clinic end generated code: output=1288b62560a32a23 input=0efa3ddaeda573a8]*/
2257 {
2258 Py_buffer *src = VIEW_ADDR(self);
2259 char ord = 'C';
2260 PyObject *bytes;
2261
2262 CHECK_RELEASED(self);
2263
2264 if (order) {
2265 if (strcmp(order, "F") == 0) {
2266 ord = 'F';
2267 }
2268 else if (strcmp(order, "A") == 0) {
2269 ord = 'A';
2270 }
2271 else if (strcmp(order, "C") != 0) {
2272 PyErr_SetString(PyExc_ValueError,
2273 "order must be 'C', 'F' or 'A'");
2274 return NULL;
2275 }
2276 }
2277
2278 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2279 if (bytes == NULL)
2280 return NULL;
2281
2282 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
2283 Py_DECREF(bytes);
2284 return NULL;
2285 }
2286
2287 return bytes;
2288 }
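/* Example (illustrative): for a non-contiguous view the logical (C-order)
 * contents are copied, not the raw underlying memory:
 *
 *     >>> memoryview(b'abcdef')[::2].tobytes()
 *     b'ace'
 */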
2289
2290 /*[clinic input]
2291 memoryview.hex
2292
2293 sep: object = NULL
2294 An optional single character or byte to separate hex bytes.
2295 bytes_per_sep: int = 1
2296 How many bytes between separators. Positive values count from the
2297 right, negative values count from the left.
2298
2299 Return the data in the buffer as a str of hexadecimal numbers.
2300
2301 Example:
2302 >>> value = memoryview(b'\xb9\x01\xef')
2303 >>> value.hex()
2304 'b901ef'
2305 >>> value.hex(':')
2306 'b9:01:ef'
2307 >>> value.hex(':', 2)
2308 'b9:01ef'
2309 >>> value.hex(':', -2)
2310 'b901:ef'
2311 [clinic start generated code]*/
2312
2313 static PyObject *
2314 memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
2315 int bytes_per_sep)
2316 /*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
2317 {
2318 Py_buffer *src = VIEW_ADDR(self);
2319 PyObject *bytes;
2320 PyObject *ret;
2321
2322 CHECK_RELEASED(self);
2323
2324 if (MV_C_CONTIGUOUS(self->flags)) {
2325 return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
2326 }
2327
2328 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2329 if (bytes == NULL)
2330 return NULL;
2331
2332 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
2333 Py_DECREF(bytes);
2334 return NULL;
2335 }
2336
2337 ret = _Py_strhex_with_sep(
2338 PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
2339 sep, bytes_per_sep);
2340 Py_DECREF(bytes);
2341
2342 return ret;
2343 }
2344
2345 static PyObject *
2346 memory_repr(PyMemoryViewObject *self)
2347 {
2348 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2349 return PyUnicode_FromFormat("<released memory at %p>", self);
2350 else
2351 return PyUnicode_FromFormat("<memory at %p>", self);
2352 }
2353
2354
2355 /**************************************************************************/
2356 /* Indexing and slicing */
2357 /**************************************************************************/
2358
2359 static char *
2360 lookup_dimension(const Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2361 {
2362 Py_ssize_t nitems; /* items in the given dimension */
2363
2364 assert(view->shape);
2365 assert(view->strides);
2366
2367 nitems = view->shape[dim];
2368 if (index < 0) {
2369 index += nitems;
2370 }
2371 if (index < 0 || index >= nitems) {
2372 PyErr_Format(PyExc_IndexError,
2373 "index out of bounds on dimension %d", dim + 1);
2374 return NULL;
2375 }
2376
2377 ptr += view->strides[dim] * index;
2378
2379 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2380
2381 return ptr;
2382 }
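/* For illustration: with view->shape[dim] == 5, index -1 is first normalized
 * to 4, and the result is ptr + view->strides[dim] * 4, followed by the
 * suboffset dereference for PIL-style (indirect) buffers. */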
2383
2384 /* Get the pointer to the item at index. */
2385 static char *
2386 ptr_from_index(const Py_buffer *view, Py_ssize_t index)
2387 {
2388 char *ptr = (char *)view->buf;
2389 return lookup_dimension(view, ptr, 0, index);
2390 }
2391
2392 /* Get the pointer to the item at tuple. */
2393 static char *
2394 ptr_from_tuple(const Py_buffer *view, PyObject *tup)
2395 {
2396 char *ptr = (char *)view->buf;
2397 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2398
2399 if (nindices > view->ndim) {
2400 PyErr_Format(PyExc_TypeError,
2401 "cannot index %d-dimension view with %zd-element tuple",
2402 view->ndim, nindices);
2403 return NULL;
2404 }
2405
2406 for (dim = 0; dim < nindices; dim++) {
2407 Py_ssize_t index;
2408 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2409 PyExc_IndexError);
2410 if (index == -1 && PyErr_Occurred())
2411 return NULL;
2412 ptr = lookup_dimension(view, ptr, (int)dim, index);
2413 if (ptr == NULL)
2414 return NULL;
2415 }
2416 return ptr;
2417 }
2418
2419 /* Return the item at index. In a one-dimensional view, this is an object
2420 with the type specified by view->format. Otherwise, the item is a sub-view.
2421 The function is used in memory_subscript() and memory_as_sequence. */
2422 static PyObject *
2423 memory_item(PyMemoryViewObject *self, Py_ssize_t index)
2424 {
2425 Py_buffer *view = &(self->view);
2426 const char *fmt;
2427
2428 CHECK_RELEASED(self);
2429
2430 fmt = adjust_fmt(view);
2431 if (fmt == NULL)
2432 return NULL;
2433
2434 if (view->ndim == 0) {
2435 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2436 return NULL;
2437 }
2438 if (view->ndim == 1) {
2439 char *ptr = ptr_from_index(view, index);
2440 if (ptr == NULL)
2441 return NULL;
2442 return unpack_single(self, ptr, fmt);
2443 }
2444
2445 PyErr_SetString(PyExc_NotImplementedError,
2446 "multi-dimensional sub-views are not implemented");
2447 return NULL;
2448 }
2449
2450 /* Return the item at position *key* (a tuple of indices). */
2451 static PyObject *
2452 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2453 {
2454 Py_buffer *view = &(self->view);
2455 const char *fmt;
2456 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2457 char *ptr;
2458
2459 CHECK_RELEASED(self);
2460
2461 fmt = adjust_fmt(view);
2462 if (fmt == NULL)
2463 return NULL;
2464
2465 if (nindices < view->ndim) {
2466 PyErr_SetString(PyExc_NotImplementedError,
2467 "sub-views are not implemented");
2468 return NULL;
2469 }
2470 ptr = ptr_from_tuple(view, tup);
2471 if (ptr == NULL)
2472 return NULL;
2473 return unpack_single(self, ptr, fmt);
2474 }
2475
2476 static inline int
2477 init_slice(Py_buffer *base, PyObject *key, int dim)
2478 {
2479 Py_ssize_t start, stop, step, slicelength;
2480
2481 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2482 return -1;
2483 }
2484 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2485
2486
2487 if (base->suboffsets == NULL || dim == 0) {
2488 adjust_buf:
2489 base->buf = (char *)base->buf + base->strides[dim] * start;
2490 }
2491 else {
2492 Py_ssize_t n = dim-1;
2493 while (n >= 0 && base->suboffsets[n] < 0)
2494 n--;
2495 if (n < 0)
2496 goto adjust_buf; /* all suboffsets are negative */
2497 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2498 }
2499 base->shape[dim] = slicelength;
2500 base->strides[dim] = base->strides[dim] * step;
2501
2502 return 0;
2503 }
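/* Illustrative sketch: for a 1-D view of 10 items, m[1:8:3] adjusts the
 * private buffer copy in place roughly as follows:
 *
 *     start, stop, step = 1, 8, 3
 *     slicelength       = 3               # indices 1, 4, 7
 *     buf              += strides[0] * 1  # using the original stride
 *     shape[0]          = 3
 *     strides[0]       *= 3
 */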
2504
2505 static int
2506 is_multislice(PyObject *key)
2507 {
2508 Py_ssize_t size, i;
2509
2510 if (!PyTuple_Check(key))
2511 return 0;
2512 size = PyTuple_GET_SIZE(key);
2513 if (size == 0)
2514 return 0;
2515
2516 for (i = 0; i < size; i++) {
2517 PyObject *x = PyTuple_GET_ITEM(key, i);
2518 if (!PySlice_Check(x))
2519 return 0;
2520 }
2521 return 1;
2522 }
2523
2524 static Py_ssize_t
2525 is_multiindex(PyObject *key)
2526 {
2527 Py_ssize_t size, i;
2528
2529 if (!PyTuple_Check(key))
2530 return 0;
2531 size = PyTuple_GET_SIZE(key);
2532 for (i = 0; i < size; i++) {
2533 PyObject *x = PyTuple_GET_ITEM(key, i);
2534 if (!_PyIndex_Check(x)) {
2535 return 0;
2536 }
2537 }
2538 return 1;
2539 }
2540
2541 /* mv[obj] returns an object holding the data for one element if obj
2542 fully indexes the memoryview or another memoryview object if it
2543 does not.
2544
2545 0-d memoryview objects can be referenced using mv[...] or mv[()]
2546 but not with anything else. */
2547 static PyObject *
2548 memory_subscript(PyMemoryViewObject *self, PyObject *key)
2549 {
2550 Py_buffer *view;
2551 view = &(self->view);
2552
2553 CHECK_RELEASED(self);
2554
2555 if (view->ndim == 0) {
2556 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
2557 const char *fmt = adjust_fmt(view);
2558 if (fmt == NULL)
2559 return NULL;
2560 return unpack_single(self, view->buf, fmt);
2561 }
2562 else if (key == Py_Ellipsis) {
2563 return Py_NewRef(self);
2564 }
2565 else {
2566 PyErr_SetString(PyExc_TypeError,
2567 "invalid indexing of 0-dim memory");
2568 return NULL;
2569 }
2570 }
2571
2572 if (_PyIndex_Check(key)) {
2573 Py_ssize_t index;
2574 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2575 if (index == -1 && PyErr_Occurred())
2576 return NULL;
2577 return memory_item(self, index);
2578 }
2579 else if (PySlice_Check(key)) {
2580 CHECK_RESTRICTED(self);
2581 PyMemoryViewObject *sliced;
2582
2583 sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
2584 if (sliced == NULL)
2585 return NULL;
2586
2587 if (init_slice(&sliced->view, key, 0) < 0) {
2588 Py_DECREF(sliced);
2589 return NULL;
2590 }
2591 init_len(&sliced->view);
2592 init_flags(sliced);
2593
2594 return (PyObject *)sliced;
2595 }
2596 else if (is_multiindex(key)) {
2597 return memory_item_multi(self, key);
2598 }
2599 else if (is_multislice(key)) {
2600 PyErr_SetString(PyExc_NotImplementedError,
2601 "multi-dimensional slicing is not implemented");
2602 return NULL;
2603 }
2604
2605 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2606 return NULL;
2607 }
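/* Examples (illustrative):
 *
 *     >>> m = memoryview(b'abcdef')
 *     >>> m[1]                 # index -> memory_item() -> unpack_single()
 *     98
 *     >>> bytes(m[2:5])        # slice -> mbuf_add_view() + init_slice()
 *     b'cde'
 */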
2608
2609 static int
2610 memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
2611 {
2612 Py_buffer *view = &(self->view);
2613 Py_buffer src;
2614 const char *fmt;
2615 char *ptr;
2616
2617 CHECK_RELEASED_INT(self);
2618
2619 fmt = adjust_fmt(view);
2620 if (fmt == NULL)
2621 return -1;
2622
2623 if (view->readonly) {
2624 PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
2625 return -1;
2626 }
2627 if (value == NULL) {
2628 PyErr_SetString(PyExc_TypeError, "cannot delete memory");
2629 return -1;
2630 }
2631 if (view->ndim == 0) {
2632 if (key == Py_Ellipsis ||
2633 (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
2634 ptr = (char *)view->buf;
2635 return pack_single(self, ptr, value, fmt);
2636 }
2637 else {
2638 PyErr_SetString(PyExc_TypeError,
2639 "invalid indexing of 0-dim memory");
2640 return -1;
2641 }
2642 }
2643
2644 if (_PyIndex_Check(key)) {
2645 Py_ssize_t index;
2646 if (1 < view->ndim) {
2647 PyErr_SetString(PyExc_NotImplementedError,
2648 "sub-views are not implemented");
2649 return -1;
2650 }
2651 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2652 if (index == -1 && PyErr_Occurred())
2653 return -1;
2654 ptr = ptr_from_index(view, index);
2655 if (ptr == NULL)
2656 return -1;
2657 return pack_single(self, ptr, value, fmt);
2658 }
2659 /* one-dimensional: fast path */
2660 if (PySlice_Check(key) && view->ndim == 1) {
2661 Py_buffer dest; /* sliced view */
2662 Py_ssize_t arrays[3];
2663 int ret = -1;
2664
2665 /* rvalue must be an exporter */
2666 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
2667 return ret;
2668
2669 dest = *view;
2670 dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
2671 dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
2672 if (view->suboffsets) {
2673 dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
2674 }
2675
2676 if (init_slice(&dest, key, 0) < 0)
2677 goto end_block;
2678 dest.len = dest.shape[0] * dest.itemsize;
2679
2680 ret = copy_single(self, &dest, &src);
2681
2682 end_block:
2683 PyBuffer_Release(&src);
2684 return ret;
2685 }
2686 if (is_multiindex(key)) {
2687 char *ptr;
2688 if (PyTuple_GET_SIZE(key) < view->ndim) {
2689 PyErr_SetString(PyExc_NotImplementedError,
2690 "sub-views are not implemented");
2691 return -1;
2692 }
2693 ptr = ptr_from_tuple(view, key);
2694 if (ptr == NULL)
2695 return -1;
2696 return pack_single(self, ptr, value, fmt);
2697 }
2698 if (PySlice_Check(key) || is_multislice(key)) {
2699 /* Call memory_subscript() to produce a sliced lvalue, then copy
2700 rvalue into lvalue. This is already implemented in _testbuffer.c. */
2701 PyErr_SetString(PyExc_NotImplementedError,
2702 "memoryview slice assignments are currently restricted "
2703 "to ndim = 1");
2704 return -1;
2705 }
2706
2707 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2708 return -1;
2709 }
2710
2711 static Py_ssize_t
2712 memory_length(PyMemoryViewObject *self)
2713 {
2714 CHECK_RELEASED_INT(self);
2715 if (self->view.ndim == 0) {
2716 PyErr_SetString(PyExc_TypeError, "0-dim memory has no length");
2717 return -1;
2718 }
2719 return self->view.shape[0];
2720 }
2721
2722 /* As mapping */
2723 static PyMappingMethods memory_as_mapping = {
2724 (lenfunc)memory_length, /* mp_length */
2725 (binaryfunc)memory_subscript, /* mp_subscript */
2726 (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
2727 };
2728
2729 /* As sequence */
2730 static PySequenceMethods memory_as_sequence = {
2731 (lenfunc)memory_length, /* sq_length */
2732 0, /* sq_concat */
2733 0, /* sq_repeat */
2734 (ssizeargfunc)memory_item, /* sq_item */
2735 };
2736
2737
2738 /**************************************************************************/
2739 /* Comparisons */
2740 /**************************************************************************/
2741
2742 #define MV_COMPARE_EX -1 /* exception */
2743 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2744
2745 /* Translate a StructError to "not equal". Preserve other exceptions. */
2746 static int
2747 fix_struct_error_int(void)
2748 {
2749 assert(PyErr_Occurred());
2750 /* XXX Cannot get at StructError directly? */
2751 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2752 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2753 return MV_COMPARE_EX;
2754 }
2755 /* StructError: invalid or unknown format -> not equal */
2756 PyErr_Clear();
2757 return 0;
2758 }
2759
2760 /* Unpack and compare single items of p and q using the struct module. */
2761 static int
2762 struct_unpack_cmp(const char *p, const char *q,
2763 struct unpacker *unpack_p, struct unpacker *unpack_q)
2764 {
2765 PyObject *v, *w;
2766 int ret;
2767
2768 /* At this point any exception from the struct module should not be
2769 StructError, since both formats have been accepted already. */
2770 v = struct_unpack_single(p, unpack_p);
2771 if (v == NULL)
2772 return MV_COMPARE_EX;
2773
2774 w = struct_unpack_single(q, unpack_q);
2775 if (w == NULL) {
2776 Py_DECREF(v);
2777 return MV_COMPARE_EX;
2778 }
2779
2780 /* MV_COMPARE_EX == -1: exceptions are preserved */
2781 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2782 Py_DECREF(v);
2783 Py_DECREF(w);
2784
2785 return ret;
2786 }
2787
2788 /* Unpack and compare single items of p and q. If both p and q have the same
2789 single element native format, the comparison uses a fast path (gcc creates
2790 a jump table and converts memcpy into simple assignments on x86/x64).
2791
2792 Otherwise, the comparison is delegated to the struct module, which is
2793 30-60x slower. */
2794 #define CMP_SINGLE(p, q, type) \
2795 do { \
2796 type x; \
2797 type y; \
2798 memcpy((char *)&x, p, sizeof x); \
2799 memcpy((char *)&y, q, sizeof y); \
2800 equal = (x == y); \
2801 } while (0)
2802
2803 static inline int
2804 unpack_cmp(const char *p, const char *q, char fmt,
2805 struct unpacker *unpack_p, struct unpacker *unpack_q)
2806 {
2807 int equal;
2808
2809 switch (fmt) {
2810
2811 /* signed integers and fast path for 'B' */
2812 case 'B': return *((const unsigned char *)p) == *((const unsigned char *)q);
2813 case 'b': return *((const signed char *)p) == *((const signed char *)q);
2814 case 'h': CMP_SINGLE(p, q, short); return equal;
2815 case 'i': CMP_SINGLE(p, q, int); return equal;
2816 case 'l': CMP_SINGLE(p, q, long); return equal;
2817
2818 /* boolean */
2819 case '?': CMP_SINGLE(p, q, _Bool); return equal;
2820
2821 /* unsigned integers */
2822 case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
2823 case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
2824 case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
2825
2826 /* native 64-bit */
2827 case 'q': CMP_SINGLE(p, q, long long); return equal;
2828 case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;
2829
2830 /* ssize_t and size_t */
2831 case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
2832 case 'N': CMP_SINGLE(p, q, size_t); return equal;
2833
2834 /* floats */
2835 /* XXX DBL_EPSILON? */
2836 case 'f': CMP_SINGLE(p, q, float); return equal;
2837 case 'd': CMP_SINGLE(p, q, double); return equal;
2838 case 'e': {
2839 #if PY_LITTLE_ENDIAN
2840 int endian = 1;
2841 #else
2842 int endian = 0;
2843 #endif
2844 /* Note: PyFloat_Unpack2 should never fail */
2845 double u = PyFloat_Unpack2(p, endian);
2846 double v = PyFloat_Unpack2(q, endian);
2847 return (u == v);
2848 }
2849
2850 /* bytes object */
2851 case 'c': return *p == *q;
2852
2853 /* pointer */
2854 case 'P': CMP_SINGLE(p, q, void *); return equal;
2855
2856 /* use the struct module */
2857 case '_':
2858 assert(unpack_p);
2859 assert(unpack_q);
2860 return struct_unpack_cmp(p, q, unpack_p, unpack_q);
2861 }
2862
2863 /* NOT REACHED */
2864 PyErr_SetString(PyExc_RuntimeError,
2865 "memoryview: internal error in richcompare");
2866 return MV_COMPARE_EX;
2867 }
2868
2869 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2870 static int
2871 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2872 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2873 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2874 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2875 {
2876 Py_ssize_t i;
2877 int equal;
2878
2879 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2880 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2881 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2882 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2883 if (equal <= 0)
2884 return equal;
2885 }
2886
2887 return 1;
2888 }
2889
2890 /* Recursively compare two multi-dimensional arrays that have the same
2891 logical structure. Assumption: ndim >= 1. */
2892 static int
2893 cmp_rec(const char *p, const char *q,
2894 Py_ssize_t ndim, const Py_ssize_t *shape,
2895 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2896 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2897 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2898 {
2899 Py_ssize_t i;
2900 int equal;
2901
2902 assert(ndim >= 1);
2903 assert(shape != NULL);
2904 assert(pstrides != NULL);
2905 assert(qstrides != NULL);
2906
2907 if (ndim == 1) {
2908 return cmp_base(p, q, shape,
2909 pstrides, psuboffsets,
2910 qstrides, qsuboffsets,
2911 fmt, unpack_p, unpack_q);
2912 }
2913
2914 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2915 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2916 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2917 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2918 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2919 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2920 fmt, unpack_p, unpack_q);
2921 if (equal <= 0)
2922 return equal;
2923 }
2924
2925 return 1;
2926 }
2927
2928 static PyObject *
2929 memory_richcompare(PyObject *v, PyObject *w, int op)
2930 {
2931 PyObject *res;
2932 Py_buffer wbuf, *vv;
2933 Py_buffer *ww = NULL;
2934 struct unpacker *unpack_v = NULL;
2935 struct unpacker *unpack_w = NULL;
2936 char vfmt, wfmt;
2937 int equal = MV_COMPARE_NOT_IMPL;
2938
2939 if (op != Py_EQ && op != Py_NE)
2940 goto result; /* Py_NotImplemented */
2941
2942 assert(PyMemoryView_Check(v));
2943 if (BASE_INACCESSIBLE(v)) {
2944 equal = (v == w);
2945 goto result;
2946 }
2947 vv = VIEW_ADDR(v);
2948
2949 if (PyMemoryView_Check(w)) {
2950 if (BASE_INACCESSIBLE(w)) {
2951 equal = (v == w);
2952 goto result;
2953 }
2954 ww = VIEW_ADDR(w);
2955 }
2956 else {
2957 if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
2958 PyErr_Clear();
2959 goto result; /* Py_NotImplemented */
2960 }
2961 ww = &wbuf;
2962 }
2963
2964 if (!equiv_shape(vv, ww)) {
2965 PyErr_Clear();
2966 equal = 0;
2967 goto result;
2968 }
2969
2970 /* Use fast unpacking for identical primitive C type formats. */
2971 if (get_native_fmtchar(&vfmt, vv->format) < 0)
2972 vfmt = '_';
2973 if (get_native_fmtchar(&wfmt, ww->format) < 0)
2974 wfmt = '_';
2975 if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
2976 /* Use struct module unpacking. NOTE: Even for equal format strings,
2977 memcmp() cannot be used for item comparison since it would give
2978 incorrect results in the case of NaNs or uninitialized padding
2979 bytes. */
2980 vfmt = '_';
2981 unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
2982 if (unpack_v == NULL) {
2983 equal = fix_struct_error_int();
2984 goto result;
2985 }
2986 unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
2987 if (unpack_w == NULL) {
2988 equal = fix_struct_error_int();
2989 goto result;
2990 }
2991 }
2992
2993 if (vv->ndim == 0) {
2994 equal = unpack_cmp(vv->buf, ww->buf,
2995 vfmt, unpack_v, unpack_w);
2996 }
2997 else if (vv->ndim == 1) {
2998 equal = cmp_base(vv->buf, ww->buf, vv->shape,
2999 vv->strides, vv->suboffsets,
3000 ww->strides, ww->suboffsets,
3001 vfmt, unpack_v, unpack_w);
3002 }
3003 else {
3004 equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
3005 vv->strides, vv->suboffsets,
3006 ww->strides, ww->suboffsets,
3007 vfmt, unpack_v, unpack_w);
3008 }
3009
3010 result:
3011 if (equal < 0) {
3012 if (equal == MV_COMPARE_NOT_IMPL)
3013 res = Py_NotImplemented;
3014 else /* exception */
3015 res = NULL;
3016 }
3017 else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
3018 res = Py_True;
3019 else
3020 res = Py_False;
3021
3022 if (ww == &wbuf)
3023 PyBuffer_Release(ww);
3024
3025 unpacker_free(unpack_v);
3026 unpacker_free(unpack_w);
3027
3028 return Py_XNewRef(res);
3029 }
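/* Examples (illustrative):
 *
 *     >>> memoryview(b'abc') == b'abc'
 *     True
 *     >>> import array
 *     >>> memoryview(array.array('i', [1, 2])) == array.array('d', [1.0, 2.0])
 *     True    # different formats: items are unpacked and compared by value
 */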
3030
3031 /**************************************************************************/
3032 /* Hash */
3033 /**************************************************************************/
3034
3035 static Py_hash_t
3036 memory_hash(PyMemoryViewObject *self)
3037 {
3038 if (self->hash == -1) {
3039 Py_buffer *view = &self->view;
3040 char *mem = view->buf;
3041 Py_ssize_t ret;
3042 char fmt;
3043
3044 CHECK_RELEASED_INT(self);
3045
3046 if (!view->readonly) {
3047 PyErr_SetString(PyExc_ValueError,
3048 "cannot hash writable memoryview object");
3049 return -1;
3050 }
3051 ret = get_native_fmtchar(&fmt, view->format);
3052 if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
3053 PyErr_SetString(PyExc_ValueError,
3054 "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
3055 return -1;
3056 }
3057 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
3058 /* Keep the original error message */
3059 return -1;
3060 }
3061
3062 if (!MV_C_CONTIGUOUS(self->flags)) {
3063 mem = PyMem_Malloc(view->len);
3064 if (mem == NULL) {
3065 PyErr_NoMemory();
3066 return -1;
3067 }
3068 if (buffer_to_contiguous(mem, view, 'C') < 0) {
3069 PyMem_Free(mem);
3070 return -1;
3071 }
3072 }
3073
3074 /* Can't fail */
3075 self->hash = _Py_HashBytes(mem, view->len);
3076
3077 if (mem != view->buf)
3078 PyMem_Free(mem);
3079 }
3080
3081 return self->hash;
3082 }
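/* Example (illustrative): hashing requires a read-only view with a byte
 * format and matches the hash of the equivalent bytes object:
 *
 *     >>> hash(memoryview(b'abc')) == hash(b'abc')
 *     True
 *     >>> hash(memoryview(bytearray(b'abc')))
 *     Traceback (most recent call last):
 *       ...
 *     ValueError: cannot hash writable memoryview object
 */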
3083
3084
3085 /**************************************************************************/
3086 /* getters */
3087 /**************************************************************************/
3088
3089 static PyObject *
3090 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
3091 {
3092 int i;
3093 PyObject *o;
3094 PyObject *intTuple;
3095
3096 if (vals == NULL)
3097 return PyTuple_New(0);
3098
3099 intTuple = PyTuple_New(len);
3100 if (!intTuple)
3101 return NULL;
3102 for (i=0; i<len; i++) {
3103 o = PyLong_FromSsize_t(vals[i]);
3104 if (!o) {
3105 Py_DECREF(intTuple);
3106 return NULL;
3107 }
3108 PyTuple_SET_ITEM(intTuple, i, o);
3109 }
3110 return intTuple;
3111 }
3112
3113 static PyObject *
3114 memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3115 {
3116 Py_buffer *view = &self->view;
3117
3118 CHECK_RELEASED(self);
3119 if (view->obj == NULL) {
3120 Py_RETURN_NONE;
3121 }
3122 return Py_NewRef(view->obj);
3123 }
3124
3125 static PyObject *
3126 memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3127 {
3128 CHECK_RELEASED(self);
3129 return PyLong_FromSsize_t(self->view.len);
3130 }
3131
3132 static PyObject *
3133 memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3134 {
3135 CHECK_RELEASED(self);
3136 return PyUnicode_FromString(self->view.format);
3137 }
3138
3139 static PyObject *
3140 memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3141 {
3142 CHECK_RELEASED(self);
3143 return PyLong_FromSsize_t(self->view.itemsize);
3144 }
3145
3146 static PyObject *
3147 memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3148 {
3149 CHECK_RELEASED(self);
3150 return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
3151 }
3152
3153 static PyObject *
3154 memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3155 {
3156 CHECK_RELEASED(self);
3157 return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
3158 }
3159
3160 static PyObject *
3161 memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3162 {
3163 CHECK_RELEASED(self);
3164 return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
3165 }
3166
3167 static PyObject *
3168 memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3169 {
3170 CHECK_RELEASED(self);
3171 return PyBool_FromLong(self->view.readonly);
3172 }
3173
3174 static PyObject *
3175 memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3176 {
3177 CHECK_RELEASED(self);
3178 return PyLong_FromLong(self->view.ndim);
3179 }
3180
3181 static PyObject *
3182 memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3183 {
3184 CHECK_RELEASED(self);
3185 return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
3186 }
3187
3188 static PyObject *
3189 memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3190 {
3191 CHECK_RELEASED(self);
3192 return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
3193 }
3194
3195 static PyObject *
3196 memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3197 {
3198 CHECK_RELEASED(self);
3199 return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
3200 }
3201
3202 PyDoc_STRVAR(memory_obj_doc,
3203 "The underlying object of the memoryview.");
3204 PyDoc_STRVAR(memory_nbytes_doc,
3205 "The amount of space in bytes that the array would use in\n"
3206 " a contiguous representation.");
3207 PyDoc_STRVAR(memory_readonly_doc,
3208 "A bool indicating whether the memory is read only.");
3209 PyDoc_STRVAR(memory_itemsize_doc,
3210 "The size in bytes of each element of the memoryview.");
3211 PyDoc_STRVAR(memory_format_doc,
3212 "A string containing the format (in struct module style)\n"
3213 " for each element in the view.");
3214 PyDoc_STRVAR(memory_ndim_doc,
3215 "An integer indicating how many dimensions of a multi-dimensional\n"
3216 " array the memory represents.");
3217 PyDoc_STRVAR(memory_shape_doc,
3218 "A tuple of ndim integers giving the shape of the memory\n"
3219 " as an N-dimensional array.");
3220 PyDoc_STRVAR(memory_strides_doc,
3221 "A tuple of ndim integers giving the size in bytes to access\n"
3222 " each element for each dimension of the array.");
3223 PyDoc_STRVAR(memory_suboffsets_doc,
3224 "A tuple of integers used internally for PIL-style arrays.");
3225 PyDoc_STRVAR(memory_c_contiguous_doc,
3226 "A bool indicating whether the memory is C contiguous.");
3227 PyDoc_STRVAR(memory_f_contiguous_doc,
3228 "A bool indicating whether the memory is Fortran contiguous.");
3229 PyDoc_STRVAR(memory_contiguous_doc,
3230 "A bool indicating whether the memory is contiguous.");
3231
3232
3233 static PyGetSetDef memory_getsetlist[] = {
3234 {"obj", (getter)memory_obj_get, NULL, memory_obj_doc},
3235 {"nbytes", (getter)memory_nbytes_get, NULL, memory_nbytes_doc},
3236 {"readonly", (getter)memory_readonly_get, NULL, memory_readonly_doc},
3237 {"itemsize", (getter)memory_itemsize_get, NULL, memory_itemsize_doc},
3238 {"format", (getter)memory_format_get, NULL, memory_format_doc},
3239 {"ndim", (getter)memory_ndim_get, NULL, memory_ndim_doc},
3240 {"shape", (getter)memory_shape_get, NULL, memory_shape_doc},
3241 {"strides", (getter)memory_strides_get, NULL, memory_strides_doc},
3242 {"suboffsets", (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
3243 {"c_contiguous", (getter)memory_c_contiguous, NULL, memory_c_contiguous_doc},
3244 {"f_contiguous", (getter)memory_f_contiguous, NULL, memory_f_contiguous_doc},
3245 {"contiguous", (getter)memory_contiguous, NULL, memory_contiguous_doc},
3246 {NULL, NULL, NULL, NULL},
3247 };
3248
3249
3250 static PyMethodDef memory_methods[] = {
3251 MEMORYVIEW_RELEASE_METHODDEF
3252 MEMORYVIEW_TOBYTES_METHODDEF
3253 MEMORYVIEW_HEX_METHODDEF
3254 MEMORYVIEW_TOLIST_METHODDEF
3255 MEMORYVIEW_CAST_METHODDEF
3256 MEMORYVIEW_TOREADONLY_METHODDEF
3257 MEMORYVIEW__FROM_FLAGS_METHODDEF
3258 {"__enter__", memory_enter, METH_NOARGS, NULL},
3259 {"__exit__", memory_exit, METH_VARARGS, NULL},
3260 {NULL, NULL}
3261 };
3262
3263 /**************************************************************************/
3264 /* Memoryview Iterator */
3265 /**************************************************************************/
3266
3267 PyTypeObject _PyMemoryIter_Type;
3268
3269 typedef struct {
3270 PyObject_HEAD
3271 Py_ssize_t it_index;
3272 PyMemoryViewObject *it_seq; // Set to NULL when iterator is exhausted
3273 Py_ssize_t it_length;
3274 const char *it_fmt;
3275 } memoryiterobject;
3276
3277 static void
3278 memoryiter_dealloc(memoryiterobject *it)
3279 {
3280 _PyObject_GC_UNTRACK(it);
3281 Py_XDECREF(it->it_seq);
3282 PyObject_GC_Del(it);
3283 }
3284
3285 static int
3286 memoryiter_traverse(memoryiterobject *it, visitproc visit, void *arg)
3287 {
3288 Py_VISIT(it->it_seq);
3289 return 0;
3290 }
3291
3292 static PyObject *
3293 memoryiter_next(memoryiterobject *it)
3294 {
3295 PyMemoryViewObject *seq;
3296 seq = it->it_seq;
3297 if (seq == NULL) {
3298 return NULL;
3299 }
3300
3301 if (it->it_index < it->it_length) {
3302 CHECK_RELEASED(seq);
3303 Py_buffer *view = &(seq->view);
3304 char *ptr = (char *)seq->view.buf;
3305
3306 ptr += view->strides[0] * it->it_index++;
3307 ptr = ADJUST_PTR(ptr, view->suboffsets, 0);
3308 if (ptr == NULL) {
3309 return NULL;
3310 }
3311 return unpack_single(seq, ptr, it->it_fmt);
3312 }
3313
3314 it->it_seq = NULL;
3315 Py_DECREF(seq);
3316 return NULL;
3317 }
3318
3319 static PyObject *
3320 memory_iter(PyObject *seq)
3321 {
3322 if (!PyMemoryView_Check(seq)) {
3323 PyErr_BadInternalCall();
3324 return NULL;
3325 }
3326 PyMemoryViewObject *obj = (PyMemoryViewObject *)seq;
3327 int ndims = obj->view.ndim;
3328 if (ndims == 0) {
3329 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
3330 return NULL;
3331 }
3332 if (ndims != 1) {
3333 PyErr_SetString(PyExc_NotImplementedError,
3334 "multi-dimensional sub-views are not implemented");
3335 return NULL;
3336 }
3337
3338 const char *fmt = adjust_fmt(&obj->view);
3339 if (fmt == NULL) {
3340 return NULL;
3341 }
3342
3343 memoryiterobject *it;
3344 it = PyObject_GC_New(memoryiterobject, &_PyMemoryIter_Type);
3345 if (it == NULL) {
3346 return NULL;
3347 }
3348 it->it_fmt = fmt;
3349 it->it_length = memory_length(obj);
3350 it->it_index = 0;
3351 it->it_seq = (PyMemoryViewObject*)Py_NewRef(obj);
3352 _PyObject_GC_TRACK(it);
3353 return (PyObject *)it;
3354 }
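/* Example (illustrative): iterating a 1-D view yields unpacked items:
 *
 *     >>> list(memoryview(b'ab'))
 *     [97, 98]
 *     >>> import struct
 *     >>> list(memoryview(struct.pack('2d', 1.5, 2.5)).cast('d'))
 *     [1.5, 2.5]
 */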
3355
3356 PyTypeObject _PyMemoryIter_Type = {
3357 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3358 .tp_name = "memory_iterator",
3359 .tp_basicsize = sizeof(memoryiterobject),
3360 // methods
3361 .tp_dealloc = (destructor)memoryiter_dealloc,
3362 .tp_getattro = PyObject_GenericGetAttr,
3363 .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
3364 .tp_traverse = (traverseproc)memoryiter_traverse,
3365 .tp_iter = PyObject_SelfIter,
3366 .tp_iternext = (iternextfunc)memoryiter_next,
3367 };
3368
3369 PyTypeObject PyMemoryView_Type = {
3370 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3371 "memoryview", /* tp_name */
3372 offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
3373 sizeof(Py_ssize_t), /* tp_itemsize */
3374 (destructor)memory_dealloc, /* tp_dealloc */
3375 0, /* tp_vectorcall_offset */
3376 0, /* tp_getattr */
3377 0, /* tp_setattr */
3378 0, /* tp_as_async */
3379 (reprfunc)memory_repr, /* tp_repr */
3380 0, /* tp_as_number */
3381 &memory_as_sequence, /* tp_as_sequence */
3382 &memory_as_mapping, /* tp_as_mapping */
3383 (hashfunc)memory_hash, /* tp_hash */
3384 0, /* tp_call */
3385 0, /* tp_str */
3386 PyObject_GenericGetAttr, /* tp_getattro */
3387 0, /* tp_setattro */
3388 &memory_as_buffer, /* tp_as_buffer */
3389 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3390 Py_TPFLAGS_SEQUENCE, /* tp_flags */
3391 memoryview__doc__, /* tp_doc */
3392 (traverseproc)memory_traverse, /* tp_traverse */
3393 (inquiry)memory_clear, /* tp_clear */
3394 memory_richcompare, /* tp_richcompare */
3395 offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
3396 memory_iter, /* tp_iter */
3397 0, /* tp_iternext */
3398 memory_methods, /* tp_methods */
3399 0, /* tp_members */
3400 memory_getsetlist, /* tp_getset */
3401 0, /* tp_base */
3402 0, /* tp_dict */
3403 0, /* tp_descr_get */
3404 0, /* tp_descr_set */
3405 0, /* tp_dictoffset */
3406 0, /* tp_init */
3407 0, /* tp_alloc */
3408 memoryview, /* tp_new */
3409 };