Lines Matching refs:size
2674 unsigned long uaddr, size_t size)
2683 if (uaddr & (PAGE_SIZE - 1) || !size)
2686 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
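
The check at 2683 and the rounding at 2686 are the head of the shared user-memory mapping helper (__io_uaddr_map in mainline trees of this vintage): a user address that is not page aligned, or a zero size, is rejected up front, and the byte count is then rounded up to whole pages. A minimal sketch of just that validation; the helper name above and the pinning that follows it are assumptions, not shown in this listing:

#include <linux/errno.h>
#include <linux/mm.h>

/* Page count needed to map [uaddr, uaddr + size), or -EINVAL. */
static long uaddr_nr_pages_sketch(unsigned long uaddr, size_t size)
{
        /* The user address must be page aligned and the size non-zero. */
        if (uaddr & (PAGE_SIZE - 1) || !size)
                return -EINVAL;

        /* Round the byte count up to a whole number of pages. */
        return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
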
2735 size_t size)
2738 size);
2742 size_t size)
2745 size);
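
2735-2745 are the tails of two thin wrappers (io_rings_map and io_sqes_map in mainline) that forward a user-supplied address and size to that shared helper, one for the ring pages and one for the SQE array. The shape, with the helper signature and the ctx bookkeeping fields both assumed:

#include <linux/io_uring_types.h>

/* Shared helper from 2674-2686; this signature is an assumption. */
static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
                            unsigned long uaddr, size_t size);

static void *rings_map_sketch(struct io_ring_ctx *ctx, unsigned long uaddr,
                              size_t size)
{
        /* ring_pages/n_ring_pages are assumed ctx field names. */
        return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages,
                              uaddr, size);
}
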
2763 void *io_mem_alloc(size_t size)
2768 ret = (void *) __get_free_pages(gfp, get_order(size));
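
2763-2768 is io_mem_alloc(): ring memory comes straight from __get_free_pages(), with get_order() rounding the request up to the next power-of-two number of pages. A sketch with the gfp flags assumed from typical accounted ring allocations; whatever frees this memory must pass the same order back:

#include <linux/err.h>
#include <linux/gfp.h>

static void *mem_alloc_sketch(size_t size)
{
        /* Assumed flags: accounted, zeroed, quiet on failure, compound. */
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
        void *ret;

        ret = (void *) __get_free_pages(gfp, get_order(size));
        if (!ret)
                return ERR_PTR(-ENOMEM);
        return ret;
}

static void mem_free_sketch(void *ptr, size_t size)
{
        free_pages((unsigned long) ptr, get_order(size));
}
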
3569 * EXT_ARG is set - ensure we agree on the size of it and copy in our
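
The comment at 3569 is from the wait path: with IORING_ENTER_EXT_ARG set, userspace passes a struct io_uring_getevents_arg rather than a bare sigset, and the kernel accepts it only if both sides agree on its size. A sketch of that handshake, assuming this is the getevents-argument copy-in the comment refers to:

#include <linux/uaccess.h>
#include <uapi/linux/io_uring.h>

static int copy_ext_arg_sketch(const void __user *argp, size_t argsz,
                               struct io_uring_getevents_arg *arg)
{
        /* Both sides must agree on the struct size before copying. */
        if (argsz != sizeof(*arg))
                return -EINVAL;
        if (copy_from_user(arg, argp, sizeof(*arg)))
                return -EFAULT;
        return 0;
}
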
3742 size_t size, sq_array_offset;
3749 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3750 if (size == SIZE_MAX)
3754 rings = io_mem_alloc(size);
3756 rings = io_rings_map(ctx, p->cq_off.user_addr, size);
3770 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3772 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3773 if (size == SIZE_MAX) {
3779 ptr = io_mem_alloc(size);
3781 ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
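
3742-3781 is the ring and SQE-array setup: each size is computed with a saturating helper, compared against SIZE_MAX to catch overflow, and only then either allocated in the kernel (io_mem_alloc) or mapped from memory the application provides (io_rings_map/io_sqes_map, the IORING_SETUP_NO_MMAP case). A sketch of the SQE branch at 3770-3781, with the user-memory path and error unwinding elided:

#include <linux/err.h>
#include <linux/overflow.h>
#include <uapi/linux/io_uring.h>

void *io_mem_alloc(size_t size);        /* helper from 2763 */

static void *alloc_sqes_sketch(struct io_uring_params *p)
{
        size_t size;

        /* IORING_SETUP_SQE128 doubles the per-entry footprint. */
        if (p->flags & IORING_SETUP_SQE128)
                size = array_size(2 * sizeof(struct io_uring_sqe),
                                  p->sq_entries);
        else
                size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);

        /* array_size() saturates to SIZE_MAX when the multiply overflows. */
        if (size == SIZE_MAX)
                return ERR_PTR(-EOVERFLOW);

        return io_mem_alloc(size);
}
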
3836 * application to drive a higher depth than the size of the SQ ring,
4022 * ring size, we return the actual sq/cq ring sizes (among other things) in the
4082 size_t size;
4085 size = struct_size(p, ops, nr_args);
4086 if (size == SIZE_MAX)
4088 p = kzalloc(size, GFP_KERNEL);
4093 if (copy_from_user(p, arg, size))
4096 if (memchr_inv(p, 0, size))
4111 if (copy_to_user(arg, p, size))
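
4082-4111 is the probe pattern: size a struct with a flexible array via struct_size(), allocate it zeroed, copy the user's buffer in, insist the whole buffer was zero on input (memchr_inv), fill it, and copy it back out. A condensed sketch; the fill step between the checks and the copy-out is elided:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <uapi/linux/io_uring.h>

static int probe_sketch(struct io_uring_probe __user *arg,
                        unsigned int nr_args)
{
        struct io_uring_probe *p;
        size_t size;
        int ret;

        /* struct_size() saturates to SIZE_MAX on overflow. */
        size = struct_size(p, ops, nr_args);
        if (size == SIZE_MAX)
                return -EOVERFLOW;

        p = kzalloc(size, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        ret = -EFAULT;
        if (copy_from_user(p, arg, size))
                goto out;

        /* The buffer is output-only: any non-zero input byte is rejected. */
        ret = -EINVAL;
        if (memchr_inv(p, 0, size))
                goto out;

        /* ... fill in p->ops[0..nr_args) here ... */

        ret = 0;
        if (copy_to_user(arg, p, size))
                ret = -EFAULT;
out:
        kfree(p);
        return ret;
}
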
4139 size_t size;
4153 size = array_size(nr_args, sizeof(*res));
4154 if (size == SIZE_MAX)
4157 res = memdup_user(arg, size);
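
4139-4157 pairs array_size() with memdup_user(): compute a saturating array size, reject SIZE_MAX, then allocate and copy the user buffer in a single call. This looks like the restrictions-registration path; the io_uring_restriction element type is an assumption:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/string.h>
#include <uapi/linux/io_uring.h>

static struct io_uring_restriction *dup_restrictions_sketch(void __user *arg,
                                                            unsigned int nr_args)
{
        size_t size;

        size = array_size(nr_args, sizeof(struct io_uring_restriction));
        if (size == SIZE_MAX)
                return ERR_PTR(-EOVERFLOW);

        /* memdup_user() allocates and copies; it returns ERR_PTR on failure. */
        return memdup_user(arg, size);
}
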