// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
        struct file                     *file;
        u64                             arg;
        u32                             nr_args;
        u32                             offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
                                  struct io_mapped_ubuf **pimu,
                                  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES  (1U << 20)
#define IORING_MAX_REG_BUFFERS  (1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
        /* set invalid range, so io_import_fixed() fails meeting it */
        .ubuf = -1UL,
        .ubuf_end = 0,
};

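/*
 * Charge @nr_pages against the user's RLIMIT_MEMLOCK. The limit is
 * re-checked in a cmpxchg loop so concurrent callers cannot overshoot it;
 * returns -ENOMEM if the new total would exceed the rlimit.
 */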
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
        unsigned long page_limit, cur_pages, new_pages;

        if (!nr_pages)
                return 0;

        /* Don't allow more pages than we can safely lock */
        page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        cur_pages = atomic_long_read(&user->locked_vm);
        do {
                new_pages = cur_pages + nr_pages;
                if (new_pages > page_limit)
                        return -ENOMEM;
        } while (!atomic_long_try_cmpxchg(&user->locked_vm,
                                          &cur_pages, new_pages));
        return 0;
}

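/*
 * Per-ctx accounting wrappers: pages are charged both to the registering
 * user (locked_vm) and to the originating mm (pinned_vm) when available.
 */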
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
        if (ctx->user)
                __io_unaccount_mem(ctx->user, nr_pages);

        if (ctx->mm_account)
                atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
        int ret;

        if (ctx->user) {
                ret = __io_account_mem(ctx->user, nr_pages);
                if (ret)
                        return ret;
        }

        if (ctx->mm_account)
                atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

        return 0;
}

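/*
 * Copy the iovec at @index from userspace, converting from the compat
 * layout when the ring was set up by a compat task.
 */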
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
                       void __user *arg, unsigned index)
{
        struct iovec __user *src;

#ifdef CONFIG_COMPAT
        if (ctx->compat) {
                struct compat_iovec __user *ciovs;
                struct compat_iovec ciov;

                ciovs = (struct compat_iovec __user *) arg;
                if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
                        return -EFAULT;

                dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
                dst->iov_len = ciov.iov_len;
                return 0;
        }
#endif
        src = (struct iovec __user *) arg;
        if (copy_from_user(dst, &src[index], sizeof(*dst)))
                return -EFAULT;
        return 0;
}

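/*
 * Basic sanity checks for a buffer registration: a NULL base is only
 * accepted for a sparse (zero length) entry, the length is capped at 1G,
 * and the base plus the page-rounded length must not overflow.
 */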
static int io_buffer_validate(struct iovec *iov)
{
        unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

        /*
         * Don't impose further limits on the size and buffer
         * constraints here, we'll -EINVAL later when IO is
         * submitted if they are wrong.
         */
        if (!iov->iov_base)
                return iov->iov_len ? -EFAULT : 0;
        if (!iov->iov_len)
                return -EFAULT;

        /* arbitrary limit, but we need something */
        if (iov->iov_len > SZ_1G)
                return -EFAULT;

        if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
                return -EOVERFLOW;

        return 0;
}

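/*
 * Drop a registered buffer: unpin its pages, return the accounted memory
 * and free the mapping, leaving the slot cleared. Sparse dummy entries
 * are skipped.
 */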
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
        struct io_mapped_ubuf *imu = *slot;
        unsigned int i;

        if (imu != &dummy_ubuf) {
                for (i = 0; i < imu->nr_bvecs; i++)
                        unpin_user_page(imu->bvec[i].bv_page);
                if (imu->acct_pages)
                        io_unaccount_mem(ctx, imu->acct_pages);
                kvfree(imu);
        }
        *slot = NULL;
}

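/*
 * Final put of a resource node: post the user-supplied tag as a CQE (if
 * any), then release the underlying file or buffer.
 */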
static void io_rsrc_put_work(struct io_rsrc_node *node)
{
        struct io_rsrc_put *prsrc = &node->item;

        if (prsrc->tag)
                io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

        switch (node->type) {
        case IORING_RSRC_FILE:
                fput(prsrc->file);
                break;
        case IORING_RSRC_BUFFER:
                io_rsrc_buf_put(node->ctx, prsrc);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
        if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
                kfree(node);
}

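/*
 * Called when a node's refcount hits zero. Nodes are retired strictly in
 * list order so tag CQEs are posted in registration order; quiesce waiters
 * are woken once the list drains.
 */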
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
        __must_hold(&node->ctx->uring_lock)
{
        struct io_ring_ctx *ctx = node->ctx;

        while (!list_empty(&ctx->rsrc_ref_list)) {
                node = list_first_entry(&ctx->rsrc_ref_list,
                                        struct io_rsrc_node, node);
                /* recycle ref nodes in order */
                if (node->refs)
                        break;
                list_del(&node->node);

                if (likely(!node->empty))
                        io_rsrc_put_work(node);
                io_rsrc_node_destroy(ctx, node);
        }
        if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
                wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
        struct io_rsrc_node *ref_node;
        struct io_cache_entry *entry;

        entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
        if (entry) {
                ref_node = container_of(entry, struct io_rsrc_node, cache);
        } else {
                ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
                if (!ref_node)
                        return NULL;
        }

        ref_node->ctx = ctx;
        ref_node->empty = 0;
        ref_node->refs = 1;
        return ref_node;
}

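/*
 * Wait for all outstanding resource nodes to be dropped. The current node
 * is retired and replaced with a fresh one, then we sleep until
 * io_rsrc_node_ref_zero() empties ->rsrc_ref_list. ->uring_lock is
 * released while waiting, so callers must re-validate state afterwards.
 */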
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                                      struct io_ring_ctx *ctx)
{
        struct io_rsrc_node *backup;
        DEFINE_WAIT(we);
        int ret;

        /* As we may drop ->uring_lock, other task may have started quiesce */
        if (data->quiesce)
                return -ENXIO;

        backup = io_rsrc_node_alloc(ctx);
        if (!backup)
                return -ENOMEM;
        ctx->rsrc_node->empty = true;
        ctx->rsrc_node->type = -1;
        list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
        io_put_rsrc_node(ctx, ctx->rsrc_node);
        ctx->rsrc_node = backup;

        if (list_empty(&ctx->rsrc_ref_list))
                return 0;

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
                atomic_set(&ctx->cq_wait_nr, 1);
                smp_mb();
        }

        ctx->rsrc_quiesce++;
        data->quiesce = true;
        do {
                prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
                mutex_unlock(&ctx->uring_lock);

                ret = io_run_task_work_sig(ctx);
                if (ret < 0) {
                        mutex_lock(&ctx->uring_lock);
                        if (list_empty(&ctx->rsrc_ref_list))
                                ret = 0;
                        break;
                }

                schedule();
                __set_current_state(TASK_RUNNING);
                mutex_lock(&ctx->uring_lock);
                ret = 0;
        } while (!list_empty(&ctx->rsrc_ref_list));

        finish_wait(&ctx->rsrc_quiesce_wq, &we);
        data->quiesce = false;
        ctx->rsrc_quiesce--;

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
                atomic_set(&ctx->cq_wait_nr, 0);
                smp_mb();
        }
        return ret;
}

static void io_free_page_table(void **table, size_t size)
{
        unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

        for (i = 0; i < nr_tables; i++)
                kfree(table[i]);
        kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
        size_t size = data->nr * sizeof(data->tags[0][0]);

        if (data->tags)
                io_free_page_table((void **)data->tags, size);
        kfree(data);
}

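/*
 * Allocate a two-level table of PAGE_SIZE chunks covering @size bytes,
 * avoiding one large allocation for big tag arrays.
 */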
static __cold void **io_alloc_page_table(size_t size)
{
        unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
        size_t init_size = size;
        void **table;

        table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
        if (!table)
                return NULL;

        for (i = 0; i < nr_tables; i++) {
                unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

                table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
                if (!table[i]) {
                        io_free_page_table(table, init_size);
                        return NULL;
                }
                size -= this_size;
        }
        return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
                                     u64 __user *utags,
                                     unsigned nr, struct io_rsrc_data **pdata)
{
        struct io_rsrc_data *data;
        int ret = 0;
        unsigned i;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
        if (!data->tags) {
                kfree(data);
                return -ENOMEM;
        }

        data->nr = nr;
        data->ctx = ctx;
        data->rsrc_type = type;
        if (utags) {
                ret = -EFAULT;
                for (i = 0; i < nr; i++) {
                        u64 *tag_slot = io_get_tag_slot(data, i);

                        if (copy_from_user(tag_slot, &utags[i],
                                           sizeof(*tag_slot)))
                                goto fail;
                }
        }
        *pdata = data;
        return 0;
fail:
        io_rsrc_data_free(data);
        return ret;
}

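/*
 * Update a range of registered file slots. Occupied slots are queued for
 * deferred put via the rsrc node machinery; fd == -1 clears a slot and
 * IORING_REGISTER_FILES_SKIP leaves it untouched.
 */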
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_rsrc_update2 *up,
                                 unsigned nr_args)
{
        u64 __user *tags = u64_to_user_ptr(up->tags);
        __s32 __user *fds = u64_to_user_ptr(up->data);
        struct io_rsrc_data *data = ctx->file_data;
        struct io_fixed_file *file_slot;
        int fd, i, err = 0;
        unsigned int done;

        if (!ctx->file_data)
                return -ENXIO;
        if (up->offset + nr_args > ctx->nr_user_files)
                return -EINVAL;

        for (done = 0; done < nr_args; done++) {
                u64 tag = 0;

                if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
                    copy_from_user(&fd, &fds[done], sizeof(fd))) {
                        err = -EFAULT;
                        break;
                }
                if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
                        err = -EINVAL;
                        break;
                }
                if (fd == IORING_REGISTER_FILES_SKIP)
                        continue;

                i = array_index_nospec(up->offset + done, ctx->nr_user_files);
                file_slot = io_fixed_file_slot(&ctx->file_table, i);

                if (file_slot->file_ptr) {
                        err = io_queue_rsrc_removal(data, i,
                                                    io_slot_file(file_slot));
                        if (err)
                                break;
                        file_slot->file_ptr = 0;
                        io_file_bitmap_clear(&ctx->file_table, i);
                }
                if (fd != -1) {
                        struct file *file = fget(fd);

                        if (!file) {
                                err = -EBADF;
                                break;
                        }
                        /*
                         * Don't allow io_uring instances to be registered.
                         */
                        if (io_is_uring_fops(file)) {
                                fput(file);
                                err = -EBADF;
                                break;
                        }
                        *io_get_tag_slot(data, i) = tag;
                        io_fixed_file_set(file_slot, file);
                        io_file_bitmap_set(&ctx->file_table, i);
                }
        }
        return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
                                   struct io_uring_rsrc_update2 *up,
                                   unsigned int nr_args)
{
        u64 __user *tags = u64_to_user_ptr(up->tags);
        struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
        struct page *last_hpage = NULL;
        __u32 done;
        int i, err;

        if (!ctx->buf_data)
                return -ENXIO;
        if (up->offset + nr_args > ctx->nr_user_bufs)
                return -EINVAL;

        for (done = 0; done < nr_args; done++) {
                struct io_mapped_ubuf *imu;
                u64 tag = 0;

                err = io_copy_iov(ctx, &iov, iovs, done);
                if (err)
                        break;
                if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
                        err = -EFAULT;
                        break;
                }
                err = io_buffer_validate(&iov);
                if (err)
                        break;
                if (!iov.iov_base && tag) {
                        err = -EINVAL;
                        break;
                }
                err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
                if (err)
                        break;

                i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
                if (ctx->user_bufs[i] != &dummy_ubuf) {
                        err = io_queue_rsrc_removal(ctx->buf_data, i,
                                                    ctx->user_bufs[i]);
                        if (unlikely(err)) {
                                io_buffer_unmap(ctx, &imu);
                                break;
                        }
                        ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
                }

                ctx->user_bufs[i] = imu;
                *io_get_tag_slot(ctx->buf_data, i) = tag;
        }
        return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
                                     struct io_uring_rsrc_update2 *up,
                                     unsigned nr_args)
{
        __u32 tmp;

        lockdep_assert_held(&ctx->uring_lock);

        if (check_add_overflow(up->offset, nr_args, &tmp))
                return -EOVERFLOW;

        switch (type) {
        case IORING_RSRC_FILE:
                return __io_sqe_files_update(ctx, up, nr_args);
        case IORING_RSRC_BUFFER:
                return __io_sqe_buffers_update(ctx, up, nr_args);
        }
        return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
                             unsigned nr_args)
{
        struct io_uring_rsrc_update2 up;

        if (!nr_args)
                return -EINVAL;
        memset(&up, 0, sizeof(up));
        if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
                return -EFAULT;
        if (up.resv || up.resv2)
                return -EINVAL;
        return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                            unsigned size, unsigned type)
{
        struct io_uring_rsrc_update2 up;

        if (size != sizeof(up))
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
        if (!up.nr || up.resv || up.resv2)
                return -EINVAL;
        return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
                            unsigned int size, unsigned int type)
{
        struct io_uring_rsrc_register rr;

        /* keep it extendible */
        if (size != sizeof(rr))
                return -EINVAL;

        memset(&rr, 0, sizeof(rr));
        if (copy_from_user(&rr, arg, size))
                return -EFAULT;
        if (!rr.nr || rr.resv2)
                return -EINVAL;
        if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
                return -EINVAL;

        switch (type) {
        case IORING_RSRC_FILE:
                if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
                        break;
                return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
                                             rr.nr, u64_to_user_ptr(rr.tags));
        case IORING_RSRC_BUFFER:
                if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
                        break;
                return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
                                               rr.nr, u64_to_user_ptr(rr.tags));
        }
        return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
        if (sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;

        up->offset = READ_ONCE(sqe->off);
        up->nr_args = READ_ONCE(sqe->len);
        if (!up->nr_args)
                return -EINVAL;
        up->arg = READ_ONCE(sqe->addr);
        return 0;
}

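/*
 * IORING_FILE_INDEX_ALLOC variant: install each fd into a free fixed slot
 * and copy the chosen index back to userspace. On copy failure the slot
 * just installed is closed again.
 */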
static int io_files_update_with_index_alloc(struct io_kiocb *req,
                                            unsigned int issue_flags)
{
        struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
        __s32 __user *fds = u64_to_user_ptr(up->arg);
        unsigned int done;
        struct file *file;
        int ret, fd;

        if (!req->ctx->file_data)
                return -ENXIO;

        for (done = 0; done < up->nr_args; done++) {
                if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
                        ret = -EFAULT;
                        break;
                }

                file = fget(fd);
                if (!file) {
                        ret = -EBADF;
                        break;
                }
                ret = io_fixed_fd_install(req, issue_flags, file,
                                          IORING_FILE_INDEX_ALLOC);
                if (ret < 0)
                        break;
                if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
                        __io_close_fixed(req->ctx, issue_flags, ret);
                        ret = -EFAULT;
                        break;
                }
        }

        if (done)
                return done;
        return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_uring_rsrc_update2 up2;
        int ret;

        up2.offset = up->offset;
        up2.data = up->arg;
        up2.nr = 0;
        up2.tags = 0;
        up2.resv = 0;
        up2.resv2 = 0;

        if (up->offset == IORING_FILE_INDEX_ALLOC) {
                ret = io_files_update_with_index_alloc(req, issue_flags);
        } else {
                io_ring_submit_lock(ctx, issue_flags);
                ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
                                                &up2, up->nr_args);
                io_ring_submit_unlock(ctx, issue_flags);
        }

        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

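/*
 * Queue @rsrc for deferred release: stash it and its tag in the current
 * rsrc node, swap in a fresh node for subsequent requests, and drop our
 * reference so the old node can retire once unused.
 */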
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
        struct io_ring_ctx *ctx = data->ctx;
        struct io_rsrc_node *node = ctx->rsrc_node;
        u64 *tag_slot = io_get_tag_slot(data, idx);

        ctx->rsrc_node = io_rsrc_node_alloc(ctx);
        if (unlikely(!ctx->rsrc_node)) {
                ctx->rsrc_node = node;
                return -ENOMEM;
        }

        node->item.rsrc = rsrc;
        node->type = data->rsrc_type;
        node->item.tag = *tag_slot;
        *tag_slot = 0;
        list_add_tail(&node->node, &ctx->rsrc_ref_list);
        io_put_rsrc_node(ctx, node);
        return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
        int i;

        for (i = 0; i < ctx->nr_user_files; i++) {
                struct file *file = io_file_from_index(&ctx->file_table, i);

                if (!file)
                        continue;
                io_file_bitmap_clear(&ctx->file_table, i);
                fput(file);
        }

        io_free_file_tables(&ctx->file_table);
        io_file_table_set_alloc_range(ctx, 0, 0);
        io_rsrc_data_free(ctx->file_data);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
        unsigned nr = ctx->nr_user_files;
        int ret;

        if (!ctx->file_data)
                return -ENXIO;

        /*
         * Quiesce may unlock ->uring_lock, and while it's not held
         * prevent new requests using the table.
         */
        ctx->nr_user_files = 0;
        ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
        ctx->nr_user_files = nr;
        if (!ret)
                __io_sqe_files_unregister(ctx);
        return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                          unsigned nr_args, u64 __user *tags)
{
        __s32 __user *fds = (__s32 __user *) arg;
        struct file *file;
        int fd, ret;
        unsigned i;

        if (ctx->file_data)
                return -EBUSY;
        if (!nr_args)
                return -EINVAL;
        if (nr_args > IORING_MAX_FIXED_FILES)
                return -EMFILE;
        if (nr_args > rlimit(RLIMIT_NOFILE))
                return -EMFILE;
        ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
                                 &ctx->file_data);
        if (ret)
                return ret;

        if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
                io_rsrc_data_free(ctx->file_data);
                ctx->file_data = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
                struct io_fixed_file *file_slot;

                if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
                        ret = -EFAULT;
                        goto fail;
                }
                /* allow sparse sets */
                if (!fds || fd == -1) {
                        ret = -EINVAL;
                        if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
                                goto fail;
                        continue;
                }

                file = fget(fd);
                ret = -EBADF;
                if (unlikely(!file))
                        goto fail;

                /*
                 * Don't allow io_uring instances to be registered.
                 */
                if (io_is_uring_fops(file)) {
                        fput(file);
                        goto fail;
                }
                file_slot = io_fixed_file_slot(&ctx->file_table, i);
                io_fixed_file_set(file_slot, file);
                io_file_bitmap_set(&ctx->file_table, i);
        }

        /* default it to the whole table */
        io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
        return 0;
fail:
        __io_sqe_files_unregister(ctx);
        return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
        io_buffer_unmap(ctx, &prsrc->buf);
        prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
        unsigned int i;

        for (i = 0; i < ctx->nr_user_bufs; i++)
                io_buffer_unmap(ctx, &ctx->user_bufs[i]);
        kfree(ctx->user_bufs);
        io_rsrc_data_free(ctx->buf_data);
        ctx->user_bufs = NULL;
        ctx->buf_data = NULL;
        ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
        unsigned nr = ctx->nr_user_bufs;
        int ret;

        if (!ctx->buf_data)
                return -ENXIO;

        /*
         * Quiesce may unlock ->uring_lock, and while it's not held
         * prevent new requests using the table.
         */
        ctx->nr_user_bufs = 0;
        ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
        ctx->nr_user_bufs = nr;
        if (!ret)
                __io_sqe_buffers_unregister(ctx);
        return ret;
}

/*
 * Not super efficient, but this is only done at registration time. And we
 * do cache the last compound head, so generally we'll only do a full
 * search if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
                                  int nr_pages, struct page *hpage)
{
        int i, j;

        /* check current page array */
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i]))
                        continue;
                if (compound_head(pages[i]) == hpage)
                        return true;
        }

        /* check previously registered pages */
        for (i = 0; i < ctx->nr_user_bufs; i++) {
                struct io_mapped_ubuf *imu = ctx->user_bufs[i];

                for (j = 0; j < imu->nr_bvecs; j++) {
                        if (!PageCompound(imu->bvec[j].bv_page))
                                continue;
                        if (compound_head(imu->bvec[j].bv_page) == hpage)
                                return true;
                }
        }

        return false;
}

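/*
 * Work out how many pages to account for this registration. Normal pages
 * count individually; for a compound (huge) page the full huge page is
 * accounted once, the first time its head page is seen.
 */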
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
                                 int nr_pages, struct io_mapped_ubuf *imu,
                                 struct page **last_hpage)
{
        int i, ret;

        imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
                } else {
                        struct page *hpage;

                        hpage = compound_head(pages[i]);
                        if (hpage == *last_hpage)
                                continue;
                        *last_hpage = hpage;
                        if (headpage_already_acct(ctx, pages, i, hpage))
                                continue;
                        imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
                }
        }

        if (!imu->acct_pages)
                return 0;

        ret = io_account_mem(ctx, imu->acct_pages);
        if (ret)
                imu->acct_pages = 0;
        return ret;
}

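/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use. Returns
 * the page array and its size via @npages, or an ERR_PTR on failure; a
 * partial pin is unwound and treated as -EFAULT.
 */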
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
        unsigned long start, end, nr_pages;
        struct page **pages = NULL;
        int pret, ret = -ENOMEM;

        end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        start = ubuf >> PAGE_SHIFT;
        nr_pages = end - start;

        pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto done;

        ret = 0;
        mmap_read_lock(current->mm);
        pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
                              pages);
        if (pret == nr_pages)
                *npages = nr_pages;
        else
                ret = pret < 0 ? pret : -EFAULT;

        mmap_read_unlock(current->mm);
        if (ret) {
                /* if we did partial map, release any pages we did get */
                if (pret > 0)
                        unpin_user_pages(pages, pret);
                goto done;
        }
        ret = 0;
done:
        if (ret < 0) {
                kvfree(pages);
                pages = ERR_PTR(ret);
        }
        return pages;
}

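/*
 * Register a single user buffer: pin its pages, account them and build the
 * bvec table. Consecutive pages that all sit in one folio are coalesced
 * into a single bvec entry. A NULL iov_base registers the sparse dummy
 * entry.
 */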
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
                                  struct io_mapped_ubuf **pimu,
                                  struct page **last_hpage)
{
        struct io_mapped_ubuf *imu = NULL;
        struct page **pages = NULL;
        unsigned long off;
        size_t size;
        int ret, nr_pages, i;
        struct folio *folio = NULL;

        *pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
        if (!iov->iov_base)
                return 0;

        ret = -ENOMEM;
        pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
                             &nr_pages);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                pages = NULL;
                goto done;
        }

        /* If it's a huge page, try to coalesce them into a single bvec entry */
        if (nr_pages > 1) {
                folio = page_folio(pages[0]);
                for (i = 1; i < nr_pages; i++) {
                        /*
                         * Pages must be consecutive and on the same folio for
                         * this to work
                         */
                        if (page_folio(pages[i]) != folio ||
                            pages[i] != pages[i - 1] + 1) {
                                folio = NULL;
                                break;
                        }
                }
                if (folio) {
                        /*
                         * The pages are bound to the folio, it doesn't
                         * actually unpin them but drops all but one reference,
                         * which is usually put down by io_buffer_unmap().
                         * Note, needs a better helper.
                         */
                        unpin_user_pages(&pages[1], nr_pages - 1);
                        nr_pages = 1;
                }
        }

        imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
        if (!imu)
                goto done;

        ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
        if (ret) {
                unpin_user_pages(pages, nr_pages);
                goto done;
        }

        off = (unsigned long) iov->iov_base & ~PAGE_MASK;
        size = iov->iov_len;
        /* store original address for later verification */
        imu->ubuf = (unsigned long) iov->iov_base;
        imu->ubuf_end = imu->ubuf + iov->iov_len;
        imu->nr_bvecs = nr_pages;
        *pimu = imu;
        ret = 0;

        if (folio) {
                bvec_set_page(&imu->bvec[0], pages[0], size, off);
                goto done;
        }
        for (i = 0; i < nr_pages; i++) {
                size_t vec_len;

                vec_len = min_t(size_t, size, PAGE_SIZE - off);
                bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
                off = 0;
                size -= vec_len;
        }
done:
        if (ret)
                kvfree(imu);
        kvfree(pages);
        return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
        ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
        return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
                            unsigned int nr_args, u64 __user *tags)
{
        struct page *last_hpage = NULL;
        struct io_rsrc_data *data;
        int i, ret;
        struct iovec iov;

        BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

        if (ctx->user_bufs)
                return -EBUSY;
        if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
                return -EINVAL;
        ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
        if (ret)
                return ret;
        ret = io_buffers_map_alloc(ctx, nr_args);
        if (ret) {
                io_rsrc_data_free(data);
                return ret;
        }

        for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
                if (arg) {
                        ret = io_copy_iov(ctx, &iov, arg, i);
                        if (ret)
                                break;
                        ret = io_buffer_validate(&iov);
                        if (ret)
                                break;
                } else {
                        memset(&iov, 0, sizeof(iov));
                }

                if (!iov.iov_base && *io_get_tag_slot(data, i)) {
                        ret = -EINVAL;
                        break;
                }

                ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
                                             &last_hpage);
                if (ret)
                        break;
        }

        WARN_ON_ONCE(ctx->buf_data);

        ctx->buf_data = data;
        if (ret)
                __io_sqe_buffers_unregister(ctx);
        return ret;
}

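/*
 * Set up @iter over a registered buffer for an IO of @len bytes starting
 * at @buf_addr. Rather than walking the iterator, the starting bvec is
 * computed directly: e.g. for a page-aligned, non-huge-page buffer and
 * buf_addr == imu->ubuf + PAGE_SIZE + 512, one bvec is skipped and
 * iov_offset ends up at 512.
 */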
int io_import_fixed(int ddir, struct iov_iter *iter,
                    struct io_mapped_ubuf *imu,
                    u64 buf_addr, size_t len)
{
        u64 buf_end;
        size_t offset;

        if (WARN_ON_ONCE(!imu))
                return -EFAULT;
        if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
                return -EFAULT;
        /* not inside the mapped region */
        if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
                return -EFAULT;

        /*
         * Might not be a start of buffer, set size appropriately
         * and advance us to the beginning.
         */
        offset = buf_addr - imu->ubuf;
        iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

        if (offset) {
                /*
                 * Don't use iov_iter_advance() here, as it's really slow for
                 * using the latter parts of a big fixed buffer - it iterates
                 * over each segment manually. We can cheat a bit here, because
                 * we know that:
                 *
                 * 1) it's a BVEC iter, we set it up
                 * 2) all bvecs are PAGE_SIZE in size, except potentially the
                 *    first and last bvec
                 *
                 * So just find our index, and adjust the iterator afterwards.
                 * If the offset is within the first bvec (or the whole first
                 * bvec), just use iov_iter_advance(). This makes it easier
                 * since we can just skip the first segment, which may not
                 * be PAGE_SIZE aligned.
                 */
                const struct bio_vec *bvec = imu->bvec;

                if (offset < bvec->bv_len) {
                        /*
                         * Note, huge page buffers consist of one large
                         * bvec entry and should always go this way. The other
                         * branch doesn't expect non PAGE_SIZE'd chunks.
                         */
                        iter->bvec = bvec;
                        iter->nr_segs = bvec->bv_len;
                        iter->count -= offset;
                        iter->iov_offset = offset;
                } else {
                        unsigned long seg_skip;

                        /* skip first vec */
                        offset -= bvec->bv_len;
                        seg_skip = 1 + (offset >> PAGE_SHIFT);

                        iter->bvec = bvec + seg_skip;
                        iter->nr_segs -= seg_skip;
                        iter->count -= bvec->bv_len + offset;
                        iter->iov_offset = offset & ~PAGE_MASK;
                }
        }

        return 0;
}