Lines Matching refs:bl (sketches reconstructed from these references follow the listing)
35 struct io_buffer_list *bl,
38 if (bl && bgid < BGID_ARRAY)
39 return &bl[bgid];
60 struct io_buffer_list *bl, unsigned int bgid)
67 bl->bgid = bgid;
68 smp_store_release(&bl->is_ready, 1);
73 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
79 struct io_buffer_list *bl;
94 bl = io_buffer_get_list(ctx, buf->bgid);
95 list_add(&buf->list, &bl->buf_list);
137 struct io_buffer_list *bl)
139 if (!list_empty(&bl->buf_list)) {
142 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
155 struct io_buffer_list *bl,
158 struct io_uring_buf_ring *br = bl->buf_ring;
160 __u16 head = bl->head;
165 head &= bl->mask;
167 if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
172 buf = page_address(bl->buf_pages[index]);
178 req->buf_list = bl;
193 bl->head++;
202 struct io_buffer_list *bl;
207 bl = io_buffer_get_list(ctx, req->buf_index);
208 if (likely(bl)) {
209 if (bl->is_mapped)
210 ret = io_ring_buffer_select(req, len, bl, issue_flags);
212 ret = io_provided_buffer_select(req, len, bl);
220 struct io_buffer_list *bl;
223 bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
224 if (!bl)
228 INIT_LIST_HEAD(&bl[i].buf_list);
229 bl[i].bgid = i;
232 smp_store_release(&ctx->io_bl, bl);
239 static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
244 if (bl->buf_ring == ibf->mem) {
255 struct io_buffer_list *bl, unsigned nbufs)
263 if (bl->is_mapped) {
264 i = bl->buf_ring->tail - bl->head;
265 if (bl->is_mmap) {
270 io_kbuf_mark_free(ctx, bl);
271 bl->buf_ring = NULL;
272 bl->is_mmap = 0;
273 } else if (bl->buf_nr_pages) {
276 for (j = 0; j < bl->buf_nr_pages; j++)
277 unpin_user_page(bl->buf_pages[j]);
278 kvfree(bl->buf_pages);
279 bl->buf_pages = NULL;
280 bl->buf_nr_pages = 0;
283 INIT_LIST_HEAD(&bl->buf_list);
284 bl->is_mapped = 0;
291 while (!list_empty(&bl->buf_list)) {
294 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
306 struct io_buffer_list *bl;
316 xa_for_each(&ctx->io_bl_xa, index, bl) {
317 xa_erase(&ctx->io_bl_xa, bl->bgid);
318 __io_remove_buffers(ctx, bl, -1U);
319 kfree_rcu(bl, rcu);
354 struct io_buffer_list *bl;
360 bl = io_buffer_get_list(ctx, p->bgid);
361 if (bl) {
364 if (!bl->is_mapped)
365 ret = __io_remove_buffers(ctx, bl, p->nbufs);
454 struct io_buffer_list *bl)
466 list_move_tail(&buf->list, &bl->buf_list);
483 struct io_buffer_list *bl;
494 bl = io_buffer_get_list(ctx, p->bgid);
495 if (unlikely(!bl)) {
496 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
497 if (!bl) {
501 INIT_LIST_HEAD(&bl->buf_list);
502 ret = io_buffer_add_list(ctx, bl, p->bgid);
511 kfree_rcu(bl, rcu);
518 if (bl->is_mapped) {
523 ret = io_add_buffers(ctx, p, bl);
534 struct io_buffer_list *bl)
571 bl->buf_pages = pages;
572 bl->buf_nr_pages = nr_pages;
573 bl->buf_ring = br;
574 bl->is_mapped = 1;
575 bl->is_mmap = 0;
614 struct io_buffer_list *bl)
640 bl->buf_ring = ibf->mem;
641 bl->is_mapped = 1;
642 bl->is_mmap = 1;
649 struct io_buffer_list *bl, *free_bl = NULL;
684 bl = io_buffer_get_list(ctx, reg.bgid);
685 if (bl) {
687 if (bl->is_mapped || !list_empty(&bl->buf_list))
690 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
691 if (!bl)
696 ret = io_pin_pbuf_ring(&reg, bl);
698 ret = io_alloc_pbuf_ring(ctx, &reg, bl);
701 bl->nr_entries = reg.ring_entries;
702 bl->mask = reg.ring_entries - 1;
704 io_buffer_add_list(ctx, bl, reg.bgid);
715 struct io_buffer_list *bl;
726 bl = io_buffer_get_list(ctx, reg.bgid);
727 if (!bl)
729 if (!bl->is_mapped)
732 __io_remove_buffers(ctx, bl, -1U);
733 if (bl->bgid >= BGID_ARRAY) {
734 xa_erase(&ctx->io_bl_xa, bl->bgid);
735 kfree_rcu(bl, rcu);
742 struct io_buffer_list *bl;
744 bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
746 if (!bl || !bl->is_mmap)
753 if (!smp_load_acquire(&bl->is_ready))
756 return bl->buf_ring;
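For orientation, the fields referenced above suggest roughly the following shape for struct io_buffer_list. This is a sketch reconstructed only from the references in this listing; the real definition in io_uring/kbuf.h may use unions and different field widths or ordering.

	/* Sketch only: reconstructed from the fields referenced in the listing above. */
	struct io_buffer_list {
		struct list_head buf_list;		/* classic provided buffers (lines 95, 142, 283) */
		struct io_uring_buf_ring *buf_ring;	/* ring-mapped buffers (lines 158, 573, 640) */
		struct page **buf_pages;		/* pinned user pages backing the ring (line 571) */
		struct rcu_head rcu;			/* freed via kfree_rcu(bl, rcu) (line 319) */
		__u16 bgid;				/* buffer group ID */
		__u16 buf_nr_pages;			/* number of pinned pages (line 572) */
		__u16 nr_entries;			/* reg.ring_entries (line 701) */
		__u16 head;				/* consumer head, masked with bl->mask (line 165) */
		__u16 mask;				/* ring_entries - 1 (line 702) */
		__u8 is_mapped;				/* buffer ring (pinned or mmap'ed) rather than list */
		__u8 is_mmap;				/* kernel-allocated ring, mmap'ed by userspace */
		__u8 is_ready;				/* published with smp_store_release() (line 68) */
	};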
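Lines 35-44 and 73 imply a two-tier lookup: buffer group IDs below BGID_ARRAY live in a flat, pre-allocated table (ctx->io_bl, set up around lines 223-232), while higher group IDs go through the xarray ctx->io_bl_xa. A minimal sketch of that lookup, assuming the fallback path uses xa_load() (the listing only shows the xa_store/xa_erase side):

	static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
							   struct io_buffer_list *bl,
							   unsigned int bgid)
	{
		/* bl is the fixed table published at line 232; fast path for small group IDs */
		if (bl && bgid < BGID_ARRAY)
			return &bl[bgid];

		/* assumption: dynamically added groups are fetched from the xarray */
		return xa_load(&ctx->io_bl_xa, bgid);
	}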
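Lines 158-193 show how a ring entry is consumed: the head is masked to the ring size, and the entry is either taken straight out of br->bufs[] (mmap'ed rings are one contiguous allocation, and the first page of a pinned ring is br itself) or located inside the pinned user page that holds it. A hedged sketch of just that indexing, with a hypothetical helper name; IO_BUFFER_LIST_BUF_PER_PAGE is taken to be PAGE_SIZE / sizeof(struct io_uring_buf) as in kbuf.c:

	/* Illustrative helper, not present in kbuf.c: map a head index to its ring entry. */
	static struct io_uring_buf *pbuf_ring_entry(struct io_buffer_list *bl, __u16 head)
	{
		struct io_uring_buf_ring *br = bl->buf_ring;

		head &= bl->mask;
		/* mmap'ed rings are contiguous; low indexes fall in the first (directly addressable) page */
		if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE)
			return &br->bufs[head];

		/* otherwise index into the pinned user page that contains this entry */
		return (struct io_uring_buf *)page_address(bl->buf_pages[head / IO_BUFFER_LIST_BUF_PER_PAGE])
			+ (head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1));
	}

Once the selected buffer is committed, the consumer side advances bl->head (line 193).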