Lines Matching defs:imu
137 struct io_mapped_ubuf *imu = *slot;
140 if (imu != &dummy_ubuf) {
141 for (i = 0; i < imu->nr_bvecs; i++)
142 unpin_user_page(imu->bvec[i].bv_page);
143 if (imu->acct_pages)
144 io_unaccount_mem(ctx, imu->acct_pages);
145 kvfree(imu);
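The cluster at lines 137-145 is the buffer teardown path reached through io_buffer_unmap() (called at line 461 below): every page pinned for the buffer is released, any memory that was accounted is returned, and the imu itself is freed. A minimal reconstruction of that path, assuming the usual surrounding declarations (the slot double pointer, the loop counter, and the final slot reset are inferred, not shown in the listing):

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	/* dummy_ubuf marks an empty/sparse slot; there is nothing to tear down for it. */
	if (imu != &dummy_ubuf) {
		/* Drop the pin taken on every backing page at registration time. */
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		/* Return whatever was charged against the user's pinned-memory accounting. */
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;	/* assumed from the double-pointer signature */
}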
435 struct io_mapped_ubuf *imu;
452 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
461 io_buffer_unmap(ctx, &imu);
467 ctx->user_bufs[i] = imu;
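Lines 435-467 come from the per-slot loop that registers or replaces fixed buffers: io_sqe_buffer_register() builds a new imu from the user's iovec, and only after the old slot has been dealt with is the new mapping published into ctx->user_bufs[i]; the call at line 461 is the unwind for the case where that intermediate step fails. A skeleton of that ordering, where old_slot_teardown_failed is a hypothetical placeholder for the real check (the actual code queues the previous buffer for deferred removal):

	err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
	if (err)
		break;
	if (old_slot_teardown_failed) {		/* hypothetical placeholder */
		/* Undo the pins and accounting the register call just took. */
		io_buffer_unmap(ctx, &imu);
		break;
	}
	/* Publish: the fixed-buffer table slot now points at the new mapping. */
	ctx->user_bufs[i] = imu;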
830 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
832 for (j = 0; j < imu->nr_bvecs; j++) {
833 if (!PageCompound(imu->bvec[j].bv_page))
835 if (compound_head(imu->bvec[j].bv_page) == hpage)
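Lines 830-835 scan the buffers that are already registered and compare the head page of each compound (huge) page against a candidate hpage; the point is to detect whether that huge page was already charged by an earlier registration so it is not accounted twice. A sketch of such a helper, assuming it walks every populated slot (the function name here is mine, the listing only shows the loop body):

/* Hypothetical name for the already-accounted check implied by lines 830-835. */
static bool hpage_already_accounted(struct io_ring_ctx *ctx, struct page *hpage)
{
	unsigned int i, j;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			/* Only huge/compound pages can alias across buffers here. */
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			/* Same head page means this huge page was charged already. */
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}
	return false;
}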
844 int nr_pages, struct io_mapped_ubuf *imu,
849 imu->acct_pages = 0;
852 imu->acct_pages++;
862 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
866 if (!imu->acct_pages)
869 ret = io_account_mem(ctx, imu->acct_pages);
871 imu->acct_pages = 0;
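Lines 844-871 belong to io_buffer_account_pin() (the name appears at line 968): it walks the freshly pinned pages, counts ordinary pages one by one, charges a compound page as page_size(hpage) >> PAGE_SHIFT once for its whole extent, then calls io_account_mem() with the total; on failure acct_pages is reset so the unmap path never unaccounts memory that was never charged. A hedged reconstruction, assuming a helper like the one sketched above decides whether a head page was already charged:

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			/* Ordinary page: charge exactly one page. */
			imu->acct_pages++;
		} else {
			struct page *hpage = compound_head(pages[i]);

			/* Charge a huge page only once, however many tail pages we pinned. */
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (hpage_already_accounted(ctx, hpage))	/* hypothetical helper name */
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;	/* nothing was charged; do not unaccount later */
	return ret;
}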
918 struct io_mapped_ubuf *imu = NULL;
964 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
965 if (!imu)
968 ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
977 imu->ubuf = (unsigned long) iov->iov_base;
978 imu->ubuf_end = imu->ubuf + iov->iov_len;
979 imu->nr_bvecs = nr_pages;
980 *pimu = imu;
984 bvec_set_page(&imu->bvec[0], pages[0], size, off);
991 bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
997 kvfree(imu);
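Lines 918-997 are the heart of io_sqe_buffer_register(): the imu is allocated with struct_size() so the flexible bvec[] array gets one entry per pinned page, the pages are accounted, the original user range is recorded in ubuf/ubuf_end for later validation, and each bvec is filled with bvec_set_page(). The single call at line 984 is the shortcut where the whole range fits one vector; otherwise the loop at line 991 splits the length page by page, with only the first vector carrying the sub-page offset. A compressed sketch of the common path, which, unlike the real prototype, assumes the pages were already pinned by the caller (who also unwinds them on error) and omits the single-vector shortcut:

static int io_sqe_buffer_register_sketch(struct io_ring_ctx *ctx, struct iovec *iov,
					 struct page **pages, int nr_pages,
					 struct io_mapped_ubuf **pimu,
					 struct page **last_hpage)
{
	struct io_mapped_ubuf *imu;
	size_t off, size, vec_len;
	int ret, i;

	/* One bio_vec per pinned page, allocated together with the header. */
	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		return -ENOMEM;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		kvfree(imu);
		return ret;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;

	/* Record the exact user range; fixed imports validate against it later. */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;

	for (i = 0; i < nr_pages; i++) {
		/* Only the first vector carries the sub-page offset. */
		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
	return 0;
}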
1063 struct io_mapped_ubuf *imu,
1069 if (WARN_ON_ONCE(!imu))
1074 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1081 offset = buf_addr - imu->ubuf;
1082 iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1101 const struct bio_vec *bvec = imu->bvec;
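Lines 1063-1101 are the import side used at I/O time: given a request's buf_addr and len, the code rejects anything not fully contained in [ubuf, ubuf_end), turns the address into an offset from the start of the registered buffer, and hands the imu's bvec array to iov_iter_bvec(); the bvec pointer at line 1101 hints at the follow-up that fast-forwards the iterator past whole leading vectors, which matters when huge pages make a single bvec cover more than one page. A hedged reconstruction of the validation and iterator setup (the function name is not in the listing, and the fast-forward is replaced by a plain iov_iter_advance() here):

static int io_import_fixed_sketch(int ddir, struct iov_iter *iter,
				  struct io_mapped_ubuf *imu,
				  u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64) len, &buf_end)))
		return -EFAULT;
	/* The requested range must lie entirely inside the registered buffer. */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/* Offset of the request from the start of the registered range. */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	/*
	 * The real code walks imu->bvec to skip whole leading vectors covered
	 * by 'offset' instead of this generic advance, but the effect is the
	 * same: the iterator starts at buf_addr with 'len' bytes remaining.
	 */
	if (offset)
		iov_iter_advance(iter, offset);
	return 0;
}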