Lines matching refs: bio (block/blk-map.c)
8 #include <linux/bio.h>
39 * bio_copy_from_iter - copy all pages from iov_iter to bio
40 * @bio: The &struct bio which describes the I/O as destination
43 * Copy all pages from iov_iter to bio.
46 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
51 bio_for_each_segment_all(bvec, bio, iter_all) {
70 * bio_copy_to_iter - copy all pages from bio to iov_iter
71 * @bio: The &struct bio which describes the I/O as source
74 * Copy all pages from bio to iov_iter.
77 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
82 bio_for_each_segment_all(bvec, bio, iter_all) {
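The two kernel-doc blocks above (lines 39-46 and 70-77) describe the bounce-copy helpers that shuttle data between a user iov_iter and the bio's pages. Below is a minimal sketch of that per-segment loop, assuming a ~v5.9 tree where bio_for_each_segment_all() takes a struct bvec_iter_all; fill_bio_from_iter() is an illustrative name, not the kernel's. Note that the read-back variant at line 77 deliberately takes its iov_iter by value, so consuming it does not disturb the iterator saved in the bio_map_data.

#include <linux/bio.h>
#include <linux/uio.h>

/*
 * Sketch only: walk each segment of an already-built bio and fill its
 * pages from a user iov_iter. The read-completion direction is the mirror
 * image, using copy_page_to_iter() on a by-value copy of the iterator.
 */
static int fill_bio_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t copied;

		copied = copy_page_from_iter(bvec->bv_page, bvec->bv_offset,
					     bvec->bv_len, iter);
		if (!iov_iter_count(iter))
			break;		/* source exhausted */
		if (copied < bvec->bv_len)
			return -EFAULT;	/* short copy: bad user address */
	}
	return 0;
}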
101 * bio_uncopy_user - finish previously mapped bio
102 * @bio: bio being terminated
107 static int bio_uncopy_user(struct bio *bio)
109 struct bio_map_data *bmd = bio->bi_private;
120 else if (bio_data_dir(bio) == READ)
121 ret = bio_copy_to_iter(bio, bmd->iter);
123 bio_free_pages(bio);
126 bio_put(bio);
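bio_uncopy_user() (line 107) finishes a bio that went through the copy path: the bio_map_data stashed in bi_private carries the saved iterator, data is copied back to user space only for READs, and then the bounce pages, the context and the bio itself are released. A hedged sketch of that bi_private handoff follows; struct my_map_ctx and my_uncopy() are illustrative stand-ins, not kernel symbols.

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Illustrative private context, loosely modelled on struct bio_map_data. */
struct my_map_ctx {
	struct iov_iter iter;	/* saved user iterator for the copy-back */
};

/*
 * Teardown sketch for a copied (bounced) bio: data flows back to the user
 * only for READs, then the bounce pages, the context and the bio are freed.
 */
static void my_uncopy(struct bio *bio)
{
	struct my_map_ctx *ctx = bio->bi_private;

	if (bio_data_dir(bio) == READ) {
		/* copy each segment back into ctx->iter with
		 * copy_page_to_iter(), as in the sketch above */
	}

	bio_free_pages(bio);	/* free the pages alloc_page()d for the bounce */
	kfree(ctx);
	bio_put(bio);
}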
135 struct bio *bio, *bounce_bio;
158 bio = bio_kmalloc(gfp_mask, nr_pages);
159 if (!bio)
161 bio->bi_opf |= req_op(rq);
193 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
204 map_data->offset += bio->bi_iter.bi_size;
212 ret = bio_copy_from_iter(bio, iter);
217 zero_fill_bio(bio);
218 iov_iter_advance(iter, bio->bi_iter.bi_size);
221 bio->bi_private = bmd;
223 bounce_bio = bio;
236 bio_free_pages(bio);
237 bio_put(bio);
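The fragments from bio_copy_user_iov() above (lines 158-218) show the copy path building its bio: allocate bounce pages, add them with bio_add_pc_page() until the length is covered or the bio is full, then either copy the user data in (WRITE) or zero-fill and advance the iterator (READ). A condensed sketch of that construction loop, assuming the pre-5.18 two-argument bio_kmalloc(); build_bounce_bio() is an illustrative name, and the real code additionally sets bi_opf from the request and wires up bi_private.

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch of the copy path's construction loop: allocate bounce pages one
 * at a time and add them until either the requested length is covered or
 * bio_add_pc_page() refuses to grow the bio.
 */
static struct bio *build_bounce_bio(struct request_queue *q, unsigned int len,
				    gfp_t gfp)
{
	unsigned int nr_pages = DIV_ROUND_UP(len, PAGE_SIZE);
	struct bio *bio;

	bio = bio_kmalloc(gfp, min_t(unsigned int, nr_pages, BIO_MAX_PAGES));
	if (!bio)
		return NULL;

	while (len) {
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE);
		struct page *page = alloc_page(gfp);

		if (!page)
			goto free_pages;
		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
			__free_page(page);
			goto free_pages;
		}
		len -= bytes;
	}
	return bio;

free_pages:
	bio_free_pages(bio);
	bio_put(bio);
	return NULL;
}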
247 struct bio *bio, *bounce_bio;
254 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
255 if (!bio)
257 bio->bi_opf |= req_op(rq);
285 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
299 * release the pages we didn't map into the bio, if any
304 /* couldn't stuff something into bio? */
310 * Subtle: if we end up needing to bounce a bio, it would normally
312 * bio for the unmap, so grab an extra reference to it
314 bio_get(bio);
316 bounce_bio = bio;
329 bio_put(bio);
331 bio_release_pages(bio, false);
332 bio_put(bio);
337 * bio_unmap_user - unmap a bio
338 * @bio: the bio being unmapped
340 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
345 static void bio_unmap_user(struct bio *bio)
347 bio_release_pages(bio, bio_data_dir(bio) == READ);
348 bio_put(bio);
349 bio_put(bio);
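The "Subtle:" comment at lines 310-314 and the two bio_put() calls at lines 348-349 are two halves of one contract: the map path takes extra references so the original bio survives both a possible bounce clone's completion and its linkage into the request, and bio_unmap_user() later unpins the user pages (dirtying them only when the device wrote into them) and drops what is still held on the original. A sketch of that unmap half; treat the exact reference pairing as this kernel version's behaviour, not a stable rule.

#include <linux/bio.h>

/*
 * Sketch of the unmap half of a directly-mapped user bio: release the
 * pinned user pages, marking them dirty only if the bio was a READ
 * (i.e. the device wrote into them), then drop the references the map
 * path left on the original bio.
 */
static void release_user_bio(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	/* two drops: one for the allocation, one for the extra reference
	 * taken in case the bio had to be bounced */
	bio_put(bio);
	bio_put(bio);
}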
352 static void bio_invalidate_vmalloc_pages(struct bio *bio)
355 if (bio->bi_private && !op_is_write(bio_op(bio))) {
358 for (i = 0; i < bio->bi_vcnt; i++)
359 len += bio->bi_io_vec[i].bv_len;
360 invalidate_kernel_vmap_range(bio->bi_private, len);
365 static void bio_map_kern_endio(struct bio *bio)
367 bio_invalidate_vmalloc_pages(bio);
368 bio_put(bio);
372 * bio_map_kern - map kernel address into bio
373 * @q: the struct request_queue for the bio
376 * @gfp_mask: allocation flags for bio allocation
378 * Map the kernel address into a bio suitable for io to a block
381 static struct bio *bio_map_kern(struct request_queue *q, void *data,
391 struct bio *bio;
393 bio = bio_kmalloc(gfp_mask, nr_pages);
394 if (!bio)
399 bio->bi_private = data;
416 if (bio_add_pc_page(q, bio, page, bytes,
419 bio_put(bio);
428 bio->bi_end_io = bio_map_kern_endio;
429 return bio;
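bio_map_kern() (line 381) wraps an existing kernel buffer without copying: each chunk of the buffer is translated to its backing struct page and added with bio_add_pc_page(). vmalloc buffers need extra care because they are reached through a separate kernel mapping, which is why the address is kept in bi_private and bio_invalidate_vmalloc_pages() (line 352) invalidates the vmap alias once a read completes. A minimal sketch of the address-to-page step under those assumptions; kaddr_to_page() and add_kern_chunk() are illustrative helpers, not kernel API.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch: resolve one kernel virtual address to the page backing it, the
 * way the map-kern path does, handling both the linear map and vmalloc.
 */
static struct page *kaddr_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

/*
 * Add one chunk of the buffer to the bio: offset within its page, length
 * capped at the end of that page.
 */
static int add_kern_chunk(struct request_queue *q, struct bio *bio,
			  void *addr, unsigned int len)
{
	unsigned int offset = offset_in_page(addr);
	unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE - offset);

	if (bio_add_pc_page(q, bio, kaddr_to_page(addr), bytes, offset) < bytes)
		return -EINVAL;
	return bytes;
}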
432 static void bio_copy_kern_endio(struct bio *bio)
434 bio_free_pages(bio);
435 bio_put(bio);
438 static void bio_copy_kern_endio_read(struct bio *bio)
440 char *p = bio->bi_private;
444 bio_for_each_segment_all(bvec, bio, iter_all) {
449 bio_copy_kern_endio(bio);
453 * bio_copy_kern - copy kernel address into bio
454 * @q: the struct request_queue for the bio
457 * @gfp_mask: allocation flags for bio and page allocation
460 * copy the kernel address into a bio suitable for io to a block
463 static struct bio *bio_copy_kern(struct request_queue *q, void *data,
469 struct bio *bio;
480 bio = bio_kmalloc(gfp_mask, nr_pages);
481 if (!bio)
498 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
506 bio->bi_end_io = bio_copy_kern_endio_read;
507 bio->bi_private = data;
509 bio->bi_end_io = bio_copy_kern_endio;
512 return bio;
515 bio_free_pages(bio);
516 bio_put(bio);
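bio_copy_kern() (line 463) is the bounce variant for kernel buffers: it fills freshly allocated pages from the caller's buffer for a write, and for a read it defers the copy-back to completion, which is what bio_copy_kern_endio_read() (line 438) does with the original buffer saved in bi_private. A hedged sketch of that completion-side copy-back; copy_kern_read_endio() is an illustrative name.

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Sketch of the read-completion copy-back used by the copy-kern path:
 * bi_private holds the caller's original buffer, and every bounce page is
 * copied back in order before the pages and the bio are released.
 */
static void copy_kern_read_endio(struct bio *bio)
{
	char *dst = bio->bi_private;	/* the caller's kernel buffer */
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(dst, page_address(bvec->bv_page), bvec->bv_len);
		dst += bvec->bv_len;
	}

	bio_free_pages(bio);
	bio_put(bio);
}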
521 * Append a bio to a passthrough request. Only works if the bio can be merged
524 int blk_rq_append_bio(struct request *rq, struct bio **bio)
526 struct bio *orig_bio = *bio;
531 blk_queue_bounce(rq->q, bio);
533 bio_for_each_bvec(bv, *bio, iter)
536 if (!rq->bio) {
537 blk_rq_bio_prep(rq, *bio, nr_segs);
539 if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
540 if (orig_bio != *bio) {
541 bio_put(*bio);
542 *bio = orig_bio;
547 rq->biotail->bi_next = *bio;
548 rq->biotail = *bio;
549 rq->__data_len += (*bio)->bi_iter.bi_size;
550 bio_crypt_free_ctx(*bio);
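blk_rq_append_bio() (line 524) is where a caller-built bio is attached to a passthrough request: it bounces the bio if the queue requires it, counts the physical segments, and either starts rq->bio or back-merges onto rq->biotail, growing rq->__data_len. A usage sketch for this era's double-pointer API, in which blk_queue_bounce() may substitute a bounce bio; attach_bio_to_request() is an illustrative wrapper.

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Usage sketch: attach a driver-built bio to a passthrough request. The
 * bio pointer may be replaced by a bounce bio, so keep the original if it
 * is needed later for cleanup.
 */
static int attach_bio_to_request(struct request *rq, struct bio *bio)
{
	struct bio *attached = bio;	/* may be swapped for a bounce bio */
	int ret;

	ret = blk_rq_append_bio(rq, &attached);
	if (ret) {
		/* nothing was linked into rq; the caller still owns @bio */
		return ret;
	}
	/* rq->bio / rq->biotail and rq->__data_len now cover @attached */
	return 0;
}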
572 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
575 * original bio must be passed back in to blk_rq_unmap_user() for proper
584 struct bio *bio = NULL;
606 if (!bio)
607 bio = rq->bio;
613 blk_rq_unmap_user(bio);
615 rq->bio = NULL;
637 * @bio: start of bio list
641 * supply the original rq->bio from the blk_rq_map_user() return, since
642 * the I/O completion may have changed rq->bio.
644 int blk_rq_unmap_user(struct bio *bio)
646 struct bio *mapped_bio;
649 while (bio) {
650 mapped_bio = bio;
651 if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
652 mapped_bio = bio->bi_private;
654 if (bio->bi_private) {
662 mapped_bio = bio;
663 bio = bio->bi_next;
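The description at lines 637-642 states the caller-side rule for the user-mapping pair: save rq->bio immediately after mapping and pass that saved pointer to blk_rq_unmap_user(), because completion may change rq->bio. A usage sketch in the style of the SG_IO path, assuming the ~v5.9 passthrough API (blk_get_request(), blk_execute_rq(), blk_put_request()); the opcode and error handling are simplified placeholders.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/err.h>

/*
 * Usage sketch: map a user buffer into a passthrough request, execute it
 * synchronously, then unmap using the bio pointer saved at map time.
 */
static int do_user_passthrough(struct request_queue *q,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;			/* save now: completion may change rq->bio */

	blk_execute_rq(q, NULL, rq, 0);	/* wait for completion */

	ret = blk_rq_unmap_user(bio);	/* copy back / unpin / free */
out_put:
	blk_put_request(rq);
	return ret;
}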
689 struct bio *bio, *orig_bio;
698 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
700 bio = bio_map_kern(q, kbuf, len, gfp_mask);
702 if (IS_ERR(bio))
703 return PTR_ERR(bio);
705 bio->bi_opf &= ~REQ_OP_MASK;
706 bio->bi_opf |= req_op(rq);
708 orig_bio = bio;
709 ret = blk_rq_append_bio(rq, &bio);
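The tail of the listing shows blk_rq_map_kern() picking between the two kernel-buffer strategies: a buffer that fails the queue's alignment rules (or, in this era, lives on the stack) goes through bio_copy_kern(), anything else is wrapped in place by bio_map_kern(); either way the bio inherits the request's operation (lines 705-706) and is appended via blk_rq_append_bio(). A short usage sketch, parallel to the user-space one above; issue_kern_buffer() and the REQ_OP_DRV_OUT opcode are placeholders.

#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Usage sketch: issue a kernel buffer to a device through a passthrough
 * request. blk_rq_map_kern() decides internally whether @kbuf can be
 * mapped in place or must be bounced through copied pages.
 */
static int issue_kern_buffer(struct request_queue *q, void *kbuf,
			     unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	blk_execute_rq(q, NULL, rq, 0);	/* synchronous; result is driver-specific */
out_put:
	blk_put_request(rq);
	return ret;
}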