Lines Matching defs:bio

17 #include <linux/bio.h>
55 struct bio *bio_in;
57 struct bio *bio_out;
71 * per bio private data
75 struct bio *base_bio;
217 * pool for per bio private data, crypto requests,
242 static void crypt_endio(struct bio *clone);
993 /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
1151 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1157 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1160 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1164 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1168 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
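
The group above (lines 1151-1168) appears to trace dm_crypt_integrity_io_alloc() in drivers/md/dm-crypt.c: a single bio_integrity_payload carries all per-sector tags for the bio, backed by one virtually contiguous buffer. A minimal sketch of that pattern follows, assuming a caller-supplied buffer meta_buf, a per-sector tag size tag_size, and a sector_shift that folds 512-byte sectors into larger crypto sectors; these names are illustrative, not the kernel's.

#include <linux/bio.h>
#include <linux/err.h>
#include <linux/mm.h>

/*
 * Reserve integrity-metadata space on @bio so the block layer carries
 * @tag_size bytes of tag data per crypto sector.  @meta_buf must be
 * large enough to cover every sector of the bio.
 */
static int attach_bio_tags(struct bio *bio, void *meta_buf,
                           unsigned int tag_size, unsigned int sector_shift)
{
        struct bio_integrity_payload *bip;
        unsigned int tag_len;
        int ret;

        if (!bio_sectors(bio) || !tag_size)
                return 0;                       /* nothing to attach */

        /* one integrity vec suffices: the buffer is virtually contiguous */
        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        /* one tag per crypto sector, which may span several 512B sectors */
        tag_len = tag_size * (bio_sectors(bio) >> sector_shift);

        /* seed the integrity iterator with the sector the tags describe */
        bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

        ret = bio_integrity_add_page(bio, virt_to_page(meta_buf),
                                     tag_len, offset_in_page(meta_buf));
        if (unlikely(ret != tag_len))
                return -ENOMEM;

        return 0;
}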
1225 struct bio *bio_out, struct bio *bio_in,
1313 /* Reject unexpected unaligned bio. */
1414 /* Reject unexpected unaligned bio. */
1536 struct skcipher_request *req, struct bio *base_bio)
1545 struct aead_request *req, struct bio *base_bio)
1553 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1562 * Encrypt / decrypt data from one bio to another one (can be the same one)
1572 * if reset_pending is set we are dealing with the bio for the first time,
1573 * else we're continuing to work on the previous bio, so don't mess with
1658 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1661 * Generate a new unfragmented bio with the given size
1680 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
1683 struct bio *clone;
1749 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1753 if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
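
The bi_vcnt guard at line 1753 exists because bio_for_each_folio_all() must not be run on a bio with no segments. A minimal sketch of that guard, assuming the clone's pages were allocated by its owner and can simply be dropped with folio_put(); dm-crypt itself returns them to a mempool instead.

#include <linux/bio.h>
#include <linux/mm.h>

/* Release the data pages attached to a clone bio that we allocated ourselves. */
static void free_clone_pages(struct bio *clone)
{
        struct folio_iter fi;

        /* bio_for_each_folio_all() is not safe on an empty bio */
        if (!clone->bi_vcnt)
                return;

        bio_for_each_folio_all(fi, clone)
                folio_put(fi.folio);    /* dm-crypt uses a page mempool here */
}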
1767 struct bio *bio, sector_t sector)
1770 io->base_bio = bio;
1795 struct bio *base_bio = io->base_bio;
1840 static void crypt_endio(struct bio *clone)
1876 struct bio *clone;
1895 * We need the original biovec array in order to decrypt the whole bio
1940 struct bio *clone = io->ctx.bio_out;
2002 struct bio *clone = io->ctx.bio_out;
2015 /* crypt_convert should have filled the clone bio */
2100 struct bio *clone;
3309 /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
3447 static int crypt_map(struct dm_target *ti, struct bio *bio)
3453 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3457 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3458 bio_op(bio) == REQ_OP_DISCARD)) {
3459 bio_set_dev(bio, cc->dev->bdev);
3460 if (bio_sectors(bio))
3461 bio->bi_iter.bi_sector = cc->start +
3462 dm_target_offset(ti, bio->bi_iter.bi_sector);
3467 * Check if bio is too large, split as needed.
3469 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3470 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3471 dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3474 * Ensure that bio is a multiple of internal sector encryption size
3477 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3480 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3483 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3484 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3487 unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3495 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3496 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
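
Lines 3447-3496 above trace the bio handling at the top of crypt_map(): flush and discard bios bypass the crypt queues entirely, oversized bios are split with dm_accept_partial_bio(), and bios not aligned to the configured encryption sector size are rejected. A condensed sketch of that flow, assuming a hypothetical per-target struct my_crypt_cfg with dev, start and sector_size fields; the per-bio data and integrity-tag setup from lines 3483-3496 is elided.

#include <linux/bio.h>
#include <linux/device-mapper.h>

struct my_crypt_cfg {                   /* illustrative stand-in for struct crypt_config */
        struct dm_dev *dev;
        sector_t start;
        unsigned int sector_size;       /* encryption sector size, a power of two >= 512 */
};

static int my_crypt_map(struct dm_target *ti, struct bio *bio)
{
        struct my_crypt_cfg *cc = ti->private;

        /* Flushes and discards carry no data to encrypt: remap and pass through. */
        if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
                     bio_op(bio) == REQ_OP_DISCARD)) {
                bio_set_dev(bio, cc->dev->bdev);
                if (bio_sectors(bio))
                        bio->bi_iter.bi_sector = cc->start +
                                dm_target_offset(ti, bio->bi_iter.bi_sector);
                return DM_MAPIO_REMAPPED;
        }

        /* Writes larger than one clone's bvec array can hold are split here. */
        if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
            bio_data_dir(bio) == WRITE)
                dm_accept_partial_bio(bio,
                        (BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT);

        /* Reject bios that are not a multiple of the encryption sector size. */
        if (unlikely(bio->bi_iter.bi_sector &
                     ((cc->sector_size >> SECTOR_SHIFT) - 1)))
                return DM_MAPIO_KILL;
        if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
                return DM_MAPIO_KILL;

        /* ... allocate per-bio data and tags, then queue the bio for crypto ... */
        return DM_MAPIO_SUBMITTED;
}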
3698 * bio that are not as physically contiguous as the original bio.