Lines Matching defs:bio (references to struct bio in the dm-crypt device-mapper target source)

16 #include <linux/bio.h>
50 struct bio *bio_in;
51 struct bio *bio_out;
64 * per bio private data
68 struct bio *base_bio;
210 * pool for per bio private data, crypto requests,
235 static void clone_init(struct dm_crypt_io *, struct bio *);
978 /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
1136 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1142 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1145 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1149 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1154 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
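The lines above (1136-1154) are dm_crypt_integrity_io_alloc(), which attaches dm-crypt's per-sector authentication tags to a bio as a bio integrity payload. A minimal sketch of how those calls fit together; the io->sector field and the bip_iter setup are assumptions the listing itself does not show, and error handling is simplified:

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	/* No data or no per-sector tags configured: nothing to attach. */
	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	/* One integrity vector is enough: all tags sit in one buffer. */
	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	/* One on-disk tag per encryption sector covered by this bio. */
	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	/* Describe the tag area (fields assumed, not shown in the listing). */
	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	return (ret != tag_len) ? -ENOMEM : 0;
}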
1211 struct bio *bio_out, struct bio *bio_in,
1297 /* Reject unexpected unaligned bio. */
1392 /* Reject unexpected unaligned bio. */
1515 struct skcipher_request *req, struct bio *base_bio)
1524 struct aead_request *req, struct bio *base_bio)
1532 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1541 * Encrypt / decrypt data from one bio to another one (can be the same one)
1551 * if reset_pending is set we are dealing with the bio for the first time,
1552 * else we're continuing to work on the previous bio, so don't mess with
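Lines 1541-1552 document crypt_convert(), which walks bio_in and bio_out (the context members listed at 50-51) in lockstep and en/decrypts one cc->sector_size block per step; reset_pending tells a freshly started bio apart from one being resumed after an asynchronous crypto completion. A much-simplified, synchronous-only skeleton of that walk; the struct name convert_context and the iter_in/iter_out/cc_sector fields are assumptions, and the per-sector crypto is stubbed out:

/*
 * Hypothetical stand-in for the per-sector work: the real code builds src/dst
 * scatterlists from the current bvecs, generates the IV, and calls
 * crypto_skcipher_encrypt()/decrypt() (or the AEAD equivalents).
 */
static int crypt_one_sector(struct crypt_config *cc, struct convert_context *ctx)
{
	return 0;
}

static blk_status_t crypt_convert_sketch(struct crypt_config *cc,
					 struct convert_context *ctx)
{
	/* Walk bio_in and bio_out in lockstep, one encryption sector at a time. */
	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
		if (crypt_one_sector(cc, ctx))
			return BLK_STS_IOERR;

		bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
		bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
		ctx->cc_sector += cc->sector_size >> SECTOR_SHIFT;
	}
	return BLK_STS_OK;
}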
1637 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1640 * Generate a new unfragmented bio with the given size
1656 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1659 struct bio *clone;
1706 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
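Lines 1640-1706 cover clone-buffer management: crypt_alloc_buffer() builds a new, unfragmented bio big enough for the ciphertext, and crypt_free_buffer_pages() gives its pages back. A condensed sketch of the allocation side, assuming a page mempool (cc->page_pool) and an embedded bio_set (cc->bs), using the three-argument bio_alloc_bioset() of this kernel generation (newer kernels also take the bdev and opf here), and leaving out the real code's out-of-memory retry logic:

static struct bio *crypt_alloc_buffer_sketch(struct dm_crypt_io *io, unsigned int size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int remaining = size;
	struct page *page;

	/* One bio with enough bvec slots to hold the whole payload. */
	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
	if (!clone)
		return NULL;

	while (remaining) {
		unsigned int len = min_t(unsigned int, remaining, PAGE_SIZE);

		page = mempool_alloc(&cc->page_pool, GFP_NOIO);
		if (!page)
			goto err_free;

		/* The bio was sized for the whole payload, so this cannot fail. */
		bio_add_page(clone, page, len, 0);
		remaining -= len;
	}
	return clone;

err_free:
	crypt_free_buffer_pages(cc, clone);
	bio_put(clone);
	return NULL;
}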
1718 struct bio *bio, sector_t sector)
1721 io->base_bio = bio;
1749 struct bio *base_bio = io->base_bio;
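Lines 1718-1721 are crypt_io_init(), which fills in the per-bio private data (the struct dm_crypt_io whose base_bio member is listed at 68) that crypt_map() later fetches with dm_per_bio_data(). A sketch of what that initialization plausibly covers; every field here other than cc, base_bio and integrity_metadata is an assumption:

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;		/* the original bio, completed at the very end */
	io->sector = sector;		/* target-relative start sector (assumed field) */
	io->error = 0;
	io->integrity_metadata = NULL;	/* tag buffer attached later, if any */
	atomic_set(&io->io_pending, 0);	/* assumed: refcount of in-flight work */
}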
1799 static void crypt_endio(struct bio *clone)
1826 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1839 struct bio *clone;
1843 * the whole bio data *afterwards* -- thanks to immutable
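Lines 1799-1843 are the clone I/O path: clone_init() points a clone bio at the backing device and installs crypt_endio() as its completion handler, and the comment at 1843 notes that immutable biovecs let dm-crypt decrypt through the original biovec array after a read clone completes. A compressed sketch of the two helpers; kcryptd_queue_crypt() and crypt_dec_pending() are assumed helper names for "defer decryption to a workqueue" and "drop the per-io reference":

static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned int rw = bio_data_dir(clone);
	blk_status_t error = clone->bi_status;

	/* Write clones carry the pages allocated in crypt_alloc_buffer(). */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	/* A successfully read clone still has to be decrypted. */
	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);	/* assumed helper */
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);			/* assumed: ends base_bio at zero */
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	bio_set_dev(clone, cc->dev->bdev);
	clone->bi_opf = io->base_bio->bi_opf;
}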
1886 struct bio *clone = io->ctx.bio_out;
1948 struct bio *clone = io->ctx.bio_out;
1961 /* crypt_convert should have filled the clone bio */
2047 struct bio *clone;
3223 /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
3345 static int crypt_map(struct dm_target *ti, struct bio *bio)
3351 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3355 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3356 bio_op(bio) == REQ_OP_DISCARD)) {
3357 bio_set_dev(bio, cc->dev->bdev);
3358 if (bio_sectors(bio))
3359 bio->bi_iter.bi_sector = cc->start +
3360 dm_target_offset(ti, bio->bi_iter.bi_sector);
3365 * Check if bio is too large, split as needed.
3367 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
3368 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3369 dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
3372 * Ensure that bio is a multiple of internal sector encryption size
3375 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3378 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3381 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3382 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3385 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3390 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3391 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
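Lines 3345-3391 are the bulk of crypt_map(): flushes and discards bypass the crypt queues and are simply remapped; oversized bios are capped with dm_accept_partial_bio(); bios not aligned to the configured encryption sector size are rejected; everything else gets its per-bio dm_crypt_io initialized, including the integrity-tag buffer. The listed fragments stitched into one hedged sketch (the tag-buffer allocation and the final hand-off to the kcryptd read/write paths are abbreviated to comments; BIO_MAX_PAGES is called BIO_MAX_VECS in newer kernels):

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues:
	 * remap it to the backing device and let DM resubmit it as-is.
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/* Check if bio is too large, split as needed. */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Reject unexpected unaligned bios: both the start sector and the
	 * length must be multiples of the internal encryption sector size.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;
	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	/* Per-bio private data lives in the bio's front pad. */
	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		/*
		 * The real code computes tag_len as at line 3385, tries a
		 * plain kmalloc() for io->integrity_metadata, and on failure
		 * caps the bio at cc->tag_pool_max_sectors and falls back to
		 * a bounded mempool.
		 */
		if (bio_sectors(bio) > cc->tag_pool_max_sectors)
			dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
	}

	/* ... queue the io to the kcryptd read or write path ... */

	return DM_MAPIO_SUBMITTED;
}

Flushes and discards can bypass encryption entirely because they carry no data payload to transform; only their target location needs remapping, which is why the REQ_PREFLUSH branch rewrites bi_sector only when bio_sectors(bio) is non-zero.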
3566 * bio that are not as physically contiguous as the original bio.