Lines matching defs:clone in drivers/md/dm-rq.c (Linux kernel, request-based device-mapper core); each match below is prefixed with its line number in that file.

21 	struct request *orig, *clone;
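
This first match is the core bookkeeping of dm-rq: each struct dm_rq_target_io pairs the original request queued to the DM device with the clone dispatched to the underlying device. A minimal sketch of the two structures involved, with fields not visible in these matches abbreviated:

    struct dm_rq_target_io {
            struct mapped_device *md;
            struct dm_target *ti;
            struct request *orig, *clone;   /* the matched pair (line 21) */
            blk_status_t error;
            union map_info info;
            /* ... stats/work fields omitted ... */
    };

    /*
     * Per-bio bookkeeping. The clone bio is embedded so that
     * end_clone_bio() can recover this struct with container_of().
     */
    struct dm_rq_clone_bio_info {
            struct bio *orig;
            struct dm_rq_target_io *tio;
            struct bio clone;
    };
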
79 static void end_clone_bio(struct bio *clone)
82 container_of(clone, struct dm_rq_clone_bio_info, clone);
85 blk_status_t error = clone->bi_status;
86 bool is_last = !clone->bi_next;
88 bio_put(clone);
93 * Once error occurred, just let clone->end_io() handle
116 * the original request before the clone, and break the ordering.
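
end_clone_bio() (lines 79-116) is the completion hook for each bio of the clone. Because the clone bio is embedded in dm_rq_clone_bio_info, container_of() recovers the bookkeeping with no lookup or allocation. A sketch of its shape, with the byte-accounting details omitted (the locals match the function, but treat the body as illustrative):

    static void end_clone_bio(struct bio *clone)
    {
            struct dm_rq_clone_bio_info *info =
                    container_of(clone, struct dm_rq_clone_bio_info, clone);
            struct dm_rq_target_io *tio = info->tio;
            blk_status_t error = clone->bi_status;
            bool is_last = !clone->bi_next; /* last bio of this clone? */

            bio_put(clone);

            if (tio->error)
                    return; /* error already recorded; clone->end_io()
                               handles the remainder (line 93) */
            if (error)
                    tio->error = error; /* defer the decision to the target */

            /*
             * Otherwise the real function updates the original request's
             * progress. It avoids blk_mq_end_request() here, since that
             * could complete the original before the clone and break the
             * ordering (line 116).
             */
    }
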
153 * Complete the clone and the original request.
154 * Must be called without clone's queue lock held,
157 static void dm_end_request(struct request *clone, blk_status_t error)
159 struct dm_rq_target_io *tio = clone->end_io_data;
163 blk_rq_unprep_clone(clone);
164 tio->ti->type->release_clone_rq(clone, NULL);
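
dm_end_request() (lines 153-164) shows the canonical teardown order on success: unprep the clone (freeing its cloned bios back to the md's bio_set), return the clone to the target that allocated it, then complete the original. A sketch, assuming the rq_end_stats()/rq_completed() helpers of this era of dm-rq.c:

    static void dm_end_request(struct request *clone, blk_status_t error)
    {
            struct dm_rq_target_io *tio = clone->end_io_data;
            struct mapped_device *md = tio->md;
            struct request *rq = tio->orig;

            blk_rq_unprep_clone(clone);                   /* free cloned bios */
            tio->ti->type->release_clone_rq(clone, NULL); /* target frees clone */

            rq_end_stats(md, rq);
            blk_mq_end_request(rq, error);  /* now complete the original */
            rq_completed(md);               /* drop the md in-flight reference */
    }
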
195 if (tio->clone) {
196 blk_rq_unprep_clone(tio->clone);
197 tio->ti->type->release_clone_rq(tio->clone, NULL);
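
Lines 195-197 are the same unprep/release pair on the requeue path (dm_requeue_original_request() in this era). The tio->clone check matters: a request can be requeued before it was ever mapped, in which case there is no clone to release. A sketch of the tail of that function (dm_mq_delay_requeue_request() is dm-rq's requeue helper, named here from memory):

    rq_end_stats(md, rq);
    if (tio->clone) {
            blk_rq_unprep_clone(tio->clone);
            tio->ti->type->release_clone_rq(tio->clone, NULL);
    }

    dm_mq_delay_requeue_request(rq, delay_requeue ? 100 /* ms */ : 0);
    rq_completed(md);
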
204 static void dm_done(struct request *clone, blk_status_t error, bool mapped)
207 struct dm_rq_target_io *tio = clone->end_io_data;
214 r = rq_end_io(tio->ti, clone, error, &tio->info);
218 if (req_op(clone) == REQ_OP_DISCARD &&
219 !clone->q->limits.max_discard_sectors)
221 else if (req_op(clone) == REQ_OP_WRITE_SAME &&
222 !clone->q->limits.max_write_same_sectors)
224 else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
225 !clone->q->limits.max_write_zeroes_sectors)
232 dm_end_request(clone, error);
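
dm_done() (lines 204-232) first lets the target's optional rq_end_io() hook inspect the result (line 214); its DM_ENDIO_* return value decides between completing (dm_end_request(), line 232), requeueing, or leaving the I/O to the target. The op-specific checks on lines 218-225 handle a subtle case: if a DISCARD / WRITE SAME / WRITE ZEROES clone failed and the underlying queue now advertises a zero limit for that op, the device has stopped supporting it, so dm-rq disables the feature on the mapped device rather than failing such requests forever. A sketch of that fallback (the BLK_STS_TARGET guard and the disable_*() helpers are from the same era, quoted from memory):

    if (unlikely(error == BLK_STS_TARGET)) {
            if (req_op(clone) == REQ_OP_DISCARD &&
                !clone->q->limits.max_discard_sectors)
                    disable_discard(tio->md);
            else if (req_op(clone) == REQ_OP_WRITE_SAME &&
                     !clone->q->limits.max_write_same_sectors)
                    disable_write_same(tio->md);
            else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                     !clone->q->limits.max_write_zeroes_sectors)
                    disable_write_zeroes(tio->md);
    }
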
258 struct request *clone = tio->clone;
260 if (!clone) {
272 dm_done(clone, tio->error, mapped);
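
Lines 258-272 are the softirq half of completion (dm_softirq_done(), the blk-mq ->complete handler in this era): it runs after end_clone_request() has stashed the status in the tio. A NULL tio->clone means the request was never mapped (or was killed before mapping), so there is nothing to unprep and the original is ended directly. A sketch under those assumptions (tio_from_request() is dm-rq's pdu accessor):

    static void dm_softirq_done(struct request *rq)
    {
            bool mapped = true;
            struct dm_rq_target_io *tio = tio_from_request(rq);
            struct request *clone = tio->clone;

            if (!clone) {
                    /* never mapped: just finish the original request */
                    rq_end_stats(tio->md, rq);
                    blk_mq_end_request(rq, tio->error);
                    rq_completed(tio->md);
                    return;
            }

            if (rq->rq_flags & RQF_FAILED)
                    mapped = false; /* killed before mapping: skip rq_end_io() */

            dm_done(clone, tio->error, mapped);
    }
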
276 * Complete the clone and the original request with the error status
289 * Complete the not-mapped clone and the original request with the error status
300 static void end_clone_request(struct request *clone, blk_status_t error)
302 struct dm_rq_target_io *tio = clone->end_io_data;
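
end_clone_request() (lines 300-302) is the clone's request-level end_io hook, installed by setup_clone() below. It tears nothing down itself; it only records the status and redirects completion to the original request, which blk-mq then finishes in softirq context via dm_softirq_done(). The "not-mapped" variant on line 289 (dm_kill_unmapped_request() in this era) additionally flags the request RQF_FAILED so the target's rq_end_io() is skipped. A sketch:

    static void dm_complete_request(struct request *rq, blk_status_t error)
    {
            struct dm_rq_target_io *tio = tio_from_request(rq);

            tio->error = error;             /* read later by dm_softirq_done() */
            blk_mq_complete_request(rq);    /* schedule softirq completion */
    }

    static void end_clone_request(struct request *clone, blk_status_t error)
    {
            struct dm_rq_target_io *tio = clone->end_io_data;

            dm_complete_request(tio->orig, error);  /* complete via the original */
    }
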
307 static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
311 if (blk_queue_io_stat(clone->q))
312 clone->rq_flags |= RQF_IO_STAT;
314 clone->start_time_ns = ktime_get_ns();
315 r = blk_insert_cloned_request(clone->q, clone);
317 /* must complete clone in terms of original request */
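
dm_dispatch_clone_request() (lines 307-317) inserts the fully prepared clone straight into the underlying device's queue with blk_insert_cloned_request(), bypassing bio-level submission. Resource errors are returned to the caller for requeue; any other failure is, per the comment on line 317, completed in terms of the original request. A sketch (the exact error filter is from memory):

    static blk_status_t dm_dispatch_clone_request(struct request *clone,
                                                  struct request *rq)
    {
            blk_status_t r;

            if (blk_queue_io_stat(clone->q))
                    clone->rq_flags |= RQF_IO_STAT; /* account I/O on the clone */

            clone->start_time_ns = ktime_get_ns();
            r = blk_insert_cloned_request(clone->q, clone);
            if (r != BLK_STS_OK && r != BLK_STS_RESOURCE &&
                r != BLK_STS_DEV_RESOURCE)
                    /* must complete clone in terms of original request */
                    dm_complete_request(rq, r);
            return r;
    }
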
327 container_of(bio, struct dm_rq_clone_bio_info, clone);
336 static int setup_clone(struct request *clone, struct request *rq,
341 r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
346 clone->end_io = end_clone_request;
347 clone->end_io_data = tio;
349 tio->clone = clone;
359 tio->clone = NULL;
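
setup_clone() (lines 336-349) wires the pieces together: blk_rq_prep_clone() copies the original's bios into the clone, invoking a constructor callback for each cloned bio, and the request-level end_io hook is pointed at end_clone_request(). Line 359, by contrast, is just the tio's initialization: clone starts out NULL until setup_clone() assigns it. The container_of() match on line 327 is that per-bio constructor; a sketch of its usual shape:

    /* Called by blk_rq_prep_clone() once per cloned bio. */
    static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                     void *data)
    {
            struct dm_rq_target_io *tio = data;
            struct dm_rq_clone_bio_info *info =
                    container_of(bio, struct dm_rq_clone_bio_info, clone);

            info->orig = bio_orig;          /* remember the original bio */
            info->tio = tio;
            bio->bi_end_io = end_clone_bio; /* per-bio completion hook */

            return 0;
    }
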
384 struct request *clone = NULL;
387 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
393 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
395 ti->type->release_clone_rq(clone, &tio->info);
400 trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
402 ret = dm_dispatch_clone_request(clone, rq);
404 blk_rq_unprep_clone(clone);
405 blk_mq_cleanup_rq(clone);
406 tio->ti->type->release_clone_rq(clone, &tio->info);
407 tio->clone = NULL;
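
The last cluster (lines 384-407) is map_request(): the target's clone_and_map_rq() allocates and maps the clone; on DM_MAPIO_REMAPPED, setup_clone() preps it and dm_dispatch_clone_request() sends it down. Note the careful unwind on lines 404-407 when the underlying queue is out of resources: unprep the clone, let the low-level driver drop anything it attached to the request (blk_mq_cleanup_rq()), return the clone to the target, and clear tio->clone so the subsequent requeue does not release it a second time. A condensed sketch of that branch:

    r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
    switch (r) {
    case DM_MAPIO_REMAPPED:
            if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                    /* -ENOMEM: hand the clone back and retry later */
                    ti->type->release_clone_rq(clone, &tio->info);
                    return DM_MAPIO_REQUEUE;
            }

            /* the target has remapped the I/O, so dispatch it */
            trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
                                 blk_rq_pos(rq));
            ret = dm_dispatch_clone_request(clone, rq);
            if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                    blk_rq_unprep_clone(clone);
                    blk_mq_cleanup_rq(clone);
                    tio->ti->type->release_clone_rq(clone, &tio->info);
                    tio->clone = NULL;  /* avoid a double release on requeue */
                    return DM_MAPIO_REQUEUE;
            }
            break;
    default:
            /* DM_MAPIO_SUBMITTED / _REQUEUE / _KILL handling omitted */
            break;
    }
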