Lines matching refs: img_req (drivers/block/rbd.c)

644 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
1435 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1437 switch (img_req->op_type) {
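
The 1435/1437 match shows only the head of the predicate. A minimal sketch of how the switch plausibly completes (the OBJ_OP_* case labels are assumed, not visible in the matches):

    static bool rbd_img_is_write(struct rbd_img_request *img_req)
    {
            switch (img_req->op_type) {
            case OBJ_OP_READ:
                    return false;
            case OBJ_OP_WRITE:
            case OBJ_OP_DISCARD:
            case OBJ_OP_ZEROOUT:
                    return true;    /* every modifying op takes the write path */
            default:
                    BUG();
            }
    }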
1669 static void rbd_img_capture_header(struct rbd_img_request *img_req)
1671 struct rbd_device *rbd_dev = img_req->rbd_dev;
1675 if (!rbd_img_is_write(img_req))
1676 img_req->snap_id = rbd_dev->spec->snap_id;
1679 img_request_layered_set(img_req);
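
Around 1669-1679 the header capture pins reads to the mapped snapshot and flags layered images. A sketch with the elided lines filled in; the lockdep assertion and the rbd_dev_parent_get() guard are assumptions:

    static void rbd_img_capture_header(struct rbd_img_request *img_req)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;

            lockdep_assert_held(&rbd_dev->header_rwsem);    /* assumed */

            /* reads are pinned to the snapshot the device is mapped at */
            if (!rbd_img_is_write(img_req))
                    img_req->snap_id = rbd_dev->spec->snap_id;

            /* assumed guard: only mark layered once a parent ref is held */
            if (rbd_dev_parent_get(rbd_dev))
                    img_request_layered_set(img_req);
    }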
2439 struct rbd_img_request *img_req = obj_req->img_request;
2441 switch (img_req->op_type) {
2443 if (!use_object_map(img_req->rbd_dev) ||
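
The 2439-2443 fragment looks like a count_write_ops()-style helper: the number of OSD ops per object depends on the op type, and the use_object_map() test at 2443 presumably elides the setallochint op when the object map already says the object exists. A sketch; the helper name, the RBD_OBJ_FLAG_MAY_EXIST flag, and the exact op counts are assumptions:

    static int count_write_ops(struct rbd_obj_request *obj_req)
    {
            struct rbd_img_request *img_req = obj_req->img_request;

            switch (img_req->op_type) {
            case OBJ_OP_WRITE:
                    if (!use_object_map(img_req->rbd_dev) ||
                        !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
                            return 2;       /* setallochint + write/writefull */
                    return 1;               /* write/writefull */
            case OBJ_OP_DISCARD:
            case OBJ_OP_ZEROOUT:
                    return 1;               /* delete/truncate/zero (simplified) */
            default:
                    BUG();
            }
    }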
2486 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2491 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2492 switch (img_req->op_type) {
2511 rbd_img_obj_request_del(img_req, obj_req);
2516 img_req->state = RBD_IMG_START;
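
2486-2516 outline the per-object init pass. A sketch of the whole function; the rbd_obj_init_*() helper names and the "ret > 0 means prune" convention are assumptions inferred from the 2511 delete:

    static int __rbd_img_fill_request(struct rbd_img_request *img_req)
    {
            struct rbd_obj_request *obj_req, *next_obj_req;
            int ret;

            for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
                    switch (img_req->op_type) {
                    case OBJ_OP_READ:
                            ret = rbd_obj_init_read(obj_req);
                            break;
                    case OBJ_OP_WRITE:
                            ret = rbd_obj_init_write(obj_req);
                            break;
                    case OBJ_OP_DISCARD:
                            ret = rbd_obj_init_discard(obj_req);
                            break;
                    case OBJ_OP_ZEROOUT:
                            ret = rbd_obj_init_zeroout(obj_req);
                            break;
                    default:
                            BUG();
                    }
                    if (ret < 0)
                            return ret;
                    if (ret > 0) {
                            /* assumed: >0 means no-op for this object, drop it */
                            rbd_img_obj_request_del(img_req, obj_req);
                            continue;
                    }
            }

            img_req->state = RBD_IMG_START;         /* arm the state machine */
            return 0;
    }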
2536 struct rbd_img_request *img_req = arg;
2543 rbd_img_obj_request_add(img_req, obj_req);
2559 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2567 img_req->data_type = fctx->pos_type;
2575 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2578 &img_req->object_extents,
2579 alloc_object_extent, img_req,
2585 return __rbd_img_fill_request(img_req);
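
2559-2585 show the no-copy fill: ceph_file_to_extents() maps each image extent onto object extents, allocating one object request per object touched and recording where its data starts in the caller's bio/bvecs. A sketch; the fctx->pos/fctx->iter plumbing and the fe_off/fe_len field names are assumptions:

    static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
                                           struct ceph_file_extent *img_extents,
                                           u32 num_img_extents,
                                           struct rbd_img_fill_ctx *fctx)
    {
            u32 i;
            int ret;

            img_req->data_type = fctx->pos_type;

            fctx->iter = *fctx->pos;
            for (i = 0; i < num_img_extents; i++) {
                    ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
                                               img_extents[i].fe_off,
                                               img_extents[i].fe_len,
                                               &img_req->object_extents,
                                               alloc_object_extent, img_req,
                                               fctx->set_pos_fn, &fctx->iter);
                    if (ret)
                            return ret;
            }

            return __rbd_img_fill_request(img_req);
    }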
2591 * but not always) and add them to @img_req. For each object request,
2601 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2606 struct rbd_device *rbd_dev = img_req->rbd_dev;
2613 return rbd_img_fill_request_nocopy(img_req, img_extents,
2616 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2630 &img_req->object_extents,
2631 alloc_object_extent, img_req,
2637 for_each_obj_request(img_req, obj_req) {
2654 &img_req->object_extents,
2660 return __rbd_img_fill_request(img_req);
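
2601-2660 are the copying path taken when the layout is "fancy" (non-trivial striping): 2630/2631 is a first ceph_file_to_extents() pass with a counting callback, and 2654 a second walk (presumably ceph_iterate_extents()) with a copying callback. A condensed sketch, with the rbd_layout_is_fancy() test, the count_fn/copy_fn names, and the bvec_pos/bvec_count fields all assumed:

    static int rbd_img_fill_request(struct rbd_img_request *img_req,
                                    struct ceph_file_extent *img_extents,
                                    u32 num_img_extents,
                                    struct rbd_img_fill_ctx *fctx)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;
            struct rbd_obj_request *obj_req;
            u32 i;
            int ret;

            if (fctx->pos_type == OBJ_REQUEST_NODATA ||
                !rbd_layout_is_fancy(&rbd_dev->layout))
                    return rbd_img_fill_request_nocopy(img_req, img_extents,
                                                       num_img_extents, fctx);

            img_req->data_type = OBJ_REQUEST_OWN_BVECS;

            /* pass 1: create object requests, counting bio_vecs per object */
            fctx->iter = *fctx->pos;
            for (i = 0; i < num_img_extents; i++) {
                    ret = ceph_file_to_extents(&rbd_dev->layout,
                                               img_extents[i].fe_off,
                                               img_extents[i].fe_len,
                                               &img_req->object_extents,
                                               alloc_object_extent, img_req,
                                               fctx->count_fn, &fctx->iter);
                    if (ret)
                            return ret;
            }

            /* allocate each object request's private bio_vec array */
            for_each_obj_request(img_req, obj_req) {
                    obj_req->bvec_pos.bvecs =
                        kmalloc_array(obj_req->bvec_count,
                                      sizeof(*obj_req->bvec_pos.bvecs),
                                      GFP_NOIO);
                    if (!obj_req->bvec_pos.bvecs)
                            return -ENOMEM;
            }

            /* pass 2: re-slice the caller's bio_vecs into those arrays */
            fctx->iter = *fctx->pos;
            for (i = 0; i < num_img_extents; i++) {
                    ret = ceph_iterate_extents(&rbd_dev->layout,
                                               img_extents[i].fe_off,
                                               img_extents[i].fe_len,
                                               &img_req->object_extents,
                                               fctx->copy_fn, &fctx->iter);
                    if (ret)
                            return ret;
            }

            return __rbd_img_fill_request(img_req);
    }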
2663 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2673 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2713 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2726 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2730 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2736 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2773 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2786 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2790 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2801 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
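
2713-2801 are thin wrappers: the single-extent entry points build a one-element ceph_file_extent plus a position iterator, and the __ variants package the per-data-type callbacks into an rbd_img_fill_ctx. A sketch for the bio case; the callback names (set_bio_pos etc.) and the union cast are assumptions:

    static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
                                       struct ceph_file_extent *img_extents,
                                       u32 num_img_extents,
                                       struct ceph_bio_iter *bio_pos)
    {
            struct rbd_img_fill_ctx fctx = {
                    .pos_type = OBJ_REQUEST_BIO,
                    .pos = (union rbd_img_fill_iter *)bio_pos,
                    .set_pos_fn = set_bio_pos,      /* assumed callback names */
                    .count_fn = count_bio_bvecs,
                    .copy_fn = copy_bio_bvecs,
            };

            return rbd_img_fill_request(img_req, img_extents, num_img_extents,
                                        &fctx);
    }

    static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
                                     u64 off, u64 len, struct bio *bio)
    {
            struct ceph_file_extent ex = { off, len };
            struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

            return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
    }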
2807 struct rbd_img_request *img_req =
2810 rbd_img_handle_request(img_req, img_req->work_result);
2813 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2815 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2816 img_req->work_result = result;
2817 queue_work(rbd_wq, &img_req->work);
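
2807-2817 defer completion handling to the rbd workqueue, presumably to get off the caller's stack (completions can arrive from OSD reply context). The 2807 fragment is the container_of() body of the work handler:

    static void rbd_img_handle_request_work(struct work_struct *work)
    {
            struct rbd_img_request *img_req =
                container_of(work, struct rbd_img_request, work);

            rbd_img_handle_request(img_req, img_req->work_result);
    }

    static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
    {
            INIT_WORK(&img_req->work, rbd_img_handle_request_work);
            img_req->work_result = result;
            queue_work(rbd_wq, &img_req->work);
    }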
2858 struct rbd_img_request *img_req = obj_req->img_request;
2859 struct rbd_device *parent = img_req->rbd_dev->parent;
2878 if (!rbd_img_is_write(img_req)) {
2879 switch (img_req->data_type) {
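
2858-2879 are from the parent (layered) read path: a child image request against the parent device is allocated to service the miss, and 2878/2879 switch on the original request's data type to aim the child at the same data landing zone. A condensed sketch; the cache name and flag setup are assumptions:

    struct rbd_img_request *child_img_req;

    child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
    if (!child_img_req)
            return -ENOMEM;

    rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
    __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
    child_img_req->obj_request = obj_req;

    /*
     * For a plain read, the child is filled from the same bio/bvec
     * position as obj_req, so parent data lands straight in the caller's
     * buffers; for a write (copyup), it is filled from bounce bvecs.
     */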
3452 struct rbd_img_request *img_req = obj_req->img_request;
3453 struct rbd_device *rbd_dev = img_req->rbd_dev;
3457 if (!rbd_img_is_write(img_req))
3466 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
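
3452-3466 match the object-level dispatcher that pairs with the image-level one at 3632: advance the object state machine under its own mutex, then warn once on final error. A sketch; the rbd_obj_advance_read/write names and obj_req->state_mutex are assumptions:

    static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
                                         int *result)
    {
            struct rbd_img_request *img_req = obj_req->img_request;
            struct rbd_device *rbd_dev = img_req->rbd_dev;
            bool done;

            mutex_lock(&obj_req->state_mutex);
            if (!rbd_img_is_write(img_req))
                    done = rbd_obj_advance_read(obj_req, result);
            else
                    done = rbd_obj_advance_write(obj_req, result);
            mutex_unlock(&obj_req->state_mutex);

            if (done && *result) {
                    rbd_assert(*result < 0);
                    rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
                             obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
                             obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
            }
            return done;
    }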
3482 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3484 struct rbd_device *rbd_dev = img_req->rbd_dev;
3492 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3497 return rbd_img_is_write(img_req);
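
3482-3497 decide whether the request must hold the exclusive lock: only top-level requests on a writable mapping need it, and reads only when lock_on_read is set. A sketch; the feature-bit and snap-id checks are filled in as assumptions around the visible assert and return:

    static bool need_exclusive_lock(struct rbd_img_request *img_req)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;

            if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
                    return false;                   /* assumed check */

            if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                    return false;                   /* assumed: snaps are read-only */

            rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
            if (rbd_dev->opts->lock_on_read)        /* assumed option */
                    return true;

            return rbd_img_is_write(img_req);
    }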
3500 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3502 struct rbd_device *rbd_dev = img_req->rbd_dev;
3508 rbd_assert(list_empty(&img_req->lock_item));
3510 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3512 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3517 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3519 struct rbd_device *rbd_dev = img_req->rbd_dev;
3524 if (!list_empty(&img_req->lock_item)) {
3525 list_del_init(&img_req->lock_item);
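
3500-3525 file the request on one of two lists: acquiring_list while the lock is still being obtained, running_list once it is held; the boolean return tells the caller whether it may proceed immediately, and rbd_lock_del_request() (3517-3525) undoes this at completion. A sketch of the add side; the lock_state/lock_lists_lock names are assumptions:

    static bool rbd_lock_add_request(struct rbd_img_request *img_req)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;
            bool locked;

            lockdep_assert_held(&rbd_dev->lock_rwsem);
            locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
            spin_lock(&rbd_dev->lock_lists_lock);
            rbd_assert(list_empty(&img_req->lock_item));
            if (!locked)
                    list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
            else
                    list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
            spin_unlock(&rbd_dev->lock_lists_lock);

            return locked;
    }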
3534 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3536 struct rbd_device *rbd_dev = img_req->rbd_dev;
3538 if (!need_exclusive_lock(img_req))
3541 if (rbd_lock_add_request(img_req))
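
3534-3541 tie the two together: return positive if the request can run now (no lock needed, or lock already owned), zero if it must park on acquiring_list. A sketch; the delayed-work kick and the rwsem choreography are assumptions, and the opts->exclusive error branch is omitted:

    static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;

            if (!need_exclusive_lock(img_req))
                    return 1;                       /* proceed immediately */

            down_read(&rbd_dev->lock_rwsem);
            if (rbd_lock_add_request(img_req)) {
                    up_read(&rbd_dev->lock_rwsem);
                    return 1;                       /* lock already held */
            }

            /* assumed: kick lock acquisition; request waits on acquiring_list */
            queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
            up_read(&rbd_dev->lock_rwsem);
            return 0;
    }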
3558 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3560 struct rbd_device *rbd_dev = img_req->rbd_dev;
3563 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3564 rbd_assert(!need_exclusive_lock(img_req) ||
3567 if (rbd_img_is_write(img_req)) {
3568 rbd_assert(!img_req->snapc);
3570 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3574 for_each_obj_request(img_req, obj_req) {
3579 img_req->pending.result = result;
3583 img_req->pending.num_pending++;
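
3558-3583 kick off every object request and tally the in-flight count in img_req->pending; the 3570 snapc capture under the visible 3568 assert means a write request grabs the snapshot context only now, after the exclusive lock is settled. A sketch; the header_rwsem bracketing and the __rbd_is_lock_owner() half of the assert are assumptions:

    static void rbd_img_object_requests(struct rbd_img_request *img_req)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;
            struct rbd_obj_request *obj_req;

            rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
            rbd_assert(!need_exclusive_lock(img_req) ||
                       __rbd_is_lock_owner(rbd_dev));

            if (rbd_img_is_write(img_req)) {
                    rbd_assert(!img_req->snapc);
                    down_read(&rbd_dev->header_rwsem);      /* assumed */
                    img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
                    up_read(&rbd_dev->header_rwsem);
            }

            for_each_obj_request(img_req, obj_req) {
                    int result = 0;

                    if (__rbd_obj_handle_request(obj_req, &result)) {
                            if (result) {
                                    img_req->pending.result = result;
                                    return;         /* first error stops the fan-out */
                            }
                    } else {
                            img_req->pending.num_pending++;
                    }
            }
    }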
3588 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3593 switch (img_req->state) {
3597 ret = rbd_img_exclusive_lock(img_req);
3602 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3610 rbd_img_object_requests(img_req);
3611 if (!img_req->pending.num_pending) {
3612 *result = img_req->pending.result;
3613 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3616 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3619 if (!pending_result_dec(&img_req->pending, result))
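
3588-3619 are the image-level state machine. Reassembled around the visible transitions (the again label and the assert placement are assumptions):

    static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
    {
            int ret;

    again:
            switch (img_req->state) {
            case RBD_IMG_START:
                    rbd_assert(!*result);

                    ret = rbd_img_exclusive_lock(img_req);
                    if (ret < 0) {
                            *result = ret;
                            return true;
                    }
                    img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
                    if (ret > 0)
                            goto again;     /* no lock needed or already held */
                    return false;           /* parked until the lock arrives */
            case RBD_IMG_EXCLUSIVE_LOCK:
                    if (*result)
                            return true;    /* lock acquisition failed */

                    rbd_img_object_requests(img_req);
                    if (!img_req->pending.num_pending) {
                            *result = img_req->pending.result;
                            img_req->state = RBD_IMG_OBJECT_REQUESTS;
                            goto again;     /* everything completed synchronously */
                    }
                    img_req->state = __RBD_IMG_OBJECT_REQUESTS;
                    return false;
            case __RBD_IMG_OBJECT_REQUESTS:
                    if (!pending_result_dec(&img_req->pending, result))
                            return false;   /* more object requests outstanding */
                    fallthrough;
            case RBD_IMG_OBJECT_REQUESTS:
                    return true;
            default:
                    BUG();
            }
    }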
3630 * Return true if @img_req is completed.
3632 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3635 struct rbd_device *rbd_dev = img_req->rbd_dev;
3638 if (need_exclusive_lock(img_req)) {
3640 mutex_lock(&img_req->state_mutex);
3641 done = rbd_img_advance(img_req, result);
3643 rbd_lock_del_request(img_req);
3644 mutex_unlock(&img_req->state_mutex);
3647 mutex_lock(&img_req->state_mutex);
3648 done = rbd_img_advance(img_req, result);
3649 mutex_unlock(&img_req->state_mutex);
3655 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3656 obj_op_name(img_req->op_type), *result);
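
3632-3656 wrap the advance in the right locking: when the exclusive lock is in play, lock_rwsem is presumably held for read across the state transition so a lock handover cannot race with rbd_lock_del_request(). A sketch:

    static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
                                         int *result)
    {
            struct rbd_device *rbd_dev = img_req->rbd_dev;
            bool done;

            if (need_exclusive_lock(img_req)) {
                    down_read(&rbd_dev->lock_rwsem);        /* assumed bracketing */
                    mutex_lock(&img_req->state_mutex);
                    done = rbd_img_advance(img_req, result);
                    if (done)
                            rbd_lock_del_request(img_req);
                    mutex_unlock(&img_req->state_mutex);
                    up_read(&rbd_dev->lock_rwsem);
            } else {
                    mutex_lock(&img_req->state_mutex);
                    done = rbd_img_advance(img_req, result);
                    mutex_unlock(&img_req->state_mutex);
            }

            if (done && *result) {
                    rbd_assert(*result < 0);
                    rbd_warn(rbd_dev, "%s%s result %d",
                             test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
                             obj_op_name(img_req->op_type), *result);
            }
            return done;
    }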
3661 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3664 if (!__rbd_img_handle_request(img_req, &result))
3667 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3668 struct rbd_obj_request *obj_req = img_req->obj_request;
3670 rbd_img_request_destroy(img_req);
3672 img_req = obj_req->img_request;
3676 struct request *rq = blk_mq_rq_from_pdu(img_req);
3678 rbd_img_request_destroy(img_req);
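
3661-3678 is the completion fan-in: a finished child request feeds its result back into the owning object request (possibly re-advancing that object's image request), while a top-level request ends the block-layer request it is embedded in (3676 recovers it via blk_mq_rq_from_pdu()). A sketch; the goto-again loop and the blk_mq_end_request() call are assumptions:

    static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
    {
    again:
            if (!__rbd_img_handle_request(img_req, &result))
                    return;                 /* still in flight */

            if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
                    struct rbd_obj_request *obj_req = img_req->obj_request;

                    rbd_img_request_destroy(img_req);
                    if (__rbd_obj_handle_request(obj_req, &result)) {
                            img_req = obj_req->img_request;
                            goto again;     /* parent image request advances */
                    }
            } else {
                    struct request *rq = blk_mq_rq_from_pdu(img_req);

                    rbd_img_request_destroy(img_req);
                    blk_mq_end_request(rq, errno_to_blk_status(result));
            }
    }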
3897 struct rbd_img_request *img_req;
3912 img_req = list_first_entry(&rbd_dev->acquiring_list,
3914 mutex_lock(&img_req->state_mutex);
3915 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3917 list_move_tail(&img_req->lock_item,
3920 list_del_init(&img_req->lock_item);
3921 rbd_img_schedule(img_req, result);
3922 mutex_unlock(&img_req->state_mutex);
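
3897-3922 is the wake-up side of the lock handshake: once acquisition settles, every parked request on acquiring_list is moved to running_list on success (or dropped on error) and rescheduled through rbd_img_schedule(). Presumably this sits inside a wake_lock_waiters()-style helper; the while loop is an assumption:

    while (!list_empty(&rbd_dev->acquiring_list)) {
            img_req = list_first_entry(&rbd_dev->acquiring_list,
                                       struct rbd_img_request, lock_item);
            mutex_lock(&img_req->state_mutex);
            rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
            if (!result)
                    list_move_tail(&img_req->lock_item,
                                   &rbd_dev->running_list);
            else
                    list_del_init(&img_req->lock_item);
            rbd_img_schedule(img_req, result);      /* resume on rbd_wq */
            mutex_unlock(&img_req->state_mutex);
    }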
4808 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4834 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4855 rbd_img_request_init(img_req, rbd_dev, op_type);
4857 if (rbd_img_is_write(img_req)) {
4860 obj_op_name(img_req->op_type));
4866 INIT_WORK(&img_req->work, rbd_queue_workfn);
4867 queue_work(rbd_wq, &img_req->work);
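
4834-4867 are from the blk-mq ->queue_rq() path: the image request lives in the request PDU (4834), is (re)initialized per block-layer op, writes are rejected on read-only mappings (4857-4860), and the real work is punted to rbd_wq via rbd_queue_workfn(). A sketch; the req_op() mapping and return codes are assumptions:

    static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
    {
            struct rbd_device *rbd_dev = hctx->queue->queuedata;
            struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
            enum obj_operation_type op_type;

            switch (req_op(bd->rq)) {
            case REQ_OP_DISCARD:
                    op_type = OBJ_OP_DISCARD;
                    break;
            case REQ_OP_WRITE_ZEROES:
                    op_type = OBJ_OP_ZEROOUT;
                    break;
            case REQ_OP_WRITE:
                    op_type = OBJ_OP_WRITE;
                    break;
            case REQ_OP_READ:
                    op_type = OBJ_OP_READ;
                    break;
            default:
                    rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
                    return BLK_STS_IOERR;
            }

            rbd_img_request_init(img_req, rbd_dev, op_type);

            if (rbd_img_is_write(img_req)) {
                    if (rbd_is_ro(rbd_dev)) {
                            rbd_warn(rbd_dev, "%s on read-only mapping",
                                     obj_op_name(img_req->op_type));
                            return BLK_STS_IOERR;
                    }
                    rbd_assert(!rbd_is_snap(rbd_dev));
            }

            INIT_WORK(&img_req->work, rbd_queue_workfn);
            queue_work(rbd_wq, &img_req->work);
            return BLK_STS_OK;
    }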