Lines Matching defs:obj_req

343 	struct list_head	object_extents;	/* obj_req.ex structs */
643 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
1245 * Zero a range in @obj_req data buffer defined by a bio (list) or
1250 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1253 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1255 switch (obj_req->img_request->data_type) {
1257 zero_bios(&obj_req->bio_pos, off, bytes);
1261 zero_bvecs(&obj_req->bvec_pos, off, bytes);
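rbd_obj_zero_range() above dispatches on the image request's data type: the same byte range is zeroed either through the bio chain (zero_bios) or through the bvec array (zero_bvecs). A minimal sketch of that dispatch shape, with a flat buffer and a local enum standing in for the kernel's data types and helpers:

#include <stdint.h>
#include <string.h>

enum data_type { DATA_BIOS, DATA_BVECS };	/* stand-ins for the kernel's data types */

struct data_buf {
	enum data_type type;
	uint8_t *bytes;		/* flattened stand-in for the bio list / bvec array */
};

/* Zero [off, off + len) of the request's data buffer, whichever form it is in.
 * The real code walks the bio list via zero_bios() or the bvec array via
 * zero_bvecs(); here both collapse to a memset over the flattened buffer. */
void zero_data_range(struct data_buf *buf, uint32_t off, uint32_t len)
{
	switch (buf->type) {
	case DATA_BIOS:
	case DATA_BVECS:
		memset(buf->bytes + off, 0, len);
		break;
	}
}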
1298 struct rbd_obj_request *obj_req = osd_req->r_priv;
1300 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1301 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1302 obj_req->ex.oe_off, obj_req->ex.oe_len);
1321 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1323 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1325 return !obj_req->ex.oe_off &&
1326 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1329 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1331 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1333 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
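The rbd_obj_is_entire()/rbd_obj_is_tail() predicates reduce to range checks of the request extent against the RADOS object size. A small user-space sketch of that arithmetic; the struct and parameters below are stand-ins for obj_req->ex and rbd_dev->layout.object_size, and completing the truncated is_tail comparison against the object size is an assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the oe_off/oe_len pair in obj_req->ex */
struct obj_extent {
	uint64_t off;	/* offset of the request within the RADOS object */
	uint64_t len;	/* length of the request */
};

/* request covers the whole object: starts at 0 and spans object_size */
static bool extent_is_entire(const struct obj_extent *ex, uint64_t object_size)
{
	return !ex->off && ex->len == object_size;
}

/* request ends exactly at the object boundary (assumed completion of the
 * truncated comparison in rbd_obj_is_tail) */
static bool extent_is_tail(const struct obj_extent *ex, uint64_t object_size)
{
	return ex->off + ex->len == object_size;
}

int main(void)
{
	struct obj_extent ex = { .off = 1u << 20, .len = 3u << 20 };
	uint64_t object_size = 4u << 20;	/* 4 MiB objects */

	/* prints "entire=0 tail=1" */
	printf("entire=%d tail=%d\n",
	       extent_is_entire(&ex, object_size),
	       extent_is_tail(&ex, object_size));
	return 0;
}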
1340 static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
1342 rbd_assert(obj_req->img_request->snapc);
1344 if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
1345 dout("%s %p objno %llu discard\n", __func__, obj_req,
1346 obj_req->ex.oe_objno);
1350 if (!obj_req->num_img_extents) {
1351 dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
1352 obj_req->ex.oe_objno);
1356 if (rbd_obj_is_entire(obj_req) &&
1357 !obj_req->img_request->snapc->num_snaps) {
1358 dout("%s %p objno %llu entire\n", __func__, obj_req,
1359 obj_req->ex.oe_objno);
1363 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
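The rbd_obj_set_copyup_enabled() lines show when copy-up is skipped: discards never need it, objects with no parent overlap have nothing to copy, and a whole-object write with no snapshots simply replaces the object. A hedged model of that decision order; the struct, enum, and flag field below are stand-ins, not the kernel types:

#include <stdbool.h>
#include <stdint.h>

enum op_type { OP_READ, OP_WRITE, OP_DISCARD, OP_ZEROOUT };	/* modeled op types */

struct write_req_model {
	enum op_type op_type;
	uint32_t num_img_extents;	/* extents overlapping the parent image */
	uint32_t num_snaps;		/* snapshots in the write snap context */
	bool is_entire;			/* request covers the whole object */
	bool copyup_enabled;		/* models RBD_OBJ_FLAG_COPYUP_ENABLED */
};

/* Mirrors the visible branch order of rbd_obj_set_copyup_enabled(): each
 * early return corresponds to one of the dout() branches in the listing. */
void set_copyup_enabled(struct write_req_model *r)
{
	if (r->op_type == OP_DISCARD)
		return;			/* discard: nothing to preserve */
	if (!r->num_img_extents)
		return;			/* no overlap with the parent image */
	if (r->is_entire && !r->num_snaps)
		return;			/* full overwrite, no snapshots to protect */

	r->copyup_enabled = true;
}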
1366 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1368 return ceph_file_extents_bytes(obj_req->img_extents,
1369 obj_req->num_img_extents);
1388 struct rbd_obj_request *obj_req = osd_req->r_priv;
1391 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1392 osd_req->r_result, obj_req);
1399 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1404 rbd_obj_handle_request(obj_req, result);
1427 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1430 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1441 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1443 req->r_priv = obj_req;
1454 obj_req->ex.oe_objno);
1462 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1464 rbd_assert(obj_req->img_request->snapc);
1465 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1950 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
1953 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1978 rbd_assert(objno == obj_req->ex.oe_objno);
1997 struct rbd_obj_request *obj_req = osd_req->r_priv;
2000 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
2001 osd_req->r_result, obj_req);
2003 result = rbd_object_map_update_finish(obj_req, osd_req);
2004 rbd_obj_handle_request(obj_req, result);
2057 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2060 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2068 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2078 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2080 req->r_priv = obj_req;
2098 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2135 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2138 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2144 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2145 entire ? 0 : obj_req->ex.oe_off,
2147 obj_req->ex.oe_len,
2148 &obj_req->img_extents,
2149 &obj_req->num_img_extents);
2153 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2160 struct rbd_obj_request *obj_req = osd_req->r_priv;
2162 switch (obj_req->img_request->data_type) {
2165 &obj_req->bio_pos,
2166 obj_req->ex.oe_len);
2170 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2171 obj_req->ex.oe_len);
2172 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2174 &obj_req->bvec_pos);
2207 struct rbd_obj_request *obj_req = osd_req->r_priv;
2214 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2215 obj_req->copyup_bvec_count, bytes);
2219 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2221 obj_req->read_state = RBD_OBJ_READ_START;
2228 struct rbd_obj_request *obj_req = osd_req->r_priv;
2229 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2233 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2240 if (rbd_obj_is_entire(obj_req))
2246 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2250 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2255 ret = rbd_obj_calc_img_extents(obj_req, true);
2259 obj_req->write_state = RBD_OBJ_WRITE_START;
2263 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2265 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2272 struct rbd_obj_request *obj_req = osd_req->r_priv;
2274 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2275 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2279 truncate_or_zero_opcode(obj_req),
2280 obj_req->ex.oe_off, obj_req->ex.oe_len,
2285 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2287 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2300 !rbd_obj_is_tail(obj_req)) {
2301 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2302 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2308 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2310 obj_req->ex.oe_off = off;
2311 obj_req->ex.oe_len = next_off - off;
2315 ret = rbd_obj_calc_img_extents(obj_req, true);
2319 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2320 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2321 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2323 obj_req->write_state = RBD_OBJ_WRITE_START;
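rbd_obj_init_discard() narrows a misaligned discard to the alloc_size-aligned interior of the extent: the start is rounded up, the end rounded down, and oe_off/oe_len are rewritten accordingly. A stand-alone sketch of that rounding, with local helpers in place of the kernel's round_up()/round_down() macros and the behaviour for an empty interior left as an assumption:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the kernel round_up()/round_down() macros (power-of-two align) */
static uint64_t round_up_p2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Narrow [*off, *off + len) to its alloc_size-aligned interior, as the
 * init_discard lines do for a request that is neither whole-object nor a
 * tail.  Returns the new length; 0 means nothing aligned is left to discard
 * (how the kernel handles that case is not shown in the listing). */
static uint64_t clip_discard(uint64_t *off, uint64_t len, uint64_t alloc_size)
{
	uint64_t start = round_up_p2(*off, alloc_size);
	uint64_t end = round_down_p2(*off + len, alloc_size);

	if (start >= end)
		return 0;

	*off = start;
	return end - start;
}

int main(void)
{
	uint64_t off = 4096 + 100, len = 2 * 65536;

	/* prints "discard 65536~65536": only one aligned 64 KiB chunk survives */
	len = clip_discard(&off, len, 65536);
	printf("discard %llu~%llu\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}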
2330 struct rbd_obj_request *obj_req = osd_req->r_priv;
2333 if (rbd_obj_is_entire(obj_req)) {
2334 if (obj_req->num_img_extents) {
2335 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2340 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2346 opcode = truncate_or_zero_opcode(obj_req);
2351 obj_req->ex.oe_off, obj_req->ex.oe_len,
2355 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2360 ret = rbd_obj_calc_img_extents(obj_req, true);
2364 if (!obj_req->num_img_extents) {
2365 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2366 if (rbd_obj_is_entire(obj_req))
2367 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2370 obj_req->write_state = RBD_OBJ_WRITE_START;
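rbd_obj_init_zeroout() ends by classifying the request: with no parent overlap the write can be a no-op if the object does not exist (RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT), and if it also covers the whole object it can simply delete it (RBD_OBJ_FLAG_DELETION); the discard path above differs in setting the no-op flag unconditionally. A sketch of the zeroout classification with modeled flag bits:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_NOOP_FOR_NONEXISTENT (1u << 0)	/* models RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT */
#define FLAG_DELETION             (1u << 1)	/* models RBD_OBJ_FLAG_DELETION */

/* Zeroout classification as in the lines above: only meaningful when the
 * object has no parent overlap.  (The discard path sets the no-op flag
 * unconditionally and guards deletion on both conditions.) */
uint32_t classify_zeroout(bool has_parent_overlap, bool is_entire)
{
	uint32_t flags = 0;

	if (!has_parent_overlap) {
		flags |= FLAG_NOOP_FOR_NONEXISTENT;
		if (is_entire)
			flags |= FLAG_DELETION;
	}
	return flags;
}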
2374 static int count_write_ops(struct rbd_obj_request *obj_req)
2376 struct rbd_img_request *img_req = obj_req->img_request;
2381 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2388 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2389 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2401 struct rbd_obj_request *obj_req = osd_req->r_priv;
2403 switch (obj_req->img_request->op_type) {
2425 struct rbd_obj_request *obj_req, *next_obj_req;
2428 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2431 ret = rbd_obj_init_read(obj_req);
2434 ret = rbd_obj_init_write(obj_req);
2437 ret = rbd_obj_init_discard(obj_req);
2440 ret = rbd_obj_init_zeroout(obj_req);
2448 rbd_img_obj_request_del(img_req, obj_req);
2474 struct rbd_obj_request *obj_req;
2476 obj_req = rbd_obj_request_create();
2477 if (!obj_req)
2480 rbd_img_obj_request_add(img_req, obj_req);
2481 return &obj_req->ex;
2544 struct rbd_obj_request *obj_req;
2574 for_each_obj_request(img_req, obj_req) {
2575 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2576 sizeof(*obj_req->bvec_pos.bvecs),
2578 if (!obj_req->bvec_pos.bvecs)
2615 struct rbd_obj_request *obj_req =
2620 obj_req->bio_pos = *it;
2626 struct rbd_obj_request *obj_req =
2632 obj_req->bvec_count++;
2639 struct rbd_obj_request *obj_req =
2645 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2646 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2678 struct rbd_obj_request *obj_req =
2682 obj_req->bvec_pos = *it;
2683 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2689 struct rbd_obj_request *obj_req =
2694 obj_req->bvec_count++;
2700 struct rbd_obj_request *obj_req =
2705 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2706 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2757 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2759 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2761 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2762 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2766 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2767 obj_req->ex.oe_objno);
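rbd_obj_may_exist() consults the in-memory object map: if the object may exist it latches RBD_OBJ_FLAG_MAY_EXIST, otherwise the request assumes "dne" and can skip the OSD round trip. A simplified model of that gate; a flat per-object state array stands in for the real object map, which packs states more compactly:

#include <stdbool.h>
#include <stdint.h>

enum { OBJ_NONEXISTENT = 0 };	/* models OBJECT_NONEXISTENT; other states omitted */

struct map_model {
	const uint8_t *object_map;	/* one state byte per object; NULL if feature is off */
	uint64_t nr_objects;
};

/* Models the rbd_object_map_may_exist() gate used by rbd_obj_may_exist():
 * with no object map every object may exist; otherwise only a NONEXISTENT
 * entry lets the caller assume "dne" and skip the OSD request. */
bool object_may_exist(const struct map_model *m, uint64_t objno)
{
	if (!m->object_map)
		return true;		/* feature disabled: be conservative */
	return m->object_map[objno] != OBJ_NONEXISTENT;
}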
2771 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2776 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2781 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2793 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2795 struct rbd_img_request *img_req = obj_req->img_request;
2806 child_img_req->obj_request = obj_req;
2812 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2813 obj_req);
2819 obj_req->img_extents,
2820 obj_req->num_img_extents,
2821 &obj_req->bio_pos);
2826 obj_req->img_extents,
2827 obj_req->num_img_extents,
2828 &obj_req->bvec_pos);
2835 obj_req->img_extents,
2836 obj_req->num_img_extents,
2837 obj_req->copyup_bvecs);
2849 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2851 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2855 switch (obj_req->read_state) {
2859 if (!rbd_obj_may_exist(obj_req)) {
2861 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2865 ret = rbd_obj_read_object(obj_req);
2870 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2875 ret = rbd_obj_calc_img_extents(obj_req, false);
2880 if (obj_req->num_img_extents) {
2881 ret = rbd_obj_read_from_parent(obj_req);
2886 obj_req->read_state = RBD_OBJ_READ_PARENT;
2897 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2900 if (*result < obj_req->ex.oe_len)
2901 rbd_obj_zero_range(obj_req, *result,
2902 obj_req->ex.oe_len - *result);
2904 rbd_assert(*result == obj_req->ex.oe_len);
2914 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2916 if (obj_overlap < obj_req->ex.oe_len)
2917 rbd_obj_zero_range(obj_req, obj_overlap,
2918 obj_req->ex.oe_len - obj_overlap);
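The read completion paths above zero whatever the read did not cover: a short object read of *result bytes zero-fills the tail up to oe_len (with one branch, presumably the nonexistent-object case, zeroing the whole extent), and a parent read zero-fills everything beyond the parent overlap. A compact model of that bookkeeping, with zero_range() standing in for rbd_obj_zero_range():

#include <stdint.h>
#include <string.h>

/* stand-in for rbd_obj_zero_range(): zero [off, off + bytes) of the data buffer */
static void zero_range(uint8_t *buf, uint32_t off, uint32_t bytes)
{
	memset(buf + off, 0, bytes);
}

/* Short object read: result bytes arrived, the rest of the extent reads as
 * zeros (the "*result < obj_req->ex.oe_len" branch in the listing). */
void finish_object_read(uint8_t *buf, uint32_t oe_len, int result)
{
	if (result >= 0 && (uint32_t)result < oe_len)
		zero_range(buf, (uint32_t)result, oe_len - (uint32_t)result);
}

/* Parent read: only obj_overlap bytes come from the parent image; everything
 * beyond the overlap is zero-filled. */
void finish_parent_read(uint8_t *buf, uint32_t oe_len, uint32_t obj_overlap)
{
	if (obj_overlap < oe_len)
		zero_range(buf, obj_overlap, oe_len - obj_overlap);
}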
2926 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2928 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2930 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2931 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2933 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2934 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2935 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2948 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2950 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2956 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
2961 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
2964 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2967 int num_ops = count_write_ops(obj_req);
2971 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2974 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2978 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3014 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3020 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3023 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3041 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3045 int num_ops = count_write_ops(obj_req);
3049 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3054 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3075 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3079 rbd_assert(!obj_req->copyup_bvecs);
3080 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3081 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3082 sizeof(*obj_req->copyup_bvecs),
3084 if (!obj_req->copyup_bvecs)
3087 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3094 bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
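setup_copyup_bvecs() sizes the copy-up buffer by pages: calc_pages_for() yields how many pages cover obj_overlap bytes, and each bvec wraps a freshly allocated page holding at most a page's worth of data. A user-space sketch of that split; PAGE_SZ and pages_for() are local stand-ins for PAGE_SIZE and the libceph helper, and the per-bvec length loop is modeled, not copied:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* stand-in for PAGE_SIZE */

/* pages needed to hold len bytes starting at offset off
 * (models the libceph calc_pages_for() helper) */
static uint32_t pages_for(uint64_t off, uint64_t len)
{
	return (uint32_t)((off + len + PAGE_SZ - 1) / PAGE_SZ - off / PAGE_SZ);
}

int main(void)
{
	uint64_t obj_overlap = 10000;	/* bytes to copy up from the parent */
	uint32_t count = pages_for(0, obj_overlap);

	/* each bvec wraps a fresh page holding min(remaining, PAGE_SZ) bytes;
	 * the real code allocates the page and calls bvec_set_page() */
	for (uint32_t i = 0; i < count; i++) {
		uint32_t len = obj_overlap < PAGE_SZ ? (uint32_t)obj_overlap : PAGE_SZ;

		printf("bvec[%u]: len=%u\n", i, len);	/* 4096, 4096, 1808 */
		obj_overlap -= len;
	}
	return 0;
}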
3107 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3109 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3112 rbd_assert(obj_req->num_img_extents);
3113 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3115 if (!obj_req->num_img_extents) {
3122 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3125 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3129 return rbd_obj_read_from_parent(obj_req);
3132 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3134 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3135 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3140 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3145 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3155 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3158 obj_req->pending.result = ret;
3163 obj_req->pending.num_pending++;
3167 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3169 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3172 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3179 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3182 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3189 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3191 obj_req->pending.result = ret;
3195 obj_req->pending.num_pending++;
3199 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3201 obj_req->pending.result = ret;
3205 obj_req->pending.num_pending++;
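The copy-up paths fan out several sub-requests (per-snapshot object-map updates plus one or two copyup writes) and collect their completions through the pending {result, num_pending} pair and pending_result_dec(). A hedged model of that pattern; the "first non-zero result wins" policy is an assumption, since pending_result_dec() itself is not among the matched lines:

#include <stdbool.h>

struct pending_result {
	int result;		/* first non-zero completion result (assumed policy) */
	int num_pending;	/* sub-requests still outstanding */
};

/* Record one sub-request completion; returns true once the last one is in,
 * at which point *result holds the aggregated outcome for the caller. */
bool pending_dec(struct pending_result *pending, int *result)
{
	if (*result && !pending->result)
		pending->result = *result;

	if (--pending->num_pending)
		return false;		/* still waiting for other sub-requests */

	*result = pending->result;
	return true;
}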
3208 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3210 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3214 switch (obj_req->copyup_state) {
3218 ret = rbd_obj_copyup_read_parent(obj_req);
3223 if (obj_req->num_img_extents)
3224 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3226 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3232 if (is_zero_bvecs(obj_req->copyup_bvecs,
3233 rbd_obj_img_extents_bytes(obj_req))) {
3234 dout("%s %p detected zeros\n", __func__, obj_req);
3235 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3238 rbd_obj_copyup_object_maps(obj_req);
3239 if (!obj_req->pending.num_pending) {
3240 *result = obj_req->pending.result;
3241 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3244 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3247 if (!pending_result_dec(&obj_req->pending, result))
3257 rbd_obj_copyup_write_object(obj_req);
3258 if (!obj_req->pending.num_pending) {
3259 *result = obj_req->pending.result;
3260 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3263 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3266 if (!pending_result_dec(&obj_req->pending, result))
3282 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3284 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3290 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3293 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3297 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3299 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3303 switch (obj_req->write_state) {
3307 rbd_obj_set_copyup_enabled(obj_req);
3308 if (rbd_obj_write_is_noop(obj_req))
3311 ret = rbd_obj_write_pre_object_map(obj_req);
3316 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3326 ret = rbd_obj_write_object(obj_req);
3331 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3335 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3337 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3338 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3345 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3351 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3354 if (!rbd_obj_advance_copyup(obj_req, result))
3362 ret = rbd_obj_write_post_object_map(obj_req);
3367 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3382 * Return true if @obj_req is completed.
3384 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3387 struct rbd_img_request *img_req = obj_req->img_request;
3391 mutex_lock(&obj_req->state_mutex);
3393 done = rbd_obj_advance_read(obj_req, result);
3395 done = rbd_obj_advance_write(obj_req, result);
3396 mutex_unlock(&obj_req->state_mutex);
3401 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3402 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3411 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3413 if (__rbd_obj_handle_request(obj_req, &result))
3414 rbd_img_handle_request(obj_req->img_request, result);
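__rbd_obj_handle_request() steps the per-object state machine under state_mutex, choosing the read or write advance routine, and rbd_obj_handle_request() forwards the final result to the owning image request once the object request completes. A minimal pthread-based model of that dispatch, with stubbed advance routines:

#include <pthread.h>
#include <stdbool.h>

struct obj_model {
	pthread_mutex_t state_mutex;
	bool is_write;
	/* per-object read/write/copyup state would live here */
};

/* stubs: a real implementation would step the respective state machine */
static bool advance_read(struct obj_model *o, int *result)
{
	(void)o; (void)result;
	return true;
}

static bool advance_write(struct obj_model *o, int *result)
{
	(void)o; (void)result;
	return true;
}

/* Step the object state machine under its mutex; returns true once the
 * object request is complete, with *result possibly rewritten along the
 * way.  At that point the caller hands the result up to the image request,
 * as rbd_obj_handle_request() does above. */
bool handle_obj_completion(struct obj_model *o, int *result)
{
	bool done;

	pthread_mutex_lock(&o->state_mutex);
	done = o->is_write ? advance_write(o, result) : advance_read(o, result);
	pthread_mutex_unlock(&o->state_mutex);

	return done;
}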
3496 struct rbd_obj_request *obj_req;
3509 for_each_obj_request(img_req, obj_req) {
3512 if (__rbd_obj_handle_request(obj_req, &result)) {
3603 struct rbd_obj_request *obj_req = img_req->obj_request;
3606 if (__rbd_obj_handle_request(obj_req, &result)) {
3607 img_req = obj_req->img_request;