Lines Matching refs:rbd_dev (cross-reference listing from drivers/block/rbd.c; the left-hand number on each line is the source line number)

166 * identify an image.  Each rbd_dev structure includes a pointer to
174 * An rbd_dev structure contains a parent_spec pointer which is
177 * by the parent rbd_dev for its own identity (i.e., the structure
331 struct rbd_device *rbd_dev;
455 * Flag bits for rbd_dev->flags:
456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
457 * by rbd_dev->lock
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
602 if (!rbd_dev)
604 else if (rbd_dev->disk)
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
637 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
639 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
641 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
664 struct rbd_device *rbd_dev = disk->private_data;
667 spin_lock_irq(&rbd_dev->lock);
668 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
671 rbd_dev->open_count++;
672 spin_unlock_irq(&rbd_dev->lock);
676 (void) get_device(&rbd_dev->dev);
683 struct rbd_device *rbd_dev = disk->private_data;
686 spin_lock_irq(&rbd_dev->lock);
687 open_count_before = rbd_dev->open_count--;
688 spin_unlock_irq(&rbd_dev->lock);
691 put_device(&rbd_dev->dev);
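
For context, a minimal sketch (simplified, not the exact rbd.c bodies) of the open/release pattern implied by the lines above: both the REMOVING test and open_count are serialized by rbd_dev->lock, matching the flag comment at lines 455-457.

    /* Sketch only: refuse new opens once the device is being removed. */
    static int rbd_open_sketch(struct rbd_device *rbd_dev)
    {
            int ret = 0;

            spin_lock_irq(&rbd_dev->lock);
            if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                    ret = -ENOENT;                  /* removal in progress */
            else
                    rbd_dev->open_count++;          /* count this opener */
            spin_unlock_irq(&rbd_dev->lock);

            if (!ret)
                    (void) get_device(&rbd_dev->dev);  /* pin the device */
            return ret;
    }

    /* Matching release: drop the count under the same lock, then the ref. */
    static void rbd_release_sketch(struct rbd_device *rbd_dev)
    {
            spin_lock_irq(&rbd_dev->lock);
            rbd_dev->open_count--;
            spin_unlock_irq(&rbd_dev->lock);

            put_device(&rbd_dev->dev);
    }
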
981 static void rbd_init_layout(struct rbd_device *rbd_dev)
983 if (rbd_dev->header.stripe_unit == 0 ||
984 rbd_dev->header.stripe_count == 0) {
985 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
986 rbd_dev->header.stripe_count = 1;
989 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
990 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
991 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
992 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
993 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
994 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
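
Read together, the rbd_init_layout() fragments above amount to the following defaulting logic (a reconstruction from the matching lines only; braces and surrounding context are not part of this listing):

    /* Plain (non-fancy) images report stripe_unit/stripe_count of 0;
     * normalize that to "one whole object per stripe". */
    if (rbd_dev->header.stripe_unit == 0 ||
        rbd_dev->header.stripe_count == 0) {
            rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
            rbd_dev->header.stripe_count = 1;
    }

    /* Data objects go to the separate data pool when one is configured,
     * otherwise to the pool the image itself lives in. */
    rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
                              rbd_dev->spec->pool_id :
                              rbd_dev->header.data_pool_id;
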
1102 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1106 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1110 snap_name = rbd_dev->header.snap_names;
1141 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1143 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1152 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1158 which = rbd_dev_snap_index(rbd_dev, snap_id);
1162 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1166 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1171 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1172 if (rbd_dev->image_format == 1)
1173 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1175 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1178 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1181 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1183 *snap_size = rbd_dev->header.image_size;
1184 } else if (rbd_dev->image_format == 1) {
1187 which = rbd_dev_snap_index(rbd_dev, snap_id);
1191 *snap_size = rbd_dev->header.snap_sizes[which];
1196 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1205 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1207 u64 snap_id = rbd_dev->spec->snap_id;
1211 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1215 rbd_dev->mapping.size = size;
1219 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1221 rbd_dev->mapping.size = 0;
1323 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1326 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1331 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1334 rbd_dev->layout.object_size;
1410 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1411 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1430 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1431 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1433 const char *name_format = rbd_dev->image_format == 1 ?
1449 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1450 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1453 rbd_dev->header.object_prefix,
1530 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1532 rbd_dev_remove_parent(rbd_dev);
1533 rbd_spec_put(rbd_dev->parent_spec);
1534 rbd_dev->parent_spec = NULL;
1535 rbd_dev->parent_overlap = 0;
1544 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1548 if (!rbd_dev->parent_spec)
1551 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1558 rbd_dev_unparent(rbd_dev);
1560 rbd_warn(rbd_dev, "parent reference underflow");
1571 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1575 if (!rbd_dev->parent_spec)
1578 if (rbd_dev->parent_overlap)
1579 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1582 rbd_warn(rbd_dev, "parent reference overflow");
1588 struct rbd_device *rbd_dev,
1593 img_request->rbd_dev = rbd_dev;
1608 struct rbd_device *rbd_dev = img_req->rbd_dev;
1610 lockdep_assert_held(&rbd_dev->header_rwsem);
1613 img_req->snap_id = rbd_dev->spec->snap_id;
1615 if (rbd_dev_parent_get(rbd_dev))
1631 rbd_dev_parent_put(img_request->rbd_dev);
1644 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1649 rbd_assert(objno < rbd_dev->object_map_size);
1654 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1659 lockdep_assert_held(&rbd_dev->object_map_lock);
1660 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1661 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1664 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1670 lockdep_assert_held(&rbd_dev->object_map_lock);
1673 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1674 p = &rbd_dev->object_map[index];
1678 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1682 spin_lock(&rbd_dev->object_map_lock);
1683 state = __rbd_object_map_get(rbd_dev, objno);
1684 spin_unlock(&rbd_dev->object_map_lock);
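
A hedged sketch of the bit-packing these object-map helpers imply. The constants below are assumptions for illustration (they are defined elsewhere in rbd.c, not in this listing): each object's state fits in a couple of bits of the object_map byte array, and index/shift locate it.

    /* Assumed values, shown only to make index/shift concrete. */
    #define BITS_PER_OBJ    2
    #define OBJS_PER_BYTE   (8 / BITS_PER_OBJ)          /* 4 states per byte */
    #define OBJ_MASK        ((1 << BITS_PER_OBJ) - 1)   /* 0x3 */

    /* One plausible layout: object 0 lands in the high bits of byte 0. */
    static void object_map_index_sketch(u64 objno, u64 *index, u8 *shift)
    {
            u32 off;

            *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
            *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
    }

    /* Lookup then matches __rbd_object_map_get() above:
     *      state = (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
     */
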
1688 static bool use_object_map(struct rbd_device *rbd_dev)
1698 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1701 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1702 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1705 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1710 if (!use_object_map(rbd_dev))
1713 state = rbd_object_map_get(rbd_dev, objno);
1717 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1722 rbd_dev->spec->image_id);
1725 rbd_dev->spec->image_id, snap_id);
1728 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1730 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1739 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1742 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1748 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1752 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1759 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1767 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1770 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1778 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1786 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1788 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1792 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1794 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1797 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1825 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1827 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1838 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1840 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1841 rbd_dev->mapping.size);
1850 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1851 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1864 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1875 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1876 if (!rbd_dev->object_map) {
1881 rbd_dev->object_map_size = object_map_size;
1882 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1890 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1892 kvfree(rbd_dev->object_map);
1893 rbd_dev->object_map = NULL;
1894 rbd_dev->object_map_size = 0;
1897 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1901 ret = __rbd_object_map_load(rbd_dev);
1905 ret = rbd_dev_v2_get_flags(rbd_dev);
1907 rbd_object_map_free(rbd_dev);
1911 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1912 rbd_warn(rbd_dev, "object map is invalid");
1917 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1921 ret = rbd_object_map_lock(rbd_dev);
1925 ret = rbd_object_map_load(rbd_dev);
1927 rbd_object_map_unlock(rbd_dev);
1934 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1936 rbd_object_map_free(rbd_dev);
1937 rbd_object_map_unlock(rbd_dev);
1953 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1985 spin_lock(&rbd_dev->object_map_lock);
1986 state = __rbd_object_map_get(rbd_dev, objno);
1989 __rbd_object_map_set(rbd_dev, objno, new_state);
1990 spin_unlock(&rbd_dev->object_map_lock);
2007 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2009 u8 state = rbd_object_map_get(rbd_dev, objno);
2060 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2061 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2068 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2082 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2083 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2138 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2141 if (!rbd_dev->parent_overlap)
2144 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2146 entire ? rbd_dev->layout.object_size :
2154 rbd_dev->parent_overlap);
2229 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2232 if (!use_object_map(rbd_dev) ||
2235 rbd_dev->layout.object_size,
2236 rbd_dev->layout.object_size,
2237 rbd_dev->opts->alloc_hint_flags);
2287 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2299 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2301 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2303 rbd_dev->opts->alloc_size);
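
A worked example of the alignment being set up above (the round_down() counterpart is only partially visible at 2303, so treating it as rounding the end of the extent down to alloc_size is an assumption). With a 64 KiB alloc_size, a discard covering 10 KiB..150 KiB of an object clips to the aligned middle:

    /* Illustrative numbers only (alloc_size = 64 KiB = 65536). */
    off      = round_up(10 * 1024, 65536);      /* ->  65536 (64 KiB)  */
    next_off = round_down(150 * 1024, 65536);   /* -> 131072 (128 KiB) */
    /* Only the aligned span 64 KiB..128 KiB can actually be freed;
     * if off >= next_off, the discard is too small to free anything
     * and can be skipped. */
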
2380 if (!use_object_map(img_req->rbd_dev) ||
2512 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2543 struct rbd_device *rbd_dev = img_req->rbd_dev;
2549 !rbd_layout_is_fancy(&rbd_dev->layout))
2564 ret = ceph_file_to_extents(&rbd_dev->layout,
2588 ret = ceph_iterate_extents(&rbd_dev->layout,
2759 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2761 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2796 struct rbd_device *parent = img_req->rbd_dev->parent;
2851 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2873 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2928 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2930 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2950 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2953 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3109 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3114 rbd_dev->parent_overlap);
3134 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3142 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3149 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3210 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3252 rbd_warn(rbd_dev, "snap object map update failed: %d",
3284 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3287 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3299 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3322 rbd_warn(rbd_dev, "pre object map update failed: %d",
3359 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3373 rbd_warn(rbd_dev, "post object map update failed: %d",
3388 struct rbd_device *rbd_dev = img_req->rbd_dev;
3400 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3419 struct rbd_device *rbd_dev = img_req->rbd_dev;
3421 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3424 if (rbd_is_ro(rbd_dev))
3428 if (rbd_dev->opts->lock_on_read ||
3429 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3437 struct rbd_device *rbd_dev = img_req->rbd_dev;
3440 lockdep_assert_held(&rbd_dev->lock_rwsem);
3441 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3442 spin_lock(&rbd_dev->lock_lists_lock);
3445 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3447 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3448 spin_unlock(&rbd_dev->lock_lists_lock);
3454 struct rbd_device *rbd_dev = img_req->rbd_dev;
3457 lockdep_assert_held(&rbd_dev->lock_rwsem);
3458 spin_lock(&rbd_dev->lock_lists_lock);
3461 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3462 list_empty(&rbd_dev->running_list));
3464 spin_unlock(&rbd_dev->lock_lists_lock);
3466 complete(&rbd_dev->releasing_wait);
3471 struct rbd_device *rbd_dev = img_req->rbd_dev;
3479 if (rbd_dev->opts->exclusive) {
3488 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3489 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3495 struct rbd_device *rbd_dev = img_req->rbd_dev;
3500 __rbd_is_lock_owner(rbd_dev));
3504 down_read(&rbd_dev->header_rwsem);
3505 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3506 up_read(&rbd_dev->header_rwsem);
3570 struct rbd_device *rbd_dev = img_req->rbd_dev;
3574 down_read(&rbd_dev->lock_rwsem);
3580 up_read(&rbd_dev->lock_rwsem);
3589 rbd_warn(rbd_dev, "%s%s result %d",
3626 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3630 mutex_lock(&rbd_dev->watch_mutex);
3631 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3632 cid.handle = rbd_dev->watch_cookie;
3633 mutex_unlock(&rbd_dev->watch_mutex);
3640 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3643 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3644 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3646 rbd_dev->owner_cid = *cid; /* struct */
3649 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3651 mutex_lock(&rbd_dev->watch_mutex);
3652 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3653 mutex_unlock(&rbd_dev->watch_mutex);
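
The cookie built above is just a prefix plus the watch cookie, which (per the line at 4547 further down) is the watch handle's linger id. Assuming RBD_LOCK_COOKIE_PREFIX is the string "auto" (the define itself is not in this listing), a cookie would look like:

    char cookie[32];

    sprintf(cookie, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
    /* e.g. "auto 1" -- hypothetical value; the number is the linger id */
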
3656 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3658 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3660 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3661 strcpy(rbd_dev->lock_cookie, cookie);
3662 rbd_set_owner_cid(rbd_dev, &cid);
3663 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3669 static int rbd_lock(struct rbd_device *rbd_dev)
3671 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3675 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3676 rbd_dev->lock_cookie[0] != '\0');
3678 format_lock_cookie(rbd_dev, cookie);
3679 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3685 __rbd_lock(rbd_dev, cookie);
3692 static void rbd_unlock(struct rbd_device *rbd_dev)
3694 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3697 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3698 rbd_dev->lock_cookie[0] == '\0');
3700 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3701 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3703 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3706 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3707 rbd_dev->lock_cookie[0] = '\0';
3708 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3709 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3712 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3717 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3718 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3723 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3731 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3732 &rbd_dev->header_oloc, buf, buf_size,
3736 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3739 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3744 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3747 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3752 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3755 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3758 static int rbd_request_lock(struct rbd_device *rbd_dev)
3765 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3767 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3770 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3792 rbd_warn(rbd_dev,
3802 rbd_warn(rbd_dev,
3813 rbd_warn(rbd_dev, "no lock owners detected");
3830 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3834 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3835 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3837 cancel_delayed_work(&rbd_dev->lock_dwork);
3838 if (!completion_done(&rbd_dev->acquire_wait)) {
3839 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3840 list_empty(&rbd_dev->running_list));
3841 rbd_dev->acquire_err = result;
3842 complete_all(&rbd_dev->acquire_wait);
3846 while (!list_empty(&rbd_dev->acquiring_list)) {
3847 img_req = list_first_entry(&rbd_dev->acquiring_list,
3853 &rbd_dev->running_list);
3876 static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3878 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3886 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3887 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3890 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3895 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3901 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3907 rbd_warn(rbd_dev, "incompatible lock type detected");
3915 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3920 rbd_warn(rbd_dev, "locker has a blank address");
3924 dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
3925 __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
3939 static int find_watcher(struct rbd_device *rbd_dev,
3942 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3949 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3950 &rbd_dev->header_oloc, &watchers,
3953 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
3971 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3972 rbd_dev, cid.gid, cid.handle);
3973 rbd_set_owner_cid(rbd_dev, &cid);
3979 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3989 static int rbd_try_lock(struct rbd_device *rbd_dev)
3991 struct ceph_client *client = rbd_dev->rbd_client->client;
3998 ret = rbd_lock(rbd_dev);
4002 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4007 locker = get_lock_owner_info(rbd_dev);
4016 ret = find_watcher(rbd_dev, locker);
4020 refreshed_locker = get_lock_owner_info(rbd_dev);
4030 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4036 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4041 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4042 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4045 rbd_warn(rbd_dev, "failed to break header lock: %d",
4061 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4065 ret = rbd_dev_refresh(rbd_dev);
4069 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4070 ret = rbd_object_map_open(rbd_dev);
4084 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4088 down_read(&rbd_dev->lock_rwsem);
4089 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4090 rbd_dev->lock_state);
4091 if (__rbd_is_lock_owner(rbd_dev)) {
4092 up_read(&rbd_dev->lock_rwsem);
4096 up_read(&rbd_dev->lock_rwsem);
4097 down_write(&rbd_dev->lock_rwsem);
4098 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4099 rbd_dev->lock_state);
4100 if (__rbd_is_lock_owner(rbd_dev)) {
4101 up_write(&rbd_dev->lock_rwsem);
4105 ret = rbd_try_lock(rbd_dev);
4107 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4111 up_write(&rbd_dev->lock_rwsem);
4115 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4116 rbd_assert(list_empty(&rbd_dev->running_list));
4118 ret = rbd_post_acquire_action(rbd_dev);
4120 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4126 rbd_unlock(rbd_dev);
4130 wake_lock_waiters(rbd_dev, ret);
4131 up_write(&rbd_dev->lock_rwsem);
4137 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4141 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4143 ret = rbd_try_acquire_lock(rbd_dev);
4145 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4149 ret = rbd_request_lock(rbd_dev);
4153 rbd_warn(rbd_dev, "peer will not release lock");
4154 down_write(&rbd_dev->lock_rwsem);
4155 wake_lock_waiters(rbd_dev, ret);
4156 up_write(&rbd_dev->lock_rwsem);
4158 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4159 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4166 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4167 rbd_dev);
4168 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4173 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4175 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4176 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4178 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4184 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4185 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4186 if (list_empty(&rbd_dev->running_list))
4189 up_write(&rbd_dev->lock_rwsem);
4190 wait_for_completion(&rbd_dev->releasing_wait);
4192 down_write(&rbd_dev->lock_rwsem);
4193 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4196 rbd_assert(list_empty(&rbd_dev->running_list));
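
Read together with rbd_lock_del_request (lines 3454-3466 above), the quiesce path is a simple handshake; a consolidated sketch with the control flow reconstructed:

    /* Releaser side (rbd_quiesce_lock): stop treating the lock as held,
     * then wait for in-flight image requests to drain. */
    rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
    if (!list_empty(&rbd_dev->running_list)) {
            up_write(&rbd_dev->lock_rwsem);
            wait_for_completion(&rbd_dev->releasing_wait);
            down_write(&rbd_dev->lock_rwsem);
    }

    /* Request side (rbd_lock_del_request): the last request to leave
     * running_list wakes the releaser. */
    if (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
        list_empty(&rbd_dev->running_list))
            complete(&rbd_dev->releasing_wait);
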
4200 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4202 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4203 rbd_object_map_close(rbd_dev);
4206 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4208 rbd_assert(list_empty(&rbd_dev->running_list));
4210 rbd_pre_release_action(rbd_dev);
4211 rbd_unlock(rbd_dev);
4217 static void rbd_release_lock(struct rbd_device *rbd_dev)
4219 if (!rbd_quiesce_lock(rbd_dev))
4222 __rbd_release_lock(rbd_dev);
4231 cancel_delayed_work(&rbd_dev->lock_dwork);
4236 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4239 down_write(&rbd_dev->lock_rwsem);
4240 rbd_release_lock(rbd_dev);
4241 up_write(&rbd_dev->lock_rwsem);
4244 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4248 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4249 if (__rbd_is_lock_owner(rbd_dev))
4252 spin_lock(&rbd_dev->lock_lists_lock);
4253 have_requests = !list_empty(&rbd_dev->acquiring_list);
4254 spin_unlock(&rbd_dev->lock_lists_lock);
4255 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4256 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4257 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4261 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4271 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4274 down_write(&rbd_dev->lock_rwsem);
4275 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4276 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4277 __func__, rbd_dev, cid.gid, cid.handle);
4279 rbd_set_owner_cid(rbd_dev, &cid);
4281 downgrade_write(&rbd_dev->lock_rwsem);
4283 down_read(&rbd_dev->lock_rwsem);
4286 maybe_kick_acquire(rbd_dev);
4287 up_read(&rbd_dev->lock_rwsem);
4290 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4300 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4303 down_write(&rbd_dev->lock_rwsem);
4304 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4305 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4306 __func__, rbd_dev, cid.gid, cid.handle,
4307 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4309 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4311 downgrade_write(&rbd_dev->lock_rwsem);
4313 down_read(&rbd_dev->lock_rwsem);
4316 maybe_kick_acquire(rbd_dev);
4317 up_read(&rbd_dev->lock_rwsem);
4324 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4327 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4336 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4341 down_read(&rbd_dev->lock_rwsem);
4342 if (__rbd_is_lock_owner(rbd_dev)) {
4343 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4344 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4353 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4354 if (!rbd_dev->opts->exclusive) {
4355 dout("%s rbd_dev %p queueing unlock_work\n",
4356 __func__, rbd_dev);
4357 queue_work(rbd_dev->task_wq,
4358 &rbd_dev->unlock_work);
4367 up_read(&rbd_dev->lock_rwsem);
4371 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4374 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4390 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4391 &rbd_dev->header_oloc, notify_id, cookie,
4394 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4397 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4400 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4401 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4404 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4407 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4408 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4414 struct rbd_device *rbd_dev = arg;
4422 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4423 __func__, rbd_dev, cookie, notify_id, data_len);
4428 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4440 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4443 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4444 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4447 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4448 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4451 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4453 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4456 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4459 ret = rbd_dev_refresh(rbd_dev);
4461 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4463 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4466 if (rbd_is_lock_owner(rbd_dev))
4467 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4470 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4475 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4479 struct rbd_device *rbd_dev = arg;
4481 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4483 down_write(&rbd_dev->lock_rwsem);
4484 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4485 up_write(&rbd_dev->lock_rwsem);
4487 mutex_lock(&rbd_dev->watch_mutex);
4488 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4489 __rbd_unregister_watch(rbd_dev);
4490 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4492 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4494 mutex_unlock(&rbd_dev->watch_mutex);
4500 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4502 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4505 rbd_assert(!rbd_dev->watch_handle);
4506 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4508 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4509 &rbd_dev->header_oloc, rbd_watch_cb,
4510 rbd_watch_errcb, rbd_dev);
4514 rbd_dev->watch_handle = handle;
4521 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4523 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4526 rbd_assert(rbd_dev->watch_handle);
4527 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4529 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4531 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4533 rbd_dev->watch_handle = NULL;
4536 static int rbd_register_watch(struct rbd_device *rbd_dev)
4540 mutex_lock(&rbd_dev->watch_mutex);
4541 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4542 ret = __rbd_register_watch(rbd_dev);
4546 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4547 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4550 mutex_unlock(&rbd_dev->watch_mutex);
4554 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4556 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4558 cancel_work_sync(&rbd_dev->acquired_lock_work);
4559 cancel_work_sync(&rbd_dev->released_lock_work);
4560 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4561 cancel_work_sync(&rbd_dev->unlock_work);
4568 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4570 cancel_tasks_sync(rbd_dev);
4572 mutex_lock(&rbd_dev->watch_mutex);
4573 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4574 __rbd_unregister_watch(rbd_dev);
4575 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4576 mutex_unlock(&rbd_dev->watch_mutex);
4578 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4579 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4585 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4587 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4591 if (!rbd_quiesce_lock(rbd_dev))
4594 format_lock_cookie(rbd_dev, cookie);
4595 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4596 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4597 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4601 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4608 __rbd_release_lock(rbd_dev);
4609 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4611 __rbd_lock(rbd_dev, cookie);
4612 wake_lock_waiters(rbd_dev, 0);
4618 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4622 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4624 mutex_lock(&rbd_dev->watch_mutex);
4625 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4626 mutex_unlock(&rbd_dev->watch_mutex);
4630 ret = __rbd_register_watch(rbd_dev);
4632 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4634 queue_delayed_work(rbd_dev->task_wq,
4635 &rbd_dev->watch_dwork,
4637 mutex_unlock(&rbd_dev->watch_mutex);
4641 mutex_unlock(&rbd_dev->watch_mutex);
4642 down_write(&rbd_dev->lock_rwsem);
4643 wake_lock_waiters(rbd_dev, ret);
4644 up_write(&rbd_dev->lock_rwsem);
4648 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4649 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4650 mutex_unlock(&rbd_dev->watch_mutex);
4652 down_write(&rbd_dev->lock_rwsem);
4653 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4654 rbd_reacquire_lock(rbd_dev);
4655 up_write(&rbd_dev->lock_rwsem);
4657 ret = rbd_dev_refresh(rbd_dev);
4659 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4666 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4675 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4723 struct rbd_device *rbd_dev = img_request->rbd_dev;
4740 down_read(&rbd_dev->header_rwsem);
4741 mapping_size = rbd_dev->mapping.size;
4743 up_read(&rbd_dev->header_rwsem);
4746 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4752 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4769 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4777 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4795 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4799 rbd_img_request_init(img_req, rbd_dev, op_type);
4802 if (rbd_is_ro(rbd_dev)) {
4803 rbd_warn(rbd_dev, "%s on read-only mapping",
4807 rbd_assert(!rbd_is_snap(rbd_dev));
4815 static void rbd_free_disk(struct rbd_device *rbd_dev)
4817 put_disk(rbd_dev->disk);
4818 blk_mq_free_tag_set(&rbd_dev->tag_set);
4819 rbd_dev->disk = NULL;
4822 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4828 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4868 * return, the rbd_dev->header field will contain up-to-date
4871 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
4900 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4901 &rbd_dev->header_oloc, ondisk, size);
4906 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4912 rbd_warn(rbd_dev, "invalid header");
4928 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4933 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4937 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4938 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4939 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4941 set_capacity_and_notify(rbd_dev->disk, size);
4949 static int rbd_init_disk(struct rbd_device *rbd_dev)
4954 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4957 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4958 rbd_dev->tag_set.ops = &rbd_mq_ops;
4959 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4960 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4961 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4962 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4963 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4965 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4969 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4977 rbd_dev->dev_id);
4978 disk->major = rbd_dev->major;
4979 disk->first_minor = rbd_dev->minor;
4985 disk->private_data = rbd_dev;
4994 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4995 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
4997 if (rbd_dev->opts->trim) {
4998 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5003 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5006 rbd_dev->disk = disk;
5010 blk_mq_free_tag_set(&rbd_dev->tag_set);
5026 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5029 (unsigned long long)rbd_dev->mapping.size);
5035 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5037 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5043 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5045 if (rbd_dev->major)
5046 return sprintf(buf, "%d\n", rbd_dev->major);
5054 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5056 return sprintf(buf, "%d\n", rbd_dev->minor);
5062 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5064 ceph_client_addr(rbd_dev->rbd_client->client);
5073 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5076 ceph_client_gid(rbd_dev->rbd_client->client));
5082 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5084 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5090 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5095 return sprintf(buf, "%s\n", rbd_dev->config_info);
5101 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5103 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5109 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5112 (unsigned long long) rbd_dev->spec->pool_id);
5118 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5120 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5126 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5128 if (rbd_dev->spec->image_name)
5129 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5137 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5139 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5150 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5152 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5158 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5160 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5172 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5175 if (!rbd_dev->parent)
5178 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5179 struct rbd_spec *spec = rbd_dev->parent_spec;
5192 rbd_dev->parent_overlap);
5203 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5209 ret = rbd_dev_refresh(rbd_dev);
5313 static void rbd_dev_free(struct rbd_device *rbd_dev)
5315 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5316 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5318 ceph_oid_destroy(&rbd_dev->header_oid);
5319 ceph_oloc_destroy(&rbd_dev->header_oloc);
5320 kfree(rbd_dev->config_info);
5322 rbd_put_client(rbd_dev->rbd_client);
5323 rbd_spec_put(rbd_dev->spec);
5324 kfree(rbd_dev->opts);
5325 kfree(rbd_dev);
5330 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5331 bool need_put = !!rbd_dev->opts;
5334 destroy_workqueue(rbd_dev->task_wq);
5335 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5338 rbd_dev_free(rbd_dev);
5351 struct rbd_device *rbd_dev;
5353 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5354 if (!rbd_dev)
5357 spin_lock_init(&rbd_dev->lock);
5358 INIT_LIST_HEAD(&rbd_dev->node);
5359 init_rwsem(&rbd_dev->header_rwsem);
5361 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5362 ceph_oid_init(&rbd_dev->header_oid);
5363 rbd_dev->header_oloc.pool = spec->pool_id;
5366 rbd_dev->header_oloc.pool_ns =
5371 mutex_init(&rbd_dev->watch_mutex);
5372 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5373 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5375 init_rwsem(&rbd_dev->lock_rwsem);
5376 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5377 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5378 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5379 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5380 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5381 spin_lock_init(&rbd_dev->lock_lists_lock);
5382 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5383 INIT_LIST_HEAD(&rbd_dev->running_list);
5384 init_completion(&rbd_dev->acquire_wait);
5385 init_completion(&rbd_dev->releasing_wait);
5387 spin_lock_init(&rbd_dev->object_map_lock);
5389 rbd_dev->dev.bus = &rbd_bus_type;
5390 rbd_dev->dev.type = &rbd_device_type;
5391 rbd_dev->dev.parent = &rbd_root_dev;
5392 device_initialize(&rbd_dev->dev);
5394 return rbd_dev;
5398 * Create a mapping rbd_dev.
5404 struct rbd_device *rbd_dev;
5406 rbd_dev = __rbd_dev_create(spec);
5407 if (!rbd_dev)
5411 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5414 if (rbd_dev->dev_id < 0)
5417 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5418 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5419 rbd_dev->name);
5420 if (!rbd_dev->task_wq)
5426 rbd_dev->rbd_client = rbdc;
5427 rbd_dev->spec = spec;
5428 rbd_dev->opts = opts;
5430 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5431 return rbd_dev;
5434 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5436 rbd_dev_free(rbd_dev);
5440 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5442 if (rbd_dev)
5443 put_device(&rbd_dev->dev);
5451 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5461 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5462 &rbd_dev->header_oloc, "get_size",
5484 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5499 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5500 &rbd_dev->header_oloc, "get_object_prefix",
5523 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5540 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5541 &rbd_dev->header_oloc, "get_features",
5552 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5569 * object map, store them in rbd_dev->object_map_flags.
5574 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5576 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5580 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5581 &rbd_dev->header_oloc, "get_flags",
5589 rbd_dev->object_map_flags = le64_to_cpu(flags);
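
Most of the format 2 metadata getters in this listing follow the same shape: encode the snapshot id, invoke an "rbd" class method against the header object, and decode the little-endian reply. A sketch of that shape, using get_flags as the example (the exact rbd_obj_method_sync() prototype is not part of this listing, so the outbound/inbound argument pairs below are assumptions):

    __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
    __le64 flags;
    int ret;

    ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
                              &rbd_dev->header_oloc, "get_flags",
                              &snapid, sizeof(snapid),     /* outbound */
                              &flags, sizeof(flags));      /* inbound  */
    if (ret < 0)
            return ret;

    rbd_dev->object_map_flags = le64_to_cpu(flags);
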
5646 static int __get_parent_info(struct rbd_device *rbd_dev,
5651 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5656 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5668 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5692 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5697 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5702 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5730 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5748 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5749 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5751 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5759 static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5769 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5798 rbd_assert(!rbd_dev->parent_spec);
5799 rbd_dev->parent_spec = parent_spec;
5800 parent_spec = NULL; /* rbd_dev now owns this */
5807 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5808 rbd_dev->parent_overlap = pii.overlap;
5818 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5828 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5829 &rbd_dev->header_oloc, "get_stripe_unit_count",
5845 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5850 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5851 &rbd_dev->header_oloc, "get_data_pool",
5867 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5880 rbd_assert(!rbd_dev->spec->image_name);
5882 len = strlen(rbd_dev->spec->image_id);
5890 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5898 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5918 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5920 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5926 snap_name = rbd_dev->header.snap_names;
5936 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5938 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5947 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5965 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5967 if (rbd_dev->image_format == 1)
5968 return rbd_v1_snap_id_by_name(rbd_dev, name);
5970 return rbd_v2_snap_id_by_name(rbd_dev, name);
5976 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5978 struct rbd_spec *spec = rbd_dev->spec;
5987 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6005 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6007 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6008 struct rbd_spec *spec = rbd_dev->spec;
6022 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6031 image_name = rbd_dev_image_name(rbd_dev);
6033 rbd_warn(rbd_dev, "unable to get image name");
6037 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6055 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6080 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6081 &rbd_dev->header_oloc, "get_snapcontext",
6126 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6143 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6144 &rbd_dev->header_oloc, "get_snapshot_name",
6166 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6172 ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6179 ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6184 ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6191 static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6195 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6198 if (rbd_dev->image_format == 1)
6199 return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6201 return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6547 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6549 down_write(&rbd_dev->lock_rwsem);
6550 if (__rbd_is_lock_owner(rbd_dev))
6551 __rbd_release_lock(rbd_dev);
6552 up_write(&rbd_dev->lock_rwsem);
6560 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6564 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6565 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6568 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6572 if (rbd_is_ro(rbd_dev))
6575 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6576 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6577 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6578 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6580 ret = rbd_dev->acquire_err;
6582 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6586 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6595 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6608 * This function will record the given rbd_dev's image_id field if
6610 * errors occur a negative errno will be returned and the rbd_dev's
6613 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6627 if (rbd_dev->spec->image_id) {
6628 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6638 rbd_dev->spec->image_name);
6654 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6662 rbd_dev->image_format = 1;
6670 rbd_dev->image_format = 2;
6674 rbd_dev->spec->image_id = image_id;
6687 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6689 rbd_dev_parent_put(rbd_dev);
6690 rbd_object_map_free(rbd_dev);
6691 rbd_dev_mapping_clear(rbd_dev);
6695 rbd_image_header_cleanup(&rbd_dev->header);
6698 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6703 ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6711 ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6712 rbd_is_ro(rbd_dev), &header->features);
6719 ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6726 ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6739 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6744 if (!rbd_dev->parent_spec)
6753 parent = __rbd_dev_create(rbd_dev->parent_spec);
6763 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6764 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6772 rbd_dev->parent = parent;
6773 atomic_set(&rbd_dev->parent_ref, 1);
6777 rbd_dev_unparent(rbd_dev);
6782 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6784 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6785 rbd_free_disk(rbd_dev);
6787 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6791 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6794 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6801 ret = register_blkdev(0, rbd_dev->name);
6805 rbd_dev->major = ret;
6806 rbd_dev->minor = 0;
6808 rbd_dev->major = rbd_major;
6809 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6814 ret = rbd_init_disk(rbd_dev);
6818 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6819 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6821 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6825 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6826 up_write(&rbd_dev->header_rwsem);
6830 rbd_free_disk(rbd_dev);
6833 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6835 up_write(&rbd_dev->header_rwsem);
6839 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6841 struct rbd_spec *spec = rbd_dev->spec;
6846 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6847 if (rbd_dev->image_format == 1)
6848 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6851 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
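
The two ceph_oid_aprintf() calls above differ only in their remaining arguments, which are not part of this listing. Assuming the usual RBD naming convention (an assumption, not shown here), the header object name works out to:

    /* Assumed convention, not taken from this listing:
     *   format 1 image:  "<image_name>.rbd"
     *   format 2 image:  "rbd_header.<image_id>"
     */
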
6857 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6861 rbd_dev->spec->pool_name,
6862 rbd_dev->spec->pool_ns ?: "",
6863 rbd_dev->spec->pool_ns ? "/" : "",
6864 rbd_dev->spec->image_name);
6867 rbd_dev->spec->pool_name,
6868 rbd_dev->spec->pool_ns ?: "",
6869 rbd_dev->spec->pool_ns ? "/" : "",
6870 rbd_dev->spec->image_name,
6871 rbd_dev->spec->snap_name);
6875 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6877 if (!rbd_is_ro(rbd_dev))
6878 rbd_unregister_watch(rbd_dev);
6880 rbd_dev_unprobe(rbd_dev);
6881 rbd_dev->image_format = 0;
6882 kfree(rbd_dev->spec->image_id);
6883 rbd_dev->spec->image_id = NULL;
6895 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6897 bool need_watch = !rbd_is_ro(rbd_dev);
6902 * error, rbd_dev->spec->image_id will be filled in with
6903 * a dynamically-allocated string, and rbd_dev->image_format
6906 ret = rbd_dev_image_id(rbd_dev);
6910 ret = rbd_dev_header_name(rbd_dev);
6915 ret = rbd_register_watch(rbd_dev);
6918 rbd_print_dne(rbd_dev, false);
6924 down_write(&rbd_dev->header_rwsem);
6926 ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
6929 rbd_print_dne(rbd_dev, false);
6933 rbd_init_layout(rbd_dev);
6942 ret = rbd_spec_fill_snap_id(rbd_dev);
6944 ret = rbd_spec_fill_names(rbd_dev);
6947 rbd_print_dne(rbd_dev, true);
6951 ret = rbd_dev_mapping_set(rbd_dev);
6955 if (rbd_is_snap(rbd_dev) &&
6956 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
6957 ret = rbd_object_map_load(rbd_dev);
6962 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6963 ret = rbd_dev_setup_parent(rbd_dev);
6968 ret = rbd_dev_probe_parent(rbd_dev, depth);
6973 rbd_dev->image_format, rbd_dev->header_oid.name);
6978 up_write(&rbd_dev->header_rwsem);
6980 rbd_unregister_watch(rbd_dev);
6981 rbd_dev_unprobe(rbd_dev);
6983 rbd_dev->image_format = 0;
6984 kfree(rbd_dev->spec->image_id);
6985 rbd_dev->spec->image_id = NULL;
6989 static void rbd_dev_update_header(struct rbd_device *rbd_dev,
6992 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6993 rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
6995 if (rbd_dev->header.image_size != header->image_size) {
6996 rbd_dev->header.image_size = header->image_size;
6998 if (!rbd_is_snap(rbd_dev)) {
6999 rbd_dev->mapping.size = header->image_size;
7000 rbd_dev_update_size(rbd_dev);
7004 ceph_put_snap_context(rbd_dev->header.snapc);
7005 rbd_dev->header.snapc = header->snapc;
7008 if (rbd_dev->image_format == 1) {
7009 kfree(rbd_dev->header.snap_names);
7010 rbd_dev->header.snap_names = header->snap_names;
7013 kfree(rbd_dev->header.snap_sizes);
7014 rbd_dev->header.snap_sizes = header->snap_sizes;
7019 static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7036 if (rbd_dev->parent_overlap) {
7037 rbd_dev->parent_overlap = 0;
7038 rbd_dev_parent_put(rbd_dev);
7040 rbd_dev->disk->disk_name);
7043 rbd_assert(rbd_dev->parent_spec);
7049 if (!pii->overlap && rbd_dev->parent_overlap)
7050 rbd_warn(rbd_dev,
7052 rbd_dev->parent_overlap = pii->overlap;
7056 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7062 dout("%s rbd_dev %p\n", __func__, rbd_dev);
7064 ret = rbd_dev_header_info(rbd_dev, &header, false);
7072 if (rbd_dev->parent) {
7073 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7078 down_write(&rbd_dev->header_rwsem);
7079 rbd_dev_update_header(rbd_dev, &header);
7080 if (rbd_dev->parent)
7081 rbd_dev_update_parent(rbd_dev, &pii);
7082 up_write(&rbd_dev->header_rwsem);
7092 struct rbd_device *rbd_dev = NULL;
7125 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7126 if (!rbd_dev) {
7130 rbdc = NULL; /* rbd_dev now owns this */
7131 spec = NULL; /* rbd_dev now owns this */
7132 rbd_opts = NULL; /* rbd_dev now owns this */
7135 if (rbd_dev->opts->read_only ||
7136 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7137 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7139 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7140 if (!rbd_dev->config_info) {
7145 rc = rbd_dev_image_probe(rbd_dev, 0);
7149 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7150 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7151 rbd_dev->layout.object_size);
7152 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7155 rc = rbd_dev_device_setup(rbd_dev);
7159 rc = rbd_add_acquire_lock(rbd_dev);
7165 rc = device_add(&rbd_dev->dev);
7169 rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7174 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7177 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7178 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7179 rbd_dev->header.features);
7186 rbd_free_disk(rbd_dev);
7188 rbd_dev_image_unlock(rbd_dev);
7189 rbd_dev_device_release(rbd_dev);
7191 rbd_dev_image_release(rbd_dev);
7193 rbd_dev_destroy(rbd_dev);
7216 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7218 while (rbd_dev->parent) {
7219 struct rbd_device *first = rbd_dev;
7245 struct rbd_device *rbd_dev = NULL;
7272 list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
7273 if (rbd_dev->dev_id == dev_id) {
7279 spin_lock_irq(&rbd_dev->lock);
7280 if (rbd_dev->open_count && !force)
7283 &rbd_dev->flags))
7285 spin_unlock_irq(&rbd_dev->lock);
7296 blk_mq_freeze_queue(rbd_dev->disk->queue);
7297 blk_mark_disk_dead(rbd_dev->disk);
7300 del_gendisk(rbd_dev->disk);
7302 list_del_init(&rbd_dev->node);
7304 device_del(&rbd_dev->dev);
7306 rbd_dev_image_unlock(rbd_dev);
7307 rbd_dev_device_release(rbd_dev);
7308 rbd_dev_image_release(rbd_dev);
7309 rbd_dev_destroy(rbd_dev);