Lines Matching refs:rbd_dev

166 * identify an image.  Each rbd_dev structure includes a pointer to
174 * An rbd_dev structure contains a parent_spec pointer which is
177 * by the parent rbd_dev for its own identity (i.e., the structure
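
The two comment fragments above describe the clone relationship: an rbd_dev identifies its own image through spec and, if it is a clone, names its parent image through parent_spec; the parent's rbd_dev then uses that same spec as its own identity. An abridged sketch of the fields involved (names taken from the references below; the real struct rbd_device has many more members):

        struct rbd_device {
                struct rbd_spec *spec;          /* this image/snapshot's identity */
                struct rbd_spec *parent_spec;   /* parent image, NULL if not a clone */
                u64 parent_overlap;             /* bytes backed by the parent */
                struct rbd_device *parent;      /* parent device (see rbd_dev_probe_parent) */
                atomic_t parent_ref;            /* see rbd_dev_parent_get/put */
                /* ... */
        };
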
331 struct rbd_device *rbd_dev;
455 * Flag bits for rbd_dev->flags:
456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
457 * by rbd_dev->lock
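
The REMOVING flag and open_count are always read and written together under rbd_dev->lock, so an open cannot race with removal. Condensed from the rbd_open() fragments at source lines 664-676 below (the removing local is elided there):

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;        /* device is being removed: fail the open */
        else
                rbd_dev->open_count++;  /* otherwise count the opener */
        spin_unlock_irq(&rbd_dev->lock);
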
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
602 if (!rbd_dev)
604 else if (rbd_dev->disk)
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
637 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
639 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
641 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
664 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
667 spin_lock_irq(&rbd_dev->lock);
668 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
671 rbd_dev->open_count++;
672 spin_unlock_irq(&rbd_dev->lock);
676 (void) get_device(&rbd_dev->dev);
683 struct rbd_device *rbd_dev = disk->private_data;
686 spin_lock_irq(&rbd_dev->lock);
687 open_count_before = rbd_dev->open_count--;
688 spin_unlock_irq(&rbd_dev->lock);
691 put_device(&rbd_dev->dev);
694 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
706 if (rbd_is_ro(rbd_dev))
709 rbd_assert(!rbd_is_snap(rbd_dev));
719 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
724 ret = rbd_ioctl_set_ro(rbd_dev, arg);
1033 static void rbd_init_layout(struct rbd_device *rbd_dev)
1035 if (rbd_dev->header.stripe_unit == 0 ||
1036 rbd_dev->header.stripe_count == 0) {
1037 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
1038 rbd_dev->header.stripe_count = 1;
1041 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
1042 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
1043 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
1044 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
1045 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
1046 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
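
rbd_init_layout() falls back to trivial striping when the header specifies none: stripe_unit defaults to the object size and stripe_count to 1, and data lands in the image's own pool unless a separate data pool is configured (data_pool_id != CEPH_NOPOOL). For a plain image with the common 4 MiB objects and no data pool this works out to, in effect:

        layout.stripe_unit  = 4 << 20;         /* rbd_obj_bytes(&header) */
        layout.stripe_count = 1;
        layout.object_size  = 4 << 20;
        layout.pool_id      = spec->pool_id;   /* no separate data pool */
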
1154 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1158 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1162 snap_name = rbd_dev->header.snap_names;
1193 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1195 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1204 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1210 which = rbd_dev_snap_index(rbd_dev, snap_id);
1214 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1218 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1223 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1224 if (rbd_dev->image_format == 1)
1225 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1227 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1230 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1233 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1235 *snap_size = rbd_dev->header.image_size;
1236 } else if (rbd_dev->image_format == 1) {
1239 which = rbd_dev_snap_index(rbd_dev, snap_id);
1243 *snap_size = rbd_dev->header.snap_sizes[which];
1248 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1257 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1259 u64 snap_id = rbd_dev->spec->snap_id;
1263 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1267 rbd_dev->mapping.size = size;
1271 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1273 rbd_dev->mapping.size = 0;
1386 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1389 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1394 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1397 rbd_dev->layout.object_size;
1473 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1474 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1493 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1494 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1496 const char *name_format = rbd_dev->image_format == 1 ?
1512 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1513 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1516 rbd_dev->header.object_prefix,
1593 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1595 rbd_dev_remove_parent(rbd_dev);
1596 rbd_spec_put(rbd_dev->parent_spec);
1597 rbd_dev->parent_spec = NULL;
1598 rbd_dev->parent_overlap = 0;
1607 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1611 if (!rbd_dev->parent_spec)
1614 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1621 rbd_dev_unparent(rbd_dev);
1623 rbd_warn(rbd_dev, "parent reference underflow");
1634 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1638 if (!rbd_dev->parent_spec)
1641 if (rbd_dev->parent_overlap)
1642 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1645 rbd_warn(rbd_dev, "parent reference overflow");
1651 struct rbd_device *rbd_dev,
1656 img_request->rbd_dev = rbd_dev;
1671 struct rbd_device *rbd_dev = img_req->rbd_dev;
1673 lockdep_assert_held(&rbd_dev->header_rwsem);
1676 img_req->snap_id = rbd_dev->spec->snap_id;
1678 if (rbd_dev_parent_get(rbd_dev))
1694 rbd_dev_parent_put(img_request->rbd_dev);
1707 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1712 rbd_assert(objno < rbd_dev->object_map_size);
1717 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1722 lockdep_assert_held(&rbd_dev->object_map_lock);
1723 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1724 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1727 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1733 lockdep_assert_held(&rbd_dev->object_map_lock);
1736 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1737 p = &rbd_dev->object_map[index];
1741 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1745 spin_lock(&rbd_dev->object_map_lock);
1746 state = __rbd_object_map_get(rbd_dev, objno);
1747 spin_unlock(&rbd_dev->object_map_lock);
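
The object map stores one small state value per backing object, packed several per array entry: __rbd_object_map_index() converts an object number into an array index plus a bit shift, and the get/set helpers mask with OBJ_MASK while holding object_map_lock. A worked example, assuming the usual 2-bit states packed four to a byte (the constants themselves are not visible in this listing):

        /* assumed: OBJ_BITS == 2, OBJS_PER_BYTE == 4, OBJ_MASK == 0x3 */
        u64 objno = 5;
        u64 index = objno / 4;                   /* byte 1 of the map */
        u8 shift  = (4 - (objno % 4) - 1) * 2;   /* shift 4: 2nd slot in the byte */
        u8 state  = (object_map[index] >> shift) & 0x3;
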
1751 static bool use_object_map(struct rbd_device *rbd_dev)
1761 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1764 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1765 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1768 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1773 if (!use_object_map(rbd_dev))
1776 state = rbd_object_map_get(rbd_dev, objno);
1780 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1785 rbd_dev->spec->image_id);
1788 rbd_dev->spec->image_id, snap_id);
1791 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1793 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1802 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1805 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1811 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1815 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1822 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1830 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1833 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1841 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1849 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1851 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1855 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1857 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1860 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1888 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1890 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1901 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1903 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1904 rbd_dev->mapping.size);
1913 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1914 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1927 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1938 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1939 if (!rbd_dev->object_map) {
1944 rbd_dev->object_map_size = object_map_size;
1945 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
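
__rbd_object_map_load() sizes the map from the layout and the mapped image size, which is what makes the size check against the OSD's reply (the mismatch warning at source line 1927) possible. With 2-bit states the allocation stays small even for large images; a hedged sizing example:

        /* assumed 2 bits per object, as in mainline rbd */
        u64 num_objects      = 1ULL << 18;       /* 1 TiB mapped / 4 MiB objects */
        u64 object_map_bytes = DIV_ROUND_UP(num_objects * 2, 8);   /* 64 KiB */
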
1953 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1955 kvfree(rbd_dev->object_map);
1956 rbd_dev->object_map = NULL;
1957 rbd_dev->object_map_size = 0;
1960 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1964 ret = __rbd_object_map_load(rbd_dev);
1968 ret = rbd_dev_v2_get_flags(rbd_dev);
1970 rbd_object_map_free(rbd_dev);
1974 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1975 rbd_warn(rbd_dev, "object map is invalid");
1980 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1984 ret = rbd_object_map_lock(rbd_dev);
1988 ret = rbd_object_map_load(rbd_dev);
1990 rbd_object_map_unlock(rbd_dev);
1997 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1999 rbd_object_map_free(rbd_dev);
2000 rbd_object_map_unlock(rbd_dev);
2016 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2048 spin_lock(&rbd_dev->object_map_lock);
2049 state = __rbd_object_map_get(rbd_dev, objno);
2052 __rbd_object_map_set(rbd_dev, objno, new_state);
2053 spin_unlock(&rbd_dev->object_map_lock);
2070 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2072 u8 state = rbd_object_map_get(rbd_dev, objno);
2123 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2124 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2131 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2145 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2146 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2201 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2204 if (!rbd_dev->parent_overlap)
2207 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2209 entire ? rbd_dev->layout.object_size :
2217 rbd_dev->parent_overlap);
2292 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2295 if (!use_object_map(rbd_dev) ||
2298 rbd_dev->layout.object_size,
2299 rbd_dev->layout.object_size,
2300 rbd_dev->opts->alloc_hint_flags);
2350 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2362 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2364 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2366 rbd_dev->opts->alloc_size);
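
For discards, the request is shrunk inward to alloc_size boundaries so that only whole allocation units are affected. A worked example with a hypothetical 64 KiB alloc_size (oe_off/oe_len as in the fragments above):

        /* oe_off = 100 KiB, oe_len = 200 KiB */
        u64 off      = round_up(100 << 10, 64 << 10);            /* 128 KiB */
        u64 next_off = round_down((100 + 200) << 10, 64 << 10);  /* 256 KiB */
        /* the discard covers [128 KiB, 256 KiB); the ragged edges are kept */
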
2443 if (!use_object_map(img_req->rbd_dev) ||
2575 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2606 struct rbd_device *rbd_dev = img_req->rbd_dev;
2612 !rbd_layout_is_fancy(&rbd_dev->layout))
2627 ret = ceph_file_to_extents(&rbd_dev->layout,
2651 ret = ceph_iterate_extents(&rbd_dev->layout,
2822 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2824 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2859 struct rbd_device *parent = img_req->rbd_dev->parent;
2914 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2936 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2991 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2993 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
3013 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3016 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3174 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3179 rbd_dev->parent_overlap);
3199 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3207 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3214 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3275 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3317 rbd_warn(rbd_dev, "snap object map update failed: %d",
3349 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3352 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3364 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3387 rbd_warn(rbd_dev, "pre object map update failed: %d",
3424 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3438 rbd_warn(rbd_dev, "post object map update failed: %d",
3453 struct rbd_device *rbd_dev = img_req->rbd_dev;
3465 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3484 struct rbd_device *rbd_dev = img_req->rbd_dev;
3486 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3489 if (rbd_is_ro(rbd_dev))
3493 if (rbd_dev->opts->lock_on_read ||
3494 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3502 struct rbd_device *rbd_dev = img_req->rbd_dev;
3505 lockdep_assert_held(&rbd_dev->lock_rwsem);
3506 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3507 spin_lock(&rbd_dev->lock_lists_lock);
3510 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3512 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3513 spin_unlock(&rbd_dev->lock_lists_lock);
3519 struct rbd_device *rbd_dev = img_req->rbd_dev;
3522 lockdep_assert_held(&rbd_dev->lock_rwsem);
3523 spin_lock(&rbd_dev->lock_lists_lock);
3526 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3527 list_empty(&rbd_dev->running_list));
3529 spin_unlock(&rbd_dev->lock_lists_lock);
3531 complete(&rbd_dev->releasing_wait);
3536 struct rbd_device *rbd_dev = img_req->rbd_dev;
3544 if (rbd_dev->opts->exclusive) {
3553 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3554 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3560 struct rbd_device *rbd_dev = img_req->rbd_dev;
3565 __rbd_is_lock_owner(rbd_dev));
3569 down_read(&rbd_dev->header_rwsem);
3570 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3571 up_read(&rbd_dev->header_rwsem);
3635 struct rbd_device *rbd_dev = img_req->rbd_dev;
3639 down_read(&rbd_dev->lock_rwsem);
3645 up_read(&rbd_dev->lock_rwsem);
3654 rbd_warn(rbd_dev, "%s%s result %d",
3691 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3695 mutex_lock(&rbd_dev->watch_mutex);
3696 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3697 cid.handle = rbd_dev->watch_cookie;
3698 mutex_unlock(&rbd_dev->watch_mutex);
3705 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3708 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3709 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3711 rbd_dev->owner_cid = *cid; /* struct */
3714 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3716 mutex_lock(&rbd_dev->watch_mutex);
3717 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3718 mutex_unlock(&rbd_dev->watch_mutex);
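
format_lock_cookie() ties the exclusive lock to the current watch by embedding watch_cookie (the watch's linger id, cf. source line 4603) in the lock cookie, which is what lets rbd_reacquire_lock() swap in a fresh cookie after the watch is reestablished. Assuming RBD_LOCK_COOKIE_PREFIX is "auto", as in mainline rbd:

        char cookie[32];
        /* watch_cookie == 140399625 yields "auto 140399625" */
        sprintf(cookie, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
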
3721 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3723 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3725 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3726 strcpy(rbd_dev->lock_cookie, cookie);
3727 rbd_set_owner_cid(rbd_dev, &cid);
3728 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3734 static int rbd_lock(struct rbd_device *rbd_dev)
3736 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3740 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3741 rbd_dev->lock_cookie[0] != '\0');
3743 format_lock_cookie(rbd_dev, cookie);
3744 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3750 __rbd_lock(rbd_dev, cookie);
3757 static void rbd_unlock(struct rbd_device *rbd_dev)
3759 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3762 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3763 rbd_dev->lock_cookie[0] == '\0');
3765 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3766 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3768 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3771 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3772 rbd_dev->lock_cookie[0] = '\0';
3773 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3774 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3777 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3782 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3783 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3788 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3796 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3797 &rbd_dev->header_oloc, buf, buf_size,
3801 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3804 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3809 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3812 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3817 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3820 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3823 static int rbd_request_lock(struct rbd_device *rbd_dev)
3830 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3832 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3835 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3857 rbd_warn(rbd_dev,
3867 rbd_warn(rbd_dev,
3878 rbd_warn(rbd_dev, "no lock owners detected");
3895 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3899 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3900 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3902 cancel_delayed_work(&rbd_dev->lock_dwork);
3903 if (!completion_done(&rbd_dev->acquire_wait)) {
3904 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3905 list_empty(&rbd_dev->running_list));
3906 rbd_dev->acquire_err = result;
3907 complete_all(&rbd_dev->acquire_wait);
3911 while (!list_empty(&rbd_dev->acquiring_list)) {
3912 img_req = list_first_entry(&rbd_dev->acquiring_list,
3918 &rbd_dev->running_list);
3941 static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3943 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3950 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3952 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3953 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3956 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3961 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3967 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3973 rbd_warn(rbd_dev, "shared lock type detected");
3980 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3995 static int find_watcher(struct rbd_device *rbd_dev,
3998 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4005 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4006 &rbd_dev->header_oloc, &watchers,
4009 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
4027 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4028 rbd_dev, cid.gid, cid.handle);
4029 rbd_set_owner_cid(rbd_dev, &cid);
4035 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4045 static int rbd_try_lock(struct rbd_device *rbd_dev)
4047 struct ceph_client *client = rbd_dev->rbd_client->client;
4054 ret = rbd_lock(rbd_dev);
4058 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4063 locker = get_lock_owner_info(rbd_dev);
4072 ret = find_watcher(rbd_dev, locker);
4076 refreshed_locker = get_lock_owner_info(rbd_dev);
4086 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4092 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4097 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4098 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4101 rbd_warn(rbd_dev, "failed to break header lock: %d",
4117 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4121 ret = rbd_dev_refresh(rbd_dev);
4125 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4126 ret = rbd_object_map_open(rbd_dev);
4140 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4144 down_read(&rbd_dev->lock_rwsem);
4145 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4146 rbd_dev->lock_state);
4147 if (__rbd_is_lock_owner(rbd_dev)) {
4148 up_read(&rbd_dev->lock_rwsem);
4152 up_read(&rbd_dev->lock_rwsem);
4153 down_write(&rbd_dev->lock_rwsem);
4154 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4155 rbd_dev->lock_state);
4156 if (__rbd_is_lock_owner(rbd_dev)) {
4157 up_write(&rbd_dev->lock_rwsem);
4161 ret = rbd_try_lock(rbd_dev);
4163 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4167 up_write(&rbd_dev->lock_rwsem);
4171 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4172 rbd_assert(list_empty(&rbd_dev->running_list));
4174 ret = rbd_post_acquire_action(rbd_dev);
4176 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4182 rbd_unlock(rbd_dev);
4186 wake_lock_waiters(rbd_dev, ret);
4187 up_write(&rbd_dev->lock_rwsem);
4193 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4197 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4199 ret = rbd_try_acquire_lock(rbd_dev);
4201 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4205 ret = rbd_request_lock(rbd_dev);
4209 rbd_warn(rbd_dev, "peer will not release lock");
4210 down_write(&rbd_dev->lock_rwsem);
4211 wake_lock_waiters(rbd_dev, ret);
4212 up_write(&rbd_dev->lock_rwsem);
4214 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4215 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4222 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4223 rbd_dev);
4224 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4229 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4231 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4232 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4234 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4240 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4241 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4242 if (list_empty(&rbd_dev->running_list))
4245 up_write(&rbd_dev->lock_rwsem);
4246 wait_for_completion(&rbd_dev->releasing_wait);
4248 down_write(&rbd_dev->lock_rwsem);
4249 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4252 rbd_assert(list_empty(&rbd_dev->running_list));
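
rbd_quiesce_lock() and the request completion path form a small handshake: the releaser flips lock_state to RELEASING, drops lock_rwsem, and sleeps on releasing_wait; whichever request empties running_list (source lines 3526-3531 above) sees the RELEASING state and completes the waiter. Condensed from the fragments:

        /* releasing side (rbd_quiesce_lock) */
        rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
        if (!list_empty(&rbd_dev->running_list)) {
                up_write(&rbd_dev->lock_rwsem);
                wait_for_completion(&rbd_dev->releasing_wait);  /* woken when the   */
                down_write(&rbd_dev->lock_rwsem);               /* last req retires */
        }
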
4256 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4258 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4259 rbd_object_map_close(rbd_dev);
4262 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4264 rbd_assert(list_empty(&rbd_dev->running_list));
4266 rbd_pre_release_action(rbd_dev);
4267 rbd_unlock(rbd_dev);
4273 static void rbd_release_lock(struct rbd_device *rbd_dev)
4275 if (!rbd_quiesce_lock(rbd_dev))
4278 __rbd_release_lock(rbd_dev);
4287 cancel_delayed_work(&rbd_dev->lock_dwork);
4292 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4295 down_write(&rbd_dev->lock_rwsem);
4296 rbd_release_lock(rbd_dev);
4297 up_write(&rbd_dev->lock_rwsem);
4300 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4304 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4305 if (__rbd_is_lock_owner(rbd_dev))
4308 spin_lock(&rbd_dev->lock_lists_lock);
4309 have_requests = !list_empty(&rbd_dev->acquiring_list);
4310 spin_unlock(&rbd_dev->lock_lists_lock);
4311 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4312 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4313 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4317 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4327 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4330 down_write(&rbd_dev->lock_rwsem);
4331 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4332 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4333 __func__, rbd_dev, cid.gid, cid.handle);
4335 rbd_set_owner_cid(rbd_dev, &cid);
4337 downgrade_write(&rbd_dev->lock_rwsem);
4339 down_read(&rbd_dev->lock_rwsem);
4342 maybe_kick_acquire(rbd_dev);
4343 up_read(&rbd_dev->lock_rwsem);
4346 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4356 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4359 down_write(&rbd_dev->lock_rwsem);
4360 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4361 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4362 __func__, rbd_dev, cid.gid, cid.handle,
4363 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4365 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4367 downgrade_write(&rbd_dev->lock_rwsem);
4369 down_read(&rbd_dev->lock_rwsem);
4372 maybe_kick_acquire(rbd_dev);
4373 up_read(&rbd_dev->lock_rwsem);
4380 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4383 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4392 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4397 down_read(&rbd_dev->lock_rwsem);
4398 if (__rbd_is_lock_owner(rbd_dev)) {
4399 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4400 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4409 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4410 if (!rbd_dev->opts->exclusive) {
4411 dout("%s rbd_dev %p queueing unlock_work\n",
4412 __func__, rbd_dev);
4413 queue_work(rbd_dev->task_wq,
4414 &rbd_dev->unlock_work);
4423 up_read(&rbd_dev->lock_rwsem);
4427 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4430 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4446 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4447 &rbd_dev->header_oloc, notify_id, cookie,
4450 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4453 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4456 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4457 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4460 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4463 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4464 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4470 struct rbd_device *rbd_dev = arg;
4478 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4479 __func__, rbd_dev, cookie, notify_id, data_len);
4484 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4496 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4499 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4500 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4503 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4504 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4507 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4509 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4512 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4515 ret = rbd_dev_refresh(rbd_dev);
4517 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4519 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4522 if (rbd_is_lock_owner(rbd_dev))
4523 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4526 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4531 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4535 struct rbd_device *rbd_dev = arg;
4537 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4539 down_write(&rbd_dev->lock_rwsem);
4540 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4541 up_write(&rbd_dev->lock_rwsem);
4543 mutex_lock(&rbd_dev->watch_mutex);
4544 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4545 __rbd_unregister_watch(rbd_dev);
4546 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4548 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4550 mutex_unlock(&rbd_dev->watch_mutex);
4556 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4558 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4561 rbd_assert(!rbd_dev->watch_handle);
4562 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4564 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4565 &rbd_dev->header_oloc, rbd_watch_cb,
4566 rbd_watch_errcb, rbd_dev);
4570 rbd_dev->watch_handle = handle;
4577 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4579 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4582 rbd_assert(rbd_dev->watch_handle);
4583 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4585 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4587 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4589 rbd_dev->watch_handle = NULL;
4592 static int rbd_register_watch(struct rbd_device *rbd_dev)
4596 mutex_lock(&rbd_dev->watch_mutex);
4597 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4598 ret = __rbd_register_watch(rbd_dev);
4602 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4603 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4606 mutex_unlock(&rbd_dev->watch_mutex);
4610 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4612 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4614 cancel_work_sync(&rbd_dev->acquired_lock_work);
4615 cancel_work_sync(&rbd_dev->released_lock_work);
4616 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4617 cancel_work_sync(&rbd_dev->unlock_work);
4624 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4626 cancel_tasks_sync(rbd_dev);
4628 mutex_lock(&rbd_dev->watch_mutex);
4629 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4630 __rbd_unregister_watch(rbd_dev);
4631 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4632 mutex_unlock(&rbd_dev->watch_mutex);
4634 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4635 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4641 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4647 if (!rbd_quiesce_lock(rbd_dev))
4650 format_lock_cookie(rbd_dev, cookie);
4651 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4652 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4653 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4657 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4664 __rbd_release_lock(rbd_dev);
4665 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4667 __rbd_lock(rbd_dev, cookie);
4668 wake_lock_waiters(rbd_dev, 0);
4674 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4678 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4680 mutex_lock(&rbd_dev->watch_mutex);
4681 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4682 mutex_unlock(&rbd_dev->watch_mutex);
4686 ret = __rbd_register_watch(rbd_dev);
4688 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4690 queue_delayed_work(rbd_dev->task_wq,
4691 &rbd_dev->watch_dwork,
4693 mutex_unlock(&rbd_dev->watch_mutex);
4697 mutex_unlock(&rbd_dev->watch_mutex);
4698 down_write(&rbd_dev->lock_rwsem);
4699 wake_lock_waiters(rbd_dev, ret);
4700 up_write(&rbd_dev->lock_rwsem);
4704 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4705 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4706 mutex_unlock(&rbd_dev->watch_mutex);
4708 down_write(&rbd_dev->lock_rwsem);
4709 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4710 rbd_reacquire_lock(rbd_dev);
4711 up_write(&rbd_dev->lock_rwsem);
4713 ret = rbd_dev_refresh(rbd_dev);
4715 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4722 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4731 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4779 struct rbd_device *rbd_dev = img_request->rbd_dev;
4796 down_read(&rbd_dev->header_rwsem);
4797 mapping_size = rbd_dev->mapping.size;
4799 up_read(&rbd_dev->header_rwsem);
4802 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4808 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4825 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4833 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4851 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4855 rbd_img_request_init(img_req, rbd_dev, op_type);
4858 if (rbd_is_ro(rbd_dev)) {
4859 rbd_warn(rbd_dev, "%s on read-only mapping",
4863 rbd_assert(!rbd_is_snap(rbd_dev));
4871 static void rbd_free_disk(struct rbd_device *rbd_dev)
4873 blk_cleanup_queue(rbd_dev->disk->queue);
4874 blk_mq_free_tag_set(&rbd_dev->tag_set);
4875 put_disk(rbd_dev->disk);
4876 rbd_dev->disk = NULL;
4879 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4885 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4925 * return, the rbd_dev->header field will contain up-to-date
4928 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
4957 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4958 &rbd_dev->header_oloc, ondisk, size);
4963 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4969 rbd_warn(rbd_dev, "invalid header");
4985 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4990 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4994 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4995 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4996 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4998 set_capacity(rbd_dev->disk, size);
4999 revalidate_disk_size(rbd_dev->disk, true);
5007 static int rbd_init_disk(struct rbd_device *rbd_dev)
5012 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5023 rbd_dev->dev_id);
5024 disk->major = rbd_dev->major;
5025 disk->first_minor = rbd_dev->minor;
5029 disk->private_data = rbd_dev;
5031 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5032 rbd_dev->tag_set.ops = &rbd_mq_ops;
5033 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5034 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5035 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5036 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
5037 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
5039 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5043 q = blk_mq_init_queue(&rbd_dev->tag_set);
5056 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5057 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5059 if (rbd_dev->opts->trim) {
5061 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5066 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5075 q->queuedata = rbd_dev;
5077 rbd_dev->disk = disk;
5081 blk_mq_free_tag_set(&rbd_dev->tag_set);
5099 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5102 (unsigned long long)rbd_dev->mapping.size);
5108 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5110 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5116 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5118 if (rbd_dev->major)
5119 return sprintf(buf, "%d\n", rbd_dev->major);
5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5129 return sprintf(buf, "%d\n", rbd_dev->minor);
5135 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5137 ceph_client_addr(rbd_dev->rbd_client->client);
5146 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5149 ceph_client_gid(rbd_dev->rbd_client->client));
5155 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5157 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5163 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5168 return sprintf(buf, "%s\n", rbd_dev->config_info);
5174 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5176 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5185 (unsigned long long) rbd_dev->spec->pool_id);
5191 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5193 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5201 if (rbd_dev->spec->image_name)
5202 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5210 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5212 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5223 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5225 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5231 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5233 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5245 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5248 if (!rbd_dev->parent)
5251 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5252 struct rbd_spec *spec = rbd_dev->parent_spec;
5265 rbd_dev->parent_overlap);
5276 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5282 ret = rbd_dev_refresh(rbd_dev);
5386 static void rbd_dev_free(struct rbd_device *rbd_dev)
5388 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5389 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5391 ceph_oid_destroy(&rbd_dev->header_oid);
5392 ceph_oloc_destroy(&rbd_dev->header_oloc);
5393 kfree(rbd_dev->config_info);
5395 rbd_put_client(rbd_dev->rbd_client);
5396 rbd_spec_put(rbd_dev->spec);
5397 kfree(rbd_dev->opts);
5398 kfree(rbd_dev);
5403 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5404 bool need_put = !!rbd_dev->opts;
5407 destroy_workqueue(rbd_dev->task_wq);
5408 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5411 rbd_dev_free(rbd_dev);
5424 struct rbd_device *rbd_dev;
5426 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5427 if (!rbd_dev)
5430 spin_lock_init(&rbd_dev->lock);
5431 INIT_LIST_HEAD(&rbd_dev->node);
5432 init_rwsem(&rbd_dev->header_rwsem);
5434 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5435 ceph_oid_init(&rbd_dev->header_oid);
5436 rbd_dev->header_oloc.pool = spec->pool_id;
5439 rbd_dev->header_oloc.pool_ns =
5444 mutex_init(&rbd_dev->watch_mutex);
5445 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5446 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5448 init_rwsem(&rbd_dev->lock_rwsem);
5449 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5450 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5451 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5452 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5453 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5454 spin_lock_init(&rbd_dev->lock_lists_lock);
5455 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5456 INIT_LIST_HEAD(&rbd_dev->running_list);
5457 init_completion(&rbd_dev->acquire_wait);
5458 init_completion(&rbd_dev->releasing_wait);
5460 spin_lock_init(&rbd_dev->object_map_lock);
5462 rbd_dev->dev.bus = &rbd_bus_type;
5463 rbd_dev->dev.type = &rbd_device_type;
5464 rbd_dev->dev.parent = &rbd_root_dev;
5465 device_initialize(&rbd_dev->dev);
5467 return rbd_dev;
5471 * Create a mapping rbd_dev.
5477 struct rbd_device *rbd_dev;
5479 rbd_dev = __rbd_dev_create(spec);
5480 if (!rbd_dev)
5484 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5487 if (rbd_dev->dev_id < 0)
5490 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5491 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5492 rbd_dev->name);
5493 if (!rbd_dev->task_wq)
5499 rbd_dev->rbd_client = rbdc;
5500 rbd_dev->spec = spec;
5501 rbd_dev->opts = opts;
5503 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5504 return rbd_dev;
5507 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5509 rbd_dev_free(rbd_dev);
5513 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5515 if (rbd_dev)
5516 put_device(&rbd_dev->dev);
5524 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5534 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5535 &rbd_dev->header_oloc, "get_size",
5557 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5572 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5573 &rbd_dev->header_oloc, "get_object_prefix",
5596 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5613 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5614 &rbd_dev->header_oloc, "get_features",
5625 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5642 * object map, store them in rbd_dev->object_map_flags.
5647 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5649 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5653 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5654 &rbd_dev->header_oloc, "get_flags",
5662 rbd_dev->object_map_flags = le64_to_cpu(flags);
5719 static int __get_parent_info(struct rbd_device *rbd_dev,
5724 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5729 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5741 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5765 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5770 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5775 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5803 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5821 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5822 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5824 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5832 static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5842 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5871 rbd_assert(!rbd_dev->parent_spec);
5872 rbd_dev->parent_spec = parent_spec;
5873 parent_spec = NULL; /* rbd_dev now owns this */
5880 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5881 rbd_dev->parent_overlap = pii.overlap;
5891 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5901 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5902 &rbd_dev->header_oloc, "get_stripe_unit_count",
5918 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5923 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5924 &rbd_dev->header_oloc, "get_data_pool",
5940 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5953 rbd_assert(!rbd_dev->spec->image_name);
5955 len = strlen(rbd_dev->spec->image_id);
5963 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5971 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5991 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5993 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5999 snap_name = rbd_dev->header.snap_names;
6009 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6011 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6020 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6038 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6040 if (rbd_dev->image_format == 1)
6041 return rbd_v1_snap_id_by_name(rbd_dev, name);
6043 return rbd_v2_snap_id_by_name(rbd_dev, name);
6049 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6051 struct rbd_spec *spec = rbd_dev->spec;
6060 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6078 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6080 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6081 struct rbd_spec *spec = rbd_dev->spec;
6095 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6104 image_name = rbd_dev_image_name(rbd_dev);
6106 rbd_warn(rbd_dev, "unable to get image name");
6110 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6128 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6153 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6154 &rbd_dev->header_oloc, "get_snapcontext",
6199 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6216 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6217 &rbd_dev->header_oloc, "get_snapshot_name",
6239 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6245 ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6252 ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6257 ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6264 static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6268 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6271 if (rbd_dev->image_format == 1)
6272 return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6274 return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6619 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6621 down_write(&rbd_dev->lock_rwsem);
6622 if (__rbd_is_lock_owner(rbd_dev))
6623 __rbd_release_lock(rbd_dev);
6624 up_write(&rbd_dev->lock_rwsem);
6632 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6636 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6637 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6640 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6644 if (rbd_is_ro(rbd_dev))
6647 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6648 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6649 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6650 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6652 ret = rbd_dev->acquire_err;
6654 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6658 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6667 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
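
wait_for_completion_killable_timeout() returns a positive value when completed, 0 on timeout, and a negative value if killed, which is why rbd_add_acquire_lock() dispatches on the sign of ret: only a positive return means lock_dwork actually finished and acquire_err is meaningful. A hedged sketch of that dispatch (the timeout/killed error mapping is condensed):

        ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
                        ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
        if (ret > 0)
                ret = rbd_dev->acquire_err;   /* woken: report the acquire result */
        else
                cancel_delayed_work_sync(&rbd_dev->lock_dwork);  /* timed out or killed */
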
6680 * This function will record the given rbd_dev's image_id field if
6682 * errors occur a negative errno will be returned and the rbd_dev's
6685 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6699 if (rbd_dev->spec->image_id) {
6700 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6710 rbd_dev->spec->image_name);
6726 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6734 rbd_dev->image_format = 1;
6742 rbd_dev->image_format = 2;
6746 rbd_dev->spec->image_id = image_id;
6759 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6761 rbd_dev_parent_put(rbd_dev);
6762 rbd_object_map_free(rbd_dev);
6763 rbd_dev_mapping_clear(rbd_dev);
6767 rbd_image_header_cleanup(&rbd_dev->header);
6770 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6775 ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6783 ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6784 rbd_is_ro(rbd_dev), &header->features);
6791 ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6798 ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6811 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6816 if (!rbd_dev->parent_spec)
6825 parent = __rbd_dev_create(rbd_dev->parent_spec);
6835 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6836 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6844 rbd_dev->parent = parent;
6845 atomic_set(&rbd_dev->parent_ref, 1);
6849 rbd_dev_unparent(rbd_dev);
6854 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6856 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6857 rbd_free_disk(rbd_dev);
6859 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6863 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6866 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6873 ret = register_blkdev(0, rbd_dev->name);
6877 rbd_dev->major = ret;
6878 rbd_dev->minor = 0;
6880 rbd_dev->major = rbd_major;
6881 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6886 ret = rbd_init_disk(rbd_dev);
6890 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6891 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6893 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6897 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6898 up_write(&rbd_dev->header_rwsem);
6902 rbd_free_disk(rbd_dev);
6905 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6907 up_write(&rbd_dev->header_rwsem);
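
The comment at source line 6863 records the locking contract: rbd_dev_device_setup() is entered with header_rwsem held for write (taken earlier in rbd_dev_image_probe(), source line 6996) and releases it on both the success path (6898) and the error path (6907). Seen from the caller in do_rbd_add(), roughly:

        /* header_rwsem is still write-locked from rbd_dev_image_probe() */
        rc = rbd_dev_device_setup(rbd_dev);   /* drops header_rwsem on all paths */
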
6911 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6913 struct rbd_spec *spec = rbd_dev->spec;
6918 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6919 if (rbd_dev->image_format == 1)
6920 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6923 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6929 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6933 rbd_dev->spec->pool_name,
6934 rbd_dev->spec->pool_ns ?: "",
6935 rbd_dev->spec->pool_ns ? "/" : "",
6936 rbd_dev->spec->image_name);
6939 rbd_dev->spec->pool_name,
6940 rbd_dev->spec->pool_ns ?: "",
6941 rbd_dev->spec->pool_ns ? "/" : "",
6942 rbd_dev->spec->image_name,
6943 rbd_dev->spec->snap_name);
6947 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6949 if (!rbd_is_ro(rbd_dev))
6950 rbd_unregister_watch(rbd_dev);
6952 rbd_dev_unprobe(rbd_dev);
6953 rbd_dev->image_format = 0;
6954 kfree(rbd_dev->spec->image_id);
6955 rbd_dev->spec->image_id = NULL;
6967 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6969 bool need_watch = !rbd_is_ro(rbd_dev);
6974 * error, rbd_dev->spec->image_id will be filled in with
6975 * a dynamically-allocated string, and rbd_dev->image_format
6978 ret = rbd_dev_image_id(rbd_dev);
6982 ret = rbd_dev_header_name(rbd_dev);
6987 ret = rbd_register_watch(rbd_dev);
6990 rbd_print_dne(rbd_dev, false);
6996 down_write(&rbd_dev->header_rwsem);
6998 ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
7001 rbd_print_dne(rbd_dev, false);
7005 rbd_init_layout(rbd_dev);
7014 ret = rbd_spec_fill_snap_id(rbd_dev);
7016 ret = rbd_spec_fill_names(rbd_dev);
7019 rbd_print_dne(rbd_dev, true);
7023 ret = rbd_dev_mapping_set(rbd_dev);
7027 if (rbd_is_snap(rbd_dev) &&
7028 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7029 ret = rbd_object_map_load(rbd_dev);
7034 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7035 ret = rbd_dev_setup_parent(rbd_dev);
7040 ret = rbd_dev_probe_parent(rbd_dev, depth);
7045 rbd_dev->image_format, rbd_dev->header_oid.name);
7050 up_write(&rbd_dev->header_rwsem);
7052 rbd_unregister_watch(rbd_dev);
7053 rbd_dev_unprobe(rbd_dev);
7055 rbd_dev->image_format = 0;
7056 kfree(rbd_dev->spec->image_id);
7057 rbd_dev->spec->image_id = NULL;
7061 static void rbd_dev_update_header(struct rbd_device *rbd_dev,
7064 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
7065 rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
7067 if (rbd_dev->header.image_size != header->image_size) {
7068 rbd_dev->header.image_size = header->image_size;
7070 if (!rbd_is_snap(rbd_dev)) {
7071 rbd_dev->mapping.size = header->image_size;
7072 rbd_dev_update_size(rbd_dev);
7076 ceph_put_snap_context(rbd_dev->header.snapc);
7077 rbd_dev->header.snapc = header->snapc;
7080 if (rbd_dev->image_format == 1) {
7081 kfree(rbd_dev->header.snap_names);
7082 rbd_dev->header.snap_names = header->snap_names;
7085 kfree(rbd_dev->header.snap_sizes);
7086 rbd_dev->header.snap_sizes = header->snap_sizes;
7091 static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7108 if (rbd_dev->parent_overlap) {
7109 rbd_dev->parent_overlap = 0;
7110 rbd_dev_parent_put(rbd_dev);
7112 rbd_dev->disk->disk_name);
7115 rbd_assert(rbd_dev->parent_spec);
7121 if (!pii->overlap && rbd_dev->parent_overlap)
7122 rbd_warn(rbd_dev,
7124 rbd_dev->parent_overlap = pii->overlap;
7128 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7134 dout("%s rbd_dev %p\n", __func__, rbd_dev);
7136 ret = rbd_dev_header_info(rbd_dev, &header, false);
7144 if (rbd_dev->parent) {
7145 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7150 down_write(&rbd_dev->header_rwsem);
7151 rbd_dev_update_header(rbd_dev, &header);
7152 if (rbd_dev->parent)
7153 rbd_dev_update_parent(rbd_dev, &pii);
7154 up_write(&rbd_dev->header_rwsem);
7166 struct rbd_device *rbd_dev = NULL;
7199 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7200 if (!rbd_dev) {
7204 rbdc = NULL; /* rbd_dev now owns this */
7205 spec = NULL; /* rbd_dev now owns this */
7206 rbd_opts = NULL; /* rbd_dev now owns this */
7209 if (rbd_dev->opts->read_only ||
7210 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7211 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7213 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7214 if (!rbd_dev->config_info) {
7219 rc = rbd_dev_image_probe(rbd_dev, 0);
7223 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7224 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7225 rbd_dev->layout.object_size);
7226 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7229 rc = rbd_dev_device_setup(rbd_dev);
7233 rc = rbd_add_acquire_lock(rbd_dev);
7239 rc = device_add(&rbd_dev->dev);
7243 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7245 blk_put_queue(rbd_dev->disk->queue);
7248 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7251 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7252 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7253 rbd_dev->header.features);
7260 rbd_dev_image_unlock(rbd_dev);
7261 rbd_dev_device_release(rbd_dev);
7263 rbd_dev_image_release(rbd_dev);
7265 rbd_dev_destroy(rbd_dev);
7288 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7290 while (rbd_dev->parent) {
7291 struct rbd_device *first = rbd_dev;
7319 struct rbd_device *rbd_dev = NULL;
7348 rbd_dev = list_entry(tmp, struct rbd_device, node);
7349 if (rbd_dev->dev_id == dev_id) {
7355 spin_lock_irq(&rbd_dev->lock);
7356 if (rbd_dev->open_count && !force)
7359 &rbd_dev->flags))
7361 spin_unlock_irq(&rbd_dev->lock);
7372 blk_mq_freeze_queue(rbd_dev->disk->queue);
7373 blk_set_queue_dying(rbd_dev->disk->queue);
7376 del_gendisk(rbd_dev->disk);
7378 list_del_init(&rbd_dev->node);
7380 device_del(&rbd_dev->dev);
7382 rbd_dev_image_unlock(rbd_dev);
7383 rbd_dev_device_release(rbd_dev);
7384 rbd_dev_image_release(rbd_dev);
7385 rbd_dev_destroy(rbd_dev);