Lines Matching refs:device

104 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
158 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
162 atomic_inc(&device->local_cnt);
163 io_allowed = (device->state.disk >= mins);
165 if (atomic_dec_and_test(&device->local_cnt))
166 wake_up(&device->misc_wait);
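
The fragment above (and the get_ldev_if_state()/put_ldev() pairs further down, e.g. around source lines 877-885) shows the local_cnt reference-count gate DRBD takes before touching the backing device. A minimal userspace sketch of the same acquire/check/release idea, using C11 atomics and simplified stand-in types rather than DRBD's real structs:

#include <stdatomic.h>
#include <stdbool.h>

enum disk_state { D_DISKLESS, D_FAILED, D_NEGOTIATING, D_UP_TO_DATE };

struct dev {
	atomic_int local_cnt;      /* in-flight local references */
	enum disk_state disk;      /* current disk state */
};

/* Analogue of _get_ldev_if_state(): take a reference first, then check the
 * state; drop the reference again if the state is below the minimum. */
static bool get_ldev_if_state(struct dev *d, enum disk_state mins)
{
	atomic_fetch_add(&d->local_cnt, 1);
	bool io_allowed = d->disk >= mins;
	if (!io_allowed)
		atomic_fetch_sub(&d->local_cnt, 1);  /* real code also wakes misc_wait on 0 */
	return io_allowed;
}

static void put_ldev(struct dev *d)
{
	atomic_fetch_sub(&d->local_cnt, 1);          /* wake_up(&misc_wait) when it hits 0 */
}
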
287 * @device: DRBD device.
299 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
300 * @device: DRBD device.
302 void tl_abort_disk_io(struct drbd_device *device)
304 struct drbd_connection *connection = first_peer_device(device)->connection;
311 if (req->device != device)
491 minor = device_to_minor(peer_device->device);
537 * @device: DRBD device.
696 err = __send_command(peer_device->connection, peer_device->device->vnr,
751 if (get_ldev(peer_device->device)) {
752 dc = rcu_dereference(peer_device->device->ldev->disk_conf);
758 put_ldev(peer_device->device);
833 struct drbd_device *device = peer_device->device;
838 if (!get_ldev_if_state(device, D_NEGOTIATING))
844 put_ldev(device);
847 spin_lock_irq(&device->ldev->md.uuid_lock);
849 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
850 spin_unlock_irq(&device->ldev->md.uuid_lock);
852 device->comm_bm_set = drbd_bm_total_weight(device);
853 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
857 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
858 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
861 put_ldev(device);
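
In the send_uuids path above, the UUID array is copied into the on-wire packet while md.uuid_lock is held, so a concurrent rotation (see the drbd_uuid_set() fragments around source line 3388) cannot produce a torn snapshot. A userspace sketch of that snapshot-under-lock pattern, with hypothetical names and a mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define UI_SIZE 4   /* illustrative count of UUID slots */

struct md {
	pthread_mutex_t uuid_lock;   /* stand-in for the kernel spinlock */
	uint64_t uuid[UI_SIZE];
};

/* Snapshot the UUID set atomically with respect to writers. */
static void snapshot_uuids(struct md *md, uint64_t out[UI_SIZE])
{
	pthread_mutex_lock(&md->uuid_lock);
	memcpy(out, md->uuid, sizeof(md->uuid));
	pthread_mutex_unlock(&md->uuid_lock);
}
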
875 void drbd_print_uuids(struct drbd_device *device, const char *text)
877 if (get_ldev_if_state(device, D_NEGOTIATING)) {
878 u64 *uuid = device->ldev->md.uuid;
879 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
885 put_ldev(device);
887 drbd_info(device, "%s effective data uuid: %016llX\n",
889 (unsigned long long)device->ed_uuid);
895 struct drbd_device *device = peer_device->device;
900 D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
902 uuid = device->ldev->md.uuid[UI_BITMAP];
907 drbd_uuid_set(device, UI_BITMAP, uuid);
908 drbd_print_uuids(device, "updated sync UUID");
909 drbd_md_sync(device);
921 assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
933 q = device->rq_queue;
946 struct drbd_device *device = peer_device->device;
964 if (get_ldev_if_state(device, D_NEGOTIATING)) {
965 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
966 d_size = drbd_get_max_capacity(device->ldev);
968 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
970 q_order_type = drbd_queue_order_type(device);
973 assign_p_sizes_qlim(device, p, q);
974 put_ldev(device);
980 assign_p_sizes_qlim(device, p, NULL);
993 p->c_size = cpu_to_be64(get_capacity(device->vdisk));
1003 * @peer_device: DRBD peer device.
1014 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1020 * @peer_device: DRBD peer device.
1115 static int fill_bitmap_rle_bits(struct drbd_device *device,
1130 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1132 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1152 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1153 : _drbd_bm_find_next(device, c->bit_offset);
1173 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1182 drbd_err(device, "error while encoding bitmap: %d\n", bits);
1219 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1221 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1222 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1226 len = fill_bitmap_rle_bits(device, p,
1233 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1253 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1254 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1266 INFO_bm_xfer_stats(device, "send", c);
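
fill_bitmap_rle_bits() above alternates between "find next clear bit" and "find next set bit" (the toggle) and transmits the lengths of those runs, packed with DRBD's variable-length integer code. A simplified standalone sketch of just the toggle/run-length idea, without the vli packing:

#include <stddef.h>
#include <stdint.h>

/* Emit the lengths of alternating 0-runs and 1-runs, starting with zeros.
 * A run length of 0 can only occur for the very first run (the bitmap starts
 * with a set bit); anywhere else it would be an encoding error, which is what
 * the "unexpected zero runlength" check above guards against.
 * Returns the number of runs written. */
static size_t bitmap_to_runs(const uint8_t *bits, size_t nbits,
			     uint64_t *runs, size_t max_runs)
{
	size_t n = 0, i = 0;
	int toggle = 0;                          /* 0: counting clear bits, 1: set bits */

	while (i < nbits && n < max_runs) {
		uint64_t rl = 0;
		while (i < nbits &&
		       ((bits[i / 8] >> (i % 8)) & 1) == toggle) {
			rl++;
			i++;
		}
		runs[n++] = rl;
		toggle ^= 1;
	}
	return n;
}
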
1275 static int _drbd_send_bitmap(struct drbd_device *device)
1280 if (!expect(device->bitmap))
1283 if (get_ldev(device)) {
1284 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1285 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1286 drbd_bm_set_all(device);
1287 if (drbd_bm_write(device)) {
1291 drbd_err(device, "Failed to write bitmap to disk!\n");
1293 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1294 drbd_md_sync(device);
1297 put_ldev(device);
1301 .bm_bits = drbd_bm_bits(device),
1302 .bm_words = drbd_bm_words(device),
1306 err = send_bitmap_rle_or_plain(device, &c);
1312 int drbd_send_bitmap(struct drbd_device *device)
1314 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1319 err = !_drbd_send_bitmap(device);
1343 * @device: DRBD device.
1355 if (peer_device->device->state.conn < C_CONNECTED)
1365 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1389 * @device: DRBD device
1485 /* long elapsed = (long)(jiffies - device->last_received); */
1502 return drop_it; /* && (device->state == R_PRIMARY) */;
1545 peer_device->device->send_cnt += size >> 9;
1577 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1585 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1590 peer_device->device->send_cnt += size >> 9;
1681 struct drbd_device *device = peer_device->device;
1699 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1701 if (device->state.conn >= C_SYNC_SOURCE &&
1702 device->state.conn <= C_PAUSED_SYNC_T)
1719 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
1738 __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1743 __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1769 drbd_warn(device,
1790 struct drbd_device *device = peer_device->device;
1810 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1921 struct drbd_device *device = bdev->bd_disk->private_data;
1926 spin_lock_irqsave(&device->resource->req_lock, flags);
1927 /* to have a stable device->state.role
1930 if (device->state.role != R_PRIMARY) {
1938 device->open_cnt++;
1939 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1947 struct drbd_device *device = gd->private_data;
1949 device->open_cnt--;
1954 void drbd_queue_unplug(struct drbd_device *device)
1956 if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1957 D_ASSERT(device, device->state.role == R_PRIMARY);
1958 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1960 &first_peer_device(device)->connection->sender_work,
1961 &device->unplug_work);
1966 static void drbd_set_defaults(struct drbd_device *device)
1970 device->state = (union drbd_dev_state) {
1979 void drbd_init_set_defaults(struct drbd_device *device)
1984 drbd_set_defaults(device);
1986 atomic_set(&device->ap_bio_cnt, 0);
1987 atomic_set(&device->ap_actlog_cnt, 0);
1988 atomic_set(&device->ap_pending_cnt, 0);
1989 atomic_set(&device->rs_pending_cnt, 0);
1990 atomic_set(&device->unacked_cnt, 0);
1991 atomic_set(&device->local_cnt, 0);
1992 atomic_set(&device->pp_in_use_by_net, 0);
1993 atomic_set(&device->rs_sect_in, 0);
1994 atomic_set(&device->rs_sect_ev, 0);
1995 atomic_set(&device->ap_in_flight, 0);
1996 atomic_set(&device->md_io.in_use, 0);
1998 mutex_init(&device->own_state_mutex);
1999 device->state_mutex = &device->own_state_mutex;
2001 spin_lock_init(&device->al_lock);
2002 spin_lock_init(&device->peer_seq_lock);
2004 INIT_LIST_HEAD(&device->active_ee);
2005 INIT_LIST_HEAD(&device->sync_ee);
2006 INIT_LIST_HEAD(&device->done_ee);
2007 INIT_LIST_HEAD(&device->read_ee);
2008 INIT_LIST_HEAD(&device->net_ee);
2009 INIT_LIST_HEAD(&device->resync_reads);
2010 INIT_LIST_HEAD(&device->resync_work.list);
2011 INIT_LIST_HEAD(&device->unplug_work.list);
2012 INIT_LIST_HEAD(&device->bm_io_work.w.list);
2013 INIT_LIST_HEAD(&device->pending_master_completion[0]);
2014 INIT_LIST_HEAD(&device->pending_master_completion[1]);
2015 INIT_LIST_HEAD(&device->pending_completion[0]);
2016 INIT_LIST_HEAD(&device->pending_completion[1]);
2018 device->resync_work.cb = w_resync_timer;
2019 device->unplug_work.cb = w_send_write_hint;
2020 device->bm_io_work.w.cb = w_bitmap_io;
2022 timer_setup(&device->resync_timer, resync_timer_fn, 0);
2023 timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
2024 timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
2025 timer_setup(&device->request_timer, request_timer_fn, 0);
2027 init_waitqueue_head(&device->misc_wait);
2028 init_waitqueue_head(&device->state_wait);
2029 init_waitqueue_head(&device->ee_wait);
2030 init_waitqueue_head(&device->al_wait);
2031 init_waitqueue_head(&device->seq_wait);
2033 device->resync_wenr = LC_FREE;
2034 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2035 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2038 void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
2042 set_capacity(device->vdisk, size);
2043 revalidate_disk_size(device->vdisk, false);
2045 drbd_info(device, "size = %s (%llu KB)\n",
2049 void drbd_device_cleanup(struct drbd_device *device)
2052 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2053 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2054 first_peer_device(device)->connection->receiver.t_state);
2056 device->al_writ_cnt =
2057 device->bm_writ_cnt =
2058 device->read_cnt =
2059 device->recv_cnt =
2060 device->send_cnt =
2061 device->writ_cnt =
2062 device->p_size =
2063 device->rs_start =
2064 device->rs_total =
2065 device->rs_failed = 0;
2066 device->rs_last_events = 0;
2067 device->rs_last_sect_ev = 0;
2069 device->rs_mark_left[i] = 0;
2070 device->rs_mark_time[i] = 0;
2072 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2074 set_capacity(device->vdisk, 0);
2075 revalidate_disk_size(device->vdisk, false);
2076 if (device->bitmap) {
2078 drbd_bm_resize(device, 0, 1);
2079 drbd_bm_cleanup(device);
2082 drbd_backing_dev_free(device, device->ldev);
2083 device->ldev = NULL;
2085 clear_bit(AL_SUSPENDED, &device->flags);
2087 D_ASSERT(device, list_empty(&device->active_ee));
2088 D_ASSERT(device, list_empty(&device->sync_ee));
2089 D_ASSERT(device, list_empty(&device->done_ee));
2090 D_ASSERT(device, list_empty(&device->read_ee));
2091 D_ASSERT(device, list_empty(&device->net_ee));
2092 D_ASSERT(device, list_empty(&device->resync_reads));
2093 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2094 D_ASSERT(device, list_empty(&device->resync_work.list));
2095 D_ASSERT(device, list_empty(&device->unplug_work.list));
2097 drbd_set_defaults(device);
2112 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2201 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2205 rr = drbd_free_peer_reqs(device, &device->active_ee);
2207 drbd_err(device, "%d EEs in active list found!\n", rr);
2209 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2211 drbd_err(device, "%d EEs in sync list found!\n", rr);
2213 rr = drbd_free_peer_reqs(device, &device->read_ee);
2215 drbd_err(device, "%d EEs in read list found!\n", rr);
2217 rr = drbd_free_peer_reqs(device, &device->done_ee);
2219 drbd_err(device, "%d EEs in done list found!\n", rr);
2221 rr = drbd_free_peer_reqs(device, &device->net_ee);
2223 drbd_err(device, "%d EEs in net list found!\n", rr);
2229 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2230 struct drbd_resource *resource = device->resource;
2233 del_timer_sync(&device->request_timer);
2236 D_ASSERT(device, device->open_cnt == 0);
2240 * device (re-)configuration or state changes */
2242 drbd_backing_dev_free(device, device->ldev);
2243 device->ldev = NULL;
2245 drbd_release_all_peer_reqs(device);
2247 lc_destroy(device->act_log);
2248 lc_destroy(device->resync);
2250 kfree(device->p_uuid);
2251 /* device->p_uuid = NULL; */
2253 if (device->bitmap) /* should no longer be there. */
2254 drbd_bm_cleanup(device);
2255 __free_page(device->md_io.page);
2256 put_disk(device->vdisk);
2257 blk_cleanup_queue(device->rq_queue);
2258 kfree(device->rs_plan_s);
2263 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2267 memset(device, 0xfd, sizeof(*device));
2268 kfree(device);
2294 struct drbd_device *device = req->device;
2306 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2317 /* A single suspended or otherwise blocking device may stall
2324 * resource (replication group) or per device (minor) retry
2330 inc_ap_bio(device);
2331 __drbd_make_request(device, bio, start_jif);
2347 dec_ap_bio(req->device);
2380 struct drbd_device *device;
2399 idr_for_each_entry(&drbd_devices, device, i)
2400 drbd_delete_device(device);
2706 static int init_submitter(struct drbd_device *device)
2710 device->submit.wq =
2711 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2712 if (!device->submit.wq)
2715 INIT_WORK(&device->submit.worker, do_submit);
2716 INIT_LIST_HEAD(&device->submit.writes);
2724 struct drbd_device *device;
2732 device = minor_to_device(minor);
2733 if (device)
2737 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2738 if (!device)
2740 kref_init(&device->kref);
2743 device->resource = resource;
2744 device->minor = minor;
2745 device->vnr = vnr;
2747 drbd_init_set_defaults(device);
2752 device->rq_queue = q;
2757 device->vdisk = disk;
2766 disk->private_data = device;
2773 device->md_io.page = alloc_page(GFP_KERNEL);
2774 if (!device->md_io.page)
2777 if (drbd_bm_init(device))
2779 device->read_requests = RB_ROOT;
2780 device->write_requests = RB_ROOT;
2782 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2788 kref_get(&device->kref);
2790 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2796 kref_get(&device->kref);
2798 INIT_LIST_HEAD(&device->peer_devices);
2799 INIT_LIST_HEAD(&device->pending_bitmap_io);
2805 peer_device->device = device;
2807 list_add(&peer_device->peer_devices, &device->peer_devices);
2808 kref_get(&device->kref);
2820 if (init_submitter(device)) {
2828 device->state.conn = first_connection(resource)->cstate;
2829 if (device->state.conn == C_WF_REPORT_PARAMS) {
2830 for_each_peer_device(peer_device, device)
2834 for_each_peer_device(peer_device, device)
2836 drbd_debugfs_device_add(device);
2845 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2854 drbd_bm_cleanup(device);
2856 __free_page(device->md_io.page);
2863 kfree(device);
2867 void drbd_delete_device(struct drbd_device *device)
2869 struct drbd_resource *resource = device->resource;
2874 for_each_peer_device(peer_device, device)
2876 drbd_debugfs_device_cleanup(device);
2878 idr_remove(&connection->peer_devices, device->vnr);
2879 kref_put(&device->kref, drbd_destroy_device);
2881 idr_remove(&resource->devices, device->vnr);
2882 kref_put(&device->kref, drbd_destroy_device);
2883 idr_remove(&drbd_devices, device_to_minor(device));
2884 kref_put(&device->kref, drbd_destroy_device);
2885 del_gendisk(device->vdisk);
2887 kref_put(&device->kref, drbd_destroy_device);
2905 pr_err("unable to register block device major %d\n",
2953 pr_info("registered as block device major %d\n", DRBD_MAJOR);
2997 struct drbd_device *device = peer_device->device;
2999 kref_get(&device->kref);
3001 drbd_md_sync(device);
3002 kref_put(&device->kref, drbd_destroy_device);
3033 void drbd_md_write(struct drbd_device *device, void *b)
3041 buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
3043 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3044 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3047 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3048 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3049 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3051 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3053 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3054 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3056 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3057 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3059 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3060 sector = device->ldev->md.md_offset;
3062 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3064 drbd_err(device, "meta data update failed!\n");
3065 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3071 * @device: DRBD device.
3073 void drbd_md_sync(struct drbd_device *device)
3081 del_timer(&device->md_sync_timer);
3083 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3088 if (!get_ldev_if_state(device, D_FAILED))
3091 buffer = drbd_md_get_buffer(device, __func__);
3095 drbd_md_write(device, buffer);
3097 /* Update device->ldev->md.la_size_sect,
3099 device->ldev->md.la_size_sect = get_capacity(device->vdisk);
3101 drbd_md_put_buffer(device);
3103 put_ldev(device);
3106 static int check_activity_log_stripe_size(struct drbd_device *device,
3146 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3151 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3209 /* FIXME check for device grow with flex external meta data? */
3211 /* can the available bitmap space cover the last agreed device size? */
3218 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3233 * @device: DRBD device.
3240 * even before @bdev is assigned to @device->ldev.
3242 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3248 if (device->state.disk != D_DISKLESS)
3251 buffer = drbd_md_get_buffer(device, __func__);
3264 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3268 drbd_err(device, "Error while reading metadata.\n");
3278 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3286 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3288 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3293 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3310 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3312 if (check_offsets_and_sizes(device, bdev))
3316 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3321 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3328 spin_lock_irq(&device->resource->req_lock);
3329 if (device->state.conn < C_CONNECTED) {
3333 device->peer_max_bio_size = peer;
3335 spin_unlock_irq(&device->resource->req_lock);
3338 drbd_md_put_buffer(device);
3345 * @device: DRBD device.
3351 void drbd_md_mark_dirty(struct drbd_device *device)
3353 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3354 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
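
drbd_md_mark_dirty() only arms the 5-second md_sync_timer on the clean-to-dirty transition of MD_DIRTY, and drbd_md_sync() (source lines 3073ff) cancels the timer and clears the bit before writing, so bursts of metadata changes collapse into a single delayed write. A tiny sketch of that debounce, with C11 atomics and a hypothetical timer hook:

#include <stdatomic.h>
#include <stdbool.h>

struct meta {
	atomic_bool dirty;
};

/* Hypothetical stand-in for mod_timer(&md_sync_timer, jiffies + 5*HZ). */
static void schedule_flush_in_5s(struct meta *m) { (void)m; /* arm a 5 s timer */ }

/* Arm the delayed flush only on the clean -> dirty transition,
 * mirroring test_and_set_bit(MD_DIRTY, ...) in drbd_md_mark_dirty(). */
static void mark_dirty(struct meta *m)
{
	if (!atomic_exchange(&m->dirty, true))
		schedule_flush_in_5s(m);
}

/* The flush side clears the flag first and only writes if it was set,
 * mirroring test_and_clear_bit(MD_DIRTY, ...) in drbd_md_sync(). */
static bool flush_if_dirty(struct meta *m)
{
	return atomic_exchange(&m->dirty, false);
}
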
3357 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3362 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3365 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3368 if (device->state.role == R_PRIMARY)
3373 drbd_set_ed_uuid(device, val);
3376 device->ldev->md.uuid[idx] = val;
3377 drbd_md_mark_dirty(device);
3380 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3383 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3384 __drbd_uuid_set(device, idx, val);
3385 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3388 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3391 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3392 if (device->ldev->md.uuid[idx]) {
3393 drbd_uuid_move_history(device);
3394 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3396 __drbd_uuid_set(device, idx, val);
3397 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
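
drbd_uuid_set() above pushes the value being replaced into the history slots (via drbd_uuid_move_history()) before storing the new UUID. A condensed sketch of that rotation; the slot names are illustrative stand-ins for DRBD's UI_* enum, and the uuid_lock shown above is omitted:

#include <stdint.h>

enum { SLOT_CURRENT, SLOT_BITMAP, SLOT_HISTORY_START, SLOT_HISTORY_END, SLOT_COUNT };

/* When a slot that already holds a UUID is overwritten, age the history
 * (oldest entry falls off the end) and remember the replaced value. */
static void uuid_set(uint64_t uuid[SLOT_COUNT], int idx, uint64_t val)
{
	if (uuid[idx]) {
		for (int i = SLOT_HISTORY_END; i > SLOT_HISTORY_START; i--)
			uuid[i] = uuid[i - 1];
		uuid[SLOT_HISTORY_START] = uuid[idx];
	}
	uuid[idx] = val;
}
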
3402 * @device: DRBD device.
3407 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3414 spin_lock_irq(&device->ldev->md.uuid_lock);
3415 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3418 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3420 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3421 __drbd_uuid_set(device, UI_CURRENT, val);
3422 spin_unlock_irq(&device->ldev->md.uuid_lock);
3424 drbd_print_uuids(device, "new current UUID");
3426 drbd_md_sync(device);
3429 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3432 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3435 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3437 drbd_uuid_move_history(device);
3438 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3439 device->ldev->md.uuid[UI_BITMAP] = 0;
3441 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3443 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3445 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3447 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3449 drbd_md_mark_dirty(device);
3454 * @device: DRBD device.
3458 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3462 drbd_md_set_flag(device, MDF_FULL_SYNC);
3463 drbd_md_sync(device);
3464 drbd_bm_set_all(device);
3466 rv = drbd_bm_write(device);
3469 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3470 drbd_md_sync(device);
3478 * @device: DRBD device.
3482 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3484 drbd_resume_al(device);
3485 drbd_bm_clear_all(device);
3486 return drbd_bm_write(device);
3491 struct drbd_device *device =
3493 struct bm_io_work *work = &device->bm_io_work;
3497 int cnt = atomic_read(&device->ap_bio_cnt);
3499 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3503 if (get_ldev(device)) {
3504 drbd_bm_lock(device, work->why, work->flags);
3505 rv = work->io_fn(device);
3506 drbd_bm_unlock(device);
3507 put_ldev(device);
3510 clear_bit_unlock(BITMAP_IO, &device->flags);
3511 wake_up(&device->misc_wait);
3514 work->done(device, rv);
3516 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3525 * @device: DRBD device.
3538 void drbd_queue_bitmap_io(struct drbd_device *device,
3543 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3545 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3546 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3547 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3548 if (device->bm_io_work.why)
3549 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3550 why, device->bm_io_work.why);
3552 device->bm_io_work.io_fn = io_fn;
3553 device->bm_io_work.done = done;
3554 device->bm_io_work.why = why;
3555 device->bm_io_work.flags = flags;
3557 spin_lock_irq(&device->resource->req_lock);
3558 set_bit(BITMAP_IO, &device->flags);
3561 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3562 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3563 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3564 &device->bm_io_work.w);
3566 spin_unlock_irq(&device->resource->req_lock);
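
The tail of drbd_queue_bitmap_io() above sets BITMAP_IO first and only queues the work immediately when concurrent changes are allowed or no application bios are in flight; otherwise the completion of the last application bio is expected to queue it. A stripped-down sketch of that handoff, with hypothetical names and the req_lock serialization omitted:

#include <stdatomic.h>
#include <stdbool.h>

struct dev {
	atomic_int  ap_bio_cnt;        /* in-flight application bios */
	atomic_bool bitmap_io;         /* BITMAP_IO */
	atomic_bool bitmap_io_queued;  /* BITMAP_IO_QUEUED */
};

/* Hypothetical stand-in for handing the work item to the sender thread. */
static void queue_bitmap_work(struct dev *d) { (void)d; }

static void maybe_queue_bitmap_io(struct dev *d, bool change_allowed)
{
	atomic_store(&d->bitmap_io, true);
	if (change_allowed || atomic_load(&d->ap_bio_cnt) == 0) {
		if (!atomic_exchange(&d->bitmap_io_queued, true))
			queue_bitmap_work(d);
	}
	/* otherwise: the last completing application bio queues it (not shown) */
}
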
3571 * @device: DRBD device.
3578 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3585 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3588 drbd_suspend_io(device);
3590 drbd_bm_lock(device, why, flags);
3591 rv = io_fn(device);
3592 drbd_bm_unlock(device);
3595 drbd_resume_io(device);
3600 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3602 if ((device->ldev->md.flags & flag) != flag) {
3603 drbd_md_mark_dirty(device);
3604 device->ldev->md.flags |= flag;
3608 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3610 if ((device->ldev->md.flags & flag) != 0) {
3611 drbd_md_mark_dirty(device);
3612 device->ldev->md.flags &= ~flag;
3622 struct drbd_device *device = from_timer(device, t, md_sync_timer);
3623 drbd_device_post_work(device, MD_SYNC);
3703 * @device: device associated with the request
3707 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3714 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3722 /* Indicate to wake up device->misc_wait on progress. */
3724 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3725 spin_unlock_irq(&device->resource->req_lock);
3727 finish_wait(&device->misc_wait, &wait);
3728 spin_lock_irq(&device->resource->req_lock);
3729 if (!timeout || device->state.conn < C_CONNECTED)
3806 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3812 ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3819 drbd_warn(device, "***Simulating %s failure\n",
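
The _drbd_insert_fault() fragment shows fault injection gated by a per-minor bitmask (drbd_fault_devs); the full function additionally rolls against drbd_fault_rate, and the per-type enable mask is checked by the caller. A minimal sketch of the bitmask-plus-rate check, with the module parameters reduced to plain variables:

#include <stdbool.h>
#include <stdlib.h>

static unsigned int fault_devs;   /* bitmask of minors to inject faults on (0 = all) */
static unsigned int fault_rate;   /* percentage of operations to fail */

/* Fail this operation if the device's minor is selected by the bitmask
 * and a die roll lands below the configured rate. */
static bool insert_fault(unsigned int minor)
{
	bool selected = fault_devs == 0 ||
			((1u << minor) & fault_devs) != 0;
	return fault_rate && selected &&
	       (unsigned int)(rand() % 100) < fault_rate;
}
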