Lines matching refs:device (DRBD block driver source)

103 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
144 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
148 atomic_inc(&device->local_cnt);
149 io_allowed = (device->state.disk >= mins);
151 if (atomic_dec_and_test(&device->local_cnt))
152 wake_up(&device->misc_wait);
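
The four hits above (lines 144-152) show DRBD's local-device reference discipline: bump local_cnt first, then check the disk state, and on the release side wake misc_wait when the last reference drops. A minimal userspace sketch of the same increment-check-release pattern (illustrative names, not the driver's API):

#include <stdatomic.h>
#include <stdbool.h>

struct dev {
    atomic_int local_cnt;   /* users currently holding the local disk */
    int disk_state;         /* ordered states, e.g. NEGOTIATING < UP_TO_DATE */
};

/* Take a reference first, then test the state; if the state is too
 * low, drop the reference again and report failure. */
static bool get_if_state(struct dev *d, int min_state)
{
    atomic_fetch_add(&d->local_cnt, 1);
    if (d->disk_state >= min_state)
        return true;                    /* caller must pair with a put() */
    if (atomic_fetch_sub(&d->local_cnt, 1) == 1) {
        /* last reference gone: the driver wakes device->misc_wait here */
    }
    return false;
}
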
236 peer_device = conn_peer_device(connection, req->device->vnr);
264 peer_device = conn_peer_device(connection, req->device->vnr);
290 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
291 * @device: DRBD device.
293 void tl_abort_disk_io(struct drbd_device *device)
295 struct drbd_connection *connection = first_peer_device(device)->connection;
302 if (req->device != device)
482 minor = device_to_minor(peer_device->device);
686 err = __send_command(peer_device->connection, peer_device->device->vnr,
742 if (get_ldev(peer_device->device)) {
743 dc = rcu_dereference(peer_device->device->ldev->disk_conf);
749 put_ldev(peer_device->device);
824 struct drbd_device *device = peer_device->device;
829 if (!get_ldev_if_state(device, D_NEGOTIATING))
835 put_ldev(device);
838 spin_lock_irq(&device->ldev->md.uuid_lock);
840 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
841 spin_unlock_irq(&device->ldev->md.uuid_lock);
843 device->comm_bm_set = drbd_bm_total_weight(device);
844 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
848 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
849 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
852 put_ldev(device);
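
Lines 824-852 (sending the UUID set to the peer) copy md.uuid[] into the packet under md.uuid_lock, converting each value with cpu_to_be64() so the wire format is endian-independent. A hedged userspace sketch of that lock-copy-convert step (array size and lock type are assumptions):

#include <endian.h>     /* htobe64(); the kernel uses cpu_to_be64() */
#include <pthread.h>
#include <stdint.h>

enum { UI_SIZE = 4 };   /* assumed: current, bitmap, two history UUIDs */

/* Copy and byte-swap while holding the lock, so the peer never sees
 * a half-updated UUID set. */
static void pack_uuids(pthread_mutex_t *uuid_lock,
                       const uint64_t md_uuid[UI_SIZE],
                       uint64_t wire[UI_SIZE])
{
    pthread_mutex_lock(uuid_lock);
    for (int i = 0; i < UI_SIZE; i++)
        wire[i] = htobe64(md_uuid[i]);
    pthread_mutex_unlock(uuid_lock);
}
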
866 void drbd_print_uuids(struct drbd_device *device, const char *text)
868 if (get_ldev_if_state(device, D_NEGOTIATING)) {
869 u64 *uuid = device->ldev->md.uuid;
870 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
876 put_ldev(device);
878 drbd_info(device, "%s effective data uuid: %016llX\n",
880 (unsigned long long)device->ed_uuid);
886 struct drbd_device *device = peer_device->device;
891 D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
893 uuid = device->ldev->md.uuid[UI_BITMAP];
898 drbd_uuid_set(device, UI_BITMAP, uuid);
899 drbd_print_uuids(device, "updated sync UUID");
900 drbd_md_sync(device);
912 struct drbd_device *device = peer_device->device;
930 if (get_ldev_if_state(device, D_NEGOTIATING)) {
931 struct block_device *bdev = device->ldev->backing_bdev;
934 d_size = drbd_get_max_capacity(device->ldev);
936 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
938 q_order_type = drbd_queue_order_type(device);
950 put_ldev(device);
952 struct request_queue *q = device->rq_queue;
979 p->c_size = cpu_to_be64(get_capacity(device->vdisk));
989 * @peer_device: DRBD peer device.
1000 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1006 * @peer_device: DRBD peer device.
1101 static int fill_bitmap_rle_bits(struct drbd_device *device,
1116 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1118 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1138 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1139 : _drbd_bm_find_next(device, c->bit_offset);
1159 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1168 drbd_err(device, "error while encoding bitmap: %d\n", bits);
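
fill_bitmap_rle_bits() (lines 1101-1168) run-length encodes the dirty bitmap as alternating zero/one runs, alternating via a toggle and treating a zero run length anywhere past the first run as an encoding error. A simplified toy encoder showing the toggle idea (the driver additionally varint-packs the run lengths):

#include <stddef.h>
#include <stdint.h>

/* Emit alternating run lengths, starting with the length of the
 * initial zero-run (which is 0 only when the very first bit is set). */
static size_t rle_encode(const uint8_t *bits, size_t nbits,
                         uint64_t *runs, size_t max_runs)
{
    size_t n = 0, i = 0;
    int toggle = 0;                     /* 0: counting zeros, 1: counting ones */
    while (i < nbits && n < max_runs) {
        size_t start = i;
        while (i < nbits && ((bits[i / 8] >> (i % 8)) & 1) == toggle)
            i++;
        runs[n++] = i - start;          /* zero only possible for the first run */
        toggle ^= 1;
    }
    return n;
}
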
1207 struct drbd_device *device = peer_device->device;
1213 len = fill_bitmap_rle_bits(device, p,
1220 err = __send_command(peer_device->connection, device->vnr, sock,
1240 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1241 err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP,
1263 static int _drbd_send_bitmap(struct drbd_device *device,
1269 if (!expect(device, device->bitmap))
1272 if (get_ldev(device)) {
1273 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1274 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1275 drbd_bm_set_all(device);
1276 if (drbd_bm_write(device, peer_device)) {
1280 drbd_err(device, "Failed to write bitmap to disk!\n");
1282 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1283 drbd_md_sync(device);
1286 put_ldev(device);
1290 .bm_bits = drbd_bm_bits(device),
1291 .bm_words = drbd_bm_words(device),
1301 int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
1308 err = !_drbd_send_bitmap(device, peer_device);
1332 * @peer_device: DRBD peer device.
1344 if (peer_device->device->state.conn < C_CONNECTED)
1354 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1378 * @peer_device: DRBD peer device
1474 /* long elapsed = (long)(jiffies - device->last_received); */
1491 return drop_it; /* && (device->state == R_PRIMARY) */;
1534 peer_device->device->send_cnt += size >> 9;
1570 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1578 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1583 peer_device->device->send_cnt += size >> 9;
1667 struct drbd_device *device = peer_device->device;
1684 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1686 if (device->state.conn >= C_SYNC_SOURCE &&
1687 device->state.conn <= C_PAUSED_SYNC_T)
1704 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
1713 err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1739 drbd_warn(device,
1760 struct drbd_device *device = peer_device->device;
1780 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1891 struct drbd_device *device = disk->private_data;
1896 spin_lock_irqsave(&device->resource->req_lock, flags);
1897 /* to have a stable device->state.role
1900 if (device->state.role != R_PRIMARY) {
1908 device->open_cnt++;
1909 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1917 struct drbd_device *device = gd->private_data;
1920 device->open_cnt--;
1925 void drbd_queue_unplug(struct drbd_device *device)
1927 if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1928 D_ASSERT(device, device->state.role == R_PRIMARY);
1929 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1931 &first_peer_device(device)->connection->sender_work,
1932 &device->unplug_work);
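
drbd_queue_unplug() (lines 1925-1932) hands unplug work to the connection's sender only when the device is Primary with a reachable peer, and the test_and_clear_bit() guard ensures the hint is queued at most once per UNPLUG_REMOTE round. A small sketch of that clear-and-queue guard (queue_sender_work() is a hypothetical stand-in for the driver's work hand-off):

#include <stdatomic.h>
#include <stdbool.h>

extern void queue_sender_work(void);    /* hypothetical work hand-off */

static atomic_bool unplug_remote;

/* Only the caller that actually clears the flag queues the work, so
 * concurrent callers cannot queue the same hint twice. */
static void queue_unplug(void)
{
    if (atomic_exchange(&unplug_remote, false))
        queue_sender_work();
}
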
1937 static void drbd_set_defaults(struct drbd_device *device)
1941 device->state = (union drbd_dev_state) {
1950 void drbd_init_set_defaults(struct drbd_device *device)
1955 drbd_set_defaults(device);
1957 atomic_set(&device->ap_bio_cnt, 0);
1958 atomic_set(&device->ap_actlog_cnt, 0);
1959 atomic_set(&device->ap_pending_cnt, 0);
1960 atomic_set(&device->rs_pending_cnt, 0);
1961 atomic_set(&device->unacked_cnt, 0);
1962 atomic_set(&device->local_cnt, 0);
1963 atomic_set(&device->pp_in_use_by_net, 0);
1964 atomic_set(&device->rs_sect_in, 0);
1965 atomic_set(&device->rs_sect_ev, 0);
1966 atomic_set(&device->ap_in_flight, 0);
1967 atomic_set(&device->md_io.in_use, 0);
1969 mutex_init(&device->own_state_mutex);
1970 device->state_mutex = &device->own_state_mutex;
1972 spin_lock_init(&device->al_lock);
1973 spin_lock_init(&device->peer_seq_lock);
1975 INIT_LIST_HEAD(&device->active_ee);
1976 INIT_LIST_HEAD(&device->sync_ee);
1977 INIT_LIST_HEAD(&device->done_ee);
1978 INIT_LIST_HEAD(&device->read_ee);
1979 INIT_LIST_HEAD(&device->net_ee);
1980 INIT_LIST_HEAD(&device->resync_reads);
1981 INIT_LIST_HEAD(&device->resync_work.list);
1982 INIT_LIST_HEAD(&device->unplug_work.list);
1983 INIT_LIST_HEAD(&device->bm_io_work.w.list);
1984 INIT_LIST_HEAD(&device->pending_master_completion[0]);
1985 INIT_LIST_HEAD(&device->pending_master_completion[1]);
1986 INIT_LIST_HEAD(&device->pending_completion[0]);
1987 INIT_LIST_HEAD(&device->pending_completion[1]);
1989 device->resync_work.cb = w_resync_timer;
1990 device->unplug_work.cb = w_send_write_hint;
1991 device->bm_io_work.w.cb = w_bitmap_io;
1993 timer_setup(&device->resync_timer, resync_timer_fn, 0);
1994 timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
1995 timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
1996 timer_setup(&device->request_timer, request_timer_fn, 0);
1998 init_waitqueue_head(&device->misc_wait);
1999 init_waitqueue_head(&device->state_wait);
2000 init_waitqueue_head(&device->ee_wait);
2001 init_waitqueue_head(&device->al_wait);
2002 init_waitqueue_head(&device->seq_wait);
2004 device->resync_wenr = LC_FREE;
2005 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2006 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2009 void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
2013 set_capacity_and_notify(device->vdisk, size);
2015 drbd_info(device, "size = %s (%llu KB)\n",
2019 void drbd_device_cleanup(struct drbd_device *device)
2022 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2023 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2024 first_peer_device(device)->connection->receiver.t_state);
2026 device->al_writ_cnt =
2027 device->bm_writ_cnt =
2028 device->read_cnt =
2029 device->recv_cnt =
2030 device->send_cnt =
2031 device->writ_cnt =
2032 device->p_size =
2033 device->rs_start =
2034 device->rs_total =
2035 device->rs_failed = 0;
2036 device->rs_last_events = 0;
2037 device->rs_last_sect_ev = 0;
2039 device->rs_mark_left[i] = 0;
2040 device->rs_mark_time[i] = 0;
2042 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2044 set_capacity_and_notify(device->vdisk, 0);
2045 if (device->bitmap) {
2047 drbd_bm_resize(device, 0, 1);
2048 drbd_bm_cleanup(device);
2051 drbd_backing_dev_free(device, device->ldev);
2052 device->ldev = NULL;
2054 clear_bit(AL_SUSPENDED, &device->flags);
2056 D_ASSERT(device, list_empty(&device->active_ee));
2057 D_ASSERT(device, list_empty(&device->sync_ee));
2058 D_ASSERT(device, list_empty(&device->done_ee));
2059 D_ASSERT(device, list_empty(&device->read_ee));
2060 D_ASSERT(device, list_empty(&device->net_ee));
2061 D_ASSERT(device, list_empty(&device->resync_reads));
2062 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2063 D_ASSERT(device, list_empty(&device->resync_work.list));
2064 D_ASSERT(device, list_empty(&device->unplug_work.list));
2066 drbd_set_defaults(device);
2081 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2167 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2171 rr = drbd_free_peer_reqs(device, &device->active_ee);
2173 drbd_err(device, "%d EEs in active list found!\n", rr);
2175 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2177 drbd_err(device, "%d EEs in sync list found!\n", rr);
2179 rr = drbd_free_peer_reqs(device, &device->read_ee);
2181 drbd_err(device, "%d EEs in read list found!\n", rr);
2183 rr = drbd_free_peer_reqs(device, &device->done_ee);
2185 drbd_err(device, "%d EEs in done list found!\n", rr);
2187 rr = drbd_free_peer_reqs(device, &device->net_ee);
2189 drbd_err(device, "%d EEs in net list found!\n", rr);
2195 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2196 struct drbd_resource *resource = device->resource;
2199 timer_shutdown_sync(&device->request_timer);
2202 D_ASSERT(device, device->open_cnt == 0);
2206 * device (re-)configuration or state changes */
2208 drbd_backing_dev_free(device, device->ldev);
2209 device->ldev = NULL;
2211 drbd_release_all_peer_reqs(device);
2213 lc_destroy(device->act_log);
2214 lc_destroy(device->resync);
2216 kfree(device->p_uuid);
2217 /* device->p_uuid = NULL; */
2219 if (device->bitmap) /* should no longer be there. */
2220 drbd_bm_cleanup(device);
2221 __free_page(device->md_io.page);
2222 put_disk(device->vdisk);
2223 kfree(device->rs_plan_s);
2228 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2232 if (device->submit.wq)
2233 destroy_workqueue(device->submit.wq);
2234 kfree(device);
2260 struct drbd_device *device = req->device;
2265 expect(device, atomic_read(&req->completion_ref) == 0) &&
2266 expect(device, req->rq_state & RQ_POSTPONED) &&
2267 expect(device, (req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2271 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2282 /* A single suspended or otherwise blocking device may stall
2289 * resource (replication group) or per device (minor) retry
2295 inc_ap_bio(device);
2296 __drbd_make_request(device, bio);
2312 dec_ap_bio(req->device);
2344 struct drbd_device *device;
2363 idr_for_each_entry(&drbd_devices, device, i)
2364 drbd_delete_device(device);
2669 static int init_submitter(struct drbd_device *device)
2673 device->submit.wq =
2674 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2675 if (!device->submit.wq)
2678 INIT_WORK(&device->submit.worker, do_submit);
2679 INIT_LIST_HEAD(&device->submit.writes);
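
init_submitter() (lines 2669-2679) gives each device an ordered workqueue, so queued writes are submitted strictly one at a time in queueing order; WQ_MEM_RECLAIM marks the queue as needed for forward progress under memory pressure. A kernel-style sketch of the same setup (struct and handler names are assumptions, not the driver's types):

struct my_dev {
    struct workqueue_struct *wq;
    struct work_struct worker;
    struct list_head writes;
};

static void my_do_submit(struct work_struct *w);   /* assumed handler */

static int my_init_submitter(struct my_dev *d, unsigned int minor)
{
    /* Ordered: at most one work item runs at a time, in FIFO order.
     * WQ_MEM_RECLAIM: the queue gets a rescuer thread, because block
     * I/O submission may be required to reclaim memory. */
    d->wq = alloc_ordered_workqueue("my%u_submit", WQ_MEM_RECLAIM, minor);
    if (!d->wq)
        return -ENOMEM;
    INIT_WORK(&d->worker, my_do_submit);
    INIT_LIST_HEAD(&d->writes);
    return 0;
}
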
2687 struct drbd_device *device;
2694 device = minor_to_device(minor);
2695 if (device)
2699 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2700 if (!device)
2702 kref_init(&device->kref);
2705 device->resource = resource;
2706 device->minor = minor;
2707 device->vnr = vnr;
2709 drbd_init_set_defaults(device);
2715 device->vdisk = disk;
2716 device->rq_queue = disk->queue;
2726 disk->private_data = device;
2734 device->md_io.page = alloc_page(GFP_KERNEL);
2735 if (!device->md_io.page)
2738 if (drbd_bm_init(device))
2740 device->read_requests = RB_ROOT;
2741 device->write_requests = RB_ROOT;
2743 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2749 kref_get(&device->kref);
2751 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2757 kref_get(&device->kref);
2759 INIT_LIST_HEAD(&device->peer_devices);
2760 INIT_LIST_HEAD(&device->pending_bitmap_io);
2766 peer_device->device = device;
2768 list_add(&peer_device->peer_devices, &device->peer_devices);
2769 kref_get(&device->kref);
2781 if (init_submitter(device)) {
2791 device->state.conn = first_connection(resource)->cstate;
2792 if (device->state.conn == C_WF_REPORT_PARAMS) {
2793 for_each_peer_device(peer_device, device)
2797 for_each_peer_device(peer_device, device)
2799 drbd_debugfs_device_add(device);
2803 destroy_workqueue(device->submit.wq);
2810 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2819 drbd_bm_cleanup(device);
2821 __free_page(device->md_io.page);
2826 kfree(device);
2830 void drbd_delete_device(struct drbd_device *device)
2832 struct drbd_resource *resource = device->resource;
2837 for_each_peer_device(peer_device, device)
2839 drbd_debugfs_device_cleanup(device);
2841 idr_remove(&connection->peer_devices, device->vnr);
2842 kref_put(&device->kref, drbd_destroy_device);
2844 idr_remove(&resource->devices, device->vnr);
2845 kref_put(&device->kref, drbd_destroy_device);
2846 idr_remove(&drbd_devices, device_to_minor(device));
2847 kref_put(&device->kref, drbd_destroy_device);
2848 del_gendisk(device->vdisk);
2850 kref_put(&device->kref, drbd_destroy_device);
2868 pr_err("unable to register block device major %d\n",
2916 pr_info("registered as block device major %d\n", DRBD_MAJOR);
2960 struct drbd_device *device = peer_device->device;
2962 kref_get(&device->kref);
2964 drbd_md_sync(device);
2965 kref_put(&device->kref, drbd_destroy_device);
2996 void drbd_md_write(struct drbd_device *device, void *b)
3004 buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
3006 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3007 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3010 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3011 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3012 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3014 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3016 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3017 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3019 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3020 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3022 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3023 sector = device->ldev->md.md_offset;
3025 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3027 drbd_err(device, "meta data update failed!\n");
3028 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
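
drbd_md_write() (lines 2996-3028) serializes the superblock into a sector buffer with every multi-byte field converted to big-endian, then issues one synchronous write and escalates failure through drbd_chk_io_error(). A hedged userspace sketch of the fixed-endian serialization (the struct layout is illustrative, not DRBD's on-disk format):

#include <endian.h>
#include <stdint.h>

/* On-disk metadata is stored big-endian so it stays readable on any
 * host architecture; illustrative fields only. */
struct md_on_disk {
    uint64_t la_size_sect;   /* last agreed size, in sectors */
    uint64_t uuid[4];
    uint32_t flags;
};

static void md_serialize(struct md_on_disk *out, uint64_t size_sect,
                         const uint64_t *uuid, uint32_t flags)
{
    out->la_size_sect = htobe64(size_sect);
    for (int i = 0; i < 4; i++)
        out->uuid[i] = htobe64(uuid[i]);
    out->flags = htobe32(flags);
}
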
3034 * @device: DRBD device.
3036 void drbd_md_sync(struct drbd_device *device)
3044 del_timer(&device->md_sync_timer);
3046 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3051 if (!get_ldev_if_state(device, D_FAILED))
3054 buffer = drbd_md_get_buffer(device, __func__);
3058 drbd_md_write(device, buffer);
3060 /* Update device->ldev->md.la_size_sect,
3062 device->ldev->md.la_size_sect = get_capacity(device->vdisk);
3064 drbd_md_put_buffer(device);
3066 put_ldev(device);
3069 static int check_activity_log_stripe_size(struct drbd_device *device,
3109 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3114 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3172 /* FIXME check for device grow with flex external meta data? */
3174 /* can the available bitmap space cover the last agreed device size? */
3181 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3196 * @device: DRBD device.
3203 * even before @bdev is assigned to @device->ldev.
3205 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3211 if (device->state.disk != D_DISKLESS)
3214 buffer = drbd_md_get_buffer(device, __func__);
3227 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3231 drbd_err(device, "Error while reading metadata.\n");
3241 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3249 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3251 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3256 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3273 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3275 if (check_offsets_and_sizes(device, bdev))
3279 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3284 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3291 spin_lock_irq(&device->resource->req_lock);
3292 if (device->state.conn < C_CONNECTED) {
3296 device->peer_max_bio_size = peer;
3298 spin_unlock_irq(&device->resource->req_lock);
3301 drbd_md_put_buffer(device);
3308 * @device: DRBD device.
3314 void drbd_md_mark_dirty(struct drbd_device *device)
3316 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3317 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
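
drbd_md_mark_dirty() (lines 3314-3317) implements lazy metadata flushing: the first caller to set MD_DIRTY arms a 5-second timer, and drbd_md_sync() (line 3046) writes only if test_and_clear_bit() finds the flag still set, so a burst of changes collapses into one write. A sketch of that flag-and-timer pairing (arm_timer() and write_metadata() are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

extern void arm_timer(int seconds);     /* hypothetical deferred kick */
extern void write_metadata(void);       /* hypothetical flush */

static atomic_bool md_dirty;

/* Only the clean->dirty transition arms the timer; repeated callers
 * ride on the already-pending sync. */
static void md_mark_dirty(void)
{
    if (!atomic_exchange(&md_dirty, true))
        arm_timer(5);
}

static void md_sync(void)
{
    if (!atomic_exchange(&md_dirty, false))
        return;                 /* nothing changed since the last write */
    write_metadata();
}
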
3320 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3325 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3328 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3331 if (device->state.role == R_PRIMARY)
3336 drbd_set_ed_uuid(device, val);
3339 device->ldev->md.uuid[idx] = val;
3340 drbd_md_mark_dirty(device);
3343 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3346 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3347 __drbd_uuid_set(device, idx, val);
3348 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3351 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3354 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3355 if (device->ldev->md.uuid[idx]) {
3356 drbd_uuid_move_history(device);
3357 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3359 __drbd_uuid_set(device, idx, val);
3360 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
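
drbd_uuid_set() (lines 3351-3360) never discards a UUID silently: if the slot already holds a value, the history is shifted down and the old value parked at UI_HISTORY_START before the new one is installed, all under uuid_lock. The rotation in isolation (indices mirror the driver's UI_* enum; locking omitted):

#include <stdint.h>

enum { UI_CURRENT, UI_BITMAP, UI_HISTORY_START, UI_HISTORY_END, UI_SIZE };

/* Shift the history slots down by one, dropping the oldest value. */
static void uuid_move_history(uint64_t uuid[UI_SIZE])
{
    for (int i = UI_HISTORY_END - 1; i >= UI_HISTORY_START; i--)
        uuid[i + 1] = uuid[i];
}

/* Preserve the value being replaced at the head of the history, then
 * install the new one (the driver holds md.uuid_lock around this). */
static void uuid_set(uint64_t uuid[UI_SIZE], int idx, uint64_t val)
{
    if (uuid[idx]) {
        uuid_move_history(uuid);
        uuid[UI_HISTORY_START] = uuid[idx];
    }
    uuid[idx] = val;
}
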
3365 * @device: DRBD device.
3370 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3377 spin_lock_irq(&device->ldev->md.uuid_lock);
3378 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3381 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3383 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3384 __drbd_uuid_set(device, UI_CURRENT, val);
3385 spin_unlock_irq(&device->ldev->md.uuid_lock);
3387 drbd_print_uuids(device, "new current UUID");
3389 drbd_md_sync(device);
3392 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3395 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3398 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3400 drbd_uuid_move_history(device);
3401 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3402 device->ldev->md.uuid[UI_BITMAP] = 0;
3404 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3406 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3408 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3410 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3412 drbd_md_mark_dirty(device);
3417 * @device: DRBD device.
3421 int drbd_bmio_set_n_write(struct drbd_device *device,
3427 drbd_md_set_flag(device, MDF_FULL_SYNC);
3428 drbd_md_sync(device);
3429 drbd_bm_set_all(device);
3431 rv = drbd_bm_write(device, peer_device);
3434 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3435 drbd_md_sync(device);
3443 * @device: DRBD device.
3447 int drbd_bmio_clear_n_write(struct drbd_device *device,
3451 drbd_resume_al(device);
3452 drbd_bm_clear_all(device);
3453 return drbd_bm_write(device, peer_device);
3458 struct drbd_device *device =
3460 struct bm_io_work *work = &device->bm_io_work;
3464 int cnt = atomic_read(&device->ap_bio_cnt);
3466 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3470 if (get_ldev(device)) {
3471 drbd_bm_lock(device, work->why, work->flags);
3472 rv = work->io_fn(device, work->peer_device);
3473 drbd_bm_unlock(device);
3474 put_ldev(device);
3477 clear_bit_unlock(BITMAP_IO, &device->flags);
3478 wake_up(&device->misc_wait);
3481 work->done(device, rv);
3483 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3492 * @device: DRBD device.
3506 void drbd_queue_bitmap_io(struct drbd_device *device,
3512 D_ASSERT(device, current == peer_device->connection->worker.task);
3514 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3515 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3516 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3517 if (device->bm_io_work.why)
3518 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3519 why, device->bm_io_work.why);
3521 device->bm_io_work.peer_device = peer_device;
3522 device->bm_io_work.io_fn = io_fn;
3523 device->bm_io_work.done = done;
3524 device->bm_io_work.why = why;
3525 device->bm_io_work.flags = flags;
3527 spin_lock_irq(&device->resource->req_lock);
3528 set_bit(BITMAP_IO, &device->flags);
3531 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3532 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3534 &device->bm_io_work.w);
3536 spin_unlock_irq(&device->resource->req_lock);
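
drbd_queue_bitmap_io() (lines 3506-3536) records the operation in a work descriptor (io_fn, completion callback, reason string, locking flags), sets BITMAP_IO under the resource lock, and defers queueing until in-flight application I/O has drained unless the flags permit concurrent bitmap changes. A sketch of that descriptor-plus-gate shape (simplified, single-threaded view):

#include <stdbool.h>
#include <stddef.h>

struct bm_io_work {
    int  (*io_fn)(void *dev);           /* the bitmap operation itself */
    void (*done)(void *dev, int rv);    /* completion callback, may be NULL */
    const char *why;                    /* reason string, for logging */
};

/* Returns true if the work can be handed to the worker immediately;
 * otherwise the driver queues it later, when the last in-flight bio
 * completes and ap_bio_cnt reaches zero. */
static bool can_queue_bitmap_io(const struct bm_io_work *w, int ap_bio_cnt,
                                bool change_allowed)
{
    (void)w;    /* driver: set BITMAP_IO_QUEUED and queue w->io_fn */
    return change_allowed || ap_bio_cnt == 0;
}
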
3541 * @device: DRBD device.
3549 int drbd_bitmap_io(struct drbd_device *device,
3558 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3561 drbd_suspend_io(device);
3563 drbd_bm_lock(device, why, flags);
3564 rv = io_fn(device, peer_device);
3565 drbd_bm_unlock(device);
3568 drbd_resume_io(device);
3573 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3575 if ((device->ldev->md.flags & flag) != flag) {
3576 drbd_md_mark_dirty(device);
3577 device->ldev->md.flags |= flag;
3581 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3583 if ((device->ldev->md.flags & flag) != 0) {
3584 drbd_md_mark_dirty(device);
3585 device->ldev->md.flags &= ~flag;
3595 struct drbd_device *device = from_timer(device, t, md_sync_timer);
3596 drbd_device_post_work(device, MD_SYNC);
3675 * @device: device associated with the request
3679 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3686 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3694 /* Indicate to wake up device->misc_wait on progress. */
3696 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3697 spin_unlock_irq(&device->resource->req_lock);
3699 finish_wait(&device->misc_wait, &wait);
3700 spin_lock_irq(&device->resource->req_lock);
3701 if (!timeout || device->state.conn < C_CONNECTED)
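
drbd_wait_misc() (lines 3679-3701) sleeps with the classic prepare_to_wait()/schedule_timeout()/finish_wait() sequence: it registers on misc_wait before dropping req_lock, so a wake-up arriving between the unlock and the schedule is not lost, then re-checks the condition after re-taking the lock. A kernel-style sketch of that pattern (struct and field names assumed):

struct my_dev {
    wait_queue_head_t misc_wait;
    spinlock_t req_lock;
};

/* Lost-wakeup-safe sleep: register on the queue *before* releasing
 * the lock, so a waker running in the window still finds us. */
static long wait_for_progress(struct my_dev *d, long timeout)
{
    DEFINE_WAIT(wait);

    prepare_to_wait(&d->misc_wait, &wait, TASK_INTERRUPTIBLE);
    spin_unlock_irq(&d->req_lock);
    timeout = schedule_timeout(timeout);
    finish_wait(&d->misc_wait, &wait);
    spin_lock_irq(&d->req_lock);
    /* caller re-evaluates its condition and the connection state here */
    return timeout;
}
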
3778 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3784 ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3791 drbd_warn(device, "***Simulating %s failure\n",