Lines matching "device" in drivers/block/drbd/drbd_actlog.c (DRBD activity log and resync-extent handling); the number at the start of each line is its line number in that file.

57 	 * sectors-written since device creation, and other data generation tags
73 * allows covering device sizes of up to 2**54 bytes (16 PiB) */
82 void *drbd_md_get_buffer(struct drbd_device *device, const char *intent)
86 wait_event(device->misc_wait,
87 (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 ||
88 device->state.disk <= D_FAILED);
93 device->md_io.current_use = intent;
94 device->md_io.start_jif = jiffies;
95 device->md_io.submit_jif = device->md_io.start_jif - 1;
96 return page_address(device->md_io.page);
99 void drbd_md_put_buffer(struct drbd_device *device)
101 if (atomic_dec_and_test(&device->md_io.in_use))
102 wake_up(&device->misc_wait);
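
The md_io.in_use field implements a one-deep buffer pool: drbd_md_get_buffer() sleeps until an atomic cmpxchg from 0 to 1 succeeds (or the disk fails), and drbd_md_put_buffer() releases the slot and wakes waiters. A minimal userspace sketch of that single-slot claim/release pattern, using C11 atomics and a spin-retry in place of the kernel's wait_event()/wake_up() (slot_acquire/slot_release are invented names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One-deep buffer slot: 0 = free, 1 = in use. */
static atomic_int in_use;

/* Analogue of drbd_md_get_buffer(): claim the single slot. */
static bool slot_acquire(void)
{
	int expected = 0;

	/* cmpxchg(0 -> 1); the kernel wraps this in wait_event(), which
	 * sleeps between attempts and also gives up once the backing
	 * disk reaches a failed state */
	while (!atomic_compare_exchange_weak(&in_use, &expected, 1))
		expected = 0;	/* reset after a failed exchange */
	return true;
}

/* Analogue of drbd_md_put_buffer(): release and (conceptually) wake waiters. */
static void slot_release(void)
{
	atomic_store(&in_use, 0);
}

int main(void)
{
	if (slot_acquire()) {
		printf("slot held, doing one metadata I/O\n");
		slot_release();
	}
	return 0;
}

In the driver the count briefly rises to 2 while a bio is in flight (the atomic_inc at line 162 takes a reference for the completion handler), which is why release is an atomic_dec_and_test rather than a plain store.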
105 void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
117 dt = wait_event_timeout(device->misc_wait,
118 *done || test_bit(FORCE_DETACH, &device->flags), dt);
120 drbd_err(device, "meta-data IO operation timed out\n");
121 drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
125 static int _drbd_md_sync_page_io(struct drbd_device *device,
135 device->md_io.done = 0;
136 device->md_io.error = -ENODEV;
138 if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
146 if (bio_add_page(bio, device->md_io.page, size, 0) != size)
148 bio->bi_private = device;
151 if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
154 else if (!get_ldev_if_state(device, D_ATTACHING)) {
156 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
162 atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
163 device->md_io.submit_jif = jiffies;
164 if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
168 wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
170 err = device->md_io.error;
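
_drbd_md_sync_page_io() performs a fully synchronous single-page metadata transfer: build a one-page bio, submit it, and wait for completion (or forced detach) before returning the error. A hedged userspace analogue of the write side, with pwrite() plus fdatasync() standing in for a REQ_FUA bio (md_sync_write and the sector layout are illustrative, not the driver's API):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MD_BLOCK_SIZE 4096	/* one 4k block, like device->md_io.page */

/* Write one 4k block at a 512-byte sector offset and make it durable
 * before returning, the way a synchronous REQ_FUA bio would. */
static int md_sync_write(int fd, unsigned long long sector, const void *buf)
{
	ssize_t n = pwrite(fd, buf, MD_BLOCK_SIZE, (off_t)(sector * 512));

	if (n != MD_BLOCK_SIZE)
		return n < 0 ? -errno : -EIO;
	if (fdatasync(fd) != 0)	/* stand-in for the FUA/flush guarantee */
		return -errno;
	return 0;
}

int main(int argc, char **argv)
{
	char block[MD_BLOCK_SIZE];
	int fd, err;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return 1;
	memset(block, 0, sizeof(block));
	err = md_sync_write(fd, 8, block);	/* sector 8 = byte offset 4096 */
	if (err)
		fprintf(stderr, "meta-data write failed: %d\n", err);
	close(fd);
	return err ? 1 : 0;
}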
177 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
181 D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
185 dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
192 drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
197 err = _drbd_md_sync_page_io(device, bdev, sector, op);
199 drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
206 static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
209 tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
218 static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
224 spin_lock_irq(&device->al_lock);
225 bm_ext = find_active_resync_extent(device, enr);
228 spin_unlock_irq(&device->al_lock);
230 wake_up(&device->al_wait);
234 al_ext = lc_try_get(device->act_log, enr);
236 al_ext = lc_get(device->act_log, enr);
237 spin_unlock_irq(&device->al_lock);
241 bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
248 D_ASSERT(device, first <= last);
249 D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
255 return _al_get(device, first, true);
258 bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
267 D_ASSERT(device, first <= last);
268 D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
272 wait_event(device->al_wait,
273 (al_ext = _al_get(device, enr, false)) != NULL);
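
_al_get() refuses an activity-log slot while a resync extent covering the same region is locked: the fastpath (nonblock = true) tries exactly once, while drbd_al_begin_io_prepare() sleeps on al_wait and retries until the slot becomes available. A self-contained pthread model of that try/wait split (the busy[] table and function names are invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_EXTENTS 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  al_wait = PTHREAD_COND_INITIALIZER;
static bool busy[NR_EXTENTS];	/* stand-in for a locked resync extent */

/* Nonblocking attempt, like _al_get(device, enr, true): fail at once
 * if the extent conflicts with ongoing resync. */
static bool al_try_get(unsigned int enr)
{
	bool ok;

	pthread_mutex_lock(&lock);
	ok = !busy[enr];
	if (ok)
		busy[enr] = true;
	pthread_mutex_unlock(&lock);
	return ok;
}

/* Blocking variant, like wait_event(device->al_wait, _al_get(...)):
 * sleep until the conflicting extent is released. */
static void al_get(unsigned int enr)
{
	pthread_mutex_lock(&lock);
	while (busy[enr])
		pthread_cond_wait(&al_wait, &lock);
	busy[enr] = true;
	pthread_mutex_unlock(&lock);
}

static void al_put(unsigned int enr)
{
	pthread_mutex_lock(&lock);
	busy[enr] = false;
	pthread_cond_broadcast(&al_wait);	/* wake_up(&device->al_wait) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	if (al_try_get(3))
		printf("fastpath: got extent 3\n");
	al_put(3);
	al_get(3);	/* slowpath: would sleep if extent 3 were busy */
	al_put(3);
	return 0;
}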
298 static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
300 const unsigned int stripes = device->ldev->md.al_stripes;
301 const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
304 unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
313 return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
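
This helper maps a monotonically growing transaction number onto a striped on-disk ring buffer: reduce modulo the total number of 4k slots, interleave round-robin across the stripes, then convert 4k slots to 512-byte sectors. The same arithmetic lifted into a standalone program, with made-up geometry for a worked example (al_offset here stands for md_offset + al_offset combined):

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t al_slot_to_sector(unsigned int tr_number,
				  unsigned int stripes,
				  unsigned int stripe_size_4k,
				  sector_t al_offset)
{
	/* wrap around the ring of stripes * stripe_size_4k slots
	 * (al_size_4k in the driver) */
	unsigned int t = tr_number % (stripes * stripe_size_4k);

	/* round-robin: consecutive transactions land in different
	 * stripes, consecutive laps advance within a stripe */
	t = (t % stripes) * stripe_size_4k + t / stripes;

	/* one 4k slot = eight 512-byte sectors */
	return al_offset + (sector_t)t * 8;
}

int main(void)
{
	/* 4 stripes of 8 slots each, activity log starting at sector 16 */
	for (unsigned int tr = 0; tr < 6; tr++)
		printf("tr %u -> sector %llu\n", tr,
		       al_slot_to_sector(tr, 4, 8, 16));
	/* tr 0..3 hit the start of each stripe (16, 80, 144, 208);
	 * tr 4 wraps back into stripe 0 at its second slot (24). */
	return 0;
}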
316 static int __al_write_transaction(struct drbd_device *device, struct al_transaction_on_disk *buffer)
327 buffer->tr_number = cpu_to_be32(device->al_tr_number);
331 drbd_bm_reset_al_hints(device);
337 spin_lock_irq(&device->al_lock);
338 list_for_each_entry(e, &device->act_log->to_be_changed, list) {
346 drbd_bm_mark_for_writeout(device,
350 spin_unlock_irq(&device->al_lock);
359 buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
360 buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);
363 device->act_log->nr_elements - device->al_tr_cycle);
365 unsigned idx = device->al_tr_cycle + i;
366 extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
372 device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
373 if (device->al_tr_cycle >= device->act_log->nr_elements)
374 device->al_tr_cycle = 0;
376 sector = al_tr_number_to_on_disk_sector(device);
381 if (drbd_bm_write_hinted(device))
386 write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
389 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
391 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
393 device->al_tr_number++;
394 device->al_writ_cnt++;
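
Besides the up-to-64 slot updates, each transaction re-encodes a window of "context" slots, and al_tr_cycle rotates that window through the whole activity log, so after ceil(nr_elements / AL_CONTEXT_PER_TRANSACTION) transactions the complete context is present on disk. A toy version of the rotation (919 is the context slot count that fits the 4k transaction block; nr_elements is a made-up al-extents setting):

#include <stdio.h>

#define AL_CONTEXT_PER_TRANSACTION 919	/* context slots per 4k transaction */

int main(void)
{
	unsigned int nr_elements = 1237;
	unsigned int al_tr_cycle = 0;

	/* each transaction encodes slots [al_tr_cycle, al_tr_cycle + n),
	 * clamped to nr_elements, then advances and wraps -- two
	 * transactions suffice to cover every slot here */
	for (int tr = 0; tr < 3; tr++) {
		unsigned int n = nr_elements - al_tr_cycle;

		if (n > AL_CONTEXT_PER_TRANSACTION)
			n = AL_CONTEXT_PER_TRANSACTION;
		printf("tr %d: context slots %u..%u\n",
		       tr, al_tr_cycle, al_tr_cycle + n - 1);
		al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
		if (al_tr_cycle >= nr_elements)
			al_tr_cycle = 0;
	}
	return 0;
}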
402 static int al_write_transaction(struct drbd_device *device)
407 if (!get_ldev(device)) {
408 drbd_err(device, "disk is %s, cannot start al transaction\n",
409 drbd_disk_str(device->state.disk));
414 if (device->state.disk < D_INCONSISTENT) {
415 drbd_err(device,
417 drbd_disk_str(device->state.disk));
418 put_ldev(device);
423 buffer = drbd_md_get_buffer(device, __func__);
425 drbd_err(device, "disk failed while waiting for md_io buffer\n");
426 put_ldev(device);
430 err = __al_write_transaction(device, buffer);
432 drbd_md_put_buffer(device);
433 put_ldev(device);
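
al_write_transaction() acquires its resources in a fixed order (disk reference via get_ldev(), then the shared md_io buffer) and releases them in reverse on every exit path. A minimal sketch of that acquire/check/release discipline using C's goto-cleanup idiom (get_disk/get_buffer are stand-ins, not driver functions):

#include <stdbool.h>
#include <stdio.h>

static bool get_disk(void)	{ return true; }	/* like get_ldev() */
static void put_disk(void)	{ }			/* like put_ldev() */
static void *get_buffer(void)	{ static char buf[4096]; return buf; }
static void put_buffer(void)	{ }

static int write_transaction(void)
{
	void *buffer;
	int err = 0;

	if (!get_disk())
		return -5;		/* -EIO: disk unusable */

	buffer = get_buffer();		/* like drbd_md_get_buffer() */
	if (!buffer) {
		err = -19;		/* -ENODEV: disk failed while waiting */
		goto out_put_disk;
	}

	/* ... build and submit the transaction here ... */

	put_buffer();
out_put_disk:
	put_disk();
	return err;
}

int main(void)
{
	printf("write_transaction() = %d\n", write_transaction());
	return 0;
}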
439 void drbd_al_begin_io_commit(struct drbd_device *device)
446 wait_event(device->al_wait,
447 device->act_log->pending_changes == 0 ||
448 (locked = lc_try_lock_for_transaction(device->act_log)));
453 if (device->act_log->pending_changes) {
457 write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
461 al_write_transaction(device);
462 spin_lock_irq(&device->al_lock);
467 lc_committed(device->act_log);
468 spin_unlock_irq(&device->al_lock);
470 lc_unlock(device->act_log);
471 wake_up(&device->al_wait);
478 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
480 if (drbd_al_begin_io_prepare(device, i))
481 drbd_al_begin_io_commit(device);
484 int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
486 struct lru_cache *al = device->act_log;
495 D_ASSERT(device, first <= last);
514 __set_bit(__LC_STARVING, &device->act_log->flags);
521 tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
537 al_ext = lc_get_cumulative(device->act_log, enr);
539 drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
544 void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
554 D_ASSERT(device, first <= last);
555 spin_lock_irqsave(&device->al_lock, flags);
558 extent = lc_find(device->act_log, enr);
560 drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
563 lc_put(device->act_log, extent);
565 spin_unlock_irqrestore(&device->al_lock, flags);
566 wake_up(&device->al_wait);
569 static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
573 spin_lock_irq(&device->al_lock);
576 lc_del(device->act_log, al_ext);
577 spin_unlock_irq(&device->al_lock);
584 * @device: DRBD device.
589 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
591 void drbd_al_shrink(struct drbd_device *device)
596 D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));
598 for (i = 0; i < device->act_log->nr_elements; i++) {
599 al_ext = lc_element_by_index(device->act_log, i);
602 wait_event(device->al_wait, _try_lc_del(device, al_ext));
605 wake_up(&device->al_wait);
608 int drbd_al_initialize(struct drbd_device *device, void *buffer)
611 struct drbd_md *md = &device->ldev->md;
615 __al_write_transaction(device, al);
617 spin_lock_irq(&device->al_lock);
618 lc_committed(device->act_log);
619 spin_unlock_irq(&device->al_lock);
625 int err = __al_write_transaction(device, al);
651 static bool update_rs_extent(struct drbd_device *device,
657 D_ASSERT(device, atomic_read(&device->local_cnt));
667 e = lc_find(device->resync, enr);
669 e = lc_get(device->resync, enr);
680 drbd_warn(device, "BAD! enr=%u rs_left=%d "
684 drbd_conn_str(device->state.conn));
692 ext->rs_left = drbd_bm_e_weight(device, enr);
701 int rs_left = drbd_bm_e_weight(device, enr);
703 drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
710 drbd_warn(device, "Kicking resync_lru element enr=%u "
718 lc_committed(device->resync);
721 lc_put(device->resync, &ext->lce);
730 drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
731 device->resync_locked,
732 device->resync->nr_elements,
733 device->resync->flags);
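
update_rs_extent() maintains a cached count of still-dirty bitmap bits per resync extent (rs_left), so progress accounting does not rescan the bitmap on every completed request; on a cache miss the count is recomputed via drbd_bm_e_weight(), and an impossible decrement triggers the "BAD! enr=..." warning above. A small self-contained model of that cached accounting (extent size and names are illustrative):

#include <stdio.h>

#define BITS_PER_EXT 128	/* toy extent size, in bitmap bits */
#define NR_EXT 4

static unsigned char bm[NR_EXT][BITS_PER_EXT];	/* 1 = out of sync */
static int rs_left[NR_EXT];			/* cached dirty-bit count */

/* recompute from the bitmap, like drbd_bm_e_weight() */
static int bm_e_weight(unsigned int enr)
{
	int w = 0;

	for (int i = 0; i < BITS_PER_EXT; i++)
		w += bm[enr][i];
	return w;
}

/* account bits just cleared in extent enr, like the SET_IN_SYNC path */
static void update_rs_extent(unsigned int enr, int cleared)
{
	if (rs_left[enr] < cleared) {	/* would go negative: logic bug */
		fprintf(stderr, "BAD! enr=%u rs_left=%d cleared=%d\n",
			enr, rs_left[enr], cleared);
		rs_left[enr] = bm_e_weight(enr);	/* repair the cache */
		return;
	}
	rs_left[enr] -= cleared;
}

int main(void)
{
	/* dirty 10 bits in extent 1, prime the cache, then clear 4 */
	for (int i = 0; i < 10; i++)
		bm[1][i] = 1;
	rs_left[1] = bm_e_weight(1);
	for (int i = 0; i < 4; i++)
		bm[1][i] = 0;
	update_rs_extent(1, 4);
	printf("extent 1: rs_left=%d (bitmap says %d)\n",
	       rs_left[1], bm_e_weight(1));
	return 0;
}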
740 struct drbd_device *device = peer_device->device;
742 unsigned long last = device->rs_mark_time[device->rs_last_mark];
743 int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
745 if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
746 device->state.conn != C_PAUSED_SYNC_T &&
747 device->state.conn != C_PAUSED_SYNC_S) {
748 device->rs_mark_time[next] = now;
749 device->rs_mark_left[next] = still_to_go;
750 device->rs_last_mark = next;
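
drbd_advance_rs_marks() records (timestamp, work-left) samples into a small ring; the sync speed reported to the user can then be derived as the work delta over the time delta against the oldest mark. A runnable model of that estimator (the ring size and 3-second step mimic the driver's scheme, but the exact constants here are illustrative):

#include <stdio.h>

#define SYNC_MARKS 8

static unsigned long mark_left[SYNC_MARKS];	/* work to go, in KiB */
static unsigned long mark_time[SYNC_MARKS];	/* sample time, in s */
static int last_mark;

static void advance_marks(unsigned long now, unsigned long still_to_go)
{
	int next = (last_mark + 1) % SYNC_MARKS;

	/* skip duplicate samples, as the driver does while paused */
	if (mark_left[last_mark] == still_to_go)
		return;
	mark_time[next] = now;
	mark_left[next] = still_to_go;
	last_mark = next;
}

/* rate against the oldest mark, in KiB/s; assumes the ring has wrapped */
static unsigned long est_rate(void)
{
	int oldest = (last_mark + 1) % SYNC_MARKS;
	unsigned long dt = mark_time[last_mark] - mark_time[oldest];

	return dt ? (mark_left[oldest] - mark_left[last_mark]) / dt : 0;
}

int main(void)
{
	/* 1 GiB to go, draining at 10 MiB/s, sampled every 3 seconds */
	unsigned long left = 1024 * 1024;

	for (unsigned long t = 0; t <= 30; t += 3) {
		advance_marks(t, left);
		left -= 3 * 10 * 1024;
	}
	printf("estimated sync rate: %lu KiB/s\n", est_rate());	/* 10240 */
	return 0;
}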
756 static bool lazy_bitmap_update_due(struct drbd_device *device)
758 return time_after(jiffies, device->rs_last_bcast + 2*HZ);
761 static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
764 struct drbd_connection *connection = first_peer_device(device)->connection;
766 is_sync_target_state(device->state.conn))
767 set_bit(RS_DONE, &device->flags);
774 } else if (!lazy_bitmap_update_due(device))
777 drbd_device_post_work(device, RS_PROGRESS);
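
lazy_bitmap_update_due() rate-limits on-disk bitmap writeout to at most once per two seconds using time_after(), the kernel's wraparound-safe jiffies comparison. Its core is a signed subtraction; a self-contained demo follows (the kernel macro additionally type-checks its arguments):

#include <stdio.h>

/* essence of the kernel's time_after(a, b): true iff a is after b,
 * even across a wraparound of the unsigned counter */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long HZ = 250;
	unsigned long rs_last_bcast = (unsigned long)-100;	/* near wrap */
	unsigned long jiffies = rs_last_bcast + 2 * HZ + 1;	/* wrapped */

	/* despite the wrap, the deadline correctly reads as expired */
	printf("due: %d (jiffies=%lu)\n",
	       time_after(jiffies, rs_last_bcast + 2 * HZ), jiffies);
	return 0;
}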
780 static int update_sync_bits(struct drbd_device *device,
805 c = drbd_bm_count_bits(device, sbnr, tbnr);
807 c = drbd_bm_clear_bits(device, sbnr, tbnr);
809 c = drbd_bm_set_bits(device, sbnr, tbnr);
812 spin_lock_irqsave(&device->al_lock, flags);
813 cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
814 spin_unlock_irqrestore(&device->al_lock, flags);
821 unsigned long still_to_go = drbd_bm_total_weight(device);
822 bool rs_is_done = (still_to_go <= device->rs_failed);
823 drbd_advance_rs_marks(first_peer_device(device), still_to_go);
825 maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
827 device->rs_failed += count;
828 wake_up(&device->al_wait);
851 struct drbd_device *device = peer_device->device;
861 drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
867 if (!get_ldev(device))
870 nr_sectors = get_capacity(device->vdisk);
873 if (!expect(device, sector < nr_sectors))
875 if (!expect(device, esector < nr_sectors))
897 count = update_sync_bits(device, sbnr, ebnr, mode);
899 put_ldev(device);
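
Before update_sync_bits() can touch the bitmap, __drbd_change_sync() converts the 512-byte sector range of a request into bitmap bits, where one bit covers a 4 KiB block (8 sectors). Clearing bits (marking blocks in sync) must round inward so only fully covered blocks are cleared, while setting out-of-sync rounds outward so every touched block is included. A standalone demonstration of both roundings on an inclusive sector range:

#include <stdio.h>

#define BM_SECT_PER_BIT 8UL		/* one bit covers 4 KiB = 8 sectors */
#define BM_SECT_TO_BIT(s)	((s) >> 3)

int main(void)
{
	/* inclusive sector range touched by some request */
	unsigned long sector = 10, esector = 29;

	/* setting out-of-sync: round outward */
	unsigned long set_s = BM_SECT_TO_BIT(sector);
	unsigned long set_e = BM_SECT_TO_BIT(esector);

	/* clearing (in sync): round inward, full blocks only */
	unsigned long clr_s = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT - 1);
	unsigned long clr_e = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT - 1));

	printf("set bits %lu..%lu\n", set_s, set_e);	/* 1..3 */
	printf("clear bits %lu..%lu\n", clr_s, clr_e);	/* 2..2 */
	return 0;
}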
904 struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
911 spin_lock_irq(&device->al_lock);
912 if (device->resync_locked > device->resync->nr_elements/2) {
913 spin_unlock_irq(&device->al_lock);
916 e = lc_get(device->resync, enr);
920 bm_ext->rs_left = drbd_bm_e_weight(device, enr);
922 lc_committed(device->resync);
926 device->resync_locked++;
929 rs_flags = device->resync->flags;
930 spin_unlock_irq(&device->al_lock);
932 wake_up(&device->al_wait);
936 drbd_warn(device, "Have to wait for element"
944 static int _is_in_al(struct drbd_device *device, unsigned int enr)
948 spin_lock_irq(&device->al_lock);
949 rv = lc_is_used(device->act_log, enr);
950 spin_unlock_irq(&device->al_lock);
957 * @device: DRBD device.
962 int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
970 sig = wait_event_interruptible(device->al_wait,
971 (bm_ext = _bme_get(device, enr)));
979 sa = drbd_rs_c_min_rate_throttle(device);
982 sig = wait_event_interruptible(device->al_wait,
983 !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
987 spin_lock_irq(&device->al_lock);
988 if (lc_put(device->resync, &bm_ext->lce) == 0) {
990 device->resync_locked--;
991 wake_up(&device->al_wait);
993 spin_unlock_irq(&device->al_lock);
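
The activity log and the resync LRU operate at different granularities: an activity-log extent covers 4 MiB while a resync (bitmap) extent covers 16 MiB, so one resync extent spans four AL extents. That is why drbd_rs_begin_io() checks the AL slots enr * AL_EXT_PER_BM_SECT + i above, and why _al_get() divides by AL_EXT_PER_BM_SECT to find the covering resync extent. A quick demonstration of the two mappings (the shift values reflect DRBD's default extent sizes):

#include <stdio.h>

#define AL_EXTENT_SHIFT	22	/* activity-log extent: 4 MiB */
#define BM_EXT_SHIFT	24	/* resync/bitmap extent: 16 MiB */
#define AL_EXT_PER_BM_SECT	(1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))	/* 4 */

int main(void)
{
	unsigned int al_enr = 10;
	unsigned int rs_enr = al_enr / AL_EXT_PER_BM_SECT;

	printf("AL extent %u lies in resync extent %u\n", al_enr, rs_enr);
	printf("resync extent %u covers AL extents %u..%u\n", rs_enr,
	       rs_enr * AL_EXT_PER_BM_SECT,
	       rs_enr * AL_EXT_PER_BM_SECT + AL_EXT_PER_BM_SECT - 1);
	return 0;
}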
1007 * @device: DRBD device.
1016 struct drbd_device *device = peer_device->device;
1029 if (throttle && device->resync_wenr != enr)
1032 spin_lock_irq(&device->al_lock);
1033 if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
1047 e = lc_find(device->resync, device->resync_wenr);
1050 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
1051 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
1053 device->resync_wenr = LC_FREE;
1054 if (lc_put(device->resync, &bm_ext->lce) == 0) {
1056 device->resync_locked--;
1058 wake_up(&device->al_wait);
1060 drbd_alert(device, "LOGIC BUG\n");
1064 e = lc_try_get(device->resync, enr);
1070 device->resync_locked++;
1077 D_ASSERT(device, bm_ext->lce.refcnt > 0);
1082 if (device->resync_locked > device->resync->nr_elements-3)
1085 e = lc_get(device->resync, enr);
1088 const unsigned long rs_flags = device->resync->flags;
1090 drbd_warn(device, "Have to wait for element"
1096 bm_ext->rs_left = drbd_bm_e_weight(device, enr);
1098 lc_committed(device->resync);
1099 wake_up(&device->al_wait);
1100 D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
1103 D_ASSERT(device, bm_ext->lce.refcnt == 1);
1104 device->resync_locked++;
1109 if (lc_is_used(device->act_log, al_enr+i))
1114 device->resync_wenr = LC_FREE;
1115 spin_unlock_irq(&device->al_lock);
1121 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
1122 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
1124 device->resync_wenr = LC_FREE;
1125 if (lc_put(device->resync, &bm_ext->lce) == 0) {
1127 device->resync_locked--;
1129 wake_up(&device->al_wait);
1131 device->resync_wenr = enr;
1133 spin_unlock_irq(&device->al_lock);
1137 void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
1144 spin_lock_irqsave(&device->al_lock, flags);
1145 e = lc_find(device->resync, enr);
1148 spin_unlock_irqrestore(&device->al_lock, flags);
1150 drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
1155 spin_unlock_irqrestore(&device->al_lock, flags);
1156 drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
1162 if (lc_put(device->resync, &bm_ext->lce) == 0) {
1164 device->resync_locked--;
1165 wake_up(&device->al_wait);
1168 spin_unlock_irqrestore(&device->al_lock, flags);
1173 * @device: DRBD device.
1175 void drbd_rs_cancel_all(struct drbd_device *device)
1177 spin_lock_irq(&device->al_lock);
1179 if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
1180 lc_reset(device->resync);
1181 put_ldev(device);
1183 device->resync_locked = 0;
1184 device->resync_wenr = LC_FREE;
1185 spin_unlock_irq(&device->al_lock);
1186 wake_up(&device->al_wait);
1191 * @device: DRBD device.
1196 int drbd_rs_del_all(struct drbd_device *device)
1202 spin_lock_irq(&device->al_lock);
1204 if (get_ldev_if_state(device, D_FAILED)) {
1206 for (i = 0; i < device->resync->nr_elements; i++) {
1207 e = lc_element_by_index(device->resync, i);
1211 if (bm_ext->lce.lc_number == device->resync_wenr) {
1212 drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
1214 device->resync_wenr);
1215 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
1216 D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
1218 device->resync_wenr = LC_FREE;
1219 lc_put(device->resync, &bm_ext->lce);
1222 drbd_info(device, "Retrying drbd_rs_del_all() later. "
1224 put_ldev(device);
1225 spin_unlock_irq(&device->al_lock);
1228 D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
1229 D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
1230 lc_del(device->resync, &bm_ext->lce);
1232 D_ASSERT(device, device->resync->used == 0);
1233 put_ldev(device);
1235 spin_unlock_irq(&device->al_lock);
1236 wake_up(&device->al_wait);