Lines matching refs:device — drbd_worker.c (each entry is prefixed with its line number in the source file)

52 struct drbd_device *device;
54 device = bio->bi_private;
55 device->md_io.error = blk_status_to_errno(bio->bi_status);
58 if (device->ldev)
59 put_ldev(device);
63 * to timeout on the lower level device, and eventually detach from it.
71 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
73 drbd_md_put_buffer(device);
74 device->md_io.done = 1;
75 wake_up(&device->misc_wait);
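
Lines 52-75 above are drbd_md_endio(): the completion handler records the error with blk_status_to_errno(), drops the buffer reference, sets md_io.done and wakes misc_wait, where the submitter of the metadata I/O sleeps. A minimal userspace sketch of that done-flag-plus-wakeup handshake, using POSIX threads (struct and function names here are illustrative, not DRBD's):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct md_io_sim {
        pthread_mutex_t lock;
        pthread_cond_t  done_cv;
        int             done;    /* mirrors device->md_io.done  */
        int             error;   /* mirrors device->md_io.error */
};

/* Completion side: the role drbd_md_endio() plays for the real bio. */
static void *complete_io(void *arg)
{
        struct md_io_sim *io = arg;

        sleep(1);                            /* pretend the disk is working */
        pthread_mutex_lock(&io->lock);
        io->error = 0;                       /* blk_status_to_errno(...)    */
        io->done = 1;                        /* device->md_io.done = 1      */
        pthread_cond_signal(&io->done_cv);   /* wake_up(&misc_wait)         */
        pthread_mutex_unlock(&io->lock);
        return NULL;
}

int main(void)
{
        struct md_io_sim io = {
                .lock    = PTHREAD_MUTEX_INITIALIZER,
                .done_cv = PTHREAD_COND_INITIALIZER,
        };
        pthread_t t;

        pthread_create(&t, NULL, complete_io, &io);

        /* Submitter side: wait until the completion marks us done. */
        pthread_mutex_lock(&io.lock);
        while (!io.done)
                pthread_cond_wait(&io.done_cv, &io.lock);
        pthread_mutex_unlock(&io.lock);

        printf("md io done, error=%d\n", io.error);
        pthread_join(t, NULL);
        return 0;
}
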
85 struct drbd_device *device = peer_device->device;
87 spin_lock_irqsave(&device->resource->req_lock, flags);
88 device->read_cnt += peer_req->i.size >> 9;
90 if (list_empty(&device->read_ee))
91 wake_up(&device->ee_wait);
93 __drbd_chk_io_error(device, DRBD_READ_ERROR);
94 spin_unlock_irqrestore(&device->resource->req_lock, flags);
97 put_ldev(device);
106 struct drbd_device *device = peer_device->device;
126 inc_unacked(device);
127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
130 spin_lock_irqsave(&device->resource->req_lock, flags);
131 device->writ_cnt += peer_req->i.size >> 9;
132 list_move_tail(&peer_req->w.list, &device->done_ee);
142 do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
147 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
150 kref_get(&device->kref); /* put is in drbd_send_acks_wf() */
152 kref_put(&device->kref, drbd_destroy_device);
154 spin_unlock_irqrestore(&device->resource->req_lock, flags);
157 drbd_rs_complete_io(device, i.sector);
160 wake_up(&device->ee_wait);
163 drbd_al_complete_io(device, &i);
165 put_ldev(device);
174 struct drbd_device *device = peer_req->peer_device->device;
180 drbd_warn(device, "%s: error=%d s=%llus\n",
198 drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
201 device->minor, device->resource->name, device->vnr);
210 struct drbd_device *device = req->device;
230 * If later the local backing device "recovers", and now DMAs some data
244 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
247 drbd_panic_after_delayed_completion_of_aborted_request(device);
278 spin_lock_irqsave(&device->resource->req_lock, flags);
280 spin_unlock_irqrestore(&device->resource->req_lock, flags);
281 put_ldev(device);
284 complete_master_bio(device, &m);
347 struct drbd_device *device = peer_device->device;
369 drbd_free_peer_req(device, peer_req);
371 inc_rs_pending(device);
377 drbd_err(device, "kmalloc() of digest failed.\n");
383 drbd_free_peer_req(device, peer_req);
386 drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
394 struct drbd_device *device = peer_device->device;
397 if (!get_ldev(device))
408 spin_lock_irq(&device->resource->req_lock);
409 list_add_tail(&peer_req->w.list, &device->read_ee);
410 spin_unlock_irq(&device->resource->req_lock);
412 atomic_add(size >> 9, &device->rs_sect_ev);
413 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
421 spin_lock_irq(&device->resource->req_lock);
423 spin_unlock_irq(&device->resource->req_lock);
425 drbd_free_peer_req(device, peer_req);
427 put_ldev(device);
433 struct drbd_device *device =
436 switch (device->state.conn) {
438 make_ov_request(device, cancel);
441 make_resync_request(device, cancel);
450 struct drbd_device *device = from_timer(device, t, resync_timer);
453 &first_peer_device(device)->connection->sender_work,
454 &device->resync_work);
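
Lines 433-454 show the resync pacing loop: resync_timer_fn() queues resync_work onto the connection's sender_work, w_resync_timer() issues one batch via make_resync_request() or make_ov_request(), and those re-arm the timer (mod_timer() at lines 763 and 816) for the next SLEEP_TIME tick. A simplified userspace equivalent of the tick/batch/re-arm cycle (batch count and tick length are stand-ins):

#include <stdio.h>
#include <time.h>

#define SLEEP_TIME_MS 100   /* DRBD's SLEEP_TIME is HZ/10, i.e. 100 ms */

/* Stand-in for make_resync_request(): pretend to issue one batch. */
static void make_batch(int tick)
{
        printf("tick %d: issue one batch of resync requests\n", tick);
}

int main(void)
{
        struct timespec tick = { .tv_nsec = SLEEP_TIME_MS * 1000000L };

        for (int i = 0; i < 5; i++) {   /* resync_timer_fn -> w_resync_timer */
                make_batch(i);          /* make_resync_request()             */
                nanosleep(&tick, NULL); /* mod_timer(..., jiffies+SLEEP_TIME)*/
        }
        return 0;
}
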
501 static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
513 dc = rcu_dereference(device->ldev->disk_conf);
514 plan = rcu_dereference(device->rs_plan_s);
518 if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
525 correction = want - device->rs_in_flight - plan->total;
545 drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
546 sect_in, device->rs_in_flight, want, correction,
547 steps, cps, device->rs_planed, curr_corr, req_sect);
553 static int drbd_rs_number_requests(struct drbd_device *device)
558 sect_in = atomic_xchg(&device->rs_sect_in, 0);
559 device->rs_in_flight -= sect_in;
562 mxb = drbd_get_max_buffers(device) / 2;
563 if (rcu_dereference(device->rs_plan_s)->size) {
564 number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9);
565 device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
567 device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
568 number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
582 if (mxb - device->rs_in_flight/8 < number)
583 number = mxb - device->rs_in_flight/8;
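
Lines 501-583 are the dynamic resync-rate controller: on every SLEEP_TIME step it compares the desired amount of resync data in flight (from c_fill_target or c_delay_target) against what is actually in flight plus the corrections already planned, and spreads the difference over a FIFO plan of c_plan_ahead steps, capped at c_max_rate. A runnable userspace simulation of that feedback loop (the fifo_add_val()/fifo_push() semantics follow DRBD's helpers; the parameter values in main() are invented):

#include <stdio.h>

#define STEPS 10         /* plan size: c_plan_ahead in SLEEP_TIME ticks */

static int plan[STEPS];  /* fifo_buffer values (planned corrections) */
static int plan_total;   /* plan->total                              */
static int head;         /* plan->head_index                         */

static void fifo_add_val(int v)          /* add v to every planned step */
{
        for (int i = 0; i < STEPS; i++)
                plan[i] += v;
}

static int fifo_push(int v)              /* pop oldest entry, push v */
{
        int ov = plan[head];

        plan[head] = v;
        head = (head + 1) % STEPS;
        return ov;
}

/*
 * One controller tick, mirroring drbd_rs_controller(): aim to keep
 * `want` sectors in flight, spreading any correction over STEPS ticks
 * and capping the result at max_sect (c_max_rate per tick).
 */
static int rs_controller(int sect_in, int in_flight, int want, int max_sect)
{
        int correction = want - in_flight - plan_total;
        int cps = correction / STEPS;    /* correction per invocation */
        int curr_corr, req_sect;

        fifo_add_val(cps);
        plan_total += cps * STEPS;

        curr_corr = fifo_push(0);        /* take this tick's share */
        plan_total -= curr_corr;

        req_sect = sect_in + curr_corr;
        if (req_sect < 0)
                req_sect = 0;
        if (req_sect > max_sect)
                req_sect = max_sect;
        return req_sect;
}

int main(void)
{
        int want = 2048, max_sect = 4096, completed = 0;

        for (int tick = 0; tick < 8; tick++) {
                /* Assume everything requested last tick has completed,
                 * so in-flight is back to 0 and sect_in == completed. */
                int req = rs_controller(completed, 0, want, max_sect);

                printf("tick %d: request %d sectors\n", tick, req);
                completed = req;
        }
        return 0;
}

The request size ramps up over the first few ticks and settles near the fill target, which is the point of planning the correction ahead instead of applying it all at once.
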
588 static int make_resync_request(struct drbd_device *const device, int cancel)
590 struct drbd_peer_device *const peer_device = first_peer_device(device);
594 const sector_t capacity = get_capacity(device->vdisk);
604 if (device->rs_total == 0) {
606 drbd_resync_finished(device);
610 if (!get_ldev(device)) {
611 /* Since we only need to access device->resync a
612 get_ldev_if_state(device,D_FAILED) would be sufficient, but
615 drbd_err(device, "Disk broke down during resync!\n");
621 discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
625 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
626 number = drbd_rs_number_requests(device);
651 bit = drbd_bm_find_next(device, device->bm_resync_fo);
654 device->bm_resync_fo = drbd_bm_bits(device);
655 put_ldev(device);
661 if (drbd_try_rs_begin_io(device, sector)) {
662 device->bm_resync_fo = bit;
665 device->bm_resync_fo = bit + 1;
667 if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
668 drbd_rs_complete_io(device, sector);
700 if (drbd_bm_test_bit(device, bit+1) != 1)
711 device->bm_resync_fo = bit + 1;
718 if (device->use_csums) {
721 put_ldev(device);
724 drbd_rs_complete_io(device, sector);
725 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
737 inc_rs_pending(device);
742 drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
743 dec_rs_pending(device);
744 put_ldev(device);
750 if (device->bm_resync_fo >= drbd_bm_bits(device)) {
757 put_ldev(device);
762 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
763 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
764 put_ldev(device);
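
make_resync_request() (lines 588-764) scans the out-of-sync bitmap with drbd_bm_find_next() and, in the lines elided between 668 and 711, merges adjacent dirty bits into a single request up to the I/O size limit before advancing bm_resync_fo. A self-contained sketch of that merge scan (the bitmap contents and the size cap are assumptions for the demo):

#include <stdio.h>

#define BM_BLOCK_SIZE 4096            /* one bitmap bit covers 4 KiB */
#define MAX_BIO_SIZE  (128 * 1024)    /* per-request cap, stand-in   */

/* Next set bit at or after `start`, or nbits if none: the role of
 * drbd_bm_find_next(). */
static unsigned long find_next(const unsigned char *bm,
                               unsigned long nbits, unsigned long start)
{
        for (unsigned long b = start; b < nbits; b++)
                if (bm[b / 8] & (1 << (b % 8)))
                        return b;
        return nbits;
}

int main(void)
{
        /* bits 3..6 and 9 dirty: expect one merged request, one small one */
        unsigned char bm[2] = { 0x78, 0x02 };
        unsigned long nbits = 16, bit = 0;

        while ((bit = find_next(bm, nbits, bit)) < nbits) {
                unsigned long first = bit;
                int size = BM_BLOCK_SIZE;

                /* Merge adjacent dirty bits, as make_resync_request()
                 * does around lines 668-711, until a clean bit or the cap. */
                while (bit + 1 < nbits && size < MAX_BIO_SIZE &&
                       (bm[(bit + 1) / 8] & (1 << ((bit + 1) % 8)))) {
                        bit++;
                        size += BM_BLOCK_SIZE;
                }
                printf("request: bits %lu..%lu, %d bytes\n", first, bit, size);
                bit++;   /* device->bm_resync_fo = bit + 1 */
        }
        return 0;
}
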
768 static int make_ov_request(struct drbd_device *device, int cancel)
772 const sector_t capacity = get_capacity(device->vdisk);
778 number = drbd_rs_number_requests(device);
780 sector = device->ov_position;
789 && verify_can_do_stop_sector(device)
790 && sector >= device->ov_stop_sector;
796 if (drbd_try_rs_begin_io(device, sector)) {
797 device->ov_position = sector;
804 inc_rs_pending(device);
805 if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
806 dec_rs_pending(device);
811 device->ov_position = sector;
814 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
816 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
824 struct drbd_device *device = dw->device;
826 ov_out_of_sync_print(device);
827 drbd_resync_finished(device);
836 struct drbd_device *device = dw->device;
839 drbd_resync_finished(device);
844 static void ping_peer(struct drbd_device *device)
846 struct drbd_connection *connection = first_peer_device(device)->connection;
851 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
854 int drbd_resync_finished(struct drbd_device *device)
856 struct drbd_connection *connection = first_peer_device(device)->connection;
867 if (drbd_rs_del_all(device)) {
877 dw->device = device;
881 drbd_err(device, "Failed to drbd_rs_del_all() and to kmalloc(dw).\n");
884 dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
888 db = device->rs_total;
890 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
891 db -= device->ov_left;
894 device->rs_paused /= HZ;
896 if (!get_ldev(device))
899 ping_peer(device);
901 spin_lock_irq(&device->resource->req_lock);
902 os = drbd_read_state(device);
914 drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
916 dt + device->rs_paused, device->rs_paused, dbdt);
918 n_oos = drbd_bm_total_weight(device);
922 drbd_alert(device, "Online verify found %lu %dk blocks out of sync!\n",
927 D_ASSERT(device, (n_oos - device->rs_failed) == 0);
932 if (device->use_csums && device->rs_total) {
933 const unsigned long s = device->rs_same_csum;
934 const unsigned long t = device->rs_total;
938 drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
941 Bit2KB(device->rs_same_csum),
942 Bit2KB(device->rs_total - device->rs_same_csum),
943 Bit2KB(device->rs_total));
947 if (device->rs_failed) {
948 drbd_info(device, " %lu failed blocks\n", device->rs_failed);
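
The statistics printed by drbd_resync_finished() (lines 884-948) come from simple bookkeeping: dt is wall time minus paused time in seconds, db is the resynced bitmap weight in bits, and Bit2KB() shifts by BM_BLOCK_SHIFT - 10 because each bitmap bit covers one 4 KiB block. A worked example of the K/sec figure (the numbers are invented):

#include <stdio.h>

#define BM_BLOCK_SHIFT 12                     /* 4 KiB per bitmap bit */
#define Bit2KB(bits)   ((bits) << (BM_BLOCK_SHIFT - 10))

int main(void)
{
        unsigned long dt = 120;               /* (jiffies-rs_start-rs_paused)/HZ */
        unsigned long db = 3 * 1024 * 1024;   /* rs_total, in 4 KiB bits         */
        unsigned long dbdt = Bit2KB(db / dt); /* resync speed in KiB/s           */

        printf("synced %lu MiB in %lu s -> %lu K/sec\n",
               Bit2KB(db) / 1024, dt, dbdt);
        return 0;
}

With these inputs: 3 Mi bits is 12 GiB of data, 26214 bits/s times 4 gives roughly 104856 K/sec, about 100 MiB/s.
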
962 if (device->p_uuid) {
965 _drbd_uuid_set(device, i, device->p_uuid[i]);
966 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
967 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
969 drbd_err(device, "device->p_uuid is NULL! BUG\n");
976 drbd_uuid_set_bm(device, 0UL);
977 drbd_print_uuids(device, "updated UUIDs");
978 if (device->p_uuid) {
983 device->p_uuid[i] = device->ldev->md.uuid[i];
988 _drbd_set_state(device, ns, CS_VERBOSE, NULL);
990 spin_unlock_irq(&device->resource->req_lock);
1000 fp = rcu_dereference(device->ldev->disk_conf)->fencing;
1005 struct drbd_device *device = peer_device->device;
1006 disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
1007 pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
1015 put_ldev(device);
1017 device->rs_total = 0;
1018 device->rs_failed = 0;
1019 device->rs_paused = 0;
1021 /* reset start sector, if we reached end of device */
1022 if (verify_done && device->ov_left == 0)
1023 device->ov_start_sector = 0;
1025 drbd_md_sync(device);
1028 drbd_khelper(device, khelper_cmd);
1034 static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
1039 atomic_add(i, &device->pp_in_use_by_net);
1040 atomic_sub(i, &device->pp_in_use);
1041 spin_lock_irq(&device->resource->req_lock);
1042 list_add_tail(&peer_req->w.list, &device->net_ee);
1043 spin_unlock_irq(&device->resource->req_lock);
1046 drbd_free_peer_req(device, peer_req);
1058 struct drbd_device *device = peer_device->device;
1062 drbd_free_peer_req(device, peer_req);
1063 dec_unacked(device);
1071 drbd_err(device, "Sending NegDReply. sector=%llus.\n",
1077 dec_unacked(device);
1079 move_to_net_ee_or_free(device, peer_req);
1082 drbd_err(device, "drbd_send_block() failed\n");
1119 struct drbd_device *device = peer_device->device;
1123 drbd_free_peer_req(device, peer_req);
1124 dec_unacked(device);
1128 if (get_ldev_if_state(device, D_FAILED)) {
1129 drbd_rs_complete_io(device, peer_req->i.sector);
1130 put_ldev(device);
1133 if (device->state.conn == C_AHEAD) {
1136 if (likely(device->state.pdsk >= D_INCONSISTENT)) {
1137 inc_rs_pending(device);
1144 drbd_err(device, "Not sending RSDataReply, "
1150 drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
1156 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
1159 dec_unacked(device);
1161 move_to_net_ee_or_free(device, peer_req);
1164 drbd_err(device, "drbd_send_block() failed\n");
1172 struct drbd_device *device = peer_device->device;
1179 drbd_free_peer_req(device, peer_req);
1180 dec_unacked(device);
1184 if (get_ldev(device)) {
1185 drbd_rs_complete_io(device, peer_req->i.sector);
1186 put_ldev(device);
1197 D_ASSERT(device, digest_size == di->digest_size);
1207 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
1209 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
1212 inc_rs_pending(device);
1221 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
1224 dec_unacked(device);
1225 move_to_net_ee_or_free(device, peer_req);
1228 drbd_err(device, "drbd_send_block/ack() failed\n");
1236 struct drbd_device *device = peer_device->device;
1263 drbd_free_peer_req(device, peer_req);
1265 inc_rs_pending(device);
1268 dec_rs_pending(device);
1273 drbd_free_peer_req(device, peer_req);
1274 dec_unacked(device);
1278 void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
1280 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
1281 device->ov_last_oos_size += size>>9;
1283 device->ov_last_oos_start = sector;
1284 device->ov_last_oos_size = size>>9;
1286 drbd_set_out_of_sync(device, sector, size);
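
drbd_ov_out_of_sync_found() (lines 1278-1286) coalesces consecutive out-of-sync blocks into one run: if the new block starts exactly where the previous run ends, the run grows; otherwise a new run starts, and the caller reports the finished one via ov_out_of_sync_print() (lines 1338, 1355). A standalone sketch that folds the print into the run-break path (names mirror DRBD's; the zero-size guard is an addition for the demo):

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t oos_start;    /* device->ov_last_oos_start          */
static sector_t oos_size;     /* device->ov_last_oos_size (sectors) */

static void oos_print(void)   /* the role of ov_out_of_sync_print() */
{
        if (oos_size)
                printf("Out of sync: start=%llu, size=%llu (sectors)\n",
                       oos_start, oos_size);
        oos_size = 0;
}

/* Extend the current run if contiguous, else report it and start a
 * new one. `size` is in bytes, hence the >> 9 to sectors. */
static void oos_found(sector_t sector, int size)
{
        if (oos_size && oos_start + oos_size == sector) {
                oos_size += size >> 9;
        } else {
                oos_print();
                oos_start = sector;
                oos_size = size >> 9;
        }
}

int main(void)
{
        oos_found(1000, 4096);   /* 8 sectors: run = [1000, +8)   */
        oos_found(1008, 4096);   /* contiguous: run grows to +16  */
        oos_found(5000, 4096);   /* gap: first run gets reported  */
        oos_print();             /* flush the last run            */
        return 0;
}
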
1293 struct drbd_device *device = peer_device->device;
1303 drbd_free_peer_req(device, peer_req);
1304 dec_unacked(device);
1310 if (get_ldev(device)) {
1311 drbd_rs_complete_io(device, peer_req->i.sector);
1312 put_ldev(device);
1323 D_ASSERT(device, digest_size == di->digest_size);
1334 drbd_free_peer_req(device, peer_req);
1336 drbd_ov_out_of_sync_found(device, sector, size);
1338 ov_out_of_sync_print(device);
1343 dec_unacked(device);
1345 --device->ov_left;
1348 if ((device->ov_left & 0x200) == 0x200)
1349 drbd_advance_rs_marks(device, device->ov_left);
1351 stop_sector_reached = verify_can_do_stop_sector(device) &&
1352 (sector + (size>>9)) >= device->ov_stop_sector;
1354 if (device->ov_left == 0 || stop_sector_reached) {
1355 ov_out_of_sync_print(device);
1356 drbd_resync_finished(device);
1394 struct drbd_device *device =
1399 return pd_send_unplug_remote(first_peer_device(device));
1427 struct drbd_device *device = req->device;
1428 struct drbd_peer_device *const peer_device = first_peer_device(device);
1458 struct drbd_device *device = req->device;
1459 struct drbd_peer_device *const peer_device = first_peer_device(device);
1491 struct drbd_device *device = req->device;
1492 struct drbd_peer_device *const peer_device = first_peer_device(device);
1521 struct drbd_device *device = req->device;
1524 drbd_al_begin_io(device, &req->i);
1527 bio_set_dev(req->private_bio, device->ldev->backing_bdev);
1533 static int _drbd_may_sync_now(struct drbd_device *device)
1535 struct drbd_device *odev = device;
1559 * @device: DRBD device.
1563 static bool drbd_pause_after(struct drbd_device *device)
1585 * @device: DRBD device.
1589 static bool drbd_resume_next(struct drbd_device *device)
1610 void resume_next_sg(struct drbd_device *device)
1613 drbd_resume_next(device);
1617 void suspend_other_sg(struct drbd_device *device)
1620 drbd_pause_after(device);
1625 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
1638 if (odev == device)
1663 void drbd_resync_after_changed(struct drbd_device *device)
1668 changed = drbd_pause_after(device);
1669 changed |= drbd_resume_next(device);
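
Lines 1533-1669 implement resync-after ordering: a device may be configured to resync only after another minor, _drbd_may_sync_now() walks that dependency chain, and drbd_resync_after_valid() (line 1625) has to reject configurations whose chain loops back to the device itself, since such a cycle would pause every device in it forever. A sketch of that cycle check (the array-of-minors representation is an assumption; DRBD walks device structs):

#include <stdio.h>

#define NR_DEVICES 4
#define NONE       (-1)

/* resync_after[i] = minor that device i must resync after, or NONE */
static int resync_after[NR_DEVICES] = { NONE, 0, 1, NONE };

/* Would setting device->resync_after = o_minor create a dependency
 * cycle? This is the property drbd_resync_after_valid() must check. */
static int creates_cycle(int device, int o_minor)
{
        for (int odev = o_minor; odev != NONE; odev = resync_after[odev])
                if (odev == device)   /* chain leads back to us */
                        return 1;
        return 0;
}

int main(void)
{
        /* device 3 after 2 is fine: 2 -> 1 -> 0 -> NONE */
        printf("3 after 2: %s\n", creates_cycle(3, 2) ? "cycle" : "ok");
        /* device 0 after 2 loops: 2 -> 1 -> 0 == 0 */
        printf("0 after 2: %s\n", creates_cycle(0, 2) ? "cycle" : "ok");
        return 0;
}
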
1673 void drbd_rs_controller_reset(struct drbd_device *device)
1675 struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
1678 atomic_set(&device->rs_sect_in, 0);
1679 atomic_set(&device->rs_sect_ev, 0);
1680 device->rs_in_flight = 0;
1681 device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
1688 plan = rcu_dereference(device->rs_plan_s);
1696 struct drbd_device *device = from_timer(device, t, start_resync_timer);
1697 drbd_device_post_work(device, RS_START);
1700 static void do_start_resync(struct drbd_device *device)
1702 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1703 drbd_warn(device, "postponing start_resync ...\n");
1704 device->start_resync_timer.expires = jiffies + HZ/10;
1705 add_timer(&device->start_resync_timer);
1709 drbd_start_resync(device, C_SYNC_SOURCE);
1710 clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
1713 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
1722 || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
1727 * @device: DRBD device.
1733 void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1735 struct drbd_peer_device *peer_device = first_peer_device(device);
1740 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
1741 drbd_err(device, "Resync already running!\n");
1746 drbd_err(device, "No connection to peer, aborting!\n");
1750 if (!test_bit(B_RS_H_DONE, &device->flags)) {
1755 r = drbd_khelper(device, "before-resync-target");
1758 drbd_info(device, "before-resync-target handler returned %d, "
1764 r = drbd_khelper(device, "before-resync-source");
1768 drbd_info(device, "before-resync-source handler returned %d, "
1771 drbd_info(device, "before-resync-source handler returned %d, "
1784 if (!mutex_trylock(device->state_mutex)) {
1785 set_bit(B_RS_H_DONE, &device->flags);
1786 device->start_resync_timer.expires = jiffies + HZ/5;
1787 add_timer(&device->start_resync_timer);
1791 mutex_lock(device->state_mutex);
1795 clear_bit(B_RS_H_DONE, &device->flags);
1797 if (device->state.conn < C_CONNECTED
1798 || !get_ldev_if_state(device, D_NEGOTIATING)) {
1803 ns = drbd_read_state(device);
1805 ns.aftr_isp = !_drbd_may_sync_now(device);
1814 r = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1815 ns = drbd_read_state(device);
1821 unsigned long tw = drbd_bm_total_weight(device);
1825 device->rs_failed = 0;
1826 device->rs_paused = 0;
1827 device->rs_same_csum = 0;
1828 device->rs_last_sect_ev = 0;
1829 device->rs_total = tw;
1830 device->rs_start = now;
1832 device->rs_mark_left[i] = tw;
1833 device->rs_mark_time[i] = now;
1835 drbd_pause_after(device);
1837 * Open coded drbd_rs_cancel_all(device), we already have IRQs
1839 spin_lock(&device->al_lock);
1840 lc_reset(device->resync);
1841 device->resync_locked = 0;
1842 device->resync_wenr = LC_FREE;
1843 spin_unlock(&device->al_lock);
1848 wake_up(&device->al_wait); /* for lc_reset() above */
1851 device->rs_last_bcast = jiffies - HZ;
1853 drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1855 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
1856 (unsigned long) device->rs_total);
1858 device->bm_resync_fo = 0;
1859 device->use_csums = use_checksum_based_resync(connection, device);
1861 device->use_csums = false;
1874 if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
1895 drbd_resync_finished(device);
1898 drbd_rs_controller_reset(device);
1899 /* ns.conn may already be != device->state.conn,
1904 mod_timer(&device->resync_timer, jiffies);
1906 drbd_md_sync(device);
1908 put_ldev(device);
1910 mutex_unlock(device->state_mutex);
1913 static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done)
1916 device->rs_last_bcast = jiffies;
1918 if (!get_ldev(device))
1921 drbd_bm_write_lazy(device, 0);
1922 if (resync_done && is_sync_state(device->state.conn))
1923 drbd_resync_finished(device);
1925 drbd_bcast_event(device, &sib);
1927 device->rs_last_bcast = jiffies;
1928 put_ldev(device);
1931 static void drbd_ldev_destroy(struct drbd_device *device)
1933 lc_destroy(device->resync);
1934 device->resync = NULL;
1935 lc_destroy(device->act_log);
1936 device->act_log = NULL;
1939 drbd_backing_dev_free(device, device->ldev);
1940 device->ldev = NULL;
1943 clear_bit(GOING_DISKLESS, &device->flags);
1944 wake_up(&device->misc_wait);
1947 static void go_diskless(struct drbd_device *device)
1949 D_ASSERT(device, device->state.disk == D_FAILED);
1968 if (device->bitmap && device->ldev) {
1973 if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
1975 if (test_bit(WAS_READ_ERROR, &device->flags)) {
1976 drbd_md_set_flag(device, MDF_FULL_SYNC);
1977 drbd_md_sync(device);
1982 drbd_force_state(device, NS(disk, D_DISKLESS));
1985 static int do_md_sync(struct drbd_device *device)
1987 drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
1988 drbd_md_sync(device);
2015 static void do_device_work(struct drbd_device *device, const unsigned long todo)
2018 do_md_sync(device);
2021 update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
2023 go_diskless(device);
2025 drbd_ldev_destroy(device);
2027 do_start_resync(device);
2056 struct drbd_device *device = peer_device->device;
2057 unsigned long todo = get_work_bits(&device->flags);
2061 kref_get(&device->kref);
2063 do_device_work(device, todo);
2064 kref_put(&device->kref, drbd_destroy_device);
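
Lines 2015-2064 are the device-work dispatcher: pending jobs are encoded as bits in device->flags, fetched and cleared atomically by get_work_bits(), then dispatched one handler per bit while a kref pins the device for the duration. A userspace sketch of the fetch-and-clear dispatch using C11 atomics (the bit names mimic DRBD's; the handlers are print stubs):

#include <stdatomic.h>
#include <stdio.h>

enum { MD_SYNC, RS_START, RS_PROGRESS, GO_DISKLESS };   /* work bits */
#define WORK_MASK ((1UL << MD_SYNC) | (1UL << RS_START) | \
                   (1UL << RS_PROGRESS) | (1UL << GO_DISKLESS))

static atomic_ulong flags;

/* get_work_bits(): atomically grab and clear all pending work bits. */
static unsigned long get_work_bits(void)
{
        return atomic_fetch_and(&flags, ~WORK_MASK) & WORK_MASK;
}

static void do_device_work(unsigned long todo)
{
        if (todo & (1UL << MD_SYNC))     printf("do_md_sync()\n");
        if (todo & (1UL << RS_START))    printf("do_start_resync()\n");
        if (todo & (1UL << RS_PROGRESS)) printf("update_on_disk_bitmap()\n");
        if (todo & (1UL << GO_DISKLESS)) printf("go_diskless()\n");
}

int main(void)
{
        atomic_fetch_or(&flags, (1UL << MD_SYNC) | (1UL << RS_START));

        unsigned long todo = get_work_bits();
        if (todo)           /* kref_get(); ... work ...; kref_put() */
                do_device_work(todo);
        return 0;
}

Clearing all bits in one atomic operation is what lets new work be posted concurrently without being lost: anything set after the fetch simply survives into the next pass.
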
2223 struct drbd_device *device = peer_device->device;
2224 D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
2225 kref_get(&device->kref);
2227 drbd_device_cleanup(device);
2228 kref_put(&device->kref, drbd_destroy_device);