Lines matching refs:device
146 static struct page *__drbd_alloc_pages(struct drbd_device *device,
192 static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
202 list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
209 static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
214 spin_lock_irq(&device->resource->req_lock);
215 reclaim_finished_net_peer_reqs(device, &reclaimed);
216 spin_unlock_irq(&device->resource->req_lock);
218 drbd_free_net_peer_req(device, peer_req);
228 struct drbd_device *device = peer_device->device;
229 if (!atomic_read(&device->pp_in_use_by_net))
232 kref_get(&device->kref);
234 drbd_reclaim_net_peer_reqs(device);
235 kref_put(&device->kref, drbd_destroy_device);
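
The reclaim path above pins the device with kref_get()/kref_put() so it cannot be destroyed while the worker touches it. A minimal sketch of that standard kernel refcounting idiom, using hypothetical names (my_dev and my_dev_release are not DRBD identifiers):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_dev {
            struct kref kref;
            /* ... device state ... */
    };

    /* Called by kref_put() once the last reference is dropped. */
    static void my_dev_release(struct kref *kref)
    {
            struct my_dev *dev = container_of(kref, struct my_dev, kref);
            kfree(dev);
    }

    static void my_dev_do_work(struct my_dev *dev)
    {
            kref_get(&dev->kref);   /* pin across the critical region */
            /* ... use dev; it cannot be freed under us ... */
            kref_put(&dev->kref, my_dev_release);
    }
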
243 * @device: DRBD device.
264 struct drbd_device *device = peer_device->device;
275 if (atomic_read(&device->pp_in_use) < mxb)
276 page = __drbd_alloc_pages(device, number);
280 if (page && atomic_read(&device->pp_in_use_by_net) > 512)
281 drbd_reclaim_net_peer_reqs(device);
286 drbd_reclaim_net_peer_reqs(device);
288 if (atomic_read(&device->pp_in_use) < mxb) {
289 page = __drbd_alloc_pages(device, number);
298 drbd_warn(device, "drbd_alloc_pages interrupted!\n");
308 atomic_add(number, &device->pp_in_use);
316 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
318 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
336 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
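
drbd_free_pages() above decrements either pp_in_use or pp_in_use_by_net and warns on underflow. A sketch of that accounting check, assuming number is the count of pages just freed (likely close to the original, not verbatim):

    atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
    int i = atomic_sub_return(number, a);

    if (i < 0)
            drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
                      is_net ? "pp_in_use_by_net" : "pp_in_use", i);
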
362 struct drbd_device *device = peer_device->device;
367 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
373 drbd_err(device, "%s: allocation failed\n", __func__);
405 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
411 drbd_free_pages(device, peer_req->pages, is_net);
412 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
413 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
416 drbd_al_complete_io(device, &peer_req->i);
421 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
426 int is_net = list == &device->net_ee;
428 spin_lock_irq(&device->resource->req_lock);
430 spin_unlock_irq(&device->resource->req_lock);
433 __drbd_free_peer_req(device, peer_req, is_net);
442 static int drbd_finish_peer_reqs(struct drbd_device *device)
449 spin_lock_irq(&device->resource->req_lock);
450 reclaim_finished_net_peer_reqs(device, &reclaimed);
451 list_splice_init(&device->done_ee, &work_list);
452 spin_unlock_irq(&device->resource->req_lock);
455 drbd_free_net_peer_req(device, peer_req);
468 drbd_free_peer_req(device, peer_req);
470 wake_up(&device->ee_wait);
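
drbd_finish_peer_reqs() uses the classic "splice under the lock, process outside it" list idiom: done_ee is emptied into a private list while req_lock is held, and the potentially slow completion callbacks then run without the lock. A generic sketch of that pattern:

    struct drbd_peer_request *peer_req, *t;
    LIST_HEAD(work_list);
    int err = 0;

    spin_lock_irq(&device->resource->req_lock);
    list_splice_init(&device->done_ee, &work_list);
    spin_unlock_irq(&device->resource->req_lock);

    list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
            /* the callback may sleep or take other locks; safe here */
            int err2 = peer_req->w.cb(&peer_req->w, !!err);
            if (!err)
                    err = err2;     /* remember the first error */
            drbd_free_peer_req(device, peer_req);
    }
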
475 static void _drbd_wait_ee_list_empty(struct drbd_device *device,
483 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
484 spin_unlock_irq(&device->resource->req_lock);
486 finish_wait(&device->ee_wait, &wait);
487 spin_lock_irq(&device->resource->req_lock);
491 static void drbd_wait_ee_list_empty(struct drbd_device *device,
494 spin_lock_irq(&device->resource->req_lock);
495 _drbd_wait_ee_list_empty(device, head);
496 spin_unlock_irq(&device->resource->req_lock);
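
_drbd_wait_ee_list_empty() is the textbook wait-queue loop: register on the wait queue, drop the spinlock, sleep, re-take the lock, re-check. A sketch of the body consistent with the fragments above (the sleep via io_schedule() is an assumption, not verbatim):

    DEFINE_WAIT(wait);

    /* caller holds device->resource->req_lock */
    while (!list_empty(head)) {
            prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
            spin_unlock_irq(&device->resource->req_lock);
            io_schedule();
            finish_wait(&device->ee_wait, &wait);
            spin_lock_irq(&device->resource->req_lock);
    }
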
890 struct drbd_device *device = peer_device->device;
893 atomic_set(&device->packet_seq, 0);
894 device->peer_seq = 0;
896 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
898 &device->own_state_mutex;
907 clear_bit(USE_DEGR_WFC_T, &device->flags);
908 clear_bit(RESIZE_PENDING, &device->flags);
909 atomic_set(&device->ap_in_flight, 0);
910 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
1066 /* drbd_request_state(device, NS(conn, WFAuth)); */
1091 mutex_lock(peer_device->device->state_mutex);
1099 mutex_unlock(peer_device->device->state_mutex);
1103 struct drbd_device *device = peer_device->device;
1104 kref_get(&device->kref);
1108 set_bit(DISCARD_MY_DATA, &device->flags);
1110 clear_bit(DISCARD_MY_DATA, &device->flags);
1113 kref_put(&device->kref, drbd_destroy_device);
1253 struct drbd_device *device;
1260 struct drbd_device *device = octx->device;
1265 drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
1270 clear_bit(FLUSH_PENDING, &device->flags);
1271 put_ldev(device);
1272 kref_put(&device->kref, drbd_destroy_device);
1278 static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
1283 drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
1292 put_ldev(device);
1293 kref_put(&device->kref, drbd_destroy_device);
1297 octx->device = device;
1299 bio_set_dev(bio, device->ldev->backing_bdev);
1304 device->flush_jif = jiffies;
1305 set_bit(FLUSH_PENDING, &device->flags);
1323 struct drbd_device *device = peer_device->device;
1325 if (!get_ldev(device))
1327 kref_get(&device->kref);
1330 submit_one_flush(device, &ctx);
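
submit_one_flush() allocates an empty bio, points it at the backing device and issues a pure flush; its completion handler (one_flush_endio, fragments above) clears FLUSH_PENDING and drops the ldev and kref references. A sketch of the submission path, assuming the two-argument bio_alloc() contemporary with this code (newer kernels pass the bdev and opf to bio_alloc() directly):

    struct bio *bio = bio_alloc(GFP_NOIO, 0);      /* zero data pages: flush only */

    if (!bio) {
            /* see the "CANNOT ISSUE FLUSH" warning above */
            return;
    }
    octx->device = device;
    bio->bi_private = octx;                        /* per-flush context, freed in endio */
    bio->bi_end_io = one_flush_endio;
    bio_set_dev(bio, device->ldev->backing_bdev);
    bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;

    device->flush_jif = jiffies;
    set_bit(FLUSH_PENDING, &device->flags);
    submit_bio(bio);
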
1353 * @device: DRBD device.
1450 struct drbd_device *device;
1463 idr_for_each_entry(&resource->devices, device, vnr) {
1464 if (get_ldev(device)) {
1465 wo = max_allowed_wo(device->ldev, wo);
1466 if (device->ldev == bdev)
1468 put_ldev(device);
1512 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
1514 struct block_device *bdev = device->ldev->backing_bdev;
1577 static bool can_do_reliable_discards(struct drbd_device *device)
1579 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
1587 dc = rcu_dereference(device->ldev->disk_conf);
1593 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1599 if (!can_do_reliable_discards(device))
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1608 static void drbd_issue_peer_wsame(struct drbd_device *device,
1611 struct block_device *bdev = device->ldev->backing_bdev;
1622 * @device: DRBD device.
1637 int drbd_submit_peer_request(struct drbd_device *device,
1669 spin_lock_irq(&device->resource->req_lock);
1670 list_add_tail(&peer_req->w.list, &device->active_ee);
1671 spin_unlock_irq(&device->resource->req_lock);
1675 drbd_issue_peer_discard_or_zero_out(device, peer_req);
1677 drbd_issue_peer_wsame(device, peer_req);
1692 drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
1697 bio_set_dev(bio, device->ldev->backing_bdev);
1714 D_ASSERT(device, data_size == 0);
1715 D_ASSERT(device, page == NULL);
1726 drbd_submit_bio_noacct(device, fault_type, bio);
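
drbd_submit_peer_request() turns the peer request's page chain into one or more bios against the backing device; when bio_add_page() refuses a page because the current bio is full, the code submits what it has and continues with a fresh bio. A simplified single-bio sketch (page_chain_for_each is DRBD's page-chain iterator; error handling and multi-bio chaining are omitted):

    bio = bio_alloc(GFP_NOIO, nr_pages);
    bio->bi_iter.bi_sector = sector;
    bio_set_dev(bio, device->ldev->backing_bdev);
    bio->bi_opf = op | op_flags;            /* REQ_OP_* lives in the low bits */

    page_chain_for_each(page) {
            unsigned int len = min_t(unsigned int, data_size, PAGE_SIZE);

            if (!bio_add_page(bio, page, len, 0)) {
                    /* bio full: submit it and start a new one */
            }
            data_size -= len;
    }
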
1739 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1744 drbd_remove_interval(&device->write_requests, i);
1749 wake_up(&device->misc_wait);
1759 struct drbd_device *device = peer_device->device;
1761 kref_get(&device->kref);
1763 drbd_wait_ee_list_empty(device, &device->active_ee);
1764 kref_put(&device->kref, drbd_destroy_device);
1777 * not a specific (peer)device.
1862 struct drbd_device *device = peer_device->device;
1863 const sector_t capacity = get_capacity(device->vdisk);
1899 if (data_size != queue_logical_block_size(device->rq_queue)) {
1901 data_size, queue_logical_block_size(device->rq_queue));
1904 if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
1906 data_size, bdev_logical_block_size(device->ldev->backing_bdev));
1923 drbd_err(device, "request from peer beyond end of local disk: "
1956 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1957 drbd_err(device, "Fault injection: Corrupting data on receive\n");
1962 drbd_free_peer_req(device, peer_req);
1971 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1973 drbd_free_peer_req(device, peer_req);
1977 device->recv_cnt += data_size >> 9;
2005 drbd_free_pages(peer_device->device, page, 0);
2030 peer_device->device->recv_cnt += data_size>>9;
2033 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
2053 D_ASSERT(peer_device->device, data_size == 0);
2066 struct drbd_device *device = peer_device->device;
2070 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2073 drbd_set_in_sync(device, sector, peer_req->i.size);
2077 drbd_rs_failed_io(device, sector, peer_req->i.size);
2081 dec_unacked(device);
2089 struct drbd_device *device = peer_device->device;
2096 dec_rs_pending(device);
2098 inc_unacked(device);
2105 spin_lock_irq(&device->resource->req_lock);
2106 list_add_tail(&peer_req->w.list, &device->sync_ee);
2107 spin_unlock_irq(&device->resource->req_lock);
2109 atomic_add(pi->size >> 9, &device->rs_sect_ev);
2110 if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
2115 drbd_err(device, "submit failed, triggering re-connect\n");
2116 spin_lock_irq(&device->resource->req_lock);
2118 spin_unlock_irq(&device->resource->req_lock);
2120 drbd_free_peer_req(device, peer_req);
2122 put_ldev(device);
2127 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2137 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
2146 struct drbd_device *device;
2155 device = peer_device->device;
2159 spin_lock_irq(&device->resource->req_lock);
2160 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2161 spin_unlock_irq(&device->resource->req_lock);
2181 struct drbd_device *device;
2189 device = peer_device->device;
2192 D_ASSERT(device, p->block_id == ID_SYNCER);
2194 if (get_ldev(device)) {
2201 drbd_err(device, "Can not write resync data to local disk.\n");
2208 atomic_add(pi->size >> 9, &device->rs_sect_in);
2213 static void restart_conflicting_writes(struct drbd_device *device,
2219 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2240 struct drbd_device *device = peer_device->device;
2246 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2247 device->state.conn <= C_PAUSED_SYNC_T &&
2252 drbd_set_in_sync(device, sector, peer_req->i.size);
2258 dec_unacked(device);
2264 spin_lock_irq(&device->resource->req_lock);
2265 D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2266 drbd_remove_epoch_entry_interval(device, peer_req);
2268 restart_conflicting_writes(device, sector, peer_req->i.size);
2269 spin_unlock_irq(&device->resource->req_lock);
2271 D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2286 dec_unacked(peer_device->device);
2323 struct drbd_device *device = peer_device->device;
2327 spin_lock(&device->peer_seq_lock);
2328 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2329 device->peer_seq = newest_peer_seq;
2330 spin_unlock(&device->peer_seq_lock);
2331 /* wake up only if we actually changed device->peer_seq */
2333 wake_up(&device->seq_wait);
2343 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2348 spin_lock_irq(&device->resource->req_lock);
2349 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
2356 spin_unlock_irq(&device->resource->req_lock);
2370 * In case packet_seq is larger than device->peer_seq number, there are
2372 * In case we are the logically next packet, we update device->peer_seq
2384 struct drbd_device *device = peer_device->device;
2392 spin_lock(&device->peer_seq_lock);
2394 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2395 device->peer_seq = seq_max(device->peer_seq, peer_seq);
2412 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2413 spin_unlock(&device->peer_seq_lock);
2418 spin_lock(&device->peer_seq_lock);
2421 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2425 spin_unlock(&device->peer_seq_lock);
2426 finish_wait(&device->seq_wait, &wait);
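
The peer_seq handling above must stay correct across 32-bit wraparound, so ordering is tested with the usual signed-difference trick (serial-number arithmetic). A sketch consistent with the seq_greater()/seq_max() uses above:

    static inline bool seq_greater(u32 a, u32 b)
    {
            /* the signed difference makes the comparison wrap-safe */
            return (s32)a - (s32)b > 0;
    }

    static inline u32 seq_max(u32 a, u32 b)
    {
            return seq_greater(a, b) ? a : b;
    }
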
2452 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2458 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2469 spin_unlock_irq(&device->resource->req_lock);
2471 complete_master_bio(device, &m);
2472 spin_lock_irq(&device->resource->req_lock);
2477 static int handle_write_conflicts(struct drbd_device *device,
2492 drbd_insert_interval(&device->write_requests, &peer_req->i);
2495 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2507 err = drbd_wait_misc(device, i);
2525 drbd_alert(device, "Concurrent writes detected: "
2534 list_add_tail(&peer_req->w.list, &device->done_ee);
2544 drbd_alert(device, "Concurrent writes detected: "
2562 err = drbd_wait_misc(device, &req->i);
2565 fail_postponed_requests(device, sector, size);
2581 drbd_remove_epoch_entry_interval(device, peer_req);
2589 struct drbd_device *device;
2602 device = peer_device->device;
2604 if (!get_ldev(device)) {
2625 put_ldev(device);
2652 D_ASSERT(device, peer_req->i.size == 0);
2653 D_ASSERT(device, dp_flags & DP_FLUSH);
2682 inc_unacked(device);
2695 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2700 spin_lock_irq(&device->resource->req_lock);
2701 err = handle_write_conflicts(device, peer_req);
2703 spin_unlock_irq(&device->resource->req_lock);
2705 put_ldev(device);
2712 spin_lock_irq(&device->resource->req_lock);
2719 list_add_tail(&peer_req->w.list, &device->active_ee);
2720 spin_unlock_irq(&device->resource->req_lock);
2722 if (device->state.conn == C_SYNC_TARGET)
2723 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2725 if (device->state.pdsk < D_INCONSISTENT) {
2727 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2729 drbd_al_begin_io(device, &peer_req->i);
2733 err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2739 drbd_err(device, "submit failed, triggering re-connect\n");
2740 spin_lock_irq(&device->resource->req_lock);
2742 drbd_remove_epoch_entry_interval(device, peer_req);
2743 spin_unlock_irq(&device->resource->req_lock);
2746 drbd_al_complete_io(device, &peer_req->i);
2751 put_ldev(device);
2752 drbd_free_peer_req(device, peer_req);
2756 /* We may throttle resync, if the lower device seems to be busy,
2759 * To decide whether or not the lower device is busy, we use a scheme similar
2767 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2771 bool throttle = drbd_rs_c_min_rate_throttle(device);
2776 spin_lock_irq(&device->al_lock);
2777 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2785 spin_unlock_irq(&device->al_lock);
2790 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2792 struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
2798 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2806 atomic_read(&device->rs_sect_ev);
2808 if (atomic_read(&device->ap_actlog_cnt)
2809 || curr_events - device->rs_last_events > 64) {
2813 device->rs_last_events = curr_events;
2817 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2819 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2820 rs_left = device->ov_left;
2822 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2824 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2827 db = device->rs_mark_left[i] - rs_left;
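
The throttle decision above reduces to a rate estimate: bitmap bits resynced since the last sync mark, divided by the elapsed time, compared against the configured c_min_rate. A sketch of that closing computation (Bit2KB is DRBD's bits-to-KiB helper; likely close to the original, not verbatim):

    dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
    if (!dt)
            dt++;                                   /* avoid division by zero */
    db = device->rs_mark_left[i] - rs_left;         /* bits synced since the mark */
    dbdt = Bit2KB(db / dt);                         /* estimated resync rate, KiB/s */

    if (dbdt > c_min_rate)
            throttle = true;    /* resync already above the guaranteed minimum;
                                 * yield to application I/O */
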
2839 struct drbd_device *device;
2851 device = peer_device->device;
2852 capacity = get_capacity(device->vdisk);
2858 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2863 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2868 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2882 dec_rs_pending(device);
2889 drbd_err(device, "Can not satisfy peer's read request, "
2902 put_ldev(device);
2925 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2945 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2948 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2950 device->use_csums = true;
2953 atomic_add(size >> 9, &device->rs_sect_in);
2955 dec_rs_pending(device);
2963 if (device->ov_start_sector == ~(sector_t)0 &&
2967 device->ov_start_sector = sector;
2968 device->ov_position = sector;
2969 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2970 device->rs_total = device->ov_left;
2972 device->rs_mark_left[i] = device->ov_left;
2973 device->rs_mark_time[i] = now;
2975 drbd_info(device, "Online Verify start sector: %llu\n",
3013 spin_lock_irq(&device->resource->req_lock);
3014 list_add_tail(&peer_req->w.list, &device->read_ee);
3015 spin_unlock_irq(&device->resource->req_lock);
3018 if (device->state.peer != R_PRIMARY
3019 && drbd_rs_should_slow_down(device, sector, false))
3022 if (drbd_rs_begin_io(device, sector))
3026 atomic_add(size >> 9, &device->rs_sect_ev);
3030 inc_unacked(device);
3031 if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
3036 drbd_err(device, "submit failed, triggering re-connect\n");
3039 spin_lock_irq(&device->resource->req_lock);
3041 spin_unlock_irq(&device->resource->req_lock);
3044 put_ldev(device);
3045 drbd_free_peer_req(device, peer_req);
3054 struct drbd_device *device = peer_device->device;
3059 self = device->ldev->md.uuid[UI_BITMAP] & 1;
3060 peer = device->p_uuid[UI_BITMAP] & 1;
3062 ch_peer = device->p_uuid[UI_SIZE];
3063 ch_self = device->comm_bm_set;
3073 drbd_err(device, "Configuration error.\n");
3097 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
3137 struct drbd_device *device = peer_device->device;
3151 drbd_err(device, "Configuration error.\n");
3157 if (hg == -1 && device->state.role == R_SECONDARY)
3159 if (hg == 1 && device->state.role == R_PRIMARY)
3166 return device->state.role == R_PRIMARY ? 1 : -1;
3169 if (hg == -1 && device->state.role == R_PRIMARY) {
3175 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3177 drbd_khelper(device, "pri-lost-after-sb");
3179 drbd_warn(device, "Successfully gave up primary role.\n");
3194 struct drbd_device *device = peer_device->device;
3210 drbd_err(device, "Configuration error.\n");
3225 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3227 drbd_khelper(device, "pri-lost-after-sb");
3229 drbd_warn(device, "Successfully gave up primary role.\n");
3239 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
3243 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
3246 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
3269 static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
3271 struct drbd_peer_device *const peer_device = first_peer_device(device);
3276 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3277 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3296 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
3301 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3302 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
3303 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
3304 drbd_uuid_move_history(device);
3305 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3306 device->ldev->md.uuid[UI_BITMAP] = 0;
3308 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3309 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3312 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3319 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3324 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3325 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3326 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3328 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3329 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3330 device->p_uuid[UI_BITMAP] = 0UL;
3332 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3335 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3343 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3344 (device->p_uuid[UI_FLAGS] & 2);
3359 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3365 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3372 if (device->state.role == R_PRIMARY)
3391 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3396 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3399 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3400 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3401 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3408 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3409 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3411 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3412 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3419 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3421 peer = device->p_uuid[i] & ~((u64)1);
3427 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3428 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3433 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3436 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3437 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3438 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3445 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3446 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3448 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3449 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3450 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3458 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3460 self = device->ldev->md.uuid[i] & ~((u64)1);
3466 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3467 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3473 self = device->ldev->md.uuid[i] & ~((u64)1);
3475 peer = device->p_uuid[j] & ~((u64)1);
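
Every comparison in drbd_uuid_compare() masks off bit 0 with ~((u64)1) before testing equality: the low bit of a DRBD UUID is a flag, not part of the identity. A tiny hypothetical helper makes the idiom explicit (uuid_value is not a DRBD name):

    /* Bit 0 of a DRBD UUID carries a flag; the identity lives in bits 63..1. */
    static inline u64 uuid_value(u64 uuid)
    {
            return uuid & ~((u64)1);
    }

    /* e.g. the rule-2 style test: current UUIDs match, flag bit ignored */
    bool match = uuid_value(device->ldev->md.uuid[UI_CURRENT]) ==
                 uuid_value(device->p_uuid[UI_CURRENT]);
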
3491 struct drbd_device *device = peer_device->device;
3497 mydisk = device->state.disk;
3499 mydisk = device->new_state_tmp.disk;
3501 drbd_info(device, "drbd_sync_handshake:\n");
3503 spin_lock_irq(&device->ldev->md.uuid_lock);
3504 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3505 drbd_uuid_dump(device, "peer", device->p_uuid,
3506 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3508 hg = drbd_uuid_compare(device, peer_role, &rule_nr);
3509 spin_unlock_irq(&device->ldev->md.uuid_lock);
3511 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
3514 drbd_alert(device, "Unrelated data, aborting!\n");
3522 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3527 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
3537 drbd_info(device, "Becoming sync %s due to disk states.\n",
3542 drbd_khelper(device, "initial-split-brain");
3552 int pcount = (device->state.role == R_PRIMARY)
3568 drbd_warn(device, "Split-Brain detected, %d primaries, "
3572 drbd_warn(device, "Doing a full sync, since"
3580 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3582 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3586 drbd_warn(device, "Split-Brain detected, manually solved. "
3596 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3597 drbd_khelper(device, "split-brain");
3602 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3607 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3610 drbd_khelper(device, "pri-lost");
3613 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3616 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
3623 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3625 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
3632 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3633 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3644 if (drbd_bm_total_weight(device)) {
3645 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3646 drbd_bm_total_weight(device));
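
drbd_uuid_compare() returns a signed handshake verdict hg, which drbd_sync_handshake() maps onto a target connection state: positive means "I become SyncSource", negative "I become SyncTarget", zero "no resync needed", and magnitudes of 100 and above flag split brain or unrelated data. A sketch of the final mapping, reconstructed from the fragments above (not guaranteed verbatim):

    if (hg > 0) {                           /* become SyncSource */
            rv = C_WF_BITMAP_S;
    } else if (hg < 0) {                    /* become SyncTarget */
            rv = C_WF_BITMAP_T;
    } else {
            rv = C_CONNECTED;
            if (drbd_bm_total_weight(device))
                    drbd_info(device, "No resync, but %lu bits in bitmap!\n",
                              drbd_bm_total_weight(device));
    }
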
3824 const struct drbd_device *device,
3834 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3862 * config_unknown_volume - device configuration command for unknown volume
3864 * When a device is added to an existing connection, the node on which the
3865 * device is added first will send configuration commands to its peer but the
3866 * peer will not know about the device yet. It will warn and ignore these
3867 * commands. Once the device is added on the second node, the second node will
3868 * send the same device configuration commands, but in the other direction.
3882 struct drbd_device *device;
3897 device = peer_device->device;
3906 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3917 D_ASSERT(device, data_size == 0);
3921 D_ASSERT(device, data_size == 0);
3934 if (get_ldev(device)) {
3937 put_ldev(device);
3939 drbd_err(device, "Allocation of new disk_conf failed\n");
3943 old_disk_conf = device->ldev->disk_conf;
3952 drbd_err(device, "verify-alg of wrong size, "
3964 D_ASSERT(device, p->verify_alg[data_size-1] == 0);
3970 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3971 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3977 if (device->state.conn == C_WF_REPORT_PARAMS) {
3978 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3982 verify_tfm = drbd_crypto_alloc_digest_safe(device,
3991 if (device->state.conn == C_WF_REPORT_PARAMS) {
3992 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3996 csums_tfm = drbd_crypto_alloc_digest_safe(device,
4011 if (fifo_size != device->rs_plan_s->size) {
4014 drbd_err(device, "kmalloc of fifo_buffer failed");
4015 put_ldev(device);
4024 drbd_err(device, "Allocation of new net_conf failed\n");
4035 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
4042 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
4049 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4050 put_ldev(device);
4054 old_plan = device->rs_plan_s;
4055 rcu_assign_pointer(device->rs_plan_s, new_plan);
4069 put_ldev(device);
4078 put_ldev(device);
4092 static void warn_if_differ_considerably(struct drbd_device *device,
4100 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
4107 struct drbd_device *device;
4119 device = peer_device->device;
4120 cur_size = get_capacity(device->vdisk);
4128 device->p_size = p_size;
4130 if (get_ldev(device)) {
4132 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
4135 warn_if_differ_considerably(device, "lower level device sizes",
4136 p_size, drbd_get_max_capacity(device->ldev));
4137 warn_if_differ_considerably(device, "user requested size",
4142 if (device->state.conn == C_WF_REPORT_PARAMS)
4145 /* Never shrink a device with usable data during connect,
4148 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
4150 device->state.disk >= D_OUTDATED &&
4151 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
4152 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4155 put_ldev(device);
4164 drbd_err(device, "Allocation of new disk_conf failed\n");
4165 put_ldev(device);
4170 old_disk_conf = device->ldev->disk_conf;
4174 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4179 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4183 put_ldev(device);
4186 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
4193 if (get_ldev(device)) {
4194 drbd_reconsider_queue_parameters(device, device->ldev, o);
4195 dd = drbd_determine_dev_size(device, ddsf, NULL);
4196 put_ldev(device);
4199 drbd_md_sync(device);
4218 drbd_reconsider_queue_parameters(device, NULL, o);
4224 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4226 } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4227 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4241 drbd_set_my_capacity(device, new_size);
4245 if (get_ldev(device)) {
4246 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4247 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
4251 put_ldev(device);
4254 if (device->state.conn > C_WF_REPORT_PARAMS) {
4255 if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
4261 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4262 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4263 if (device->state.pdsk >= D_INCONSISTENT &&
4264 device->state.disk >= D_INCONSISTENT) {
4266 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
4268 resync_after_online_grow(device);
4270 set_bit(RESYNC_AFTER_NEG, &device->flags);
4280 struct drbd_device *device;
4288 device = peer_device->device;
4292 drbd_err(device, "kmalloc of p_uuid failed\n");
4299 kfree(device->p_uuid);
4300 device->p_uuid = p_uuid;
4302 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
4303 device->state.disk < D_INCONSISTENT &&
4304 device->state.role == R_PRIMARY &&
4305 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
4306 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
4307 (unsigned long long)device->ed_uuid);
4312 if (get_ldev(device)) {
4314 device->state.conn == C_CONNECTED &&
4316 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
4319 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
4320 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4323 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4324 _drbd_uuid_set(device, UI_BITMAP, 0);
4325 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4327 drbd_md_sync(device);
4330 put_ldev(device);
4331 } else if (device->state.disk < D_INCONSISTENT &&
4332 device->state.role == R_PRIMARY) {
4335 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4342 mutex_lock(device->state_mutex);
4343 mutex_unlock(device->state_mutex);
4344 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4345 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4348 drbd_print_uuids(device, "receiver updated UUIDs to");
4387 struct drbd_device *device;
4395 device = peer_device->device;
4401 mutex_is_locked(device->state_mutex)) {
4409 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
4412 drbd_md_sync(device);
4444 struct drbd_device *device;
4454 device = peer_device->device;
4460 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
4461 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
4464 spin_lock_irq(&device->resource->req_lock);
4466 os = ns = drbd_read_state(device);
4467 spin_unlock_irq(&device->resource->req_lock);
4499 if (drbd_bm_total_weight(device) <= device->rs_failed)
4500 drbd_resync_finished(device);
4508 ov_out_of_sync_print(device);
4509 drbd_resync_finished(device);
4547 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4548 get_ldev_if_state(device, D_NEGOTIATING)) {
4560 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
4570 put_ldev(device);
4573 if (device->state.disk == D_NEGOTIATING) {
4574 drbd_force_state(device, NS(disk, D_FAILED));
4576 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
4582 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
4589 spin_lock_irq(&device->resource->req_lock);
4590 if (os.i != drbd_read_state(device).i)
4592 clear_bit(CONSIDER_RESYNC, &device->flags);
4597 ns.disk = device->new_state_tmp.disk;
4599 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4600 test_bit(NEW_CUR_UUID, &device->flags)) {
4603 spin_unlock_irq(&device->resource->req_lock);
4604 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
4606 drbd_uuid_new_current(device);
4607 clear_bit(NEW_CUR_UUID, &device->flags);
4611 rv = _drbd_set_state(device, ns, cs_flags, NULL);
4612 ns = drbd_read_state(device);
4613 spin_unlock_irq(&device->resource->req_lock);
4631 clear_bit(DISCARD_MY_DATA, &device->flags);
4633 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
4641 struct drbd_device *device;
4647 device = peer_device->device;
4649 wait_event(device->misc_wait,
4650 device->state.conn == C_WF_SYNC_UUID ||
4651 device->state.conn == C_BEHIND ||
4652 device->state.conn < C_CONNECTED ||
4653 device->state.disk < D_NEGOTIATING);
4655 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
4659 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4660 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4661 _drbd_uuid_set(device, UI_BITMAP, 0UL);
4663 drbd_print_uuids(device, "updated sync uuid");
4664 drbd_start_resync(device, C_SYNC_TARGET);
4666 put_ldev(device);
4668 drbd_err(device, "Ignoring SyncUUID packet!\n");
4700 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
4764 _drbd_bm_set_bits(peer_device->device, s, e);
4818 void INFO_bm_xfer_stats(struct drbd_device *device,
4822 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
4846 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4865 struct drbd_device *device;
4872 device = peer_device->device;
4874 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4879 .bm_bits = drbd_bm_bits(device),
4880 .bm_words = drbd_bm_words(device),
4892 drbd_err(device, "ReportCBitmap packet too large\n");
4897 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4906 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4924 INFO_bm_xfer_stats(device, "receive", &c);
4926 if (device->state.conn == C_WF_BITMAP_T) {
4929 err = drbd_send_bitmap(device);
4933 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4934 D_ASSERT(device, rv == SS_SUCCESS);
4935 } else if (device->state.conn != C_WF_BITMAP_S) {
4938 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
4939 drbd_conn_str(device->state.conn));
4944 drbd_bm_unlock(device);
4945 if (!err && device->state.conn == C_WF_BITMAP_S)
4946 drbd_start_resync(device, C_SYNC_SOURCE);
4969 struct drbd_device *device;
4975 device = peer_device->device;
4977 switch (device->state.conn) {
4983 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4984 drbd_conn_str(device->state.conn));
4987 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4996 struct drbd_device *device;
5003 device = peer_device->device;
5008 dec_rs_pending(device);
5010 if (get_ldev(device)) {
5017 put_ldev(device);
5025 spin_lock_irq(&device->resource->req_lock);
5026 list_add_tail(&peer_req->w.list, &device->sync_ee);
5027 spin_unlock_irq(&device->resource->req_lock);
5029 atomic_add(pi->size >> 9, &device->rs_sect_ev);
5030 err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
5033 spin_lock_irq(&device->resource->req_lock);
5035 spin_unlock_irq(&device->resource->req_lock);
5037 drbd_free_peer_req(device, peer_req);
5038 put_ldev(device);
5043 inc_unacked(device);
5049 drbd_rs_complete_io(device, sector);
5053 atomic_add(size >> 9, &device->rs_sect_in);
5179 struct drbd_device *device = peer_device->device;
5180 kref_get(&device->kref);
5183 kref_put(&device->kref, drbd_destroy_device);
5212 struct drbd_device *device = peer_device->device;
5216 spin_lock_irq(&device->resource->req_lock);
5217 _drbd_wait_ee_list_empty(device, &device->active_ee);
5218 _drbd_wait_ee_list_empty(device, &device->sync_ee);
5219 _drbd_wait_ee_list_empty(device, &device->read_ee);
5220 spin_unlock_irq(&device->resource->req_lock);
5232 drbd_rs_cancel_all(device);
5233 device->rs_total = 0;
5234 device->rs_failed = 0;
5235 atomic_set(&device->rs_pending_cnt, 0);
5236 wake_up(&device->misc_wait);
5238 del_timer_sync(&device->resync_timer);
5239 resync_timer_fn(&device->resync_timer);
5246 drbd_finish_peer_reqs(device);
5255 drbd_rs_cancel_all(device);
5257 kfree(device->p_uuid);
5258 device->p_uuid = NULL;
5260 if (!drbd_suspended(device))
5263 drbd_md_sync(device);
5265 if (get_ldev(device)) {
5266 drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5268 put_ldev(device);
5278 i = drbd_free_peer_reqs(device, &device->net_ee);
5280 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
5281 i = atomic_read(&device->pp_in_use_by_net);
5283 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
5284 i = atomic_read(&device->pp_in_use);
5286 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
5288 D_ASSERT(device, list_empty(&device->read_ee));
5289 D_ASSERT(device, list_empty(&device->active_ee));
5290 D_ASSERT(device, list_empty(&device->sync_ee));
5291 D_ASSERT(device, list_empty(&device->done_ee));
5643 struct drbd_device *device;
5650 device = peer_device->device;
5653 D_ASSERT(device, connection->agreed_pro_version < 100);
5658 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
5660 set_bit(CL_ST_CHG_FAIL, &device->flags);
5661 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
5664 wake_up(&device->state_wait);
5688 struct drbd_device *device;
5696 device = peer_device->device;
5698 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
5702 if (get_ldev(device)) {
5703 drbd_rs_complete_io(device, sector);
5704 drbd_set_in_sync(device, sector, blksize);
5706 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5707 put_ldev(device);
5709 dec_rs_pending(device);
5710 atomic_add(blksize >> 9, &device->rs_sect_in);
5716 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
5723 spin_lock_irq(&device->resource->req_lock);
5724 req = find_request(device, root, id, sector, missing_ok, func);
5726 spin_unlock_irq(&device->resource->req_lock);
5730 spin_unlock_irq(&device->resource->req_lock);
5733 complete_master_bio(device, &m);
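
The fragments above outline validate_req_change_req_state(): look the request up in the given rb tree under req_lock, apply the state transition via __req_mod(), then complete the master bio outside the lock. Stitched together as one function (a reconstruction from the fragments, not guaranteed verbatim):

    static int
    validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
                                  struct rb_root *root, const char *func,
                                  enum drbd_req_event what, bool missing_ok)
    {
            struct drbd_request *req;
            struct bio_and_error m;

            spin_lock_irq(&device->resource->req_lock);
            req = find_request(device, root, id, sector, missing_ok, func);
            if (unlikely(!req)) {
                    spin_unlock_irq(&device->resource->req_lock);
                    return -EIO;
            }
            __req_mod(req, what, &m);
            spin_unlock_irq(&device->resource->req_lock);

            if (m.bio)
                    complete_master_bio(device, &m);
            return 0;
    }
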
5740 struct drbd_device *device;
5749 device = peer_device->device;
5754 drbd_set_in_sync(device, sector, blksize);
5755 dec_rs_pending(device);
5778 return validate_req_change_req_state(device, p->block_id, sector,
5779 &device->write_requests, __func__,
5786 struct drbd_device *device;
5795 device = peer_device->device;
5800 dec_rs_pending(device);
5801 drbd_rs_failed_io(device, sector, size);
5805 err = validate_req_change_req_state(device, p->block_id, sector,
5806 &device->write_requests, __func__,
5814 drbd_set_out_of_sync(device, sector, size);
5822 struct drbd_device *device;
5829 device = peer_device->device;
5833 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
5836 return validate_req_change_req_state(device, p->block_id, sector,
5837 &device->read_requests, __func__,
5844 struct drbd_device *device;
5852 device = peer_device->device;
5859 dec_rs_pending(device);
5861 if (get_ldev_if_state(device, D_FAILED)) {
5862 drbd_rs_complete_io(device, sector);
5865 drbd_rs_failed_io(device, sector, size);
5871 put_ldev(device);
5887 struct drbd_device *device = peer_device->device;
5889 if (device->state.conn == C_AHEAD &&
5890 atomic_read(&device->ap_in_flight) == 0 &&
5891 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5892 device->start_resync_timer.expires = jiffies + HZ;
5893 add_timer(&device->start_resync_timer);
5904 struct drbd_device *device;
5913 device = peer_device->device;
5921 drbd_ov_out_of_sync_found(device, sector, size);
5923 ov_out_of_sync_print(device);
5925 if (!get_ldev(device))
5928 drbd_rs_complete_io(device, sector);
5929 dec_rs_pending(device);
5931 --device->ov_left;
5934 if ((device->ov_left & 0x200) == 0x200)
5935 drbd_advance_rs_marks(device, device->ov_left);
5937 if (device->ov_left == 0) {
5941 dw->device = device;
5944 drbd_err(device, "kmalloc(dw) failed.");
5945 ov_out_of_sync_print(device);
5946 drbd_resync_finished(device);
5949 put_ldev(device);
6151 struct drbd_device *device = peer_device->device;
6163 err = drbd_finish_peer_reqs(device);
6164 kref_put(&device->kref, drbd_destroy_device);