Lines Matching defs:peer_device
224 struct drbd_peer_device *peer_device;
228 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
229 struct drbd_device *device = peer_device->device;
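
The first matches already show the file's standard traversal idiom: every peer device of a connection is visited through an idr_for_each_entry() walk keyed by volume number. A minimal sketch of that pattern, assuming only the DRBD types visible in this listing:

    /* Sketch: visit each volume (peer device) of a connection.
     * idr_for_each_entry() walks the connection->peer_devices IDR
     * and yields the volume number in vnr. */
    struct drbd_peer_device *peer_device;
    int vnr;

    idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
        struct drbd_device *device = peer_device->device;
        /* per-volume work; callers elsewhere in this listing take
         * locks or krefs around this loop as needed */
    }
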
244 * @peer_device: DRBD peer device.
262 struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
265 struct drbd_device *device = peer_device->device;
272 nc = rcu_dereference(peer_device->connection->net_conf);
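
The net_conf dereference on line 272 is one instance of a pattern that recurs throughout these matches (lines 2375, 2385, 3027, 3103, 3160, 3507): the configuration is RCU-protected, so readers snapshot the fields they need inside a read-side critical section. A hedged sketch; max_buffers and its fallback follow the shape used in drbd_alloc_pages, not a guaranteed API:

    /* Sketch: snapshot an RCU-protected net_conf field. The pointer
     * is only stable inside the read-side critical section, so copy
     * out what is needed before unlocking. */
    struct net_conf *nc;
    unsigned int mxb;

    rcu_read_lock();
    nc = rcu_dereference(peer_device->connection->net_conf);
    mxb = nc ? nc->max_buffers : 1000000;   /* fallback if not configured */
    rcu_read_unlock();
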
360 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
363 struct drbd_device *device = peer_device->device;
379 page = drbd_alloc_pages(peer_device, nr_pages,
391 peer_req->peer_device = peer_device;
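
Lines 360-391 outline drbd_alloc_peer_req(): allocate the request object, pull backing pages via drbd_alloc_pages(), and record the owning peer device for later completion handling. A condensed sketch; the mempool name and the gfpflags_allow_blocking() call are recalled DRBD internals rather than shown in this listing:

    /* Sketch: allocate a peer request plus its backing pages. */
    peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
    if (!peer_req)
        return NULL;

    if (nr_pages) {
        page = drbd_alloc_pages(peer_device, nr_pages,
                                gfpflags_allow_blocking(gfp_mask));
        if (!page)
            goto fail;              /* frees peer_req again */
    }

    peer_req->peer_device = peer_device;    /* back-pointer used by acks */
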
889 int drbd_connected(struct drbd_peer_device *peer_device)
891 struct drbd_device *device = peer_device->device;
897 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
898 &peer_device->connection->cstate_mutex :
901 err = drbd_send_sync_param(peer_device);
903 err = drbd_send_sizes(peer_device, 0, 0);
905 err = drbd_send_uuids(peer_device);
907 err = drbd_send_current_state(peer_device);
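
Lines 901-907 are drbd_connected()'s initial handshake: sync parameters, sizes, UUIDs, then current state, each sent only if the previous send succeeded. The error-chaining shape, sketched from the calls matched above:

    /* Sketch: post-connect handshake sequence; err carries the
     * first failure out and short-circuits the remaining sends. */
    err = drbd_send_sync_param(peer_device);
    if (!err)
        err = drbd_send_sizes(peer_device, 0, 0);
    if (!err)
        err = drbd_send_uuids(peer_device);
    if (!err)
        err = drbd_send_current_state(peer_device);
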
926 struct drbd_peer_device *peer_device;
1094 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1095 mutex_lock(peer_device->device->state_mutex);
1102 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1103 mutex_unlock(peer_device->device->state_mutex);
1106 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1107 struct drbd_device *device = peer_device->device;
1116 drbd_connected(peer_device);
1315 struct drbd_peer_device *peer_device;
1324 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1325 struct drbd_device *device = peer_device->device;
1636 struct drbd_device *device = peer_req->peer_device->device;
1654 conn_wait_active_ee_empty(peer_req->peer_device->connection);
1742 struct drbd_peer_device *peer_device;
1746 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1747 struct drbd_device *device = peer_device->device;
1847 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1850 struct drbd_device *device = peer_device->device;
1856 void *dig_in = peer_device->connection->int_dig_in;
1857 void *dig_vv = peer_device->connection->int_dig_vv;
1863 if (!trim && peer_device->connection->peer_integrity_tfm) {
1864 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1869 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1878 if (!expect(peer_device, data_size == 0))
1882 if (!expect(peer_device, data_size == 0))
1887 if (!expect(peer_device, IS_ALIGNED(ds, 512)))
1890 if (!expect(peer_device, ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1892 } else if (!expect(peer_device, ds <= DRBD_MAX_BIO_SIZE))
1908 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1928 err = drbd_recv_all_warn(peer_device->connection, data, len);
1942 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
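
read_in_block() (lines 1847-1942) receives the peer's digest first when a peer_integrity_tfm is configured, then recomputes a digest over the received payload and compares the two. The verification step, sketched; the memcmp/cleanup tail is recalled from context and goes beyond what the matches show:

    /* Sketch: verify the per-packet data-integrity digest.
     * dig_in = digest received from the peer, dig_vv = local recompute. */
    if (digest_size) {
        drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm,
                          peer_req, dig_vv, data_size);
        if (memcmp(dig_in, dig_vv, digest_size)) {
            drbd_err(peer_device, "Digest integrity check FAILED.\n");
            drbd_free_peer_req(device, peer_req);
            return NULL;
        }
    }
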
1957 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
1966 page = drbd_alloc_pages(peer_device, 1, 1);
1972 err = drbd_recv_all_warn(peer_device->connection, data, len);
1978 drbd_free_pages(peer_device->device, page, 0);
1982 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
1989 void *dig_in = peer_device->connection->int_dig_in;
1990 void *dig_vv = peer_device->connection->int_dig_vv;
1993 if (peer_device->connection->peer_integrity_tfm) {
1994 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1995 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
2003 peer_device->device->recv_cnt += data_size >> 9;
2006 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
2011 err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
2019 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
2021 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
2026 D_ASSERT(peer_device->device, data_size == 0);
2038 struct drbd_peer_device *peer_device = peer_req->peer_device;
2039 struct drbd_device *device = peer_device->device;
2046 drbd_set_in_sync(peer_device, sector, peer_req->i.size);
2047 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
2050 drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
2052 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
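
e_end_resync_block() (lines 2038-2052) converts a local write completion into a wire-level ack: success marks the range in sync and sends P_RS_WRITE_ACK, failure records the resync I/O error and sends P_NEG_ACK. The branch, sketched; EE_WAS_ERROR as the error flag is an assumption from DRBD's request flags:

    /* Sketch: completion of a received resync write. */
    if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
        drbd_set_in_sync(peer_device, sector, peer_req->i.size);
        err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
    } else {
        drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
        err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
    }
    dec_unacked(device);
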
2059 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
2062 struct drbd_device *device = peer_device->device;
2065 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
2069 dec_rs_pending(peer_device);
2118 struct drbd_peer_device *peer_device;
2125 peer_device = conn_peer_device(connection, pi->vnr);
2126 if (!peer_device)
2128 device = peer_device->device;
2138 err = recv_dless_read(peer_device, req, sector, pi->size);
2140 req_mod(req, DATA_RECEIVED, peer_device);
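
The receive_DataReply() matches above (lines 2118-2140) open with the lookup that nearly every receive handler in this listing repeats: resolve the packet's volume number to a peer device and bail out if the volume is unknown. Sketched once here; the -EIO return is the convention these handlers appear to use:

    /* Sketch: per-packet volume lookup at the top of a receive handler;
     * pi->vnr is the volume number from the packet header. */
    peer_device = conn_peer_device(connection, pi->vnr);
    if (!peer_device)
        return -EIO;
    device = peer_device->device;
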
2150 struct drbd_peer_device *peer_device;
2156 peer_device = conn_peer_device(connection, pi->vnr);
2157 if (!peer_device)
2159 device = peer_device->device;
2168 err = recv_resync_read(peer_device, sector, pi);
2173 err = drbd_drain_block(peer_device, pi->size);
2175 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2209 struct drbd_peer_device *peer_device = peer_req->peer_device;
2210 struct drbd_device *device = peer_device->device;
2220 err = drbd_send_ack(peer_device, pcmd, peer_req);
2222 drbd_set_in_sync(peer_device, sector, peer_req->i.size);
2224 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2243 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
2252 struct drbd_peer_device *peer_device = peer_req->peer_device;
2255 err = drbd_send_ack(peer_device, ack, peer_req);
2256 dec_unacked(peer_device->device);
2270 struct drbd_connection *connection = peer_req->peer_device->connection;
2291 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
2293 struct drbd_device *device = peer_device->device;
2296 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
2352 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
2354 struct drbd_device *device = peer_device->device;
2359 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
2375 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
2385 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo * HZ / 10;
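
update_peer_seq() and wait_for_and_update_peer_seq() (lines 2291-2385) only act when RESOLVE_CONFLICTS is set, i.e. in two-primaries mode: they keep device->peer_seq monotonically increasing under a spinlock and wake writers waiting on it. The update half, sketched; seq_max() is assumed to be a wrap-safe maximum of two sequence numbers:

    /* Sketch: monotonic update of the last sequence number seen
     * from the peer, with a wakeup when it actually advances. */
    if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
        spin_lock(&device->peer_seq_lock);
        newest_peer_seq = seq_max(device->peer_seq, peer_seq);
        device->peer_seq = newest_peer_seq;
        spin_unlock(&device->peer_seq_lock);
        if (peer_seq == newest_peer_seq)
            wake_up(&device->seq_wait);
    }
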
2422 struct drbd_peer_device *peer_device = first_peer_device(device);
2436 __req_mod(req, NEG_ACKED, peer_device, &m);
2448 struct drbd_connection *connection = peer_req->peer_device->connection;
2503 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2556 struct drbd_peer_device *peer_device;
2566 peer_device = conn_peer_device(connection, pi->vnr);
2567 if (!peer_device)
2569 device = peer_device->device;
2574 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2575 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2577 err2 = drbd_drain_block(peer_device, pi->size);
2590 peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2603 D_ASSERT(peer_device, peer_req->i.size > 0);
2604 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_DISCARD);
2605 D_ASSERT(peer_device, peer_req->pages == NULL);
2611 D_ASSERT(peer_device, peer_req->i.size > 0);
2612 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_WRITE_ZEROES);
2613 D_ASSERT(peer_device, peer_req->pages == NULL);
2632 nc = rcu_dereference(peer_device->connection->net_conf);
2634 if (peer_device->connection->agreed_pro_version < 100) {
2656 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2663 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2677 update_peer_seq(peer_device, peer_seq);
2693 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
2732 bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
2735 struct drbd_device *device = peer_device->device;
2804 struct drbd_peer_device *peer_device;
2813 peer_device = conn_peer_device(connection, pi->vnr);
2814 if (!peer_device)
2816 device = peer_device->device;
2837 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2843 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
2847 dec_rs_pending(peer_device);
2848 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2858 return drbd_drain_block(peer_device, pi->size);
2864 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2904 if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
2908 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2918 dec_rs_pending(peer_device);
2927 peer_device->connection->agreed_pro_version >= 90) {
2981 && drbd_rs_should_slow_down(peer_device, sector, false))
3013 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
3015 struct drbd_device *device = peer_device->device;
3027 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
3063 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
3080 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
3096 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
3098 struct drbd_device *device = peer_device->device;
3103 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
3117 hg = drbd_asb_recover_0p(peer_device);
3124 rv = drbd_asb_recover_0p(peer_device);
3129 hg = drbd_asb_recover_0p(peer_device);
3153 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
3155 struct drbd_device *device = peer_device->device;
3160 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
3174 rv = drbd_asb_recover_0p(peer_device);
3179 hg = drbd_asb_recover_0p(peer_device);
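
The one- and two-primary after-split-brain handlers (lines 3096-3179) delegate to drbd_asb_recover_0p() for the base verdict and then bias it by the local role. The shape of that delegation in drbd_asb_recover_1p(), sketched with a representative subset of policies:

    /* Sketch: 1-primary recovery reusing the 0-primaries verdict.
     * hg > 0 favours the local node, hg < 0 the peer. */
    switch (after_sb_1p) {
    case ASB_CONSENSUS:
        hg = drbd_asb_recover_0p(peer_device);
        if (hg == -1 && device->state.role == R_SECONDARY)
            rv = hg;
        if (hg == 1 && device->state.role == R_PRIMARY)
            rv = hg;
        break;
    case ASB_VIOLENTLY:
        rv = drbd_asb_recover_0p(peer_device);
        break;
    case ASB_DISCARD_SECONDARY:
        rv = device->state.role == R_SECONDARY ? 1 : -1;
        break;
    default:
        break;
    }
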
3230 static int drbd_uuid_compare(struct drbd_peer_device *const peer_device,
3233 struct drbd_connection *const connection = peer_device->connection;
3234 struct drbd_device *device = peer_device->device;
3324 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3331 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3449 static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3453 struct drbd_device *device = peer_device->device;
3470 hg = drbd_uuid_compare(peer_device, peer_role, &rule_nr);
3507 nc = rcu_dereference(peer_device->connection->net_conf);
3520 hg = drbd_asb_recover_0p(peer_device);
3523 hg = drbd_asb_recover_1p(peer_device);
3526 hg = drbd_asb_recover_2p(peer_device);
3583 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
3840 struct drbd_peer_device *peer_device;
3853 peer_device = conn_peer_device(connection, pi->vnr);
3854 if (!peer_device)
3856 device = peer_device->device;
3888 err = drbd_recv_all(peer_device->connection, p, header_size);
3893 old_net_conf = peer_device->connection->net_conf;
3918 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
3990 crypto_free_shash(peer_device->connection->verify_tfm);
3991 peer_device->connection->verify_tfm = verify_tfm;
3997 crypto_free_shash(peer_device->connection->csums_tfm);
3998 peer_device->connection->csums_tfm = csums_tfm;
4044 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4063 struct drbd_peer_device *peer_device;
4073 peer_device = conn_peer_device(connection, pi->vnr);
4074 if (!peer_device)
4076 device = peer_device->device;
4111 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4184 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4214 drbd_send_sizes(peer_device, 0, ddsf);
4234 struct drbd_peer_device *peer_device;
4240 peer_device = conn_peer_device(connection, pi->vnr);
4241 if (!peer_device)
4243 device = peer_device->device;
4261 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4268 peer_device->connection->agreed_pro_version >= 90 &&
4339 struct drbd_peer_device *peer_device;
4345 peer_device = conn_peer_device(connection, pi->vnr);
4346 if (!peer_device)
4348 device = peer_device->device;
4353 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
4355 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
4363 drbd_send_sr_reply(peer_device, rv);
4396 struct drbd_peer_device *peer_device;
4404 peer_device = conn_peer_device(connection, pi->vnr);
4405 if (!peer_device)
4407 device = peer_device->device;
4453 drbd_resync_finished(peer_device);
4461 ov_out_of_sync_print(peer_device);
4462 drbd_resync_finished(peer_device);
4521 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
4533 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
4536 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4558 tl_clear(peer_device->connection);
4561 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
4569 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4579 drbd_send_uuids(peer_device);
4580 drbd_send_current_state(peer_device);
4593 struct drbd_peer_device *peer_device;
4597 peer_device = conn_peer_device(connection, pi->vnr);
4598 if (!peer_device)
4600 device = peer_device->device;
4633 receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
4637 drbd_header_size(peer_device->connection);
4644 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
4649 err = drbd_recv_all(peer_device->connection, p, want);
4653 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
4685 recv_bm_rle_bits(struct drbd_peer_device *peer_device,
4714 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4717 _drbd_bm_set_bits(peer_device->device, s, e);
4721 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4754 decode_bitmap_c(struct drbd_peer_device *peer_device,
4760 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
4766 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4767 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4771 void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
4775 unsigned int header_size = drbd_header_size(peer_device->connection);
4799 drbd_info(peer_device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4817 struct drbd_peer_device *peer_device;
4822 peer_device = conn_peer_device(connection, pi->vnr);
4823 if (!peer_device)
4825 device = peer_device->device;
4838 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
4854 err = drbd_recv_all(peer_device->connection, p, pi->size);
4857 err = decode_bitmap_c(peer_device, p, &c, pi->size);
4872 err = drbd_recv_header(peer_device->connection, pi);
4877 INFO_bm_xfer_stats(peer_device, "receive", &c);
4882 err = drbd_send_bitmap(device, peer_device);
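
receive_bitmap() (lines 4817-4882) loops over incoming packets, feeding plain chunks to receive_bitmap_plain() and compressed ones through drbd_recv_all() plus decode_bitmap_c(), until the transfer completes; it then prints statistics via INFO_bm_xfer_stats() and may send its own bitmap back. Core of the loop, sketched with the return-value convention these helpers appear to use (positive: more expected, zero: done, negative: error):

    /* Sketch: the bitmap transfer loop. */
    for (;;) {
        if (pi->cmd == P_BITMAP)
            err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
        else if (pi->cmd == P_COMPRESSED_BITMAP) {
            err = drbd_recv_all(peer_device->connection, p, pi->size);
            if (!err)
                err = decode_bitmap_c(peer_device, p, &c, pi->size);
        } else
            err = -EIO;     /* unexpected packet type */
        if (err <= 0)
            break;          /* 0: transfer complete, <0: fail */
        err = drbd_recv_header(peer_device->connection, pi);
        if (err)
            break;
    }
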
4921 struct drbd_peer_device *peer_device;
4925 peer_device = conn_peer_device(connection, pi->vnr);
4926 if (!peer_device)
4928 device = peer_device->device;
4940 drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4947 struct drbd_peer_device *peer_device;
4953 peer_device = conn_peer_device(connection, pi->vnr);
4954 if (!peer_device)
4956 device = peer_device->device;
4961 dec_rs_pending(peer_device);
4966 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
5003 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
5107 struct drbd_peer_device *peer_device;
5130 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5131 struct drbd_device *device = peer_device->device;
5134 drbd_disconnected(peer_device);
5162 static int drbd_disconnected(struct drbd_peer_device *peer_device)
5164 struct drbd_device *device = peer_device->device;
5196 drbd_flush_workqueue(&peer_device->connection->sender_work);
5203 drbd_flush_workqueue(&peer_device->connection->sender_work);
5213 tl_clear(peer_device->connection);
5591 struct drbd_peer_device *peer_device;
5596 peer_device = conn_peer_device(connection, pi->vnr);
5597 if (!peer_device)
5599 device = peer_device->device;
5636 struct drbd_peer_device *peer_device;
5642 peer_device = conn_peer_device(connection, pi->vnr);
5643 if (!peer_device)
5645 device = peer_device->device;
5647 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
5649 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5653 drbd_set_in_sync(peer_device, sector, blksize);
5658 dec_rs_pending(peer_device);
5665 validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
5669 struct drbd_device *device = peer_device->device;
5679 __req_mod(req, what, peer_device, &m);
5689 struct drbd_peer_device *peer_device;
5696 peer_device = conn_peer_device(connection, pi->vnr);
5697 if (!peer_device)
5699 device = peer_device->device;
5701 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5704 drbd_set_in_sync(peer_device, sector, blksize);
5705 dec_rs_pending(peer_device);
5728 return validate_req_change_req_state(peer_device, p->block_id, sector,
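
validate_req_change_req_state() (lines 5665-5679) is the shared backend of the ack handlers: find the pending request by block_id and sector in the device's request tree under req_lock, apply the req_mod event, and complete the master bio outside the lock if that transition finished it. Its core, sketched; find_request(), struct bio_and_error and complete_master_bio() are recalled DRBD internals, not shown in these matches:

    /* Sketch: map an ack packet to its pending request and feed the
     * corresponding event into the request state machine. */
    spin_lock_irq(&device->resource->req_lock);
    req = find_request(device, root, id, sector, missing_ok, func);
    if (unlikely(!req)) {
        spin_unlock_irq(&device->resource->req_lock);
        return -EIO;
    }
    __req_mod(req, what, peer_device, &m);
    spin_unlock_irq(&device->resource->req_lock);

    if (m.bio)
        complete_master_bio(device, &m);
    return 0;
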
5735 struct drbd_peer_device *peer_device;
5742 peer_device = conn_peer_device(connection, pi->vnr);
5743 if (!peer_device)
5745 device = peer_device->device;
5747 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5750 dec_rs_pending(peer_device);
5751 drbd_rs_failed_io(peer_device, sector, size);
5755 err = validate_req_change_req_state(peer_device, p->block_id, sector,
5764 drbd_set_out_of_sync(peer_device, sector, size);
5771 struct drbd_peer_device *peer_device;
5776 peer_device = conn_peer_device(connection, pi->vnr);
5777 if (!peer_device)
5779 device = peer_device->device;
5781 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5786 return validate_req_change_req_state(peer_device, p->block_id, sector,
5793 struct drbd_peer_device *peer_device;
5799 peer_device = conn_peer_device(connection, pi->vnr);
5800 if (!peer_device)
5802 device = peer_device->device;
5807 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5809 dec_rs_pending(peer_device);
5815 drbd_rs_failed_io(peer_device, sector, size);
5831 struct drbd_peer_device *peer_device;
5837 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5838 struct drbd_device *device = peer_device->device;
5854 struct drbd_peer_device *peer_device;
5861 peer_device = conn_peer_device(connection, pi->vnr);
5862 if (!peer_device)
5864 device = peer_device->device;
5869 update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5872 drbd_ov_out_of_sync_found(peer_device, sector, size);
5874 ov_out_of_sync_print(peer_device);
5880 dec_rs_pending(peer_device);
5886 drbd_advance_rs_marks(peer_device, device->ov_left);
5893 drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
5896 ov_out_of_sync_print(peer_device);
5897 drbd_resync_finished(peer_device);
6099 struct drbd_peer_device *peer_device =
6101 struct drbd_connection *connection = peer_device->connection;
6102 struct drbd_device *device = peer_device->device;
6117 struct work_struct send_acks_work alive, which is in the peer_device object */
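
The final matches are from drbd_send_acks_wf(), the ack-sender work function. Because send_acks_work is embedded in the peer_device (which the truncated comment on line 6117 is about), container_of() recovers the owning objects, and a kref on the device keeps that embedding alive until the work has run. Sketched, with the body reduced to a placeholder comment:

    /* Sketch: recover the peer_device from its embedded work item. */
    static void drbd_send_acks_wf(struct work_struct *ws)
    {
        struct drbd_peer_device *peer_device =
            container_of(ws, struct drbd_peer_device, send_acks_work);
        struct drbd_connection *connection = peer_device->connection;
        struct drbd_device *device = peer_device->device;

        /* flush pending acks over the connection's meta socket, then
         * drop the device kref taken when the work was queued */
    }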