Lines Matching defs:connection
96 drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
107 struct drbd_connection *connection = peer_device->connection;
149 if (connection->cstate >= C_WF_REPORT_PARAMS) {
151 if (!queue_work(connection->ack_sender, &peer_device->send_acks_work))
358 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
363 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
453 &first_peer_device(device)->connection->sender_work,
591 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
619 if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
633 mutex_lock(&connection->data.mutex);
634 if (connection->data.socket) {
635 struct sock *sk = connection->data.socket->sk;
645 mutex_unlock(&connection->data.mutex);
846 struct drbd_connection *connection = first_peer_device(device)->connection;
848 clear_bit(GOT_PING_ACK, &connection->flags);
849 request_ping(connection);
850 wait_event(connection->ping_wait,
851 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
856 struct drbd_connection *connection = first_peer_device(device)->connection;
878 drbd_queue_work(&connection->sender_work, &dw->w);
1004 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1012 conn_khelper(connection, "unfence-peer");
1052 * @cancel: The connection will be closed anyways
1113 * @cancel: The connection will be closed anyways
1195 if (peer_device->connection->csums_tfm) {
1196 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
1201 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
1246 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1249 err = 1; /* terminate the connection in case the allocation failed */
1254 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
1318 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1321 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
1367 static int drbd_send_barrier(struct drbd_connection *connection)
1372 sock = &connection->data;
1373 p = conn_prepare_command(connection, sock);
1376 p->barrier = connection->send.current_epoch_nr;
1378 connection->send.current_epoch_writes = 0;
1379 connection->send.last_sent_barrier_jif = jiffies;
1381 return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
1386 struct drbd_socket *sock = &pd->connection->data;
1402 static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
1404 if (!connection->send.seen_any_write_yet) {
1405 connection->send.seen_any_write_yet = true;
1406 connection->send.current_epoch_nr = epoch;
1407 connection->send.current_epoch_writes = 0;
1408 connection->send.last_sent_barrier_jif = jiffies;
1412 static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
1414 /* re-init if first write on this connection */
1415 if (!connection->send.seen_any_write_yet)
1417 if (connection->send.current_epoch_nr != epoch) {
1418 if (connection->send.current_epoch_writes)
1419 drbd_send_barrier(connection);
1420 connection->send.current_epoch_nr = epoch;
1429 struct drbd_connection *const connection = peer_device->connection;
1438 /* this time, no connection->send.current_epoch_writes++;
1442 maybe_send_barrier(connection, req->epoch);
1453 * @cancel: The connection will be closed anyways
1460 struct drbd_connection *connection = peer_device->connection;
1470 re_init_if_first_write(connection, req->epoch);
1471 maybe_send_barrier(connection, req->epoch);
1472 connection->send.current_epoch_writes++;
1486 * @cancel: The connection will be closed anyways
1493 struct drbd_connection *connection = peer_device->connection;
1505 maybe_send_barrier(connection, req->epoch);
1713 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
1717 csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
1719 return connection->agreed_pro_version >= 89 && /* supported? */
1720 connection->csums_tfm && /* configured? */
1736 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
1745 if (!connection) {
1746 drbd_err(device, "No connection to peer, aborting!\n");
1759 "dropping connection.\n", r);
1760 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
1772 "dropping connection.\n", r);
1773 conn_request_state(connection,
1781 if (current == connection->worker.task) {
1796 /* Did some connection breakage or IO error race with us? */
1859 device->use_csums = use_checksum_based_resync(connection, device);
1871 if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
1874 if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
1876 * detect connection loss) that can lead to a full sync
1882 * detect connection loss, then waiting for a ping
1890 nc = rcu_dereference(connection->net_conf);
2049 static void do_unqueued_work(struct drbd_connection *connection)
2055 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2078 static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
2084 dequeue_work_batch(&connection->sender_work, work_list);
2095 nc = rcu_dereference(connection->net_conf);
2099 mutex_lock(&connection->data.mutex);
2100 if (connection->data.socket)
2101 tcp_sock_set_cork(connection->data.socket->sk, false);
2102 mutex_unlock(&connection->data.mutex);
2107 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
2108 spin_lock_irq(&connection->resource->req_lock);
2109 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
2110 if (!list_empty(&connection->sender_work.q))
2111 list_splice_tail_init(&connection->sender_work.q, work_list);
2112 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
2114 spin_unlock_irq(&connection->resource->req_lock);
2120 * epoch. Next incoming request epoch will be connection ->
2126 atomic_read(&connection->current_tle_nr) !=
2127 connection->send.current_epoch_nr;
2128 spin_unlock_irq(&connection->resource->req_lock);
2131 maybe_send_barrier(connection,
2132 connection->send.current_epoch_nr + 1);
2134 if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
2138 if (get_t_state(&connection->worker) != RUNNING)
2146 finish_wait(&connection->sender_work.q_wait, &wait);
2150 nc = rcu_dereference(connection->net_conf);
2153 mutex_lock(&connection->data.mutex);
2154 if (connection->data.socket) {
2156 tcp_sock_set_cork(connection->data.socket->sk, true);
2158 tcp_sock_set_cork(connection->data.socket->sk, false);
2160 mutex_unlock(&connection->data.mutex);
2165 struct drbd_connection *connection = thi->connection;
2175 update_worker_timing_details(connection, wait_for_work);
2176 wait_for_work(connection, &work_list);
2179 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2180 update_worker_timing_details(connection, do_unqueued_work);
2181 do_unqueued_work(connection);
2187 drbd_warn(connection, "Worker got an unexpected signal\n");
2199 update_worker_timing_details(connection, w->cb);
2200 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
2202 if (connection->cstate >= C_WF_REPORT_PARAMS)
2203 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
2208 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2209 update_worker_timing_details(connection, do_unqueued_work);
2210 do_unqueued_work(connection);
2215 update_worker_timing_details(connection, w->cb);
2218 dequeue_work_batch(&connection->sender_work, &work_list);
2219 } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
2222 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
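
The densest cluster of matches, drbd_send_barrier() / re_init_if_first_write() / maybe_send_barrier() (source lines 1367-1420), all manipulate the same per-connection epoch bookkeeping: remember the epoch of the first write, count writes in the current epoch, and emit a P_BARRIER when the epoch number changes and the epoch being closed actually carried writes. The sketch below is a reduced, illustrative model of that pattern only; struct epoch_state, send_barrier() and note_write_in_epoch() are hypothetical stand-ins for the connection->send.* fields and helpers, not the real DRBD definitions.

    #include <linux/types.h>

    /* Hypothetical stand-in for the connection->send.* bookkeeping. */
    struct epoch_state {
            bool seen_any_write_yet;
            unsigned int current_epoch_nr;
            unsigned int current_epoch_writes;
    };

    /* Stand-in for drbd_send_barrier(): close the epoch that just ended. */
    static void send_barrier(struct epoch_state *s)
    {
            /* real code: conn_prepare_command() + conn_send_command(P_BARRIER) */
            s->current_epoch_writes = 0;
    }

    /* Called for every write request about to be sent to the peer. */
    static void note_write_in_epoch(struct epoch_state *s, unsigned int epoch)
    {
            if (!s->seen_any_write_yet) {
                    /* first write on this connection: adopt its epoch */
                    s->seen_any_write_yet = true;
                    s->current_epoch_nr = epoch;
                    s->current_epoch_writes = 0;
            }
            if (s->current_epoch_nr != epoch) {
                    /* epoch changed: a barrier is only needed if the epoch
                     * being closed actually contained writes */
                    if (s->current_epoch_writes)
                            send_barrier(s);
                    s->current_epoch_nr = epoch;
            }
            s->current_epoch_writes++;
    }

In the real functions the write counter reset and the last_sent_barrier_jif timestamp live in drbd_send_barrier(), and maybe_send_barrier() is also called on paths that do not increment the write counter (see the comment fragment at source line 1438).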
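
The wait_for_work() matches (source lines 2078-2160) show a cork/uncork pattern around the sender's idle wait: the data socket is uncorked before the worker sleeps, so anything still queued is flushed, and re-corked (when configured, presumably via the tcp_cork setting read from net_conf at source line 2150) once a new batch of work has been dequeued. A minimal sketch of that pattern, assuming a hypothetical struct data_channel as a stand-in for connection->data; tcp_sock_set_cork() is the real kernel helper used in the matches.

    #include <linux/mutex.h>
    #include <linux/net.h>
    #include <linux/tcp.h>

    /* Hypothetical stand-in for connection->data (mutex + socket). */
    struct data_channel {
            struct mutex lock;
            struct socket *socket;
    };

    /* Before sleeping for new work: uncork so queued segments go out now. */
    static void uncork_before_idle(struct data_channel *data)
    {
            mutex_lock(&data->lock);
            if (data->socket)
                    tcp_sock_set_cork(data->socket->sk, false);
            mutex_unlock(&data->lock);
    }

    /* After dequeuing a batch: re-cork (if configured) so the whole batch
     * is coalesced into fewer TCP segments, otherwise uncork again. */
    static void recork_after_idle(struct data_channel *data, bool use_cork)
    {
            mutex_lock(&data->lock);
            if (data->socket)
                    tcp_sock_set_cork(data->socket->sk, use_cork);
            mutex_unlock(&data->lock);
    }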
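
The matches at source lines 846-851 show a small wait-for-ack handshake: clear the GOT_PING_ACK flag, request a ping, then sleep on ping_wait until either the ack arrives or the connection state drops below C_CONNECTED. A self-contained sketch of the same pattern, with struct link_state, GOT_ACK and wait_for_peer_ack() as hypothetical stand-ins for the real flag, wait queue and helper:

    #include <linux/wait.h>
    #include <linux/bitops.h>

    /* Hypothetical stand-in for the flag/waitqueue pair on drbd_connection. */
    struct link_state {
            unsigned long flags;          /* GOT_ACK lives here                      */
            wait_queue_head_t ping_wait;  /* woken when the ack is received          */
            bool connected;               /* stands in for state.conn >= C_CONNECTED */
    };
    #define GOT_ACK 0

    static void wait_for_peer_ack(struct link_state *ls)
    {
            clear_bit(GOT_ACK, &ls->flags);   /* forget any stale ack first */
            /* real code: request_ping(connection) queues the ping packet */
            wait_event(ls->ping_wait,
                       test_bit(GOT_ACK, &ls->flags) || !ls->connected);
    }

The receive path would do the matching set_bit() plus wake_up() on the wait queue when the ack comes in, or wake the waiter via the state change when the connection is lost.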