Lines matching refs: conn
30 struct smc_connection *conn = cdcpend->conn;
35 sndbuf_desc = conn->sndbuf_desc;
36 smc = container_of(conn, struct smc_sock, conn);
40 &cdcpend->conn->tx_curs_fin,
44 atomic_add(diff, &cdcpend->conn->sndbuf_space);
47 smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
48 smc_curs_copy(&conn->local_tx_ctrl_fin, &cdcpend->p_cursor,
49 conn);
50 conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
53 if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
54 unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
55 wake_up(&conn->cdc_pend_tx_wq);
56 WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
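The completion-path lines above (file lines 30-56) return freed send-buffer space: the wrap-aware distance the finished cursor advanced is added back to sndbuf_space before tx_curs_fin is moved forward. A minimal plain-C sketch of that cursor arithmetic, using hypothetical names and a simplified two-field cursor rather than the kernel's cursor union:

    #include <stdint.h>

    /* Simplified ring cursor (hypothetical layout): 'wrap' counts laps
     * around the buffer, 'count' is the byte offset within it. */
    struct ring_cursor {
            uint16_t wrap;
            uint32_t count;
    };

    /* Wrap-aware distance from 'old' to 'new' in a ring of 'size' bytes;
     * mirrors the diff added to sndbuf_space in the lines above. */
    static int ring_curs_diff(unsigned int size, const struct ring_cursor *old,
                              const struct ring_cursor *new)
    {
            int diff;

            if (old->wrap != new->wrap)
                    diff = (int)((size - old->count) + new->count);
            else
                    diff = (int)(new->count - old->count);
            return diff < 0 ? 0 : diff;
    }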
62 int smc_cdc_get_free_slot(struct smc_connection *conn,
73 if (conn->killed) {
83 static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
95 pend->conn = conn;
96 pend->cursor = conn->tx_curs_sent;
97 pend->p_cursor = conn->local_tx_ctrl.prod;
98 pend->ctrl_seq = conn->tx_cdc_seq;
101 int smc_cdc_msg_send(struct smc_connection *conn,
105 struct smc_link *link = conn->lnk;
109 smc_cdc_add_pending_send(conn, pend);
111 conn->tx_cdc_seq++;
112 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
113 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
115 atomic_inc(&conn->cdc_pend_tx_wr);
120 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
121 conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
123 conn->tx_cdc_seq--;
124 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
125 atomic_dec(&conn->cdc_pend_tx_wr);
131 /* send a validation msg indicating the move of a conn to another QP link */
132 int smcr_cdc_msg_send_validation(struct smc_connection *conn,
136 struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
137 struct smc_link *link = conn->lnk;
144 peer->seqno = htons(conn->tx_cdc_seq_fin); /* seqno last compl. tx */
148 /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
151 smc_cdc_add_pending_send(conn, pend);
153 atomic_inc(&conn->cdc_pend_tx_wr);
158 atomic_dec(&conn->cdc_pend_tx_wr);
163 static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
172 link = conn->lnk;
175 rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
179 spin_lock_bh(&conn->send_lock);
180 if (link != conn->lnk) {
182 spin_unlock_bh(&conn->send_lock);
191 rc = smc_cdc_msg_send(conn, wr_buf, pend);
192 spin_unlock_bh(&conn->send_lock);
198 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
202 if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
205 if (conn->lgr->is_smcd) {
206 spin_lock_bh(&conn->send_lock);
207 rc = smcd_cdc_msg_send(conn);
208 spin_unlock_bh(&conn->send_lock);
210 rc = smcr_cdc_get_slot_and_msg_send(conn);
216 void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
218 wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
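File lines 53-56, 115, 125, 153, 158 and 216-218 above outline an in-flight counter protocol: the sender bumps cdc_pend_tx_wr before posting a CDC write, the completion handler drops it and wakes the waitqueue when it reaches zero, and connection teardown waits until nothing is outstanding. A minimal sketch of that pattern with hypothetical struct and function names, assuming only the standard kernel atomic and waitqueue primitives:

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/wait.h>

    /* Hypothetical context for the in-flight CDC write count. */
    struct cdc_tx_pend_ctx {
            atomic_t                pend_wr; /* writes posted, not yet completed */
            wait_queue_head_t       pend_wq; /* woken when pend_wr reaches zero */
    };

    static void cdc_tx_pend_init(struct cdc_tx_pend_ctx *ctx)
    {
            atomic_set(&ctx->pend_wr, 0);
            init_waitqueue_head(&ctx->pend_wq);
    }

    /* Before posting a work request (cf. atomic_inc at file line 115). */
    static void cdc_tx_pend_get(struct cdc_tx_pend_ctx *ctx)
    {
            atomic_inc(&ctx->pend_wr);
    }

    /* From the send completion handler (cf. file lines 53-56). */
    static void cdc_tx_pend_put(struct cdc_tx_pend_ctx *ctx)
    {
            if (atomic_dec_and_test(&ctx->pend_wr) &&
                unlikely(wq_has_sleeper(&ctx->pend_wq)))
                    wake_up(&ctx->pend_wq);
            WARN_ON(atomic_read(&ctx->pend_wr) < 0);
    }

    /* On teardown (cf. smc_cdc_wait_pend_tx_wr at file lines 216-218). */
    static void cdc_tx_pend_wait(struct cdc_tx_pend_ctx *ctx)
    {
            wait_event(ctx->pend_wq, !atomic_read(&ctx->pend_wr));
    }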
225 int smcd_cdc_msg_send(struct smc_connection *conn)
227 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
234 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
237 curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
240 cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
241 cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
242 rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
245 smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
246 conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
248 diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
249 &conn->tx_curs_sent);
252 atomic_add(diff, &conn->sndbuf_space);
255 smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
271 struct smc_connection *conn = &smc->conn;
275 smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
276 conn->urg_state = SMC_URG_VALID;
280 base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
281 if (conn->urg_curs.count)
282 conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
284 conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
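File lines 280-284 above locate the urgent byte: it is the byte immediately before the producer cursor, and a cursor count of zero means the producer just wrapped, so the byte is the last one in the receive buffer. A small illustrative helper (hypothetical name, plain C):

    #include <stddef.h>
    #include <stdint.h>

    /* Return the out-of-band byte given the receive buffer base/length and
     * the producer cursor's byte count (illustrative, not a kernel helper). */
    static uint8_t urg_byte_at(const uint8_t *rmb_base, size_t rmb_len,
                               uint32_t prod_count)
    {
            if (prod_count)
                    return rmb_base[prod_count - 1];
            return rmb_base[rmb_len - 1]; /* producer wrapped to offset 0 */
    }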
291 struct smc_connection *conn = &smc->conn;
296 diff = conn->local_rx_ctrl.seqno - recv_seq;
299 conn->out_of_sync = 1; /* prevent any further receives */
300 spin_lock_bh(&conn->send_lock);
301 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
302 conn->lnk = link;
303 spin_unlock_bh(&conn->send_lock);
305 if (!queue_work(smc_close_wq, &conn->abort_work))
314 struct smc_connection *conn = &smc->conn;
317 smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
318 smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
319 smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);
321 diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
322 &conn->local_rx_ctrl.cons);
328 atomic_add(diff_cons, &conn->peer_rmbe_space);
333 diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
334 &conn->local_rx_ctrl.prod);
336 if (conn->local_rx_ctrl.prod_flags.urg_data_present)
340 atomic_add(diff_prod, &conn->bytes_to_rcv);
345 if (conn->local_rx_ctrl.prod_flags.write_blocked)
347 if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
348 conn->urg_state = SMC_URG_NOTYET;
352 if ((diff_cons && smc_tx_prepared_sends(conn)) ||
353 conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
354 conn->local_rx_ctrl.prod_flags.urg_data_pending)
355 smc_tx_sndbuf_nonempty(conn);
357 if (diff_cons && conn->urg_tx_pend &&
358 atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
360 conn->urg_tx_pend = false;
364 if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
366 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
368 if (smc_cdc_rxed_any_close_or_senddone(conn)) {
374 if (!queue_work(smc_close_wq, &conn->close_work))
397 struct smc_connection *conn = (struct smc_connection *)data;
402 if (!conn || conn->killed)
405 data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
406 smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
407 smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
408 smc = container_of(conn, struct smc_sock, conn);
415 void smcd_cdc_rx_init(struct smc_connection *conn)
417 tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
426 struct smc_connection *conn;
438 conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
440 if (!conn || conn->out_of_sync)
442 smc = container_of(conn, struct smc_sock, conn);
449 conn->local_rx_ctrl.seqno))
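The receive-side checks above (file lines 296 and 449) decide whether a CDC message is stale by comparing sequence numbers through their difference rather than with a plain less-than, so the test stays correct when the counter wraps. Assuming 16-bit sequence numbers, the usual wrap-safe form looks like this (plain-C sketch, hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if seq1 is older than seq2: the signed view of the difference is
     * negative whenever seq1 lags seq2 by less than half the number space,
     * even across the 0xffff -> 0 wrap. */
    static bool seq16_before(uint16_t seq1, uint16_t seq2)
    {
            return (int16_t)(seq1 - seq2) < 0;
    }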