Lines Matching refs:csk

27 __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
46 submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
60 submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
67 static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
69 return __cxgbit_alloc_skb(csk, len, false);
159 cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
163 const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
186 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
201 void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
205 while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
231 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
232 flowclen16 = cxgbit_send_tx_flowc_wr(csk);
233 csk->wr_cred -= flowclen16;
234 csk->wr_una_cred += flowclen16;
237 if (csk->wr_cred < credits_needed) {
238 pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
239 csk, skb->len, skb->data_len,
240 credits_needed, csk->wr_cred);
243 __skb_unlink(skb, &csk->txq);
244 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
246 csk->wr_cred -= credits_needed;
247 csk->wr_una_cred += credits_needed;
249 pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
250 csk, skb->len, skb->data_len, credits_needed,
251 csk->wr_cred, csk->wr_una_cred);
256 if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
257 (!before(csk->write_seq,
258 csk->snd_una + csk->snd_win))) {
260 csk->wr_una_cred = 0;
263 cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
265 csk->snd_nxt += len;
268 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
272 csk->wr_una_cred = 0;
275 cxgbit_sock_enqueue_wr(csk, skb);
276 t4_set_arp_err_handler(skb, csk,
279 pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
280 csk, csk->tid, skb, len);
282 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
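The fragments above show the work-request credit accounting in cxgbit_push_tx_frames(): a FLOWC WR is charged on the first transmit, each TX_DATA WR consumes credits that only come back with a hardware completion, and a completion is requested once half of wr_max_cred is outstanding or the TCP send window is exhausted. A minimal user-space sketch of that accounting follows; the FLOWC cost, the credit sizes and the struct below are illustrative assumptions, and requesting a completion is reduced to resetting the unacked-credit count.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_sock {
	uint32_t wr_cred;	/* work-request credits still available */
	uint32_t wr_una_cred;	/* credits consumed but not yet completed */
	uint32_t wr_max_cred;	/* TX queue depth in credits */
	uint32_t write_seq;	/* last byte queued by the iSCSI layer */
	uint32_t snd_nxt;	/* next byte to hand to the adapter */
	uint32_t snd_una;	/* last byte acked by the peer */
	uint32_t snd_win;	/* TCP send window */
	bool tx_data_sent;	/* mirrors the CSK_TX_DATA_SENT flag */
};

/* Try to push one WR costing 'credits' and carrying 'len' payload bytes. */
bool model_push_one(struct model_sock *csk, uint32_t credits, uint32_t len)
{
	if (!csk->tx_data_sent) {
		uint32_t flowclen16 = 2;	/* assumed FLOWC WR cost */

		csk->tx_data_sent = true;
		csk->wr_cred -= flowclen16;
		csk->wr_una_cred += flowclen16;
	}

	if (csk->wr_cred < credits)
		return false;	/* stall until completions return credits */

	csk->wr_cred -= credits;
	csk->wr_una_cred += credits;

	/*
	 * Ask the hardware for a completion when half the queue is unacked
	 * or the TCP send window is full, then restart the unacked count.
	 */
	if (csk->wr_una_cred >= csk->wr_max_cred / 2 ||
	    csk->write_seq - csk->snd_una >= csk->snd_win)
		csk->wr_una_cred = 0;

	csk->snd_nxt += len;
	return true;
}

int main(void)
{
	struct model_sock csk = {
		.wr_cred = 16, .wr_max_cred = 16,
		.snd_win = 64 * 1024, .write_seq = 4096,
	};
	bool pushed = model_push_one(&csk, 3, 4096);

	printf("pushed: %d, credits left: %u\n", pushed, csk.wr_cred);
	return 0;
}

In the driver the consumed credits come back through the firmware ack path, which replenishes wr_cred and is what lets a stalled loop resume.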
286 static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
294 spin_lock_bh(&csk->lock);
295 while (skb_queue_len(&csk->backlogq)) {
296 skb_queue_splice_init(&csk->backlogq, &backlogq);
297 spin_unlock_bh(&csk->lock);
301 fn(csk, skb);
304 spin_lock_bh(&csk->lock);
307 csk->lock_owner = false;
308 spin_unlock_bh(&csk->lock);
311 static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
315 spin_lock_bh(&csk->lock);
316 csk->lock_owner = true;
317 spin_unlock_bh(&csk->lock);
319 if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
322 __skb_queue_purge(&csk->ppodq);
327 csk->write_seq += skb->len +
330 skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
331 __skb_queue_tail(&csk->txq, skb);
332 cxgbit_push_tx_frames(csk);
335 cxgbit_unlock_sock(csk);
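cxgbit_queue_skb() and cxgbit_unlock_sock() above implement a lock-owner hand-off: the transmit path marks itself lock_owner, works with the spinlock dropped, and drains whatever other contexts parked on backlogq before clearing ownership. A rough pthread model of that pattern; the item type, the handler and the queue details are placeholders, and in the driver only non-owner contexts ever defer work.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct item { struct item *next; int id; };

struct sock_model {
	pthread_mutex_t lock;	/* stands in for csk->lock */
	struct item *backlogq;	/* FIFO sk_buff_head in the driver; order elided */
	bool lock_owner;
};

void handle_item(struct sock_model *s, struct item *it)
{
	printf("handling item %d\n", it->id);
}

/* Non-owner contexts (e.g. the rx completion path) park work here
 * while lock_owner is set, instead of racing with the owner. */
void defer_item(struct sock_model *s, struct item *it)
{
	pthread_mutex_lock(&s->lock);
	it->next = s->backlogq;
	s->backlogq = it;
	pthread_mutex_unlock(&s->lock);
}

/* Owner path: do the work, then drain anything deferred meanwhile. */
void submit_item(struct sock_model *s, struct item *it)
{
	pthread_mutex_lock(&s->lock);
	s->lock_owner = true;
	pthread_mutex_unlock(&s->lock);

	handle_item(s, it);	/* e.g. queue on txq and push frames */

	pthread_mutex_lock(&s->lock);
	while (s->backlogq) {
		struct item *batch = s->backlogq;

		s->backlogq = NULL;
		pthread_mutex_unlock(&s->lock);
		for (; batch; batch = batch->next)
			handle_item(s, batch);
		pthread_mutex_lock(&s->lock);
	}
	s->lock_owner = false;
	pthread_mutex_unlock(&s->lock);
}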
393 cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
396 struct iscsit_conn *conn = csk->conn;
410 if (num_pdu > csk->max_iso_npdu)
411 num_pdu = csk->max_iso_npdu;
417 skb = __cxgbit_alloc_skb(csk, 0, true);
423 cxgbit_skcb_submode(skb) |= (csk->submode &
464 ret = cxgbit_queue_skb(csk, skb);
484 cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
490 skb = cxgbit_alloc_skb(csk, 0);
497 cxgbit_skcb_submode(skb) |= (csk->submode &
509 return cxgbit_queue_skb(csk, skb);
517 struct cxgbit_sock *csk = conn->context;
523 (!padding) && (!datain->offset) && csk->max_iso_npdu) {
526 return cxgbit_tx_datain_iso(csk, cmd, dr);
529 return cxgbit_tx_datain(csk, cmd, datain);
536 struct cxgbit_sock *csk = conn->context;
540 skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
559 return cxgbit_queue_skb(csk, skb);
574 struct cxgbit_sock *csk = conn->context;
575 struct cxgbit_device *cdev = csk->com.cdev;
596 static int cxgbit_set_digest(struct cxgbit_sock *csk)
598 struct iscsit_conn *conn = csk->conn;
608 csk->submode |= CXGBIT_SUBMODE_HCRC;
612 csk->submode = 0;
618 csk->submode |= CXGBIT_SUBMODE_DCRC;
620 if (cxgbit_setup_conn_digest(csk)) {
621 csk->submode = 0;
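cxgbit_set_digest() above turns the negotiated HeaderDigest/DataDigest keys into the HCRC/DCRC submode bits, and clears submode again if programming the digest on the connection fails. A standalone sketch of the selection step; the bit values are assumptions, the "CRC32C"/"None" strings are the standard iSCSI key values.

#include <stdio.h>
#include <string.h>

#define SUBMODE_HCRC 0x1	/* assumed header-digest offload bit */
#define SUBMODE_DCRC 0x2	/* assumed data-digest offload bit */

unsigned int digest_submode(const char *header_digest, const char *data_digest)
{
	unsigned int submode = 0;

	if (!strcmp(header_digest, "CRC32C"))
		submode |= SUBMODE_HCRC;
	if (!strcmp(data_digest, "CRC32C"))
		submode |= SUBMODE_DCRC;
	return submode;
}

int main(void)
{
	/* header digest negotiated, data digest declined */
	printf("submode = %#x\n", digest_submode("CRC32C", "None"));
	return 0;
}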
628 static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
630 struct iscsit_conn *conn = csk->conn;
654 max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
658 cxgbit_digest_len[csk->submode]);
660 csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
662 if (csk->max_iso_npdu <= 1)
663 csk->max_iso_npdu = 0;
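cxgbit_set_iso_npdu() above caps the ISO burst at whatever fits in the adapter's ISO payload limit (rounded down to a multiple of emss) and at MaxBurstLength, and disables ISO when only a single PDU would fit. A worked example of that arithmetic; the ISO limit, emss, MRDSL, MaxBurstLength and digest length below are illustrative numbers, not the driver's values.

#include <stdio.h>

#define ISCSI_HDR_LEN	48U	/* basic header segment */

unsigned int iso_npdu(unsigned int iso_limit, unsigned int emss,
		      unsigned int mrdsl, unsigned int max_burst,
		      unsigned int digest_len)
{
	/* largest ISO payload that is a whole number of segments */
	unsigned int max_iso_payload = (iso_limit / emss) * emss;
	unsigned int max_iso_npdu = max_iso_payload /
				    (ISCSI_HDR_LEN + mrdsl + digest_len);
	unsigned int max_npdu = max_burst / mrdsl;
	unsigned int npdu = max_iso_npdu < max_npdu ? max_iso_npdu : max_npdu;

	return npdu <= 1 ? 0 : npdu;	/* ISO is pointless for one PDU */
}

int main(void)
{
	/* 64KB ISO limit, 1448-byte emss, 8KB MRDSL, 256KB MaxBurstLength */
	printf("max_iso_npdu = %u\n",
	       iso_npdu(65535, 1448, 8192, 262144, 4));
	return 0;
}

With these inputs the ISO limit allows 7 PDUs per work request, which is below the 32 allowed by MaxBurstLength, so 7 wins.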
670 * @csk: pointer to cxgbit socket structure
679 static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
681 struct iscsit_conn *conn = csk->conn;
717 struct cxgbit_sock *csk = conn->context;
718 struct cxgbit_device *cdev = csk->com.cdev;
719 struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
727 if (cxgbit_set_digest(csk))
746 ret = cxgbit_seq_pdu_inorder(csk);
757 if (cxgbit_set_iso_npdu(csk))
763 if (cxgbit_setup_conn_pgidx(csk,
766 set_bit(CSK_DDP_ENABLE, &csk->com.flags);
777 struct cxgbit_sock *csk = conn->context;
782 skb = cxgbit_alloc_skb(csk, length + padding);
798 set_bit(CSK_LOGIN_DONE, &csk->com.flags);
801 if (cxgbit_queue_skb(csk, skb))
835 static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
837 struct iscsit_conn *conn = csk->conn;
838 struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
860 struct cxgbit_sock *csk = conn->context;
861 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
881 struct skb_shared_info *ssi = skb_shinfo(csk->skb);
897 cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
967 cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
969 struct iscsit_conn *conn = csk->conn;
970 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
995 static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
998 struct iscsit_conn *conn = csk->conn;
1002 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1061 cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
1069 struct cxgbit_device *cdev = csk->com.cdev;
1087 static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
1089 struct iscsit_conn *conn = csk->conn;
1090 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1132 skb_copy_bits(csk->skb, pdu_cb->doffset,
1155 cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
1157 struct iscsit_conn *conn = csk->conn;
1158 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1193 skb_copy_bits(csk->skb, pdu_cb->doffset,
1208 static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
1210 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1212 struct iscsit_conn *conn = csk->conn;
1219 cmd = cxgbit_allocate_cmd(csk);
1223 ret = cxgbit_handle_scsi_cmd(csk, cmd);
1226 ret = cxgbit_handle_iscsi_dataout(csk);
1230 cmd = cxgbit_allocate_cmd(csk);
1235 ret = cxgbit_handle_nop_out(csk, cmd);
1238 cmd = cxgbit_allocate_cmd(csk);
1251 cmd = cxgbit_allocate_cmd(csk);
1256 ret = cxgbit_handle_text_cmd(csk, cmd);
1259 cmd = cxgbit_allocate_cmd(csk);
1286 static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
1288 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1289 struct iscsit_conn *conn = csk->conn;
1313 if (cxgbit_target_rx_opcode(csk) < 0)
1322 static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
1324 struct iscsit_conn *conn = csk->conn;
1326 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
1359 skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
1365 cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
1372 csk->skb = skb;
1374 if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
1375 ret = cxgbit_rx_login_pdu(csk);
1376 set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
1378 ret = cxgbit_rx_opcode(csk);
1394 pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
1395 skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
1408 static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
1410 struct sk_buff *skb = csk->lro_hskb;
1424 cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
1426 struct sk_buff *hskb = csk->lro_hskb;
1494 static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1502 cxgbit_lro_skb_merge(csk, skb, 0);
1505 struct sk_buff *hskb = csk->lro_hskb;
1507 ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
1509 cxgbit_lro_hskb_reset(csk);
1522 ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
1528 cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
1534 static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1541 (pdu_cb->seq != csk->rcv_nxt)) {
1542 pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
1543 csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
1548 csk->rcv_nxt += lro_cb->pdu_totallen;
1550 ret = cxgbit_process_lro_skb(csk, skb);
1552 csk->rx_credits += lro_cb->pdu_totallen;
1554 if (csk->rx_credits >= (csk->rcv_win / 4))
1555 cxgbit_rx_data_ack(csk);
1560 static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1565 ret = cxgbit_process_lro_skb(csk, skb);
1569 csk->rx_credits += lro_cb->pdu_totallen;
1570 if (csk->rx_credits >= csk->rcv_win) {
1571 csk->rx_credits = 0;
1572 cxgbit_rx_data_ack(csk);
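The two receive paths above return window credits in batches: cxgbit_t5_rx_lro_skb() issues an RX_DATA_ACK once a quarter of rcv_win has been consumed, cxgbit_rx_lro_skb() once the whole window has. A small model of that thresholding; resetting the counter on the quarter-window path is assumed to happen as part of the ack itself, and the numbers in main() are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct rx_state {
	unsigned int rx_credits;	/* bytes consumed, not yet returned */
	unsigned int rcv_win;		/* advertised receive window */
};

/* Returns true when an RX_DATA_ACK would be issued. */
bool rx_consume(struct rx_state *rx, unsigned int len, bool quarter_window)
{
	unsigned int thresh = quarter_window ? rx->rcv_win / 4 : rx->rcv_win;

	rx->rx_credits += len;
	if (rx->rx_credits >= thresh) {
		rx->rx_credits = 0;	/* credits handed back to the NIC */
		return true;
	}
	return false;
}

int main(void)
{
	struct rx_state rx = { .rcv_win = 256 * 1024 };

	printf("ack after 64K on the quarter-window path: %d\n",
	       rx_consume(&rx, 64 * 1024, true));
	return 0;
}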
1578 static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1580 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1585 ret = cxgbit_t5_rx_lro_skb(csk, skb);
1587 ret = cxgbit_rx_lro_skb(csk, skb);
1594 static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
1596 spin_lock_bh(&csk->rxq.lock);
1597 if (skb_queue_len(&csk->rxq)) {
1598 skb_queue_splice_init(&csk->rxq, rxq);
1599 spin_unlock_bh(&csk->rxq.lock);
1602 spin_unlock_bh(&csk->rxq.lock);
1606 static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
1613 wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
1619 if (cxgbit_rx_skb(csk, skb))
1631 struct cxgbit_sock *csk = conn->context;
1634 while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
1635 ret = cxgbit_wait_rxq(csk);
1637 clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
1647 struct cxgbit_sock *csk = conn->context;
1651 if (cxgbit_wait_rxq(csk))
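cxgbit_rxq_len() and cxgbit_wait_rxq() above show the rx hand-off: the completion path queues skbs on csk->rxq and wakes csk->waitq, and the iSCSI rx thread splices the whole queue off under the lock, then processes it without holding the lock. A user-space analogue using pthreads; the node type, function names and LIFO list are placeholders (the driver uses a FIFO sk_buff_head).

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int len; };

static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rxq_wait = PTHREAD_COND_INITIALIZER;
static struct node *rxq;

/* producer: models the CPL receive path */
void rx_enqueue(struct node *n)
{
	pthread_mutex_lock(&rxq_lock);
	n->next = rxq;
	rxq = n;
	pthread_cond_signal(&rxq_wait);
	pthread_mutex_unlock(&rxq_lock);
}

/* consumer: models the iSCSI rx thread's splice-then-process loop */
void rx_wait_and_process(void)
{
	struct node *batch;

	pthread_mutex_lock(&rxq_lock);
	while (!rxq)
		pthread_cond_wait(&rxq_wait, &rxq_lock);
	batch = rxq;		/* splice: take the whole queue at once */
	rxq = NULL;
	pthread_mutex_unlock(&rxq_lock);

	for (; batch; batch = batch->next)
		printf("processing %d bytes\n", batch->len);
}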