Lines Matching refs: csk

469 		     struct cxgbit_sock *csk)
472 conn->login_sockaddr = csk->com.remote_addr;
473 conn->local_sockaddr = csk->com.local_addr;
479 struct cxgbit_sock *csk;
504 csk = list_first_entry(&cnp->np_accept_list,
508 list_del_init(&csk->accept_node);
510 conn->context = csk;
511 csk->conn = conn;
513 cxgbit_set_conn_info(np, conn, csk);
598 static void __cxgbit_free_conn(struct cxgbit_sock *csk);
603 struct cxgbit_sock *csk, *tmp;
612 list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
613 list_del_init(&csk->accept_node);
614 __cxgbit_free_conn(csk);
622 static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
631 cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
635 __skb_queue_tail(&csk->txq, skb);
636 cxgbit_push_tx_frames(csk);
641 struct cxgbit_sock *csk = handle;
645 cxgbit_put_csk(csk);
658 static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
663 pr_debug("%s: csk %p tid %u; state %d\n",
664 __func__, csk, csk->tid, csk->com.state);
666 __skb_queue_purge(&csk->txq);
668 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
669 cxgbit_send_tx_flowc_wr(csk);
671 skb = __skb_dequeue(&csk->skbq);
672 cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
673 csk->com.cdev, cxgbit_abort_arp_failure);
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
679 __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
683 if (csk->com.state != CSK_STATE_ESTABLISHED)
686 set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
687 csk->com.state = CSK_STATE_ABORTING;
689 cxgbit_send_abort_req(csk);
694 cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
695 cxgbit_put_csk(csk);
698 void cxgbit_abort_conn(struct cxgbit_sock *csk)
702 cxgbit_get_csk(csk);
703 cxgbit_init_wr_wait(&csk->com.wr_wait);
705 spin_lock_bh(&csk->lock);
706 if (csk->lock_owner) {
708 __skb_queue_tail(&csk->backlogq, skb);
710 __cxgbit_abort_conn(csk, skb);
712 spin_unlock_bh(&csk->lock);
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
715 csk->tid, 600, __func__);
718 static void __cxgbit_free_conn(struct cxgbit_sock *csk)
720 struct iscsit_conn *conn = csk->conn;
724 __func__, csk->com.state);
726 spin_lock_bh(&csk->lock);
727 switch (csk->com.state) {
730 csk->com.state = CSK_STATE_CLOSING;
731 cxgbit_send_halfclose(csk);
733 csk->com.state = CSK_STATE_ABORTING;
734 cxgbit_send_abort_req(csk);
738 csk->com.state = CSK_STATE_MORIBUND;
739 cxgbit_send_halfclose(csk);
745 pr_err("%s: csk %p; state %d\n",
746 __func__, csk, csk->com.state);
748 spin_unlock_bh(&csk->lock);
751 cxgbit_put_csk(csk);
759 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
762 ((csk->com.remote_addr.ss_family == AF_INET) ?
765 csk->mss = csk->emss;
767 csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
768 if (csk->emss < 128)
769 csk->emss = 128;
770 if (csk->emss & 7)
772 TCPOPT_MSS_G(opt), csk->mss, csk->emss);
774 csk->mss, csk->emss);
777 static void cxgbit_free_skb(struct cxgbit_sock *csk)
781 __skb_queue_purge(&csk->txq);
782 __skb_queue_purge(&csk->rxq);
783 __skb_queue_purge(&csk->backlogq);
784 __skb_queue_purge(&csk->ppodq);
785 __skb_queue_purge(&csk->skbq);
787 while ((skb = cxgbit_sock_dequeue_wr(csk)))
790 __kfree_skb(csk->lro_hskb);
795 struct cxgbit_sock *csk;
798 csk = container_of(kref, struct cxgbit_sock, kref);
800 pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
802 if (csk->com.local_addr.ss_family == AF_INET6) {
804 &csk->com.local_addr;
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
811 csk->com.local_addr.ss_family);
812 dst_release(csk->dst);
813 cxgb4_l2t_release(csk->l2t);
815 cdev = csk->com.cdev;
817 list_del(&csk->list);
820 cxgbit_free_skb(csk);
821 cxgbit_put_cnp(csk->cnp);
824 kfree(csk);
827 static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
836 csk->rcv_win = CXGBIT_10G_RCV_WIN;
838 csk->rcv_win *= scale;
839 csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);
842 csk->snd_win = CXGBIT_10G_SND_WIN;
844 csk->snd_win *= scale;
845 csk->snd_win = min(csk->snd_win, 512U * 1024);
848 __func__, csk->snd_win, csk->rcv_win);
899 cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
933 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
935 if (!csk->l2t)
937 csk->mtu = ndev->mtu;
938 csk->tx_chan = cxgb4_port_chan(ndev);
939 csk->smac_idx =
943 csk->txq_idx = cxgb4_port_idx(ndev) * step;
946 csk->ctrlq_idx = cxgb4_port_idx(ndev);
947 csk->rss_qid = cdev->lldi.rxq_ids[
949 csk->port_id = cxgb4_port_idx(ndev);
950 cxgbit_set_tcp_window(csk,
964 csk->dcb_priority = priority;
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
968 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
970 if (!csk->l2t)
973 csk->mtu = dst_mtu(dst);
974 csk->tx_chan = cxgb4_port_chan(ndev);
975 csk->smac_idx =
979 csk->txq_idx = (port_id * step) +
981 csk->ctrlq_idx = cxgb4_port_idx(ndev);
986 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
987 csk->port_id = port_id;
988 cxgbit_set_tcp_window(csk,
1045 static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
1047 if (csk->com.state != CSK_STATE_ESTABLISHED) {
1052 cxgbit_ofld_send(csk->com.cdev, skb);
1060 int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
1071 RX_CREDITS_V(csk->rx_credits);
1073 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
1076 csk->rx_credits = 0;
1078 spin_lock_bh(&csk->lock);
1079 if (csk->lock_owner) {
1081 __skb_queue_tail(&csk->backlogq, skb);
1082 spin_unlock_bh(&csk->lock);
1086 cxgbit_send_rx_credits(csk, skb);
1087 spin_unlock_bh(&csk->lock);
1094 static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
1113 __skb_queue_tail(&csk->skbq, skb);
1121 csk->lro_hskb = skb;
1125 __skb_queue_purge(&csk->skbq);
1130 cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
1135 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1143 pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
1147 cxgbit_put_csk(csk);
1153 INIT_TP_WR(rpl5, csk->tid);
1155 csk->tid));
1156 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
1158 (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1159 wscale = cxgb_compute_wscale(csk->rcv_win);
1164 win = csk->rcv_win >> 10;
1170 L2T_IDX_V(csk->l2t->idx) |
1171 TX_CHAN_V(csk->tx_chan) |
1172 SMAC_SEL_V(csk->smac_idx) |
1173 DSCP_V(csk->tos >> 2) |
1178 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
1211 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
1212 t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
1213 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
1219 struct cxgbit_sock *csk = NULL;
1250 csk = lookup_tid(t, tid);
1251 if (csk) {
1252 pr_err("%s csk not null tid %u\n",
1291 csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
1292 if (!csk) {
1297 ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
1303 kfree(csk);
1307 kref_init(&csk->kref);
1308 init_completion(&csk->com.wr_wait.completion);
1310 INIT_LIST_HEAD(&csk->accept_node);
1314 if (peer_mss && csk->mtu > (peer_mss + hdrs))
1315 csk->mtu = peer_mss + hdrs;
1317 csk->com.state = CSK_STATE_CONNECTING;
1318 csk->com.cdev = cdev;
1319 csk->cnp = cnp;
1320 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1321 csk->dst = dst;
1322 csk->tid = tid;
1323 csk->wr_cred = cdev->lldi.wr_cred -
1325 csk->wr_max_cred = csk->wr_cred;
1326 csk->wr_una_cred = 0;
1330 &csk->com.local_addr;
1335 sin = (struct sockaddr_in *)&csk->com.remote_addr;
1341 &csk->com.local_addr;
1350 sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
1356 skb_queue_head_init(&csk->rxq);
1357 skb_queue_head_init(&csk->txq);
1358 skb_queue_head_init(&csk->ppodq);
1359 skb_queue_head_init(&csk->backlogq);
1360 skb_queue_head_init(&csk->skbq);
1361 cxgbit_sock_reset_wr_list(csk);
1362 spin_lock_init(&csk->lock);
1363 init_waitqueue_head(&csk->waitq);
1364 csk->lock_owner = false;
1366 if (cxgbit_alloc_csk_skb(csk)) {
1368 kfree(csk);
1376 list_add_tail(&csk->list, &cdev->cskq.list);
1378 cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
1379 cxgbit_pass_accept_rpl(csk, req);
1389 cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
1396 if (csk->snd_wscale)
1416 u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
1418 struct cxgbit_device *cdev = csk->com.cdev;
1425 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
1428 flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
1430 skb = __skb_dequeue(&csk->skbq);
1436 FW_WR_FLOWID_V(csk->tid));
1439 (csk->com.cdev->lldi.pf));
1441 flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
1443 flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
1445 flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
1447 flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
1449 flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
1451 flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
1453 flowc->mnemval[7].val = cpu_to_be32(csk->emss);
1463 if (csk->snd_wscale) {
1465 flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
1472 pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
1479 pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
1481 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
1482 csk->rcv_nxt, csk->snd_win, csk->emss);
1483 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
1484 cxgbit_ofld_send(csk->com.cdev, skb);
1489 cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1491 spin_lock_bh(&csk->lock);
1492 if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
1493 spin_unlock_bh(&csk->lock);
1494 pr_err("%s: csk 0x%p, tid %u, state %u\n",
1495 __func__, csk, csk->tid, csk->com.state);
1500 cxgbit_get_csk(csk);
1501 cxgbit_init_wr_wait(&csk->com.wr_wait);
1502 cxgbit_ofld_send(csk->com.cdev, skb);
1503 spin_unlock_bh(&csk->lock);
1508 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1512 u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1513 u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1524 INIT_TP_WR(req, csk->tid);
1525 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1526 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1531 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1533 if (cxgbit_send_tcb_skb(csk, skb))
1536 ret = cxgbit_wait_for_reply(csk->com.cdev,
1537 &csk->com.wr_wait,
1538 csk->tid, 5, __func__);
1545 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1558 INIT_TP_WR(req, csk->tid);
1559 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1560 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1564 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1566 if (cxgbit_send_tcb_skb(csk, skb))
1569 ret = cxgbit_wait_for_reply(csk->com.cdev,
1570 &csk->com.wr_wait,
1571 csk->tid, 5, __func__);
1628 struct cxgbit_sock *csk;
1634 csk = lookup_tid(t, tid);
1635 if (unlikely(!csk)) {
1639 cnp = csk->cnp;
1641 pr_debug("%s: csk %p; tid %u; cnp %p\n",
1642 __func__, csk, tid, cnp);
1644 csk->write_seq = snd_isn;
1645 csk->snd_una = snd_isn;
1646 csk->snd_nxt = snd_isn;
1648 csk->rcv_nxt = rcv_isn;
1650 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1651 cxgbit_set_emss(csk, tcp_opt);
1652 dst_confirm(csk->dst);
1653 csk->com.state = CSK_STATE_ESTABLISHED;
1655 list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1662 static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1665 spin_lock_bh(&csk->rxq.lock);
1666 __skb_queue_tail(&csk->rxq, skb);
1667 spin_unlock_bh(&csk->rxq.lock);
1668 wake_up(&csk->waitq);
1671 static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1673 pr_debug("%s: csk %p; tid %u; state %d\n",
1674 __func__, csk, csk->tid, csk->com.state);
1676 switch (csk->com.state) {
1678 csk->com.state = CSK_STATE_CLOSING;
1679 cxgbit_queue_rx_skb(csk, skb);
1683 csk->com.state = CSK_STATE_MORIBUND;
1686 csk->com.state = CSK_STATE_DEAD;
1687 cxgbit_put_csk(csk);
1693 __func__, csk->com.state);
1699 static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1701 pr_debug("%s: csk %p; tid %u; state %d\n",
1702 __func__, csk, csk->tid, csk->com.state);
1704 switch (csk->com.state) {
1706 csk->com.state = CSK_STATE_MORIBUND;
1709 csk->com.state = CSK_STATE_DEAD;
1710 cxgbit_put_csk(csk);
1717 __func__, csk->com.state);
1723 static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1732 pr_debug("%s: csk %p; tid %u; state %d\n",
1733 __func__, csk, tid, csk->com.state);
1741 switch (csk->com.state) {
1744 csk->com.state = CSK_STATE_DEAD;
1748 csk->com.state = CSK_STATE_DEAD;
1752 csk->com.state = CSK_STATE_DEAD;
1753 if (!csk->conn)
1760 __func__, csk->com.state);
1761 csk->com.state = CSK_STATE_DEAD;
1764 __skb_queue_purge(&csk->txq);
1766 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1767 cxgbit_send_tx_flowc_wr(csk);
1769 rpl_skb = __skb_dequeue(&csk->skbq);
1771 cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1772 cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1775 cxgbit_queue_rx_skb(csk, skb);
1780 cxgbit_put_csk(csk);
1785 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1789 pr_debug("%s: csk %p; tid %u; state %d\n",
1790 __func__, csk, csk->tid, csk->com.state);
1792 switch (csk->com.state) {
1794 csk->com.state = CSK_STATE_DEAD;
1795 if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1796 cxgbit_wake_up(&csk->com.wr_wait, __func__,
1798 cxgbit_put_csk(csk);
1802 __func__, csk->com.state);
1808 static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1810 const struct sk_buff *skb = csk->wr_pending_head;
1813 if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1814 pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1815 csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1824 if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1825 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1826 csk, csk->tid, csk->wr_cred,
1827 credit, csk->wr_max_cred);
1835 static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1841 csk->wr_cred += credits;
1842 if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1843 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1846 struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1850 pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1851 csk, csk->tid, credits,
1852 csk->wr_cred, csk->wr_una_cred);
1858 pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1859 csk, csk->tid,
1860 credits, csk->wr_cred, csk->wr_una_cred,
1866 cxgbit_sock_dequeue_wr(csk);
1871 if (unlikely(cxgbit_credit_err(csk))) {
1872 cxgbit_queue_rx_skb(csk, skb);
1877 if (unlikely(before(snd_una, csk->snd_una))) {
1878 pr_warn("csk 0x%p,%u, snd_una %u/%u.",
1879 csk, csk->tid, snd_una,
1880 csk->snd_una);
1884 if (csk->snd_una != snd_una) {
1885 csk->snd_una = snd_una;
1886 dst_confirm(csk->dst);
1890 if (skb_queue_len(&csk->txq))
1891 cxgbit_push_tx_frames(csk);
1899 struct cxgbit_sock *csk;
1905 csk = lookup_tid(t, tid);
1906 if (unlikely(!csk)) {
1910 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1913 cxgbit_put_csk(csk);
1920 struct cxgbit_sock *csk;
1926 csk = lookup_tid(t, tid);
1927 if (unlikely(!csk)) {
1932 cxgbit_queue_rx_skb(csk, skb);
1939 __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1941 spin_lock(&csk->lock);
1942 if (csk->lock_owner) {
1943 __skb_queue_tail(&csk->backlogq, skb);
1944 spin_unlock(&csk->lock);
1948 cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1949 spin_unlock(&csk->lock);
1952 static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1954 cxgbit_get_csk(csk);
1955 __cxgbit_process_rx_cpl(csk, skb);
1956 cxgbit_put_csk(csk);
1961 struct cxgbit_sock *csk;
1990 csk = lookup_tid(t, tid);
1991 if (unlikely(!csk)) {
1997 cxgbit_process_rx_cpl(csk, skb);
1999 __cxgbit_process_rx_cpl(csk, skb);
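
The listing above repeatedly shows the same deferral idiom: cxgbit_abort_conn (705-710), cxgbit_rx_data_ack (1078-1082) and __cxgbit_process_rx_cpl (1941-1944) all take csk->lock and, if csk->lock_owner says another context is currently processing the socket, park the skb on csk->backlogq instead of handling it inline. A minimal sketch of that pattern, assuming the struct cxgbit_sock fields named in the listing (and the driver's cxgbit.h definitions); the handle_now callback is hypothetical:

/*
 * Sketch of the lock_owner/backlogq deferral pattern seen above.
 * handle_now is a hypothetical stand-in for the CPL-specific handler;
 * the owning context is expected to drain csk->backlogq when it
 * releases the socket.
 */
static void csk_handle_or_backlog(struct cxgbit_sock *csk, struct sk_buff *skb,
				  void (*handle_now)(struct cxgbit_sock *,
						     struct sk_buff *))
{
	spin_lock_bh(&csk->lock);
	if (csk->lock_owner)
		__skb_queue_tail(&csk->backlogq, skb);
	else
		handle_now(csk, skb);
	spin_unlock_bh(&csk->lock);
}

As far as the listing shows, the idiom keeps CPL processing single-threaded per socket: whichever context holds logical ownership is expected to drain backlogq before giving it up.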
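
The cxgbit_set_emss() fragments (759-774) derive the effective MSS from the firmware MTU table entry selected by the peer's advertised MSS, less header overhead and the TCP timestamp option, floored at 128 bytes. A sketch of that computation reconstructed from the fragments; the exact IP/TCP header terms and the TCPOPT_TSTAMP_G() test are assumptions:

/*
 * Reconstruction of the effective-MSS math visible in the
 * cxgbit_set_emss() fragments. The header-size terms are assumed from
 * the AF_INET test on source line 762 of the listing.
 */
static void sketch_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
		    ((csk->com.remote_addr.ss_family == AF_INET) ?
		     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		    sizeof(struct tcphdr);
	csk->mss = csk->emss;

	/* the timestamp option consumes space in every segment */
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);

	/* never report an effective MSS below 128 bytes */
	if (csk->emss < 128)
		csk->emss = 128;
}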
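
cxgbit_set_tcp_window() (827-848) sizes the per-connection send and receive windows from the 10G defaults, scaled up with link speed and clamped. A sketch of that sizing; the scale-factor derivation from the port_info argument is an assumption, while the clamps match the fragments above:

/*
 * Sketch of the window sizing in cxgbit_set_tcp_window(). The scale
 * factor derived from pi->link_cfg.speed is an assumption.
 */
static void sketch_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	u32 scale = pi->link_cfg.speed / SPEED_10000;

	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;
	/* the hardware receive buffer size field caps the advertised window */
	csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);

	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;
	csk->snd_win = min(csk->snd_win, 512U * 1024);
}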
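
cxgbit_credit_err() (1808-1831) enforces the tx flow-control invariant: the credits currently available can never exceed the maximum, and available credits plus the credits still outstanding on the pending-WR list must add back up to that maximum. A condensed sketch under the assumption, conventional for this driver family, that each queued WR's credit count is stashed in skb->csum and the list is chained through cxgbit_skcb_tx_wr_next():

/*
 * Condensed sketch of the credit invariant checked in cxgbit_credit_err().
 * The skb->csum / cxgbit_skcb_tx_wr_next() traversal is an assumption
 * about how the pending-WR list is bookkept.
 */
static bool sketch_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred))
		return true;	/* more credits available than ever granted */

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	/* available + outstanding must equal the original allotment */
	return (csk->wr_cred + credit) != csk->wr_max_cred;
}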