Lines Matching refs:csk
469 struct cxgbit_sock *csk)
472 conn->login_sockaddr = csk->com.remote_addr;
473 conn->local_sockaddr = csk->com.local_addr;
479 struct cxgbit_sock *csk;
504 csk = list_first_entry(&cnp->np_accept_list,
508 list_del_init(&csk->accept_node);
510 conn->context = csk;
511 csk->conn = conn;
513 cxgbit_set_conn_info(np, conn, csk);
598 static void __cxgbit_free_conn(struct cxgbit_sock *csk);
603 struct cxgbit_sock *csk, *tmp;
612 list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
613 list_del_init(&csk->accept_node);
614 __cxgbit_free_conn(csk);
622 static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
631 cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
635 __skb_queue_tail(&csk->txq, skb);
636 cxgbit_push_tx_frames(csk);
641 struct cxgbit_sock *csk = handle;
645 cxgbit_put_csk(csk);
658 static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
663 pr_debug("%s: csk %p tid %u; state %d\n",
664 __func__, csk, csk->tid, csk->com.state);
666 __skb_queue_purge(&csk->txq);
668 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
669 cxgbit_send_tx_flowc_wr(csk);
671 skb = __skb_dequeue(&csk->skbq);
672 cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
673 csk->com.cdev, cxgbit_abort_arp_failure);
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
679 __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
683 if (csk->com.state != CSK_STATE_ESTABLISHED)
686 set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
687 csk->com.state = CSK_STATE_ABORTING;
689 cxgbit_send_abort_req(csk);
694 cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
695 cxgbit_put_csk(csk);
698 void cxgbit_abort_conn(struct cxgbit_sock *csk)
702 cxgbit_get_csk(csk);
703 cxgbit_init_wr_wait(&csk->com.wr_wait);
705 spin_lock_bh(&csk->lock);
706 if (csk->lock_owner) {
708 __skb_queue_tail(&csk->backlogq, skb);
710 __cxgbit_abort_conn(csk, skb);
712 spin_unlock_bh(&csk->lock);
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
715 csk->tid, 600, __func__);
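
The cxgbit_abort_conn() references above (and the same pattern again at cxgbit_rx_data_ack() and __cxgbit_process_rx_cpl() further down) share one idiom: take csk->lock, and if csk->lock_owner shows another context currently owns the socket, park the skb on csk->backlogq instead of handling it inline. Below is a minimal userspace sketch of that defer-or-run idiom; toy_sock and toy_work are hypothetical stand-ins, not the driver's cxgbit_sock and sk_buff.

/* Toy illustration of the lock_owner/backlogq deferral idiom seen above. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_work {
	struct toy_work *next;
	void (*fn)(struct toy_work *);
};

struct toy_sock {
	pthread_mutex_t lock;
	bool lock_owner;                 /* another context owns the sock */
	struct toy_work *backlog_head;
	struct toy_work *backlog_tail;
};

/* Deliver work to the socket: run it now, or park it on the backlog. */
static void toy_submit(struct toy_sock *sk, struct toy_work *w)
{
	pthread_mutex_lock(&sk->lock);
	if (sk->lock_owner) {
		/* Owner active: defer; the owner drains the backlog later. */
		w->next = NULL;
		if (sk->backlog_tail)
			sk->backlog_tail->next = w;
		else
			sk->backlog_head = w;
		sk->backlog_tail = w;
	} else {
		/* No owner: handle inline while holding the lock. */
		w->fn(w);
	}
	pthread_mutex_unlock(&sk->lock);
}

The parked entries are presumably drained once the owning context releases the socket, so CPL handling stays serialized on csk->lock without blocking the submitter; after queuing the abort, the caller then blocks in cxgbit_wait_for_reply() as shown at line 714.
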
718 static void __cxgbit_free_conn(struct cxgbit_sock *csk)
720 struct iscsi_conn *conn = csk->conn;
724 __func__, csk->com.state);
726 spin_lock_bh(&csk->lock);
727 switch (csk->com.state) {
730 csk->com.state = CSK_STATE_CLOSING;
731 cxgbit_send_halfclose(csk);
733 csk->com.state = CSK_STATE_ABORTING;
734 cxgbit_send_abort_req(csk);
738 csk->com.state = CSK_STATE_MORIBUND;
739 cxgbit_send_halfclose(csk);
745 pr_err("%s: csk %p; state %d\n",
746 __func__, csk, csk->com.state);
748 spin_unlock_bh(&csk->lock);
751 cxgbit_put_csk(csk);
759 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
762 ((csk->com.remote_addr.ss_family == AF_INET) ?
765 csk->mss = csk->emss;
767 csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
768 if (csk->emss < 128)
769 csk->emss = 128;
770 if (csk->emss & 7)
772 TCPOPT_MSS_G(opt), csk->mss, csk->emss);
774 csk->mss, csk->emss);
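
The cxgbit_set_emss() references above derive the effective MSS from the MTU-table entry selected by the peer's MSS option: subtract the IPv4 or IPv6 plus TCP header overhead, subtract the timestamp option rounded up to 4 bytes when timestamps are in use, and clamp to a floor of 128. A standalone sketch of that arithmetic follows; the header sizes and TCPOLEN_TIMESTAMP value are assumed standard constants, not taken from this listing.

/* Standalone sketch of the effective-MSS arithmetic in cxgbit_set_emss(). */
#include <stdbool.h>

#define TOY_TCPOLEN_TIMESTAMP 10                 /* assumed, per RFC 7323 */
#define TOY_ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

static unsigned int toy_emss(unsigned int mtu_entry, bool ipv4, bool timestamps)
{
	/* Assumed header sizes: 20 (IPv4) or 40 (IPv6), plus 20 for TCP. */
	unsigned int hdrs = ipv4 ? (20 + 20) : (40 + 20);
	unsigned int emss = mtu_entry - hdrs;

	/* Timestamp option is padded to a 4-byte boundary: 10 -> 12 bytes. */
	if (timestamps)
		emss -= TOY_ROUND_UP(TOY_TCPOLEN_TIMESTAMP, 4);

	/* Same floor the driver enforces. */
	if (emss < 128)
		emss = 128;
	return emss;
}

With a 1500-byte MTU entry, IPv4 and timestamps, this yields 1500 - 40 - 12 = 1448, the usual TCP payload size.
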
777 static void cxgbit_free_skb(struct cxgbit_sock *csk)
781 __skb_queue_purge(&csk->txq);
782 __skb_queue_purge(&csk->rxq);
783 __skb_queue_purge(&csk->backlogq);
784 __skb_queue_purge(&csk->ppodq);
785 __skb_queue_purge(&csk->skbq);
787 while ((skb = cxgbit_sock_dequeue_wr(csk)))
790 __kfree_skb(csk->lro_hskb);
795 struct cxgbit_sock *csk;
798 csk = container_of(kref, struct cxgbit_sock, kref);
800 pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
802 if (csk->com.local_addr.ss_family == AF_INET6) {
804 &csk->com.local_addr;
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
811 csk->com.local_addr.ss_family);
812 dst_release(csk->dst);
813 cxgb4_l2t_release(csk->l2t);
815 cdev = csk->com.cdev;
817 list_del(&csk->list);
820 cxgbit_free_skb(csk);
821 cxgbit_put_cnp(csk->cnp);
824 kfree(csk);
827 static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
836 csk->rcv_win = CXGBIT_10G_RCV_WIN;
838 csk->rcv_win *= scale;
841 csk->snd_win = CXGBIT_10G_SND_WIN;
843 csk->snd_win *= scale;
846 __func__, csk->snd_win, csk->rcv_win);
897 cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
931 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
933 if (!csk->l2t)
935 csk->mtu = ndev->mtu;
936 csk->tx_chan = cxgb4_port_chan(ndev);
937 csk->smac_idx =
941 csk->txq_idx = cxgb4_port_idx(ndev) * step;
944 csk->ctrlq_idx = cxgb4_port_idx(ndev);
945 csk->rss_qid = cdev->lldi.rxq_ids[
947 csk->port_id = cxgb4_port_idx(ndev);
948 cxgbit_set_tcp_window(csk,
962 csk->dcb_priority = priority;
964 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
968 if (!csk->l2t)
971 csk->mtu = dst_mtu(dst);
972 csk->tx_chan = cxgb4_port_chan(ndev);
973 csk->smac_idx =
977 csk->txq_idx = (port_id * step) +
979 csk->ctrlq_idx = cxgb4_port_idx(ndev);
984 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
985 csk->port_id = port_id;
986 cxgbit_set_tcp_window(csk,
1043 static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
1045 if (csk->com.state != CSK_STATE_ESTABLISHED) {
1050 cxgbit_ofld_send(csk->com.cdev, skb);
1058 int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
1069 RX_CREDITS_V(csk->rx_credits);
1071 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
1074 csk->rx_credits = 0;
1076 spin_lock_bh(&csk->lock);
1077 if (csk->lock_owner) {
1079 __skb_queue_tail(&csk->backlogq, skb);
1080 spin_unlock_bh(&csk->lock);
1084 cxgbit_send_rx_credits(csk, skb);
1085 spin_unlock_bh(&csk->lock);
1092 static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
1111 __skb_queue_tail(&csk->skbq, skb);
1119 csk->lro_hskb = skb;
1123 __skb_queue_purge(&csk->skbq);
1128 cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
1133 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1141 pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
1145 cxgbit_put_csk(csk);
1151 INIT_TP_WR(rpl5, csk->tid);
1153 csk->tid));
1154 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
1156 (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1157 wscale = cxgb_compute_wscale(csk->rcv_win);
1162 win = csk->rcv_win >> 10;
1168 L2T_IDX_V(csk->l2t->idx) |
1169 TX_CHAN_V(csk->tx_chan) |
1170 SMAC_SEL_V(csk->smac_idx) |
1171 DSCP_V(csk->tos >> 2) |
1176 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
1210 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
1211 t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
1212 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
1218 struct cxgbit_sock *csk = NULL;
1249 csk = lookup_tid(t, tid);
1250 if (csk) {
1251 pr_err("%s csk not null tid %u\n",
1290 csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
1291 if (!csk) {
1296 ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
1302 kfree(csk);
1306 kref_init(&csk->kref);
1307 init_completion(&csk->com.wr_wait.completion);
1309 INIT_LIST_HEAD(&csk->accept_node);
1313 if (peer_mss && csk->mtu > (peer_mss + hdrs))
1314 csk->mtu = peer_mss + hdrs;
1316 csk->com.state = CSK_STATE_CONNECTING;
1317 csk->com.cdev = cdev;
1318 csk->cnp = cnp;
1319 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1320 csk->dst = dst;
1321 csk->tid = tid;
1322 csk->wr_cred = cdev->lldi.wr_cred -
1324 csk->wr_max_cred = csk->wr_cred;
1325 csk->wr_una_cred = 0;
1329 &csk->com.local_addr;
1334 sin = (struct sockaddr_in *)&csk->com.remote_addr;
1340 &csk->com.local_addr;
1349 sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
1355 skb_queue_head_init(&csk->rxq);
1356 skb_queue_head_init(&csk->txq);
1357 skb_queue_head_init(&csk->ppodq);
1358 skb_queue_head_init(&csk->backlogq);
1359 skb_queue_head_init(&csk->skbq);
1360 cxgbit_sock_reset_wr_list(csk);
1361 spin_lock_init(&csk->lock);
1362 init_waitqueue_head(&csk->waitq);
1363 csk->lock_owner = false;
1365 if (cxgbit_alloc_csk_skb(csk)) {
1367 kfree(csk);
1375 list_add_tail(&csk->list, &cdev->cskq.list);
1377 cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
1378 cxgbit_pass_accept_rpl(csk, req);
1388 cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
1395 if (csk->snd_wscale)
1415 u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
1417 struct cxgbit_device *cdev = csk->com.cdev;
1424 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
1427 flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
1429 skb = __skb_dequeue(&csk->skbq);
1435 FW_WR_FLOWID_V(csk->tid));
1438 (csk->com.cdev->lldi.pf));
1440 flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
1442 flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
1444 flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
1446 flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
1448 flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
1450 flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
1452 flowc->mnemval[7].val = cpu_to_be32(csk->emss);
1462 if (csk->snd_wscale) {
1464 flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
1471 pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
1478 pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
1480 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
1481 csk->rcv_nxt, csk->snd_win, csk->emss);
1482 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
1483 cxgbit_ofld_send(csk->com.cdev, skb);
1488 cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1490 spin_lock_bh(&csk->lock);
1491 if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
1492 spin_unlock_bh(&csk->lock);
1493 pr_err("%s: csk 0x%p, tid %u, state %u\n",
1494 __func__, csk, csk->tid, csk->com.state);
1499 cxgbit_get_csk(csk);
1500 cxgbit_init_wr_wait(&csk->com.wr_wait);
1501 cxgbit_ofld_send(csk->com.cdev, skb);
1502 spin_unlock_bh(&csk->lock);
1507 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1511 u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1512 u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1523 INIT_TP_WR(req, csk->tid);
1524 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1525 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1530 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1532 if (cxgbit_send_tcb_skb(csk, skb))
1535 ret = cxgbit_wait_for_reply(csk->com.cdev,
1536 &csk->com.wr_wait,
1537 csk->tid, 5, __func__);
1544 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1557 INIT_TP_WR(req, csk->tid);
1558 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1559 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1563 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1565 if (cxgbit_send_tcb_skb(csk, skb))
1568 ret = cxgbit_wait_for_reply(csk->com.cdev,
1569 &csk->com.wr_wait,
1570 csk->tid, 5, __func__);
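
Both cxgbit_setup_conn_digest() and cxgbit_setup_conn_pgidx() above follow the same request/response shape: build a CPL_SET_TCB_FIELD work request, send it via cxgbit_send_tcb_skb(), then block in cxgbit_wait_for_reply() until the reply handler wakes the waiter or a timeout expires. A rough userspace sketch of that completion-with-timeout pattern follows; toy_wr_wait and its helpers are hypothetical stand-ins for the driver's wr_wait machinery.

/* Toy sketch of the "send a TCB update, wait on wr_wait with a timeout"
 * pattern used by the two setup helpers above. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct toy_wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	int ret;
};

static void toy_wr_wait_init(struct toy_wr_wait *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->done = false;
	w->ret = 0;
}

/* Reply-handler side: record the status and wake the waiter. */
static void toy_wr_wake_up(struct toy_wr_wait *w, int status)
{
	pthread_mutex_lock(&w->lock);
	w->ret = status;
	w->done = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* Submitter side: block until woken or the timeout expires. */
static int toy_wait_for_reply(struct toy_wr_wait *w, unsigned int timeout_sec)
{
	struct timespec ts;
	bool done;
	int rc = 0, ret;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&w->lock);
	while (!w->done && rc == 0)
		rc = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
	done = w->done;
	ret = w->ret;
	pthread_mutex_unlock(&w->lock);

	return done ? ret : -ETIMEDOUT;
}

The wake-up side corresponds to the reply path visible around line 1912, where cxgbit_wake_up(&csk->com.wr_wait, ...) is called with the CPL status.
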
1627 struct cxgbit_sock *csk;
1633 csk = lookup_tid(t, tid);
1634 if (unlikely(!csk)) {
1638 cnp = csk->cnp;
1640 pr_debug("%s: csk %p; tid %u; cnp %p\n",
1641 __func__, csk, tid, cnp);
1643 csk->write_seq = snd_isn;
1644 csk->snd_una = snd_isn;
1645 csk->snd_nxt = snd_isn;
1647 csk->rcv_nxt = rcv_isn;
1649 if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
1650 csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
1652 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1653 cxgbit_set_emss(csk, tcp_opt);
1654 dst_confirm(csk->dst);
1655 csk->com.state = CSK_STATE_ESTABLISHED;
1657 list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1664 static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1667 spin_lock_bh(&csk->rxq.lock);
1668 __skb_queue_tail(&csk->rxq, skb);
1669 spin_unlock_bh(&csk->rxq.lock);
1670 wake_up(&csk->waitq);
1673 static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1675 pr_debug("%s: csk %p; tid %u; state %d\n",
1676 __func__, csk, csk->tid, csk->com.state);
1678 switch (csk->com.state) {
1680 csk->com.state = CSK_STATE_CLOSING;
1681 cxgbit_queue_rx_skb(csk, skb);
1685 csk->com.state = CSK_STATE_MORIBUND;
1688 csk->com.state = CSK_STATE_DEAD;
1689 cxgbit_put_csk(csk);
1695 __func__, csk->com.state);
1701 static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1703 pr_debug("%s: csk %p; tid %u; state %d\n",
1704 __func__, csk, csk->tid, csk->com.state);
1706 switch (csk->com.state) {
1708 csk->com.state = CSK_STATE_MORIBUND;
1711 csk->com.state = CSK_STATE_DEAD;
1712 cxgbit_put_csk(csk);
1719 __func__, csk->com.state);
1725 static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1734 pr_debug("%s: csk %p; tid %u; state %d\n",
1735 __func__, csk, tid, csk->com.state);
1743 switch (csk->com.state) {
1746 csk->com.state = CSK_STATE_DEAD;
1750 csk->com.state = CSK_STATE_DEAD;
1754 csk->com.state = CSK_STATE_DEAD;
1755 if (!csk->conn)
1762 __func__, csk->com.state);
1763 csk->com.state = CSK_STATE_DEAD;
1766 __skb_queue_purge(&csk->txq);
1768 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1769 cxgbit_send_tx_flowc_wr(csk);
1771 rpl_skb = __skb_dequeue(&csk->skbq);
1773 cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1774 cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1777 cxgbit_queue_rx_skb(csk, skb);
1782 cxgbit_put_csk(csk);
1787 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1791 pr_debug("%s: csk %p; tid %u; state %d\n",
1792 __func__, csk, csk->tid, csk->com.state);
1794 switch (csk->com.state) {
1796 csk->com.state = CSK_STATE_DEAD;
1797 if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1798 cxgbit_wake_up(&csk->com.wr_wait, __func__,
1800 cxgbit_put_csk(csk);
1804 __func__, csk->com.state);
1810 static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1812 const struct sk_buff *skb = csk->wr_pending_head;
1815 if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1816 pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1817 csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1826 if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1827 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1828 csk, csk->tid, csk->wr_cred,
1829 credit, csk->wr_max_cred);
1837 static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1843 csk->wr_cred += credits;
1844 if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1845 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1848 struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1852 pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1853 csk, csk->tid, credits,
1854 csk->wr_cred, csk->wr_una_cred);
1860 pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1861 csk, csk->tid,
1862 credits, csk->wr_cred, csk->wr_una_cred,
1868 cxgbit_sock_dequeue_wr(csk);
1873 if (unlikely(cxgbit_credit_err(csk))) {
1874 cxgbit_queue_rx_skb(csk, skb);
1879 if (unlikely(before(snd_una, csk->snd_una))) {
1880 pr_warn("csk 0x%p,%u, snd_una %u/%u.",
1881 csk, csk->tid, snd_una,
1882 csk->snd_una);
1886 if (csk->snd_una != snd_una) {
1887 csk->snd_una = snd_una;
1888 dst_confirm(csk->dst);
1892 if (skb_queue_len(&csk->txq))
1893 cxgbit_push_tx_frames(csk);
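
The cxgbit_fw4_ack() references above form the work-request credit-return path: returned credits are added back to csk->wr_cred, csk->wr_una_cred is capped, pending WRs whose cost is now fully covered are dequeued, and the tx queue is kicked if anything is still waiting. A toy sketch of that accounting loop follows; toy_wr and toy_csk are hypothetical simplifications of the driver's pending-WR skb list and credit fields.

/* Toy sketch of the FW4_ACK credit-return walk seen above. */
#include <stdio.h>

struct toy_wr {
	struct toy_wr *next;
	unsigned int credits;          /* credits this WR consumed when sent */
};

struct toy_csk {
	unsigned int wr_cred;          /* credits currently available */
	unsigned int wr_max_cred;      /* hardware limit */
	unsigned int wr_una_cred;      /* sent but not yet acked */
	struct toy_wr *pending;        /* oldest unacked WR first */
};

/* Return `credits` to the socket and retire fully covered pending WRs. */
static void toy_fw4_ack(struct toy_csk *csk, unsigned int credits)
{
	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct toy_wr *p = csk->pending;

		if (!p) {
			fprintf(stderr, "ack of %u credits, pending list empty\n",
				credits);
			break;
		}
		if (credits < p->credits) {
			fprintf(stderr, "partial ack: %u < %u\n",
				credits, p->credits);
			break;
		}
		credits -= p->credits;
		csk->pending = p->next;   /* dequeue the fully acked WR */
	}
}

The two error branches mirror the pr_err/pr_warn cases in the listing: an ack arriving with an empty pending list, and an ack smaller than the head WR's recorded credit cost.
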
1901 struct cxgbit_sock *csk;
1907 csk = lookup_tid(t, tid);
1908 if (unlikely(!csk)) {
1912 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1915 cxgbit_put_csk(csk);
1922 struct cxgbit_sock *csk;
1928 csk = lookup_tid(t, tid);
1929 if (unlikely(!csk)) {
1934 cxgbit_queue_rx_skb(csk, skb);
1941 __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1943 spin_lock(&csk->lock);
1944 if (csk->lock_owner) {
1945 __skb_queue_tail(&csk->backlogq, skb);
1946 spin_unlock(&csk->lock);
1950 cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1951 spin_unlock(&csk->lock);
1954 static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1956 cxgbit_get_csk(csk);
1957 __cxgbit_process_rx_cpl(csk, skb);
1958 cxgbit_put_csk(csk);
1963 struct cxgbit_sock *csk;
1992 csk = lookup_tid(t, tid);
1993 if (unlikely(!csk)) {
1999 cxgbit_process_rx_cpl(csk, skb);
2001 __cxgbit_process_rx_cpl(csk, skb);