Lines Matching refs:csk (references to struct cxgbi_sock *csk in the cxgb4i iSCSI offload driver)

208 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
211 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
212 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
215 unsigned int qid_atid = ((unsigned int)csk->atid) |
216 (((unsigned int)csk->rss_qid) << 14);
220 MSS_IDX_V(csk->mss_idx) |
221 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
222 TX_CHAN_V(csk->tx_chan) |
223 SMAC_SEL_V(csk->smac_idx) |
225 RCV_BUFSIZ_V(csk->rcv_win >> 10);
229 RSS_QUEUE_V(csk->rss_qid);
238 req->local_port = csk->saddr.sin_port;
239 req->peer_port = csk->daddr.sin_port;
240 req->local_ip = csk->saddr.sin_addr.s_addr;
241 req->peer_ip = csk->daddr.sin_addr.s_addr;
244 csk->cdev->ports[csk->port_id],
245 csk->l2t));
250 "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
251 csk, &req->local_ip, ntohs(req->local_port),
253 csk->atid, csk->rss_qid);
262 req->local_port = csk->saddr.sin_port;
263 req->peer_port = csk->daddr.sin_port;
264 req->local_ip = csk->saddr.sin_addr.s_addr;
265 req->peer_ip = csk->daddr.sin_addr.s_addr;
269 csk->cdev->ports[csk->port_id],
270 csk->l2t)));
278 "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
279 csk, &req->local_ip, ntohs(req->local_port),
281 csk->atid, csk->rss_qid);
290 req->local_port = csk->saddr.sin_port;
291 req->peer_port = csk->daddr.sin_port;
292 req->local_ip = csk->saddr.sin_addr.s_addr;
293 req->peer_ip = csk->daddr.sin_addr.s_addr;
297 csk->cdev->ports[csk->port_id],
298 csk->l2t)));
310 "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
311 csk, &req->local_ip, ntohs(req->local_port),
313 csk->atid, csk->rss_qid);
316 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
318 pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
319 (&csk->saddr), (&csk->daddr),
320 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
321 csk->state, csk->flags, csk->atid, csk->rss_qid);
323 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
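
The request built above packs the connection's atid and RSS queue id into a single word (atid in the low 14 bits, rss_qid shifted above it, per the qid_atid lines) and advertises the receive window to the hardware in 1KB units (rcv_win >> 10). A minimal stand-alone model of that packing; the helper name and example values are illustrative, not taken from the driver:

#include <stdio.h>

/* Model of the qid_atid word built by send_act_open_req(): the atid occupies
 * the low 14 bits and the RSS queue id sits above it (the shift is taken from
 * the listing).  The receive window is handed to the hardware in 1KB units. */
static unsigned int pack_qid_atid(unsigned int atid, unsigned int rss_qid)
{
	return atid | (rss_qid << 14);
}

int main(void)
{
	unsigned int qid_atid = pack_qid_atid(0x123, 5);
	unsigned int rcv_win = 256 * 1024;

	printf("qid_atid 0x%x, RCV_BUFSIZ %u\n", qid_atid, rcv_win >> 10);
	return 0;
}
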
327 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
330 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
331 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
334 unsigned int qid_atid = ((unsigned int)csk->atid) |
335 (((unsigned int)csk->rss_qid) << 14);
339 MSS_IDX_V(csk->mss_idx) |
340 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
341 TX_CHAN_V(csk->tx_chan) |
342 SMAC_SEL_V(csk->smac_idx) |
344 RCV_BUFSIZ_V(csk->rcv_win >> 10);
348 RSS_QUEUE_V(csk->rss_qid);
357 req->local_port = csk->saddr6.sin6_port;
358 req->peer_port = csk->daddr6.sin6_port;
360 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
361 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
363 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
364 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
373 csk->cdev->ports[csk->port_id],
374 csk->l2t));
382 req->local_port = csk->saddr6.sin6_port;
383 req->peer_port = csk->daddr6.sin6_port;
384 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
385 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
387 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
388 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
396 csk->cdev->ports[csk->port_id],
397 csk->l2t)));
405 req->local_port = csk->saddr6.sin6_port;
406 req->peer_port = csk->daddr6.sin6_port;
407 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
408 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
410 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
411 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
421 csk->cdev->ports[csk->port_id],
422 csk->l2t)));
428 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
430 pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
431 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
432 csk->flags, csk->atid,
433 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
434 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
435 csk->rss_qid);
437 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
441 static void send_close_req(struct cxgbi_sock *csk)
443 struct sk_buff *skb = csk->cpl_close;
445 unsigned int tid = csk->tid;
448 "csk 0x%p,%u,0x%lx, tid %u.\n",
449 csk, csk->state, csk->flags, csk->tid);
450 csk->cpl_close = NULL;
451 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
456 cxgbi_sock_skb_entail(csk, skb);
457 if (csk->state >= CTP_ESTABLISHED)
458 push_tx_frames(csk, 1);
463 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
467 "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
468 csk, csk->state, csk->flags, csk->tid);
471 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
474 static void send_abort_req(struct cxgbi_sock *csk)
477 struct sk_buff *skb = csk->cpl_abort_req;
479 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
482 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
483 send_tx_flowc_wr(csk);
484 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
487 cxgbi_sock_set_state(csk, CTP_ABORTING);
488 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
489 cxgbi_sock_purge_write_queue(csk);
491 csk->cpl_abort_req = NULL;
493 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
495 t4_set_arp_err_handler(skb, csk, abort_arp_failure);
496 INIT_TP_WR(req, csk->tid);
497 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
498 req->rsvd0 = htonl(csk->snd_nxt);
499 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
502 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
503 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
506 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
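
send_abort_req() follows a fixed ordering: if no TX data has gone out yet a FlowC work request is sent first, the socket is moved to CTP_ABORTING with CTPF_ABORT_RPL_PENDING set, the unsent write queue is purged, and only then is the CPL_ABORT_REQ built carrying snd_nxt. A rough user-space model of that ordering; the state and flag names mirror the listing, everything else is a stand-in:

#include <stdio.h>

/* Rough model of the ordering in send_abort_req(); the flag and state names
 * mirror the listing, the rest is made up for the sketch. */
enum { CTPF_TX_DATA_SENT = 1 << 0, CTPF_ABORT_RPL_PENDING = 1 << 1 };
enum { CTP_ESTABLISHED, CTP_ABORTING };

struct conn {
	unsigned long flags;
	int state;
	unsigned int snd_nxt;
};

static void model_send_abort_req(struct conn *c)
{
	if (c->state == CTP_ABORTING)
		return;				/* already aborting */

	if (!(c->flags & CTPF_TX_DATA_SENT)) {
		/* the driver emits a FlowC work request here first */
		c->flags |= CTPF_TX_DATA_SENT;
	}

	c->state = CTP_ABORTING;
	c->flags |= CTPF_ABORT_RPL_PENDING;
	/* the write queue is purged, then CPL_ABORT_REQ is built with
	 * rsvd0 = snd_nxt and rsvd1 = "no TX data was ever sent" */
	printf("abort req: snd_nxt %u, flags 0x%lx\n", c->snd_nxt, c->flags);
}

int main(void)
{
	struct conn c = { .state = CTP_ESTABLISHED, .snd_nxt = 1000 };

	model_send_abort_req(&c);
	return 0;
}
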
509 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
511 struct sk_buff *skb = csk->cpl_abort_rpl;
515 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
516 csk, csk->state, csk->flags, csk->tid, rst_status);
518 csk->cpl_abort_rpl = NULL;
519 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
520 INIT_TP_WR(rpl, csk->tid);
521 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
523 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
531 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
537 "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
538 csk, csk->state, csk->flags, csk->tid, credits);
542 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
547 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
548 INIT_TP_WR(req, csk->tid);
550 csk->tid));
553 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
614 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
621 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
629 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
631 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
633 flowc->mnemval[1].val = htonl(csk->tx_chan);
635 flowc->mnemval[2].val = htonl(csk->tx_chan);
637 flowc->mnemval[3].val = htonl(csk->rss_qid);
639 flowc->mnemval[4].val = htonl(csk->snd_nxt);
641 flowc->mnemval[5].val = htonl(csk->rcv_nxt);
643 flowc->mnemval[6].val = htonl(csk->snd_win);
645 flowc->mnemval[7].val = htonl(csk->advmss);
649 if (csk->cdev->skb_iso_txhdr)
656 pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
657 csk->tid);
665 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
668 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
669 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
670 csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
671 csk->advmss);
673 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
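
The FlowC work request above programs eight per-connection values into the firmware in fixed slots: PF/VF, TX channel (used for both the channel and port slots here), the RSS ingress queue, snd_nxt, rcv_nxt, the send window and the advertised MSS. A compact model of that slot layout; the labels are descriptive only, while the real driver fills struct fw_flowc_wr with FW_FLOWC_MNEM_* mnemonics:

#include <stdio.h>

/* Slot order of the mnemval[] array filled by send_tx_flowc_wr(), as read off
 * the listing.  Values are example numbers, not driver defaults. */
struct conn_params {
	unsigned int pfvf, tx_chan, rss_qid;
	unsigned int snd_nxt, rcv_nxt, snd_win, advmss;
};

int main(void)
{
	struct conn_params p = {
		.pfvf = 0, .tx_chan = 1, .rss_qid = 17,
		.snd_nxt = 1000, .rcv_nxt = 2000,
		.snd_win = 128 * 1024, .advmss = 1460,
	};
	unsigned int val[8] = {
		p.pfvf,		/* [0] PF/VF */
		p.tx_chan,	/* [1] TX channel */
		p.tx_chan,	/* [2] port (same channel in the listing) */
		p.rss_qid,	/* [3] ingress (RSS) queue id */
		p.snd_nxt,	/* [4] next send sequence */
		p.rcv_nxt,	/* [5] next expected receive sequence */
		p.snd_win,	/* [6] send buffer / window */
		p.advmss,	/* [7] advertised MSS */
	};

	for (int i = 0; i < 8; i++)
		printf("mnemval[%d].val = %u\n", i, val[i]);
	return 0;
}
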
714 cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
717 struct cxgbi_device *cdev = csk->cdev;
743 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
758 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
759 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
767 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
772 if (unlikely(csk->state < CTP_ESTABLISHED ||
773 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
776 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
777 csk, csk->state, csk->flags, csk->tid);
781 while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
808 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
809 flowclen16 = send_tx_flowc_wr(csk);
810 csk->wr_cred -= flowclen16;
811 csk->wr_una_cred += flowclen16;
812 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
815 if (csk->wr_cred < credits_needed) {
817 "csk 0x%p, skb %u/%u, wr %d < %u.\n",
818 csk, skb->len, skb->data_len,
819 credits_needed, csk->wr_cred);
821 csk->no_tx_credits++;
825 csk->no_tx_credits = 0;
827 __skb_unlink(skb, &csk->write_queue);
828 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
830 csk->wr_cred -= credits_needed;
831 csk->wr_una_cred += credits_needed;
832 cxgbi_sock_enqueue_wr(csk, skb);
835 "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
836 csk, skb->len, skb->data_len, credits_needed,
837 csk->wr_cred, csk->wr_una_cred);
840 ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
841 after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
857 cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
859 csk->snd_nxt += len;
862 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
870 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
873 "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
874 csk, csk->state, csk->flags, csk->tid, skb, len);
875 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
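
push_tx_frames() is throttled by work-request credits: each skb costs some credits_needed, subtracted from wr_cred and added to wr_una_cred (sent but not yet acknowledged), and a completion is requested once wr_una_cred reaches half of wr_max_cred or half the send window is in flight. A small model of that accounting; the fixed per-skb cost below is a stand-in for the per-WR size calculation the driver does:

#include <stdbool.h>
#include <stdio.h>

/* Model of the credit bookkeeping in push_tx_frames(); the cost per skb is
 * arbitrary, the wr_cred/wr_una_cred arithmetic follows the listing. */
struct tx_state {
	unsigned int wr_cred;		/* credits currently available */
	unsigned int wr_una_cred;	/* credits consumed but not yet acked */
	unsigned int wr_max_cred;	/* credits granted at connection setup */
};

static bool push_one(struct tx_state *tx, unsigned int credits_needed)
{
	if (tx->wr_cred < credits_needed)
		return false;	/* stop: wait for FW4_ACK to return credits */

	tx->wr_cred -= credits_needed;
	tx->wr_una_cred += credits_needed;

	/* request a completion when half of the max credits are outstanding */
	if (tx->wr_una_cred >= tx->wr_max_cred / 2)
		printf("request completion (unacked %u/%u)\n",
		       tx->wr_una_cred, tx->wr_max_cred);
	return true;
}

int main(void)
{
	struct tx_state tx = { .wr_cred = 16, .wr_una_cred = 0, .wr_max_cred = 16 };

	while (push_one(&tx, 3))	/* pretend every skb costs 3 credits */
		;
	printf("left %u, unacked %u\n", tx.wr_cred, tx.wr_una_cred);
	return 0;
}
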
880 static inline void free_atid(struct cxgbi_sock *csk)
882 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
884 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
885 cxgb4_free_atid(lldi->tids, csk->atid);
886 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
887 cxgbi_sock_put(csk);
893 struct cxgbi_sock *csk;
902 csk = lookup_atid(t, atid);
903 if (unlikely(!csk)) {
908 if (csk->atid != atid) {
909 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
910 atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
914 pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
915 (&csk->saddr), (&csk->daddr),
916 atid, tid, csk, csk->state, csk->flags, rcv_isn);
920 cxgbi_sock_get(csk);
921 csk->tid = tid;
922 cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
923 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
925 free_atid(csk);
927 spin_lock_bh(&csk->lock);
928 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
929 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
930 csk, csk->state, csk->flags, csk->tid);
932 if (csk->retry_timer.function) {
933 del_timer(&csk->retry_timer);
934 csk->retry_timer.function = NULL;
937 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
942 if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
943 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
945 csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
947 csk->advmss -= 12;
948 if (csk->advmss < 128)
949 csk->advmss = 128;
952 "csk 0x%p, mss_idx %u, advmss %u.\n",
953 csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
955 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
957 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
958 send_abort_req(csk);
960 if (skb_queue_len(&csk->write_queue))
961 push_tx_frames(csk, 0);
962 cxgbi_conn_tx_open(csk);
964 spin_unlock_bh(&csk->lock);
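
On CPL_ACT_ESTABLISH the driver seeds copied_seq/rcv_wup/rcv_nxt from the peer's ISN, derives the advertised MSS from the negotiated TCP options (MTU-table entry minus 40 bytes of IP and TCP header, minus another 12 if timestamps were negotiated, with a floor of 128), and pulls rcv_wup back when the configured receive window is larger than the RCV_BUFSIZ field can express. A stand-alone model of those two fixups; the hardware limit is only a placeholder, since the listing shows just the shift by 10:

#include <stdio.h>

/* Model of the MSS and receive-window fixups done in do_act_establish().
 * The arithmetic follows the listing; the hardware limit is a placeholder. */
static unsigned int advmss_from_mtu(unsigned int mtu, int tstamps_enabled)
{
	unsigned int advmss = mtu - 40;		/* strip IPv4 + TCP headers */

	if (tstamps_enabled)
		advmss -= 12;			/* TCP timestamp option */
	if (advmss < 128)
		advmss = 128;
	return advmss;
}

int main(void)
{
	unsigned int rcv_isn = 0x01000000;	/* peer's initial sequence number */
	unsigned int rcv_win = 2 * 1024 * 1024;	/* configured receive window */
	unsigned int hw_rcv_limit = 1024 * 1024; /* stands in for RCV_BUFSIZ_MASK << 10 */
	unsigned int copied_seq, rcv_wup, rcv_nxt;

	copied_seq = rcv_wup = rcv_nxt = rcv_isn;
	if (rcv_win > hw_rcv_limit)
		rcv_wup -= rcv_win - hw_rcv_limit;

	printf("advmss %u, copied_seq %u, rcv_nxt %u, rcv_wup %u\n",
	       advmss_from_mtu(1500, 1), copied_seq, rcv_nxt, rcv_wup);
	return 0;
}
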
991 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
992 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
998 "csk 0x%p,%u,0x%lx,%u.\n",
999 csk, csk->state, csk->flags, csk->tid);
1001 cxgbi_sock_get(csk);
1002 spin_lock_bh(&csk->lock);
1012 if (csk->csk_family == AF_INET) {
1023 cxgbi_sock_fail_act_open(csk, -ENOMEM);
1025 skb->sk = (struct sock *)csk;
1026 t4_set_arp_err_handler(skb, csk,
1028 send_act_open_func(csk, skb, csk->l2t);
1031 spin_unlock_bh(&csk->lock);
1032 cxgbi_sock_put(csk);
1045 struct cxgbi_sock *csk;
1054 csk = lookup_atid(t, atid);
1055 if (unlikely(!csk)) {
1061 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
1062 atid, tid, status, csk, csk->state, csk->flags);
1072 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
1073 csk->csk_family);
1075 cxgbi_sock_get(csk);
1076 spin_lock_bh(&csk->lock);
1079 csk->retry_timer.function != csk_act_open_retry_timer) {
1080 csk->retry_timer.function = csk_act_open_retry_timer;
1081 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
1083 cxgbi_sock_fail_act_open(csk,
1086 spin_unlock_bh(&csk->lock);
1087 cxgbi_sock_put(csk);
1094 struct cxgbi_sock *csk;
1100 csk = lookup_tid(t, tid);
1101 if (unlikely(!csk)) {
1105 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1106 (&csk->saddr), (&csk->daddr),
1107 csk, csk->state, csk->flags, csk->tid);
1108 cxgbi_sock_rcv_peer_close(csk);
1115 struct cxgbi_sock *csk;
1121 csk = lookup_tid(t, tid);
1122 if (unlikely(!csk)) {
1126 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1127 (&csk->saddr), (&csk->daddr),
1128 csk, csk->state, csk->flags, csk->tid);
1129 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
1134 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
1140 return csk->state > CTP_ESTABLISHED ?
1154 struct cxgbi_sock *csk;
1161 csk = lookup_tid(t, tid);
1162 if (unlikely(!csk)) {
1167 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1168 (&csk->saddr), (&csk->daddr),
1169 csk, csk->state, csk->flags, csk->tid, req->status);
1174 cxgbi_sock_get(csk);
1175 spin_lock_bh(&csk->lock);
1177 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
1179 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
1180 send_tx_flowc_wr(csk);
1181 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
1184 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
1185 cxgbi_sock_set_state(csk, CTP_ABORTING);
1187 send_abort_rpl(csk, rst_status);
1189 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
1190 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
1191 cxgbi_sock_closed(csk);
1194 spin_unlock_bh(&csk->lock);
1195 cxgbi_sock_put(csk);
1202 struct cxgbi_sock *csk;
1208 csk = lookup_tid(t, tid);
1209 if (!csk)
1212 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1213 (&csk->saddr), (&csk->daddr), csk,
1214 csk->state, csk->flags, csk->tid, rpl->status);
1219 cxgbi_sock_rcv_abort_rpl(csk);
1226 struct cxgbi_sock *csk;
1232 csk = lookup_tid(t, tid);
1233 if (!csk) {
1237 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
1238 spin_lock_bh(&csk->lock);
1239 send_abort_req(csk);
1240 spin_unlock_bh(&csk->lock);
1247 struct cxgbi_sock *csk;
1254 csk = lookup_tid(t, tid);
1255 if (unlikely(!csk)) {
1261 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1262 csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1265 spin_lock_bh(&csk->lock);
1267 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1269 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1270 csk, csk->state, csk->flags, csk->tid);
1271 if (csk->state != CTP_ABORTING)
1284 if (!csk->skb_ulp_lhdr) {
1289 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1290 csk, csk->state, csk->flags, csk->tid, skb);
1291 csk->skb_ulp_lhdr = skb;
1295 (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
1297 csk->tid, cxgbi_skcb_tcp_seq(skb),
1298 csk->rcv_nxt);
1313 csk->tid, plen, hlen, dlen,
1320 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1321 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1324 "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1325 csk, skb, *bhs, hlen, dlen,
1330 struct sk_buff *lskb = csk->skb_ulp_lhdr;
1334 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1335 csk, csk->state, csk->flags, skb, lskb);
1338 __skb_queue_tail(&csk->receive_queue, skb);
1339 spin_unlock_bh(&csk->lock);
1343 send_abort_req(csk);
1345 spin_unlock_bh(&csk->lock);
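
On the receive side the first skb of each iSCSI PDU (the header CPL) is remembered in csk->skb_ulp_lhdr, its TCP sequence is checked against rcv_nxt, the PDU length is extended by the data-digest length, rcv_nxt advances by the whole PDU, and the skb is queued on receive_queue; a sequence mismatch leads to send_abort_req(). A simplified model of that per-PDU bookkeeping, with made-up lengths:

#include <stdio.h>

/* Simplified model of the PDU accounting in do_rx_iscsi_hdr(): the header
 * skb anchors the PDU, the sequence number must match rcv_nxt, and rcv_nxt
 * advances by the full PDU length (payload plus data digest). */
struct rx_state {
	unsigned int rcv_nxt;
	unsigned int dcrc_len;	/* 4 if a data digest was negotiated, else 0 */
	int have_lhdr;		/* models csk->skb_ulp_lhdr != NULL */
};

static int rx_pdu_hdr(struct rx_state *rx, unsigned int tcp_seq,
		      unsigned int pdulen)
{
	if (tcp_seq != rx->rcv_nxt) {
		printf("out of order: seq %u, expected %u -> abort\n",
		       tcp_seq, rx->rcv_nxt);
		return -1;
	}
	rx->have_lhdr = 1;			/* remember the header skb */
	rx->rcv_nxt += pdulen + rx->dcrc_len;	/* consume the whole PDU */
	return 0;
}

int main(void)
{
	struct rx_state rx = { .rcv_nxt = 5000, .dcrc_len = 4 };

	rx_pdu_hdr(&rx, 5000, 96);	/* in-order PDU of 96 bytes */
	rx_pdu_hdr(&rx, 4000, 48);	/* stale sequence: rejected */
	printf("rcv_nxt now %u\n", rx.rcv_nxt);
	return 0;
}
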
1352 struct cxgbi_sock *csk;
1360 csk = lookup_tid(t, tid);
1361 if (unlikely(!csk)) {
1367 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1368 csk, csk->state, csk->flags, csk->tid, skb,
1371 spin_lock_bh(&csk->lock);
1373 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1375 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1376 csk, csk->state, csk->flags, csk->tid);
1378 if (csk->state != CTP_ABORTING)
1391 if (!csk->skb_ulp_lhdr)
1392 csk->skb_ulp_lhdr = skb;
1394 lskb = csk->skb_ulp_lhdr;
1398 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1399 csk, csk->state, csk->flags, skb, lskb);
1401 __skb_queue_tail(&csk->receive_queue, skb);
1402 spin_unlock_bh(&csk->lock);
1406 send_abort_req(csk);
1408 spin_unlock_bh(&csk->lock);
1414 cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1418 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1419 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1424 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1425 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1431 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1432 csk, skb, ddpvld);
1439 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1440 csk, skb, ddpvld);
1448 struct cxgbi_sock *csk;
1456 csk = lookup_tid(t, tid);
1457 if (unlikely(!csk)) {
1463 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1464 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1466 spin_lock_bh(&csk->lock);
1468 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1470 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1471 csk, csk->state, csk->flags, csk->tid);
1472 if (csk->state != CTP_ABORTING)
1478 if (!csk->skb_ulp_lhdr) {
1479 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1483 lskb = csk->skb_ulp_lhdr;
1484 csk->skb_ulp_lhdr = NULL;
1490 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1492 cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1495 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1496 csk, lskb, cxgbi_skcb_flags(lskb));
1499 cxgbi_conn_pdu_ready(csk);
1500 spin_unlock_bh(&csk->lock);
1504 send_abort_req(csk);
1506 spin_unlock_bh(&csk->lock);
1514 struct cxgbi_sock *csk;
1524 csk = lookup_tid(t, tid);
1525 if (unlikely(!csk)) {
1531 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1533 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1536 spin_lock_bh(&csk->lock);
1538 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1540 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1541 csk, csk->state, csk->flags, csk->tid);
1543 if (csk->state != CTP_ABORTING)
1557 csk->rcv_nxt = seq + pdu_len_ddp;
1559 if (csk->skb_ulp_lhdr) {
1560 data_skb = skb_peek(&csk->receive_queue);
1568 __skb_unlink(data_skb, &csk->receive_queue);
1572 __skb_queue_tail(&csk->receive_queue, skb);
1573 __skb_queue_tail(&csk->receive_queue, data_skb);
1575 __skb_queue_tail(&csk->receive_queue, skb);
1578 csk->skb_ulp_lhdr = NULL;
1585 cxgb4i_process_ddpvld(csk, skb, ddpvld);
1587 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1588 csk, skb, cxgbi_skcb_flags(skb));
1590 cxgbi_conn_pdu_ready(csk);
1591 spin_unlock_bh(&csk->lock);
1596 send_abort_req(csk);
1598 spin_unlock_bh(&csk->lock);
1605 struct cxgbi_sock *csk;
1611 csk = lookup_tid(t, tid);
1612 if (unlikely(!csk))
1616 "csk 0x%p,%u,0x%lx,%u.\n",
1617 csk, csk->state, csk->flags, csk->tid);
1618 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1630 struct cxgbi_sock *csk;
1632 csk = lookup_tid(t, tid);
1633 if (!csk) {
1639 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1640 csk, csk->state, csk->flags, csk->tid, rpl->status);
1643 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1644 csk, tid, rpl->status);
1645 csk->err = -EINVAL;
1648 complete(&csk->cmpl);
1653 static int alloc_cpls(struct cxgbi_sock *csk)
1655 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1657 if (!csk->cpl_close)
1660 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1662 if (!csk->cpl_abort_req)
1665 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1667 if (!csk->cpl_abort_rpl)
1672 cxgbi_sock_free_cpl_skbs(csk);
1676 static inline void l2t_put(struct cxgbi_sock *csk)
1678 if (csk->l2t) {
1679 cxgb4_l2t_release(csk->l2t);
1680 csk->l2t = NULL;
1681 cxgbi_sock_put(csk);
1685 static void release_offload_resources(struct cxgbi_sock *csk)
1689 struct net_device *ndev = csk->cdev->ports[csk->port_id];
1693 "csk 0x%p,%u,0x%lx,%u.\n",
1694 csk, csk->state, csk->flags, csk->tid);
1696 cxgbi_sock_free_cpl_skbs(csk);
1697 cxgbi_sock_purge_write_queue(csk);
1698 if (csk->wr_cred != csk->wr_max_cred) {
1699 cxgbi_sock_purge_wr_queue(csk);
1700 cxgbi_sock_reset_wr_list(csk);
1703 l2t_put(csk);
1705 if (csk->csk_family == AF_INET6)
1707 (const u32 *)&csk->saddr6.sin6_addr, 1);
1710 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1711 free_atid(csk);
1712 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1713 lldi = cxgbi_cdev_priv(csk->cdev);
1714 cxgb4_remove_tid(lldi->tids, 0, csk->tid,
1715 csk->csk_family);
1716 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1717 cxgbi_sock_put(csk);
1719 csk->dst = NULL;
1766 static int init_act_open(struct cxgbi_sock *csk)
1768 struct cxgbi_device *cdev = csk->cdev;
1770 struct net_device *ndev = cdev->ports[csk->port_id];
1783 "csk 0x%p,%u,0x%lx,%u.\n",
1784 csk, csk->state, csk->flags, csk->tid);
1786 if (csk->csk_family == AF_INET)
1787 daddr = &csk->daddr.sin_addr.s_addr;
1789 else if (csk->csk_family == AF_INET6)
1790 daddr = &csk->daddr6.sin6_addr;
1793 pr_err("address family 0x%x not supported\n", csk->csk_family);
1797 n = dst_neigh_lookup(csk->dst, daddr);
1800 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1807 csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1808 if (csk->atid < 0) {
1812 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1813 cxgbi_sock_get(csk);
1819 csk->dcb_priority = priority;
1820 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
1822 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1824 if (!csk->l2t) {
1828 cxgbi_sock_get(csk);
1831 if (csk->csk_family == AF_INET6)
1832 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1846 if (csk->csk_family == AF_INET)
1855 skb->sk = (struct sock *)csk;
1856 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1858 if (!csk->mtu)
1859 csk->mtu = dst_mtu(csk->dst);
1860 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1861 csk->tx_chan = cxgb4_port_chan(ndev);
1862 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1864 csk->txq_idx = cxgb4_port_idx(ndev) * step;
1868 csk->rss_qid = lldi->rxq_ids[rxq_idx];
1870 csk->snd_win = cxgb4i_snd_win;
1871 csk->rcv_win = cxgb4i_rcv_win;
1873 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1876 csk->rcv_win *= rcv_winf;
1879 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1882 csk->snd_win *= snd_winf;
1884 csk->wr_cred = lldi->wr_cred -
1886 csk->wr_max_cred = csk->wr_cred;
1887 csk->wr_una_cred = 0;
1888 cxgbi_sock_reset_wr_list(csk);
1889 csk->err = 0;
1891 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1892 (&csk->saddr), (&csk->daddr), csk, csk->state,
1893 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1894 csk->mtu, csk->mss_idx, csk->smac_idx);
1902 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1903 if (csk->csk_family == AF_INET)
1904 send_act_open_req(csk, skb, csk->l2t);
1907 send_act_open_req6(csk, skb, csk->l2t);
1915 if (csk->csk_family == AF_INET6)
1917 (const u32 *)&csk->saddr6.sin6_addr, 1);
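
init_act_open() sizes the connection from the egress port: neighbour and L2T entry, an atid, the best MTU-table index for the path MTU (feeding mss_idx), TX channel, SMT index, RSS queue, send/receive windows and the WR credit budget, before handing the skb to send_act_open_req()/req6(). Below is a rough model of the MTU-table selection only; the table contents are examples, and the rule (largest entry not above the path MTU, with index 0 as the floor) is my reading of cxgb4_best_mtu(), not code from the driver:

#include <stdio.h>

/* Rough model of the MTU-table lookup feeding csk->mss_idx: the adapter keeps
 * a small sorted MTU table and the connection uses the largest entry that
 * does not exceed the path MTU. */
static unsigned int best_mtu_idx(const unsigned short *mtus, unsigned int n,
				 unsigned int pmtu)
{
	unsigned int i = 0;

	while (i + 1 < n && mtus[i + 1] <= pmtu)
		i++;
	return i;
}

int main(void)
{
	/* example table, not the adapter's real default MTU table */
	const unsigned short mtus[] = { 88, 256, 512, 576, 808, 1024, 1280,
					1488, 1500, 2002, 2048, 4096, 9000 };
	unsigned int idx = best_mtu_idx(mtus, sizeof(mtus) / sizeof(mtus[0]),
					1500);

	printf("path mtu 1500 -> table index %u (mtu %u)\n", idx, mtus[idx]);
	return 0;
}
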
2016 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2022 struct cxgbi_device *cdev = csk->cdev;
2024 csk->tid);
2042 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
2044 spin_lock_bh(&csk->lock);
2045 cxgbi_sock_skb_entail(csk, skb);
2046 spin_unlock_bh(&csk->lock);
2051 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2061 ttinfo->cid = csk->port_id;
2068 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
2077 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2092 INIT_TP_WR(req, csk->tid);
2093 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2094 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2098 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2101 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2103 reinit_completion(&csk->cmpl);
2104 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2105 wait_for_completion(&csk->cmpl);
2107 return csk->err;
2110 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2123 csk->hcrc_len = (hcrc ? 4 : 0);
2124 csk->dcrc_len = (dcrc ? 4 : 0);
2129 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2134 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2137 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2139 reinit_completion(&csk->cmpl);
2140 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2141 wait_for_completion(&csk->cmpl);
2143 return csk->err;
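
ddp_setup_conn_pgidx() and ddp_setup_conn_digest() share a synchronous pattern around the SET_TCB_FIELD request: reinit_completion(&csk->cmpl), send the WR on the control queue, sleep in wait_for_completion() until do_set_tcb_rpl() calls complete(), then return csk->err (set to -EINVAL by the reply handler on a bad status). A pthread-based model of that handshake, using a condition variable in place of the kernel completion; everything here is a stand-in for the driver's objects:

#include <pthread.h>
#include <stdio.h>

/* Model of the reinit_completion()/wait_for_completion()/complete() handshake
 * shared by ddp_setup_conn_pgidx()/_digest() and do_set_tcb_rpl(). */
struct tcb_req {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
	int err;			/* models csk->err */
};

static void *reply_handler(void *arg)	/* models do_set_tcb_rpl() */
{
	struct tcb_req *r = arg;

	pthread_mutex_lock(&r->lock);
	r->err = 0;			/* would be -EINVAL on a bad CPL status */
	r->completed = 1;
	pthread_cond_signal(&r->done);	/* complete(&csk->cmpl) */
	pthread_mutex_unlock(&r->lock);
	return NULL;
}

int main(void)
{
	struct tcb_req r = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER, 0, 0 };
	pthread_t t;

	/* reinit_completion(), then "send" the SET_TCB_FIELD work request */
	pthread_create(&t, NULL, reply_handler, &r);

	pthread_mutex_lock(&r.lock);	/* wait_for_completion(&csk->cmpl) */
	while (!r.completed)
		pthread_cond_wait(&r.done, &r.lock);
	pthread_mutex_unlock(&r.lock);
	pthread_join(t, NULL);

	printf("setup returned %d\n", r.err);
	return 0;
}
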
2429 struct cxgbi_sock *csk = pmap->port_csk[i];
2431 if (csk->dcb_priority != priority) {
2432 iscsi_conn_failure(csk->user_data,
2435 "priority %u->%u.\n", csk,
2436 csk->dcb_priority, priority);