Lines matching refs: csk

207 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
210 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
211 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
214 unsigned int qid_atid = ((unsigned int)csk->atid) |
215 (((unsigned int)csk->rss_qid) << 14);
219 MSS_IDX_V(csk->mss_idx) |
220 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
221 TX_CHAN_V(csk->tx_chan) |
222 SMAC_SEL_V(csk->smac_idx) |
224 RCV_BUFSIZ_V(csk->rcv_win >> 10);
228 RSS_QUEUE_V(csk->rss_qid);
237 req->local_port = csk->saddr.sin_port;
238 req->peer_port = csk->daddr.sin_port;
239 req->local_ip = csk->saddr.sin_addr.s_addr;
240 req->peer_ip = csk->daddr.sin_addr.s_addr;
243 csk->cdev->ports[csk->port_id],
244 csk->l2t));
249 "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
250 csk, &req->local_ip, ntohs(req->local_port),
252 csk->atid, csk->rss_qid);
261 req->local_port = csk->saddr.sin_port;
262 req->peer_port = csk->daddr.sin_port;
263 req->local_ip = csk->saddr.sin_addr.s_addr;
264 req->peer_ip = csk->daddr.sin_addr.s_addr;
268 csk->cdev->ports[csk->port_id],
269 csk->l2t)));
277 "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
278 csk, &req->local_ip, ntohs(req->local_port),
280 csk->atid, csk->rss_qid);
289 req->local_port = csk->saddr.sin_port;
290 req->peer_port = csk->daddr.sin_port;
291 req->local_ip = csk->saddr.sin_addr.s_addr;
292 req->peer_ip = csk->daddr.sin_addr.s_addr;
296 csk->cdev->ports[csk->port_id],
297 csk->l2t)));
309 "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
310 csk, &req->local_ip, ntohs(req->local_port),
312 csk->atid, csk->rss_qid);
315 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
317 pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
318 (&csk->saddr), (&csk->daddr),
319 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
320 csk->state, csk->flags, csk->atid, csk->rss_qid);
322 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
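
Illustrative note (not part of the listing): lines 214-215 above pack the connection's atid and its RSS ingress queue id into the single qid_atid cookie carried by the active-open request. A minimal sketch of that packing, with a hypothetical helper name:

	/* Hypothetical helper mirroring lines 214-215: atid occupies the
	 * low bits, the RSS queue id starts at bit 14. */
	static inline unsigned int make_qid_atid(unsigned int atid,
						 unsigned int rss_qid)
	{
		return atid | (rss_qid << 14);
	}
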
326 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
329 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
330 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
333 unsigned int qid_atid = ((unsigned int)csk->atid) |
334 (((unsigned int)csk->rss_qid) << 14);
338 MSS_IDX_V(csk->mss_idx) |
339 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
340 TX_CHAN_V(csk->tx_chan) |
341 SMAC_SEL_V(csk->smac_idx) |
343 RCV_BUFSIZ_V(csk->rcv_win >> 10);
347 RSS_QUEUE_V(csk->rss_qid);
356 req->local_port = csk->saddr6.sin6_port;
357 req->peer_port = csk->daddr6.sin6_port;
359 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
360 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
362 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
363 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
372 csk->cdev->ports[csk->port_id],
373 csk->l2t));
381 req->local_port = csk->saddr6.sin6_port;
382 req->peer_port = csk->daddr6.sin6_port;
383 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
384 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
386 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
387 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
395 csk->cdev->ports[csk->port_id],
396 csk->l2t)));
404 req->local_port = csk->saddr6.sin6_port;
405 req->peer_port = csk->daddr6.sin6_port;
406 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
407 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
409 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
410 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
420 csk->cdev->ports[csk->port_id],
421 csk->l2t)));
427 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
429 pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
430 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
431 csk->flags, csk->atid,
432 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
433 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
434 csk->rss_qid);
436 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
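
Illustrative sketch (not from the file) of the address handling at lines 359-363 and the parallel T5/T6 branches above: the 128-bit IPv6 address is written into the CPL as two big-endian 64-bit halves, while the ports are copied straight from the sockaddr in network order. The types below are simplified stand-ins for the kernel's __be64.

	#include <stdint.h>
	#include <string.h>

	typedef uint64_t be64;	/* stand-in for the kernel's __be64 */

	/* Copy a 16-byte IPv6 address into hi/lo CPL fields, as done for
	 * req->local_ip_hi/lo and req->peer_ip_hi/lo in the matches above. */
	static void ipv6_to_cpl(const uint8_t addr[16], be64 *hi, be64 *lo)
	{
		memcpy(hi, addr, 8);		/* upper 64 bits, network order */
		memcpy(lo, addr + 8, 8);	/* lower 64 bits */
	}
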
440 static void send_close_req(struct cxgbi_sock *csk)
442 struct sk_buff *skb = csk->cpl_close;
444 unsigned int tid = csk->tid;
447 "csk 0x%p,%u,0x%lx, tid %u.\n",
448 csk, csk->state, csk->flags, csk->tid);
449 csk->cpl_close = NULL;
450 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
455 cxgbi_sock_skb_entail(csk, skb);
456 if (csk->state >= CTP_ESTABLISHED)
457 push_tx_frames(csk, 1);
462 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
466 "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
467 csk, csk->state, csk->flags, csk->tid);
470 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
473 static void send_abort_req(struct cxgbi_sock *csk)
476 struct sk_buff *skb = csk->cpl_abort_req;
478 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
481 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
482 send_tx_flowc_wr(csk);
483 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
486 cxgbi_sock_set_state(csk, CTP_ABORTING);
487 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
488 cxgbi_sock_purge_write_queue(csk);
490 csk->cpl_abort_req = NULL;
492 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
494 t4_set_arp_err_handler(skb, csk, abort_arp_failure);
495 INIT_TP_WR(req, csk->tid);
496 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
497 req->rsvd0 = htonl(csk->snd_nxt);
498 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
501 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
502 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
505 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
508 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
510 struct sk_buff *skb = csk->cpl_abort_rpl;
514 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
515 csk, csk->state, csk->flags, csk->tid, rst_status);
517 csk->cpl_abort_rpl = NULL;
518 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
519 INIT_TP_WR(rpl, csk->tid);
520 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
522 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
530 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
536 "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
537 csk, csk->state, csk->flags, csk->tid, credits);
541 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
546 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
547 INIT_TP_WR(req, csk->tid);
549 csk->tid));
552 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
613 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
620 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
628 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
630 flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
632 flowc->mnemval[1].val = htonl(csk->tx_chan);
634 flowc->mnemval[2].val = htonl(csk->tx_chan);
636 flowc->mnemval[3].val = htonl(csk->rss_qid);
638 flowc->mnemval[4].val = htonl(csk->snd_nxt);
640 flowc->mnemval[5].val = htonl(csk->rcv_nxt);
642 flowc->mnemval[6].val = htonl(csk->snd_win);
644 flowc->mnemval[7].val = htonl(csk->advmss);
648 if (csk->cdev->skb_iso_txhdr)
655 pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
656 csk->tid);
664 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
667 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
668 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
669 csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
670 csk->advmss);
672 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
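
Illustrative summary (not part of the file) of the per-connection parameters that lines 630-644 above load into the FLOWC work request's mnemval slots; the short descriptions are assumptions inferred from the field names, not taken from the matches.

	/* mnemval index -> value taken from the csk, per lines 630-644 above */
	static const char *flowc_vals[] = {
		[0] = "csk->cdev->pfvf",	/* owning PF/VF              */
		[1] = "csk->tx_chan",		/* tx channel                */
		[2] = "csk->tx_chan",		/* port/channel (same value) */
		[3] = "csk->rss_qid",		/* RSS ingress queue         */
		[4] = "csk->snd_nxt",		/* initial send sequence     */
		[5] = "csk->rcv_nxt",		/* initial receive sequence  */
		[6] = "csk->snd_win",		/* send window/buffer        */
		[7] = "csk->advmss",		/* advertised MSS            */
	};
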
713 cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
716 struct cxgbi_device *cdev = csk->cdev;
742 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
757 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
758 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
766 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
771 if (unlikely(csk->state < CTP_ESTABLISHED ||
772 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
775 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
776 csk, csk->state, csk->flags, csk->tid);
780 while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
807 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
808 flowclen16 = send_tx_flowc_wr(csk);
809 csk->wr_cred -= flowclen16;
810 csk->wr_una_cred += flowclen16;
811 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
814 if (csk->wr_cred < credits_needed) {
816 "csk 0x%p, skb %u/%u, wr %d < %u.\n",
817 csk, skb->len, skb->data_len,
818 credits_needed, csk->wr_cred);
820 csk->no_tx_credits++;
824 csk->no_tx_credits = 0;
826 __skb_unlink(skb, &csk->write_queue);
827 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
829 csk->wr_cred -= credits_needed;
830 csk->wr_una_cred += credits_needed;
831 cxgbi_sock_enqueue_wr(csk, skb);
834 "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
835 csk, skb->len, skb->data_len, credits_needed,
836 csk->wr_cred, csk->wr_una_cred);
839 ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
840 after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
856 cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
858 csk->snd_nxt += len;
861 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
869 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
872 "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
873 csk, csk->state, csk->flags, csk->tid, skb, len);
874 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
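
Illustrative, self-contained model (not from the file) of the work-request credit bookkeeping visible at lines 814-839 above: each skb pulled from the write queue consumes credits_needed from wr_cred and adds them to wr_una_cred, and a completion is requested once roughly half of wr_max_cred is outstanding. Names other than wr_cred/wr_una_cred/wr_max_cred are hypothetical.

	struct tx_credits {
		unsigned int wr_cred;		/* credits currently available   */
		unsigned int wr_una_cred;	/* consumed but not yet returned */
		unsigned int wr_max_cred;	/* credits granted at setup      */
	};

	/* Returns -1 if out of credits (stop and wait for a credit return),
	 * 1 if the WR should request a completion, 0 otherwise. */
	static int consume_tx_credits(struct tx_credits *c, unsigned int needed)
	{
		if (c->wr_cred < needed)
			return -1;
		c->wr_cred -= needed;
		c->wr_una_cred += needed;
		/* mirror line 839: ask for a completion once half of the
		 * credits are unacknowledged, so they are returned promptly */
		return (c->wr_una_cred >= c->wr_max_cred / 2) ? 1 : 0;
	}
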
879 static inline void free_atid(struct cxgbi_sock *csk)
881 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
883 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
884 cxgb4_free_atid(lldi->tids, csk->atid);
885 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
886 cxgbi_sock_put(csk);
892 struct cxgbi_sock *csk;
901 csk = lookup_atid(t, atid);
902 if (unlikely(!csk)) {
907 if (csk->atid != atid) {
908 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
909 atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
913 pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
914 (&csk->saddr), (&csk->daddr),
915 atid, tid, csk, csk->state, csk->flags, rcv_isn);
919 cxgbi_sock_get(csk);
920 csk->tid = tid;
921 cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
922 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
924 free_atid(csk);
926 spin_lock_bh(&csk->lock);
927 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
928 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
929 csk, csk->state, csk->flags, csk->tid);
931 if (csk->retry_timer.function) {
932 del_timer(&csk->retry_timer);
933 csk->retry_timer.function = NULL;
936 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
941 if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
942 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
944 csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
946 csk->advmss -= 12;
947 if (csk->advmss < 128)
948 csk->advmss = 128;
951 "csk 0x%p, mss_idx %u, advmss %u.\n",
952 csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
954 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
956 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
957 send_abort_req(csk);
959 if (skb_queue_len(&csk->write_queue))
960 push_tx_frames(csk, 0);
961 cxgbi_conn_tx_open(csk);
963 spin_unlock_bh(&csk->lock);
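
Illustrative sketch (not from the file) covering lines 936-948 above: on ACT_ESTABLISH the receive pointers are seeded from the peer's ISN, rcv_wup is pulled back when the receive window exceeds what the RCV_BUFSIZ field can express, and the advertised MSS is derived from the negotiated MTU entry. The condition guarding the 12-byte subtraction (source line 945) is not in the matches; TCP timestamps are assumed here.

	/* Self-contained model of the MSS derivation around lines 944-948:
	 * MTU minus 40 bytes of IPv4+TCP header, minus 12 more when TCP
	 * timestamps are negotiated (assumed condition), floor of 128. */
	static unsigned int derive_advmss(unsigned int mtu, int tcp_timestamps)
	{
		unsigned int advmss = mtu - 40;

		if (tcp_timestamps)
			advmss -= 12;
		if (advmss < 128)
			advmss = 128;
		return advmss;
	}
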
990 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
991 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
997 "csk 0x%p,%u,0x%lx,%u.\n",
998 csk, csk->state, csk->flags, csk->tid);
1000 cxgbi_sock_get(csk);
1001 spin_lock_bh(&csk->lock);
1011 if (csk->csk_family == AF_INET) {
1022 cxgbi_sock_fail_act_open(csk, -ENOMEM);
1024 skb->sk = (struct sock *)csk;
1025 t4_set_arp_err_handler(skb, csk,
1027 send_act_open_func(csk, skb, csk->l2t);
1030 spin_unlock_bh(&csk->lock);
1031 cxgbi_sock_put(csk);
1044 struct cxgbi_sock *csk;
1053 csk = lookup_atid(t, atid);
1054 if (unlikely(!csk)) {
1060 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
1061 atid, tid, status, csk, csk->state, csk->flags);
1071 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
1072 csk->csk_family);
1074 cxgbi_sock_get(csk);
1075 spin_lock_bh(&csk->lock);
1078 csk->retry_timer.function != csk_act_open_retry_timer) {
1079 csk->retry_timer.function = csk_act_open_retry_timer;
1080 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
1082 cxgbi_sock_fail_act_open(csk,
1085 spin_unlock_bh(&csk->lock);
1086 cxgbi_sock_put(csk);
1093 struct cxgbi_sock *csk;
1099 csk = lookup_tid(t, tid);
1100 if (unlikely(!csk)) {
1104 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1105 (&csk->saddr), (&csk->daddr),
1106 csk, csk->state, csk->flags, csk->tid);
1107 cxgbi_sock_rcv_peer_close(csk);
1114 struct cxgbi_sock *csk;
1120 csk = lookup_tid(t, tid);
1121 if (unlikely(!csk)) {
1125 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
1126 (&csk->saddr), (&csk->daddr),
1127 csk, csk->state, csk->flags, csk->tid);
1128 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
1133 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
1139 return csk->state > CTP_ESTABLISHED ?
1153 struct cxgbi_sock *csk;
1160 csk = lookup_tid(t, tid);
1161 if (unlikely(!csk)) {
1166 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1167 (&csk->saddr), (&csk->daddr),
1168 csk, csk->state, csk->flags, csk->tid, req->status);
1173 cxgbi_sock_get(csk);
1174 spin_lock_bh(&csk->lock);
1176 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
1178 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
1179 send_tx_flowc_wr(csk);
1180 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
1183 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
1184 cxgbi_sock_set_state(csk, CTP_ABORTING);
1186 send_abort_rpl(csk, rst_status);
1188 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
1189 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
1190 cxgbi_sock_closed(csk);
1193 spin_unlock_bh(&csk->lock);
1194 cxgbi_sock_put(csk);
1201 struct cxgbi_sock *csk;
1207 csk = lookup_tid(t, tid);
1208 if (!csk)
1211 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1212 (&csk->saddr), (&csk->daddr), csk,
1213 csk->state, csk->flags, csk->tid, rpl->status);
1218 cxgbi_sock_rcv_abort_rpl(csk);
1225 struct cxgbi_sock *csk;
1231 csk = lookup_tid(t, tid);
1232 if (!csk) {
1236 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
1237 spin_lock_bh(&csk->lock);
1238 send_abort_req(csk);
1239 spin_unlock_bh(&csk->lock);
1246 struct cxgbi_sock *csk;
1253 csk = lookup_tid(t, tid);
1254 if (unlikely(!csk)) {
1260 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1261 csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1264 spin_lock_bh(&csk->lock);
1266 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1268 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1269 csk, csk->state, csk->flags, csk->tid);
1270 if (csk->state != CTP_ABORTING)
1283 if (!csk->skb_ulp_lhdr) {
1288 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1289 csk, csk->state, csk->flags, csk->tid, skb);
1290 csk->skb_ulp_lhdr = skb;
1294 (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
1296 csk->tid, cxgbi_skcb_tcp_seq(skb),
1297 csk->rcv_nxt);
1312 csk->tid, plen, hlen, dlen,
1319 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1320 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1323 "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1324 csk, skb, *bhs, hlen, dlen,
1329 struct sk_buff *lskb = csk->skb_ulp_lhdr;
1333 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1334 csk, csk->state, csk->flags, skb, lskb);
1337 __skb_queue_tail(&csk->receive_queue, skb);
1338 spin_unlock_bh(&csk->lock);
1342 send_abort_req(csk);
1344 spin_unlock_bh(&csk->lock);
1351 struct cxgbi_sock *csk;
1359 csk = lookup_tid(t, tid);
1360 if (unlikely(!csk)) {
1366 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1367 csk, csk->state, csk->flags, csk->tid, skb,
1370 spin_lock_bh(&csk->lock);
1372 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1374 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1375 csk, csk->state, csk->flags, csk->tid);
1377 if (csk->state != CTP_ABORTING)
1390 if (!csk->skb_ulp_lhdr)
1391 csk->skb_ulp_lhdr = skb;
1393 lskb = csk->skb_ulp_lhdr;
1397 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1398 csk, csk->state, csk->flags, skb, lskb);
1400 __skb_queue_tail(&csk->receive_queue, skb);
1401 spin_unlock_bh(&csk->lock);
1405 send_abort_req(csk);
1407 spin_unlock_bh(&csk->lock);
1413 cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1417 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1418 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1423 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1424 csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1430 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1431 csk, skb, ddpvld);
1438 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1439 csk, skb, ddpvld);
1447 struct cxgbi_sock *csk;
1455 csk = lookup_tid(t, tid);
1456 if (unlikely(!csk)) {
1462 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1463 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1465 spin_lock_bh(&csk->lock);
1467 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1469 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1470 csk, csk->state, csk->flags, csk->tid);
1471 if (csk->state != CTP_ABORTING)
1477 if (!csk->skb_ulp_lhdr) {
1478 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1482 lskb = csk->skb_ulp_lhdr;
1483 csk->skb_ulp_lhdr = NULL;
1489 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1491 cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1494 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1495 csk, lskb, cxgbi_skcb_flags(lskb));
1498 cxgbi_conn_pdu_ready(csk);
1499 spin_unlock_bh(&csk->lock);
1503 send_abort_req(csk);
1505 spin_unlock_bh(&csk->lock);
1513 struct cxgbi_sock *csk;
1523 csk = lookup_tid(t, tid);
1524 if (unlikely(!csk)) {
1530 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1532 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1535 spin_lock_bh(&csk->lock);
1537 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1539 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1540 csk, csk->state, csk->flags, csk->tid);
1542 if (csk->state != CTP_ABORTING)
1556 csk->rcv_nxt = seq + pdu_len_ddp;
1558 if (csk->skb_ulp_lhdr) {
1559 data_skb = skb_peek(&csk->receive_queue);
1567 __skb_unlink(data_skb, &csk->receive_queue);
1571 __skb_queue_tail(&csk->receive_queue, skb);
1572 __skb_queue_tail(&csk->receive_queue, data_skb);
1574 __skb_queue_tail(&csk->receive_queue, skb);
1577 csk->skb_ulp_lhdr = NULL;
1584 cxgb4i_process_ddpvld(csk, skb, ddpvld);
1586 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1587 csk, skb, cxgbi_skcb_flags(skb));
1589 cxgbi_conn_pdu_ready(csk);
1590 spin_unlock_bh(&csk->lock);
1595 send_abort_req(csk);
1597 spin_unlock_bh(&csk->lock);
1604 struct cxgbi_sock *csk;
1610 csk = lookup_tid(t, tid);
1611 if (unlikely(!csk))
1615 "csk 0x%p,%u,0x%lx,%u.\n",
1616 csk, csk->state, csk->flags, csk->tid);
1617 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1629 struct cxgbi_sock *csk;
1631 csk = lookup_tid(t, tid);
1632 if (!csk) {
1638 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1639 csk, csk->state, csk->flags, csk->tid, rpl->status);
1642 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1643 csk, tid, rpl->status);
1644 csk->err = -EINVAL;
1647 complete(&csk->cmpl);
1652 static int alloc_cpls(struct cxgbi_sock *csk)
1654 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1656 if (!csk->cpl_close)
1659 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1661 if (!csk->cpl_abort_req)
1664 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1666 if (!csk->cpl_abort_rpl)
1671 cxgbi_sock_free_cpl_skbs(csk);
1675 static inline void l2t_put(struct cxgbi_sock *csk)
1677 if (csk->l2t) {
1678 cxgb4_l2t_release(csk->l2t);
1679 csk->l2t = NULL;
1680 cxgbi_sock_put(csk);
1684 static void release_offload_resources(struct cxgbi_sock *csk)
1688 struct net_device *ndev = csk->cdev->ports[csk->port_id];
1692 "csk 0x%p,%u,0x%lx,%u.\n",
1693 csk, csk->state, csk->flags, csk->tid);
1695 cxgbi_sock_free_cpl_skbs(csk);
1696 cxgbi_sock_purge_write_queue(csk);
1697 if (csk->wr_cred != csk->wr_max_cred) {
1698 cxgbi_sock_purge_wr_queue(csk);
1699 cxgbi_sock_reset_wr_list(csk);
1702 l2t_put(csk);
1704 if (csk->csk_family == AF_INET6)
1706 (const u32 *)&csk->saddr6.sin6_addr, 1);
1709 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1710 free_atid(csk);
1711 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1712 lldi = cxgbi_cdev_priv(csk->cdev);
1713 cxgb4_remove_tid(lldi->tids, 0, csk->tid,
1714 csk->csk_family);
1715 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1716 cxgbi_sock_put(csk);
1718 csk->dst = NULL;
1765 static int init_act_open(struct cxgbi_sock *csk)
1767 struct cxgbi_device *cdev = csk->cdev;
1769 struct net_device *ndev = cdev->ports[csk->port_id];
1782 "csk 0x%p,%u,0x%lx,%u.\n",
1783 csk, csk->state, csk->flags, csk->tid);
1785 if (csk->csk_family == AF_INET)
1786 daddr = &csk->daddr.sin_addr.s_addr;
1788 else if (csk->csk_family == AF_INET6)
1789 daddr = &csk->daddr6.sin6_addr;
1792 pr_err("address family 0x%x not supported\n", csk->csk_family);
1796 n = dst_neigh_lookup(csk->dst, daddr);
1799 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1806 csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1807 if (csk->atid < 0) {
1811 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1812 cxgbi_sock_get(csk);
1818 csk->dcb_priority = priority;
1819 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
1821 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1823 if (!csk->l2t) {
1827 cxgbi_sock_get(csk);
1830 if (csk->csk_family == AF_INET6)
1831 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1845 if (csk->csk_family == AF_INET)
1854 skb->sk = (struct sock *)csk;
1855 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1857 if (!csk->mtu)
1858 csk->mtu = dst_mtu(csk->dst);
1859 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1860 csk->tx_chan = cxgb4_port_chan(ndev);
1861 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1863 csk->txq_idx = cxgb4_port_idx(ndev) * step;
1867 csk->rss_qid = lldi->rxq_ids[rxq_idx];
1869 csk->snd_win = cxgb4i_snd_win;
1870 csk->rcv_win = cxgb4i_rcv_win;
1872 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1875 csk->rcv_win *= rcv_winf;
1878 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1881 csk->snd_win *= snd_winf;
1883 csk->wr_cred = lldi->wr_cred -
1885 csk->wr_max_cred = csk->wr_cred;
1886 csk->wr_una_cred = 0;
1887 cxgbi_sock_reset_wr_list(csk);
1888 csk->err = 0;
1890 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1891 (&csk->saddr), (&csk->daddr), csk, csk->state,
1892 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1893 csk->mtu, csk->mss_idx, csk->smac_idx);
1901 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1902 if (csk->csk_family == AF_INET)
1903 send_act_open_req(csk, skb, csk->l2t);
1906 send_act_open_req6(csk, skb, csk->l2t);
1914 if (csk->csk_family == AF_INET6)
1916 (const u32 *)&csk->saddr6.sin6_addr, 1);
2015 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2021 struct cxgbi_device *cdev = csk->cdev;
2023 csk->tid);
2041 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
2043 spin_lock_bh(&csk->lock);
2044 cxgbi_sock_skb_entail(csk, skb);
2045 spin_unlock_bh(&csk->lock);
2050 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
2060 ttinfo->cid = csk->port_id;
2067 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
2076 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2091 INIT_TP_WR(req, csk->tid);
2092 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2093 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2097 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2100 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2102 reinit_completion(&csk->cmpl);
2103 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2104 wait_for_completion(&csk->cmpl);
2106 return csk->err;
2109 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2122 csk->hcrc_len = (hcrc ? 4 : 0);
2123 csk->dcrc_len = (dcrc ? 4 : 0);
2128 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2133 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2136 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2138 reinit_completion(&csk->cmpl);
2139 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2140 wait_for_completion(&csk->cmpl);
2142 return csk->err;
2428 struct cxgbi_sock *csk = pmap->port_csk[i];
2430 if (csk->dcb_priority != priority) {
2431 iscsi_conn_failure(csk->user_data,
2434 "priority %u->%u.\n", csk,
2435 csk->dcb_priority, priority);