Lines Matching refs:csk

157 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
159 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
162 unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
168 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
169 req->local_port = csk->saddr.sin_port;
170 req->peer_port = csk->daddr.sin_port;
171 req->local_ip = csk->saddr.sin_addr.s_addr;
172 req->peer_ip = csk->daddr.sin_addr.s_addr;
175 V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
178 V_RCV_BUFSIZ(csk->rcv_win >> 10));
181 "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
182 csk, csk->state, csk->flags, csk->atid,
185 csk->mss_idx, e->idx, e->smt_idx);
187 l2t_send(csk->cdev->lldev, skb, csk->l2t);
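
Line 162 above derives a TCP window scale from the socket's receive window before the act-open request is built, and line 178 fills the request's RCV_BUFSIZ field in 1 KB units. Below is a minimal standalone sketch of what a compute_wscale()-style helper typically does; it models the behaviour only and is not the libcxgbi implementation of cxgbi_sock_compute_wscale().

#include <stdio.h>

/*
 * Simplified model of a compute_wscale()-style helper: the smallest TCP
 * window-scale shift (capped at 14 per RFC 7323) that lets a receive
 * window of 'win' bytes be advertised in a 16-bit window field.
 */
static unsigned int compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535U << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	unsigned int rcv_win = 256 * 1024;	/* example 256 KB receive window */

	/* 65535 << 2 is still below 262144, so the loop settles on wscale = 3 */
	printf("rcv_win %u -> wscale %u, RCV_BUFSIZ units (1 KB each) %u\n",
	       rcv_win, compute_wscale(rcv_win), rcv_win >> 10);
	return 0;
}
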
201 static void send_close_req(struct cxgbi_sock *csk)
203 struct sk_buff *skb = csk->cpl_close;
205 unsigned int tid = csk->tid;
208 "csk 0x%p,%u,0x%lx,%u.\n",
209 csk, csk->state, csk->flags, csk->tid);
211 csk->cpl_close = NULL;
215 req->rsvd = htonl(csk->write_seq);
217 cxgbi_sock_skb_entail(csk, skb);
218 if (csk->state >= CTP_ESTABLISHED)
219 push_tx_frames(csk, 1);
240 static void send_abort_req(struct cxgbi_sock *csk)
242 struct sk_buff *skb = csk->cpl_abort_req;
245 if (unlikely(csk->state == CTP_ABORTING || !skb))
247 cxgbi_sock_set_state(csk, CTP_ABORTING);
248 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
250 cxgbi_sock_purge_write_queue(csk);
252 csk->cpl_abort_req = NULL;
257 req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
258 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
259 req->rsvd0 = htonl(csk->snd_nxt);
260 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
264 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
265 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
268 l2t_send(csk->cdev->lldev, skb, csk->l2t);
276 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
278 struct sk_buff *skb = csk->cpl_abort_rpl;
282 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
283 csk, csk->state, csk->flags, csk->tid, rst_status);
285 csk->cpl_abort_rpl = NULL;
288 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
289 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
291 cxgb3_ofld_send(csk->cdev->lldev, skb);
299 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
306 "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
307 csk, csk->state, csk->flags, csk->tid, credits, dack);
311 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
316 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
320 cxgb3_ofld_send(csk->cdev->lldev, skb);
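
send_rx_credits() (lines 299-320) builds a CPL_RX_DATA_ACK that hands receive-window credits back to the hardware. The sketch below is a simplified userspace model of how a caller typically decides when to return such credits; struct sock_model and maybe_return_rx_credits() are hypothetical names for illustration, and the real bookkeeping lives on struct cxgbi_sock in libcxgbi.

#include <stdio.h>

struct sock_model {
	unsigned int rcv_win;	 /* advertised receive window, bytes  */
	unsigned int copied_seq; /* data already consumed by the ULP  */
	unsigned int rcv_wup;	 /* highest sequence already credited */
};

/* Stand-in for send_rx_credits(): assume the CPL always goes out and the
 * full amount is granted back to the hardware. */
static unsigned int send_rx_credits_model(unsigned int credits)
{
	return credits;
}

static void maybe_return_rx_credits(struct sock_model *s)
{
	unsigned int credits = s->copied_seq - s->rcv_wup;

	/* Only bother the hardware once a decent chunk has accumulated;
	 * the quarter-window threshold here is illustrative. */
	if (credits >= s->rcv_win / 4)
		s->rcv_wup += send_rx_credits_model(credits);
}

int main(void)
{
	struct sock_model s = { .rcv_win = 256 * 1024 };

	s.copied_seq = 100 * 1024;	/* application consumed 100 KB */
	maybe_return_rx_credits(&s);
	printf("rcv_wup advanced to %u\n", s.rcv_wup);	/* 102400 */
	return 0;
}
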
352 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
356 struct l2t_entry *l2t = csk->l2t;
362 req->wr_lo = htonl(V_WR_TID(csk->tid));
367 V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
368 req->sndseq = htonl(csk->snd_nxt);
371 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
373 V_TX_CPU_IDX(csk->rss_qid));
375 req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
376 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
394 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
399 if (unlikely(csk->state < CTP_ESTABLISHED ||
400 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
402 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
403 csk, csk->state, csk->flags, csk->tid);
407 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
417 if (csk->wr_cred < wrs_needed) {
419 "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
420 csk, skb->len, skb->data_len, frags,
421 wrs_needed, csk->wr_cred);
425 __skb_unlink(skb, &csk->write_queue);
428 csk->wr_cred -= wrs_needed;
429 csk->wr_una_cred += wrs_needed;
430 cxgbi_sock_enqueue_wr(csk, skb);
433 "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
435 csk, skb->len, skb->data_len, frags, skb->csum,
436 csk->wr_cred, csk->wr_una_cred);
440 csk->wr_una_cred == wrs_needed) ||
441 csk->wr_una_cred >= csk->wr_max_cred / 2) {
443 csk->wr_una_cred = 0;
446 make_tx_data_wr(csk, skb, len, req_completion);
447 csk->snd_nxt += len;
452 "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
453 csk, csk->tid, skb);
455 l2t_send(csk->cdev->lldev, skb, csk->l2t);
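
push_tx_frames() (lines 394-455) drains the write queue while work-request credits last: each skb costs wrs_needed credits, which move from wr_cred to wr_una_cred, and a completion is requested once wr_una_cred reaches half of wr_max_cred so the hardware returns credits. The following is a small standalone model of that bookkeeping only; struct conn, wr_send() and wr_ack() are illustrative names, not the driver code.

#include <stdio.h>

struct conn {
	unsigned int wr_max_cred;	/* total WR credits granted by HW  */
	unsigned int wr_cred;		/* credits currently available     */
	unsigned int wr_una_cred;	/* credits sent but not yet acked  */
};

/* Spend 'wrs_needed' credits on one skb; return 1 if a completion was
 * requested, 0 if not, -1 if there were not enough credits. */
static int wr_send(struct conn *c, unsigned int wrs_needed)
{
	int request_completion = 0;

	if (c->wr_cred < wrs_needed)
		return -1;		/* stop pushing, wait for an ack */

	c->wr_cred -= wrs_needed;
	c->wr_una_cred += wrs_needed;

	/* Ask for a completion once half the credits are outstanding,
	 * mirroring the wr_una_cred >= wr_max_cred / 2 test above. */
	if (c->wr_una_cred >= c->wr_max_cred / 2) {
		request_completion = 1;
		c->wr_una_cred = 0;
	}
	return request_completion;
}

/* Hardware acknowledged 'credits' worth of WRs: hand them back. */
static void wr_ack(struct conn *c, unsigned int credits)
{
	c->wr_cred += credits;
}

int main(void)
{
	struct conn c = { .wr_max_cred = 15, .wr_cred = 15 };
	unsigned int i;

	for (i = 0; i < 6; i++) {
		int completion = wr_send(&c, 2);

		printf("send %u: completion=%d cred=%u una=%u\n",
		       i, completion, c.wr_cred, c.wr_una_cred);
	}

	wr_ack(&c, 8);
	printf("after ack: cred=%u\n", c.wr_cred);
	return 0;
}

Requesting the completion once half the credits are outstanding presumably keeps acknowledgements, and therefore credits, flowing back before the queue can stall.
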
466 static inline void free_atid(struct cxgbi_sock *csk)
468 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
469 cxgb3_free_atid(csk->cdev->lldev, csk->atid);
470 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
471 cxgbi_sock_put(csk);
477 struct cxgbi_sock *csk = ctx;
484 "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
485 atid, atid, csk, csk->state, csk->flags, rcv_isn);
487 cxgbi_sock_get(csk);
488 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
489 csk->tid = tid;
490 cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);
492 free_atid(csk);
494 csk->rss_qid = G_QNUM(ntohs(skb->csum));
496 spin_lock_bh(&csk->lock);
497 if (csk->retry_timer.function) {
498 del_timer(&csk->retry_timer);
499 csk->retry_timer.function = NULL;
502 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
503 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
504 csk, csk->state, csk->flags, csk->tid);
506 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
507 if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
508 csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);
510 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
512 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
514 send_abort_req(csk);
516 if (skb_queue_len(&csk->write_queue))
517 push_tx_frames(csk, 1);
518 cxgbi_conn_tx_open(csk);
521 spin_unlock_bh(&csk->lock);
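
Lines 506-508 seed copied_seq/rcv_wup/rcv_nxt from the receive ISN and then pull rcv_wup back when the software receive window is larger than what the act-open request's RCV_BUFSIZ field could advertise, presumably so the excess gets handed back to the hardware through later RX_DATA_ACK credit returns. A worked example of just that arithmetic follows; RCV_BUFSIZ_CAP stands in for (M_RCV_BUFSIZ << 10) and its value here is an assumption for illustration, not the cxgb3 field's real maximum.

#include <stdio.h>

/* Sequence numbers wrap, so the unsigned arithmetic is intentional. */
#define RCV_BUFSIZ_CAP	(64 * 1024)	/* assumed hardware-advertisable cap */

int main(void)
{
	unsigned int rcv_isn = 1000;		/* from the ESTABLISH message */
	unsigned int rcv_win = 256 * 1024;	/* software receive window */
	unsigned int rcv_wup = rcv_isn;

	if (rcv_win > RCV_BUFSIZ_CAP)
		rcv_wup -= rcv_win - RCV_BUFSIZ_CAP;	/* back off by the excess */

	/* 256 KB - 64 KB = 192 KB of window the hardware could not be told
	 * about at act-open time; it gets credited back later. */
	printf("rcv_wup backed off by %u bytes\n", rcv_isn - rcv_wup);
	return 0;
}
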
550 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
554 "csk 0x%p,%u,0x%lx,%u.\n",
555 csk, csk->state, csk->flags, csk->tid);
557 cxgbi_sock_get(csk);
558 spin_lock_bh(&csk->lock);
561 cxgbi_sock_fail_act_open(csk, -ENOMEM);
563 skb->sk = (struct sock *)csk;
565 send_act_open_req(csk, skb, csk->l2t);
567 spin_unlock_bh(&csk->lock);
568 cxgbi_sock_put(csk);
573 struct cxgbi_sock *csk = ctx;
576 pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
577 csk, csk->state, csk->flags, csk->atid, rpl->status,
578 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
579 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
586 cxgbi_sock_get(csk);
587 spin_lock_bh(&csk->lock);
589 csk->retry_timer.function != act_open_retry_timer) {
590 csk->retry_timer.function = act_open_retry_timer;
591 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
593 cxgbi_sock_fail_act_open(csk,
596 spin_unlock_bh(&csk->lock);
597 cxgbi_sock_put(csk);
608 struct cxgbi_sock *csk = ctx;
611 "csk 0x%p,%u,0x%lx,%u.\n",
612 csk, csk->state, csk->flags, csk->tid);
614 cxgbi_sock_rcv_peer_close(csk);
626 struct cxgbi_sock *csk = ctx;
630 "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
631 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));
633 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
644 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
650 return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
664 struct cxgbi_sock *csk = ctx;
668 "csk 0x%p,%u,0x%lx,%u.\n",
669 csk, csk->state, csk->flags, csk->tid);
676 cxgbi_sock_get(csk);
677 spin_lock_bh(&csk->lock);
679 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
680 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
681 cxgbi_sock_set_state(csk, CTP_ABORTING);
685 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
686 send_abort_rpl(csk, rst_status);
688 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
689 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
690 cxgbi_sock_closed(csk);
694 spin_unlock_bh(&csk->lock);
695 cxgbi_sock_put(csk);
711 struct cxgbi_sock *csk = ctx;
714 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
715 rpl->status, csk, csk ? csk->state : 0,
716 csk ? csk->flags : 0UL);
732 if (csk)
733 cxgbi_sock_rcv_abort_rpl(csk);
746 struct cxgbi_sock *csk = ctx;
755 "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
756 csk, csk->state, csk->flags, csk->tid, skb, skb->len);
758 spin_lock_bh(&csk->lock);
760 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
762 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
763 csk, csk->state, csk->flags, csk->tid);
764 if (csk->state != CTP_ABORTING)
780 csk->cdev->ports[csk->port_id]->name, csk->tid,
790 csk->cdev->ports[csk->port_id]->name, csk->tid,
801 "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
802 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
815 csk->cdev->ports[csk->port_id]->name,
816 csk->tid, sizeof(data_cpl), skb->len, err);
827 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
829 __skb_queue_tail(&csk->receive_queue, skb);
830 cxgbi_conn_pdu_ready(csk);
832 spin_unlock_bh(&csk->lock);
836 send_abort_req(csk);
838 spin_unlock_bh(&csk->lock);
850 struct cxgbi_sock *csk = ctx;
854 "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
855 csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));
857 cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
866 static int alloc_cpls(struct cxgbi_sock *csk)
868 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
870 if (!csk->cpl_close)
872 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
874 if (!csk->cpl_abort_req)
877 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
879 if (!csk->cpl_abort_rpl)
885 cxgbi_sock_free_cpl_skbs(csk);
889 static void l2t_put(struct cxgbi_sock *csk)
891 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
893 if (csk->l2t) {
894 l2t_release(t3dev, csk->l2t);
895 csk->l2t = NULL;
896 cxgbi_sock_put(csk);
904 static void release_offload_resources(struct cxgbi_sock *csk)
906 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
909 "csk 0x%p,%u,0x%lx,%u.\n",
910 csk, csk->state, csk->flags, csk->tid);
912 csk->rss_qid = 0;
913 cxgbi_sock_free_cpl_skbs(csk);
915 if (csk->wr_cred != csk->wr_max_cred) {
916 cxgbi_sock_purge_wr_queue(csk);
917 cxgbi_sock_reset_wr_list(csk);
919 l2t_put(csk);
920 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
921 free_atid(csk);
922 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
923 cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
924 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
925 cxgbi_sock_put(csk);
927 csk->dst = NULL;
928 csk->cdev = NULL;
953 static int init_act_open(struct cxgbi_sock *csk)
955 struct dst_entry *dst = csk->dst;
956 struct cxgbi_device *cdev = csk->cdev;
958 struct net_device *ndev = cdev->ports[csk->port_id];
959 struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
964 "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
968 csk->saddr.sin_addr.s_addr = chba->ipv4addr;
970 csk->rss_qid = 0;
971 csk->l2t = t3_l2t_get(t3dev, dst, ndev,
972 &csk->daddr.sin_addr.s_addr);
973 if (!csk->l2t) {
977 cxgbi_sock_get(csk);
979 csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
980 if (csk->atid < 0) {
985 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
986 cxgbi_sock_get(csk);
993 skb->sk = (struct sock *)csk;
995 csk->snd_win = cxgb3i_snd_win;
996 csk->rcv_win = cxgb3i_rcv_win;
998 csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
999 csk->wr_una_cred = 0;
1000 csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
1001 cxgbi_sock_reset_wr_list(csk);
1002 csk->err = 0;
1005 "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
1006 csk, csk->state, csk->flags,
1007 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
1008 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
1010 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1011 send_act_open_req(csk, skb, csk->l2t);
1015 cxgb3_free_atid(t3dev, csk->atid);
1017 cxgbi_sock_put(csk);
1018 l2t_release(t3dev, csk->l2t);
1019 csk->l2t = NULL;
1096 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1153 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1162 "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
1177 cxgb3_ofld_send(csk->cdev->lldev, skb);
1183 * @csk: cxgb tcp socket
1189 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1198 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
1213 cxgb3_ofld_send(csk->cdev->lldev, skb);