Lines matching refs: csk

156 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
158 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
161 unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
167 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
168 req->local_port = csk->saddr.sin_port;
169 req->peer_port = csk->daddr.sin_port;
170 req->local_ip = csk->saddr.sin_addr.s_addr;
171 req->peer_ip = csk->daddr.sin_addr.s_addr;
174 V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
177 V_RCV_BUFSIZ(csk->rcv_win >> 10));
180 "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
181 csk, csk->state, csk->flags, csk->atid,
184 csk->mss_idx, e->idx, e->smt_idx);
186 l2t_send(csk->cdev->lldev, skb, csk->l2t);
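The active-open request above scales its advertised window: the wscale at line 161 is derived from the receive window, and the opt0 field at line 177 carries that window in 1KB units. A minimal userspace sketch of the window-scale computation, assuming the usual convention of finding the smallest shift (capped at 14) that lets a 16-bit window cover rcv_win; compute_wscale here is illustrative, not the driver's exact helper:

#include <stdio.h>

/* Smallest shift such that a 16-bit window (65535 << wscale) covers the
 * receive window, capped at 14 per RFC 7323. Illustrative helper. */
static unsigned int compute_wscale(unsigned int rcv_win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535U << wscale) < rcv_win)
		wscale++;
	return wscale;
}

int main(void)
{
	/* a 128KB receive window needs a scale factor of 2 */
	printf("wscale(131072) = %u\n", compute_wscale(131072));
	return 0;
}
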
200 static void send_close_req(struct cxgbi_sock *csk)
202 struct sk_buff *skb = csk->cpl_close;
204 unsigned int tid = csk->tid;
207 "csk 0x%p,%u,0x%lx,%u.\n",
208 csk, csk->state, csk->flags, csk->tid);
210 csk->cpl_close = NULL;
214 req->rsvd = htonl(csk->write_seq);
216 cxgbi_sock_skb_entail(csk, skb);
217 if (csk->state >= CTP_ESTABLISHED)
218 push_tx_frames(csk, 1);
239 static void send_abort_req(struct cxgbi_sock *csk)
241 struct sk_buff *skb = csk->cpl_abort_req;
244 if (unlikely(csk->state == CTP_ABORTING || !skb))
246 cxgbi_sock_set_state(csk, CTP_ABORTING);
247 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
249 cxgbi_sock_purge_write_queue(csk);
251 csk->cpl_abort_req = NULL;
256 req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
257 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
258 req->rsvd0 = htonl(csk->snd_nxt);
259 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
263 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
264 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
267 l2t_send(csk->cdev->lldev, skb, csk->l2t);
275 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
277 struct sk_buff *skb = csk->cpl_abort_rpl;
281 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
282 csk, csk->state, csk->flags, csk->tid, rst_status);
284 csk->cpl_abort_rpl = NULL;
287 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
288 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
290 cxgb3_ofld_send(csk->cdev->lldev, skb);
298 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
305 "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
306 csk, csk->state, csk->flags, csk->tid, credits, dack);
310 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
315 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
319 cxgb3_ofld_send(csk->cdev->lldev, skb);
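send_rx_credits (lines 298-319) only builds and sends the CPL_RX_DATA_ACK; how many credits to return is decided by its caller. A simplified model of that decision, assuming the common pattern of advertising freed window space once it crosses a threshold; the threshold, structure, and function below are illustrative, not the cxgbi implementation:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model: freed receive-window space (copied_seq - rcv_wup)
 * is advertised back to the adapter once it is worth an ACK, and
 * rcv_wup then advances by the credits actually carried. */
struct rx_state {
	uint32_t copied_seq;	/* data consumed by the upper layer */
	uint32_t rcv_wup;	/* right edge already advertised to HW */
	uint32_t rcv_win;	/* software receive window */
};

static uint32_t maybe_return_credits(struct rx_state *s)
{
	uint32_t pending = s->copied_seq - s->rcv_wup;

	if (pending < s->rcv_win / 4)	/* placeholder threshold */
		return 0;
	s->rcv_wup += pending;		/* assume the ACK carried them all */
	return pending;
}

int main(void)
{
	struct rx_state s = { .copied_seq = 70000, .rcv_wup = 4096,
			      .rcv_win = 262144 };

	printf("credits returned: %u\n", maybe_return_credits(&s));
	return 0;
}
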
351 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
355 struct l2t_entry *l2t = csk->l2t;
361 req->wr_lo = htonl(V_WR_TID(csk->tid));
366 V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
367 req->sndseq = htonl(csk->snd_nxt);
370 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
372 V_TX_CPU_IDX(csk->rss_qid));
374 req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
375 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
393 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
398 if (unlikely(csk->state < CTP_ESTABLISHED ||
399 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
401 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
402 csk, csk->state, csk->flags, csk->tid);
406 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
416 if (csk->wr_cred < wrs_needed) {
418 "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
419 csk, skb->len, skb->data_len, frags,
420 wrs_needed, csk->wr_cred);
424 __skb_unlink(skb, &csk->write_queue);
427 csk->wr_cred -= wrs_needed;
428 csk->wr_una_cred += wrs_needed;
429 cxgbi_sock_enqueue_wr(csk, skb);
432 "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
434 csk, skb->len, skb->data_len, frags, skb->csum,
435 csk->wr_cred, csk->wr_una_cred);
439 csk->wr_una_cred == wrs_needed) ||
440 csk->wr_una_cred >= csk->wr_max_cred / 2) {
442 csk->wr_una_cred = 0;
445 make_tx_data_wr(csk, skb, len, req_completion);
446 csk->snd_nxt += len;
451 "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
452 csk, csk->tid, skb);
454 l2t_send(csk->cdev->lldev, skb, csk->l2t);
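push_tx_frames (lines 393-454) is the heart of the TX path: each queued skb is charged a number of work-request credits, unacknowledged credits accumulate in wr_una_cred, and a completion is requested once half of wr_max_cred is in flight so credits flow back. A self-contained sketch of just that accounting, with a made-up per-skb cost model standing in for the real fragment math:

#include <stdio.h>

#define WR_MAX_CRED	8	/* stand-in budget, small so the example hits the threshold */

struct fake_skb {
	unsigned int frags;	/* number of page fragments */
};

/* Skbs with many fragments can cost more than one work request; this
 * cost model is made up, only the accounting around it mirrors
 * push_tx_frames(). */
static unsigned int wrs_needed(const struct fake_skb *skb)
{
	return 1 + skb->frags / 3;
}

int main(void)
{
	struct fake_skb queue[] = { { .frags = 1 }, { .frags = 7 }, { .frags = 2 } };
	unsigned int wr_cred = WR_MAX_CRED, wr_una_cred = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int need = wrs_needed(&queue[i]);
		int want_completion;

		if (wr_cred < need)	/* out of credits: stop pushing */
			break;
		wr_cred -= need;
		wr_una_cred += need;
		/* request a completion once half the budget is in flight,
		 * so acked credits flow back before the queue stalls */
		want_completion = wr_una_cred >= WR_MAX_CRED / 2;
		printf("skb %u: cost %u, cred %u, una %u, completion %d\n",
		       i, need, wr_cred, wr_una_cred, want_completion);
		if (want_completion)
			wr_una_cred = 0;
	}
	return 0;
}
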
465 static inline void free_atid(struct cxgbi_sock *csk)
467 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
468 cxgb3_free_atid(csk->cdev->lldev, csk->atid);
469 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
470 cxgbi_sock_put(csk);
476 struct cxgbi_sock *csk = ctx;
483 "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
484 atid, tid, csk, csk->state, csk->flags, rcv_isn);
486 cxgbi_sock_get(csk);
487 cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
488 csk->tid = tid;
489 cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);
491 free_atid(csk);
493 csk->rss_qid = G_QNUM(ntohs(skb->csum));
495 spin_lock_bh(&csk->lock);
496 if (csk->retry_timer.function) {
497 del_timer(&csk->retry_timer);
498 csk->retry_timer.function = NULL;
501 if (unlikely(csk->state != CTP_ACTIVE_OPEN))
502 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
503 csk, csk->state, csk->flags, csk->tid);
505 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
506 if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
507 csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);
509 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
511 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
513 send_abort_req(csk);
515 if (skb_queue_len(&csk->write_queue))
516 push_tx_frames(csk, 1);
517 cxgbi_conn_tx_open(csk);
520 spin_unlock_bh(&csk->lock);
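The ESTABLISHED handler (lines 505-507) pulls rcv_wup back when the software receive window exceeds what the CPL's RCV_BUFSIZ field could advertise, so the first credit return makes up the shortfall. A worked example of that adjustment; the field maximum below is a placeholder, the real M_RCV_BUFSIZ comes from the T3 CPL headers:

#include <stdio.h>

/* Placeholder field maximum; the CPL carries the window in 1KB units. */
#define FAKE_M_RCV_BUFSIZ	0x3FFU

int main(void)
{
	unsigned int rcv_win = 2 * 1024 * 1024;		/* 2MB software window */
	unsigned int hw_max = FAKE_M_RCV_BUFSIZ << 10;	/* what the CPL could carry */
	unsigned int rcv_isn = 0x10000000;
	unsigned int rcv_wup = rcv_isn;

	/* The CPL advertised at most hw_max bytes, so back rcv_wup up by the
	 * shortfall; the first RX credit return then makes up the difference. */
	if (rcv_win > hw_max)
		rcv_wup -= rcv_win - hw_max;

	printf("rcv_wup pulled back by %u bytes\n", rcv_isn - rcv_wup);
	return 0;
}
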
549 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
553 "csk 0x%p,%u,0x%lx,%u.\n",
554 csk, csk->state, csk->flags, csk->tid);
556 cxgbi_sock_get(csk);
557 spin_lock_bh(&csk->lock);
560 cxgbi_sock_fail_act_open(csk, -ENOMEM);
562 skb->sk = (struct sock *)csk;
564 send_act_open_req(csk, skb, csk->l2t);
566 spin_unlock_bh(&csk->lock);
567 cxgbi_sock_put(csk);
572 struct cxgbi_sock *csk = ctx;
575 pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
576 csk, csk->state, csk->flags, csk->atid, rpl->status,
577 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
578 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
585 cxgbi_sock_get(csk);
586 spin_lock_bh(&csk->lock);
588 csk->retry_timer.function != act_open_retry_timer) {
589 csk->retry_timer.function = act_open_retry_timer;
590 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
592 cxgbi_sock_fail_act_open(csk,
595 spin_unlock_bh(&csk->lock);
596 cxgbi_sock_put(csk);
607 struct cxgbi_sock *csk = ctx;
610 "csk 0x%p,%u,0x%lx,%u.\n",
611 csk, csk->state, csk->flags, csk->tid);
613 cxgbi_sock_rcv_peer_close(csk);
625 struct cxgbi_sock *csk = ctx;
629 "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
630 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));
632 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
643 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
649 return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
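abort_status_to_errno (lines 643-649) translates the firmware's abort reason into an errno; the listing only shows the connection-reset branch, which becomes -EPIPE once the connection is past ESTABLISHED and -ECONNRESET otherwise. A hedged sketch of that kind of mapping: the reason codes and the non-reset branches are placeholders, only the reset rule is taken from the listing:

#include <errno.h>
#include <stdio.h>

/* Placeholder reason codes; the driver switches on the firmware's
 * CPL_ERR_* values. The timeout and fallback branches are assumptions. */
enum { ERR_CONN_RESET = 1, ERR_XMIT_TIMEDOUT = 2, ERR_CONN_TIMEDOUT = 3 };

static int abort_reason_to_errno(int reason, int past_established)
{
	switch (reason) {
	case ERR_CONN_RESET:
		/* a reset after the handshake reads as a broken pipe */
		return past_established ? -EPIPE : -ECONNRESET;
	case ERR_XMIT_TIMEDOUT:
	case ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("reset before est: %d, after est: %d\n",
	       abort_reason_to_errno(ERR_CONN_RESET, 0),
	       abort_reason_to_errno(ERR_CONN_RESET, 1));
	return 0;
}
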
663 struct cxgbi_sock *csk = ctx;
667 "csk 0x%p,%u,0x%lx,%u.\n",
668 csk, csk->state, csk->flags, csk->tid);
675 cxgbi_sock_get(csk);
676 spin_lock_bh(&csk->lock);
678 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
679 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
680 cxgbi_sock_set_state(csk, CTP_ABORTING);
684 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
685 send_abort_rpl(csk, rst_status);
687 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
688 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
689 cxgbi_sock_closed(csk);
693 spin_unlock_bh(&csk->lock);
694 cxgbi_sock_put(csk);
710 struct cxgbi_sock *csk = ctx;
713 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
714 rpl->status, csk, csk ? csk->state : 0,
715 csk ? csk->flags : 0UL);
731 if (csk)
732 cxgbi_sock_rcv_abort_rpl(csk);
745 struct cxgbi_sock *csk = ctx;
754 "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
755 csk, csk->state, csk->flags, csk->tid, skb, skb->len);
757 spin_lock_bh(&csk->lock);
759 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
761 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
762 csk, csk->state, csk->flags, csk->tid);
763 if (csk->state != CTP_ABORTING)
779 csk->cdev->ports[csk->port_id]->name, csk->tid,
789 csk->cdev->ports[csk->port_id]->name, csk->tid,
800 "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
801 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
814 csk->cdev->ports[csk->port_id]->name,
815 csk->tid, sizeof(data_cpl), skb->len, err);
826 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
828 __skb_queue_tail(&csk->receive_queue, skb);
829 cxgbi_conn_pdu_ready(csk);
831 spin_unlock_bh(&csk->lock);
835 send_abort_req(csk);
837 spin_unlock_bh(&csk->lock);
849 struct cxgbi_sock *csk = ctx;
853 "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
854 csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));
856 cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
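The WR-ACK handler (line 856) hands the hardware-reported credit count and snd_una to the generic cxgbi layer, which replenishes wr_cred and retires completed work requests. An illustrative model of that replenishment, assuming in-flight WRs are tracked FIFO with their credit cost; none of these names are the driver's:

#include <stdio.h>

/* Illustrative FIFO of in-flight work requests and their credit cost;
 * the real tracking lives in the generic cxgbi layer. */
struct pending_wr {
	unsigned int credits;
};

/* Return acked credits to the connection budget and retire the work
 * requests they fully cover. */
static void rcv_wr_ack(unsigned int acked, struct pending_wr *fifo,
		       unsigned int *head, unsigned int tail,
		       unsigned int *wr_cred, unsigned int *wr_una_cred)
{
	*wr_cred += acked;
	*wr_una_cred -= acked;

	while (*head < tail && fifo[*head].credits <= acked) {
		acked -= fifo[*head].credits;
		(*head)++;
	}
}

int main(void)
{
	struct pending_wr fifo[] = { { 2 }, { 1 }, { 3 } };
	unsigned int head = 0, wr_cred = 58, wr_una_cred = 6;

	rcv_wr_ack(3, fifo, &head, 3, &wr_cred, &wr_una_cred);
	printf("cred %u, una %u, retired %u WRs\n", wr_cred, wr_una_cred, head);
	return 0;
}
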
865 static int alloc_cpls(struct cxgbi_sock *csk)
867 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
869 if (!csk->cpl_close)
871 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
873 if (!csk->cpl_abort_req)
876 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
878 if (!csk->cpl_abort_rpl)
884 cxgbi_sock_free_cpl_skbs(csk);
888 static void l2t_put(struct cxgbi_sock *csk)
890 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
892 if (csk->l2t) {
893 l2t_release(t3dev, csk->l2t);
894 csk->l2t = NULL;
895 cxgbi_sock_put(csk);
903 static void release_offload_resources(struct cxgbi_sock *csk)
905 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
908 "csk 0x%p,%u,0x%lx,%u.\n",
909 csk, csk->state, csk->flags, csk->tid);
911 csk->rss_qid = 0;
912 cxgbi_sock_free_cpl_skbs(csk);
914 if (csk->wr_cred != csk->wr_max_cred) {
915 cxgbi_sock_purge_wr_queue(csk);
916 cxgbi_sock_reset_wr_list(csk);
918 l2t_put(csk);
919 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
920 free_atid(csk);
921 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
922 cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
923 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
924 cxgbi_sock_put(csk);
926 csk->dst = NULL;
927 csk->cdev = NULL;
952 static int init_act_open(struct cxgbi_sock *csk)
954 struct dst_entry *dst = csk->dst;
955 struct cxgbi_device *cdev = csk->cdev;
957 struct net_device *ndev = cdev->ports[csk->port_id];
958 struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
963 "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
967 csk->saddr.sin_addr.s_addr = chba->ipv4addr;
969 csk->rss_qid = 0;
970 csk->l2t = t3_l2t_get(t3dev, dst, ndev,
971 &csk->daddr.sin_addr.s_addr);
972 if (!csk->l2t) {
976 cxgbi_sock_get(csk);
978 csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
979 if (csk->atid < 0) {
984 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
985 cxgbi_sock_get(csk);
992 skb->sk = (struct sock *)csk;
994 csk->snd_win = cxgb3i_snd_win;
995 csk->rcv_win = cxgb3i_rcv_win;
997 csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
998 csk->wr_una_cred = 0;
999 csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
1000 cxgbi_sock_reset_wr_list(csk);
1001 csk->err = 0;
1004 "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
1005 csk, csk->state, csk->flags,
1006 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
1007 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
1009 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1010 send_act_open_req(csk, skb, csk->l2t);
1014 cxgb3_free_atid(t3dev, csk->atid);
1016 cxgbi_sock_put(csk);
1017 l2t_release(t3dev, csk->l2t);
1018 csk->l2t = NULL;
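init_act_open (lines 952-1018) acquires its offload resources in a fixed order, an L2T entry plus a socket reference and then an ATID plus another reference, and the failure path at lines 1014-1018 releases them in reverse. A stand-alone sketch of that acquire/unwind shape using the kernel's usual goto-unwind idiom; every helper here is a stand-in that only reports what it would do:

#include <stdio.h>
#include <stdlib.h>

static int get_l2t(void)    { puts("get l2t entry + sock ref"); return 1; }
static int alloc_atid(void) { puts("alloc atid: fails in this example"); return -1; }
static void put_l2t(void)   { puts("release l2t entry + drop ref"); }

int main(void)
{
	int have_l2t = 0, atid = -1;

	have_l2t = get_l2t();
	if (!have_l2t)
		goto rel_resource;

	atid = alloc_atid();
	if (atid < 0)
		goto rel_resource;

	puts("send active open request");
	return 0;

rel_resource:
	if (atid >= 0)
		puts("free atid + drop ref");	/* not reached in this run */
	if (have_l2t)
		put_l2t();
	return EXIT_FAILURE;
}
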
1095 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1152 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1161 "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
1176 cxgb3_ofld_send(csk->cdev->lldev, skb);
1182 * @csk: cxgb tcp socket
1188 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1197 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
1212 cxgb3_ofld_send(csk->cdev->lldev, skb);
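ddp_setup_conn_digest (lines 1188-1212) pushes the per-connection header/data digest choice to the adapter with an offload send. The sketch below only illustrates the idea of packing the two enables into a small submode mask; the bit positions are hypothetical, not the driver's TCB encoding:

#include <stdio.h>

/* Hypothetical bit layout: the real driver folds the header/data digest
 * enables into the connection's ULP submode via a TCB update. */
#define SUBMODE_HCRC	0x1U
#define SUBMODE_DCRC	0x2U

static unsigned int digest_submode(int hcrc, int dcrc)
{
	return (hcrc ? SUBMODE_HCRC : 0) | (dcrc ? SUBMODE_DCRC : 0);
}

int main(void)
{
	printf("hcrc only -> 0x%x, hcrc+dcrc -> 0x%x\n",
	       digest_submode(1, 0), digest_submode(1, 1));
	return 0;
}
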