Lines matching refs: ep

144 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
163 static void deref_qp(struct c4iw_ep *ep)
165 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
166 clear_bit(QP_REFERENCED, &ep->com.flags);
167 set_bit(QP_DEREFED, &ep->com.history);
170 static void ref_qp(struct c4iw_ep *ep)
172 set_bit(QP_REFERENCED, &ep->com.flags);
173 set_bit(QP_REFED, &ep->com.history);
174 c4iw_qp_add_ref(&ep->com.qp->ibqp);
177 static void start_ep_timer(struct c4iw_ep *ep)
179 pr_debug("ep %p\n", ep);
180 if (timer_pending(&ep->timer)) {
181 pr_err("%s timer already started! ep %p\n",
182 __func__, ep);
185 clear_bit(TIMEOUT, &ep->com.flags);
186 c4iw_get_ep(&ep->com);
187 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
188 add_timer(&ep->timer);
191 static int stop_ep_timer(struct c4iw_ep *ep)
193 pr_debug("ep %p stopping\n", ep);
194 del_timer_sync(&ep->timer);
195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
196 c4iw_put_ep(&ep->com);
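
The two helpers above tie the endpoint timer to the reference count: start_ep_timer() takes a reference before arming the timer, and stop_ep_timer() gives it back at most once, using test_and_set_bit(TIMEOUT) so that a cancel racing an expiry cannot both drop it. A minimal userspace sketch of that idiom with C11 atomics (names are illustrative, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
        atomic_int refcnt;
        atomic_bool timeout_fired;      /* plays the role of the TIMEOUT bit */
};

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                printf("last reference dropped, freeing\n");
}

/* Arming the timer takes a reference so expiry never sees a freed object. */
static void start_timer(struct obj *o)
{
        atomic_store(&o->timeout_fired, false);
        obj_get(o);
        /* ... arm a real timer here ... */
}

/*
 * Canceller and expiry handler both call this; the exchange guarantees
 * exactly one of them drops the timer's reference.
 */
static int stop_timer(struct obj *o)
{
        /* ... cancel the real timer here ... */
        if (!atomic_exchange(&o->timeout_fired, true)) {
                obj_put(o);
                return 0;
        }
        return 1;       /* the reference was already consumed */
}

int main(void)
{
        struct obj o = { .refcnt = 1 };

        start_timer(&o);
        stop_timer(&o); /* drops the timer reference */
        stop_timer(&o); /* no-op: TIMEOUT already set */
        obj_put(&o);    /* drops the initial reference, frees */
        return 0;
}
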
248 static void set_emss(struct c4iw_ep *ep, u16 opt)
250 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
251 ((AF_INET == ep->com.remote_addr.ss_family) ?
254 ep->mss = ep->emss;
256 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
257 if (ep->emss < 128)
258 ep->emss = 128;
259 if (ep->emss & 7)
261 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
262 pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
263 ep->emss);
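
set_emss() converts the peer's advertised MSS index into an effective MSS: take the adapter MTU-table entry, subtract IP and TCP header bytes (more for IPv6), subtract the TCP timestamp option rounded up to 4 bytes when negotiated, and clamp to a floor of 128. The same arithmetic as a standalone sketch, with the header sizes spelled out as assumptions rather than taken from the driver's headers:

#include <stdio.h>

/* Illustrative constants; the driver reads these from its own headers. */
#define HDR_V4  (20 + 20)       /* IPv4 + TCP header bytes */
#define HDR_V6  (40 + 20)       /* IPv6 + TCP header bytes */
#define TS_OPT  12              /* TCPOLEN_TIMESTAMP (10) rounded up to 4 */

static unsigned int effective_mss(unsigned int mtu, int ipv6, int tstamps)
{
        unsigned int emss = mtu - (ipv6 ? HDR_V6 : HDR_V4);

        if (tstamps)
                emss -= TS_OPT;
        if (emss < 128)
                emss = 128;     /* floor, as in set_emss() */
        return emss;
}

int main(void)
{
        /* e.g. a 1500-byte MTU entry, IPv4, timestamps on: 1500-40-12 = 1448 */
        printf("emss = %u\n", effective_mss(1500, 0, 1));
        return 0;
}
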
325 pr_debug("alloc ep %p\n", epc);
330 static void remove_ep_tid(struct c4iw_ep *ep)
334 xa_lock_irqsave(&ep->com.dev->hwtids, flags);
335 __xa_erase(&ep->com.dev->hwtids, ep->hwtid);
336 if (xa_empty(&ep->com.dev->hwtids))
337 wake_up(&ep->com.dev->wait);
338 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
341 static int insert_ep_tid(struct c4iw_ep *ep)
346 xa_lock_irqsave(&ep->com.dev->hwtids, flags);
347 err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
348 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
354 * Atomically look up the ep ptr given the tid and grab a reference on the ep.
358 struct c4iw_ep *ep;
362 ep = xa_load(&dev->hwtids, tid);
363 if (ep)
364 c4iw_get_ep(&ep->com);
366 return ep;
370 * Atomically look up the ep ptr given the stid and grab a reference on the ep.
375 struct c4iw_listen_ep *ep;
379 ep = xa_load(&dev->stids, stid);
380 if (ep)
381 c4iw_get_ep(&ep->com);
383 return ep;
388 struct c4iw_ep *ep;
390 ep = container_of(kref, struct c4iw_ep, com.kref);
391 pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
392 if (test_bit(QP_REFERENCED, &ep->com.flags))
393 deref_qp(ep);
394 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
395 if (ep->com.remote_addr.ss_family == AF_INET6) {
398 &ep->com.local_addr;
401 ep->com.dev->rdev.lldi.ports[0],
405 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
406 ep->com.local_addr.ss_family);
407 dst_release(ep->dst);
408 cxgb4_l2t_release(ep->l2t);
409 kfree_skb(ep->mpa_skb);
411 if (!skb_queue_empty(&ep->com.ep_skb_list))
412 skb_queue_purge(&ep->com.ep_skb_list);
413 c4iw_put_wr_wait(ep->com.wr_waitp);
414 kfree(ep);
417 static void release_ep_resources(struct c4iw_ep *ep)
419 set_bit(RELEASE_RESOURCES, &ep->com.flags);
424 * we have a race where one thread finds the ep ptr just
425 * before the other thread is freeing the ep memory.
427 if (ep->hwtid != -1)
428 remove_ep_tid(ep);
429 c4iw_put_ep(&ep->com);
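
get_ep_from_tid()/get_ep_from_stid() above take their reference inside the locked lookup, and release_ep_resources() erases the tid entry before the final put; together they close the race the comment describes, since once the entry is unpublished no new lookup can return a pointer that is about to be freed. A compact userspace analogue of both halves (a mutex-protected slot plus a plain refcount; hypothetical names, not the driver's API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int refcnt; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table_slot;        /* stand-in for one xarray entry */

/* Atomically look up the entry and grab a reference before unlocking. */
static struct entry *lookup_and_get(void)
{
        pthread_mutex_lock(&table_lock);
        struct entry *e = table_slot;
        if (e)
                e->refcnt++;
        pthread_mutex_unlock(&table_lock);
        return e;
}

static void put_entry(struct entry *e)
{
        pthread_mutex_lock(&table_lock);
        int last = (--e->refcnt == 0);
        pthread_mutex_unlock(&table_lock);
        if (last) {
                printf("freeing entry\n");
                free(e);
        }
}

/* Unpublish first, then drop the table's reference: after the erase no
 * new lookup can find e, so the final put cannot race a lookup. */
static void release_entry(struct entry *e)
{
        pthread_mutex_lock(&table_lock);
        table_slot = NULL;
        pthread_mutex_unlock(&table_lock);
        put_entry(e);
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));
        e->refcnt = 1;
        table_slot = e;

        struct entry *found = lookup_and_get();
        if (found)
                put_entry(found);       /* drop the lookup reference */
        release_entry(e);               /* unpublish, then final put */
        return 0;
}
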
494 struct c4iw_ep *ep;
496 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
497 release_ep_resources(ep);
503 struct c4iw_ep *ep;
505 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
506 c4iw_put_ep(&ep->parent_ep->com);
507 release_ep_resources(ep);
513 * _put_ep_safe() in a safe context to free the ep resources. This is needed
517 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
526 * Save ep in the skb->cb area, after where sched() will save the dev
529 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
530 sched(ep->com.dev, skb);
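
The fragment above stashes the ep pointer in the skb's control-buffer scratch area, two pointer-widths in, past the slot where sched() will store the device pointer; the _put_ep_safe() handlers read it back from the same offset. A sketch of that stash/retrieve symmetry over a plain byte array (the 48-byte size and slot layout are this sketch's assumptions, not the driver's layout):

#include <stdio.h>
#include <string.h>

#define CB_SIZE 48      /* skb->cb is a fixed scratch area; 48 bytes here */

struct fake_skb {
        unsigned char cb[CB_SIZE];
};

/* Earlier slots belong to the scheduler (dev pointer etc.); we use slot 2. */
#define EP_SLOT (2 * sizeof(void *))
_Static_assert(EP_SLOT + sizeof(void *) <= CB_SIZE, "cb too small");

static void stash_ptr(struct fake_skb *skb, void *p)
{
        memcpy(skb->cb + EP_SLOT, &p, sizeof(p));
}

static void *fetch_ptr(const struct fake_skb *skb)
{
        void *p;

        memcpy(&p, skb->cb + EP_SLOT, sizeof(p));
        return p;
}

int main(void)
{
        struct fake_skb skb = { .cb = { 0 } };
        int target = 7;

        stash_ptr(&skb, &target);
        printf("round-trip ok: %d\n", *(int *)fetch_ptr(&skb));
        return 0;
}
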
536 struct c4iw_ep *ep = handle;
539 ep->hwtid);
541 __state_set(&ep->com, DEAD);
542 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
550 struct c4iw_ep *ep = handle;
553 connect_reply_upcall(ep, -EHOSTUNREACH);
554 __state_set(&ep->com, DEAD);
555 if (ep->com.remote_addr.ss_family == AF_INET6) {
557 (struct sockaddr_in6 *)&ep->com.local_addr;
558 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
561 xa_erase_irq(&ep->com.dev->atids, ep->atid);
562 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
563 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
573 struct c4iw_ep *ep = handle;
574 struct c4iw_rdev *rdev = &ep->com.dev->rdev;
582 __state_set(&ep->com, DEAD);
583 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
588 static int send_flowc(struct c4iw_ep *ep)
591 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
592 u16 vlan = ep->l2t->vlan;
614 FW_WR_FLOWID_V(ep->hwtid));
618 (ep->com.dev->rdev.lldi.pf));
620 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
622 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
624 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
626 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
628 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
630 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
632 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
634 flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
642 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
643 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
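
send_flowc() describes the new flow to the firmware as an array of (mnemonic, value) pairs, keyed to the connection by the FLOWID (the hwtid): tx channel, port, ingress queue, send and receive sequence numbers, send buffer, and MSS. A sketch of filling such a pair array; the mnemonic names here are stand-ins for the real FW_FLOWC_MNEM_* codes:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mnemonics standing in for the firmware's codes. */
enum { MNEM_PFVF, MNEM_CH, MNEM_PORT, MNEM_IQID, MNEM_SNDNXT,
       MNEM_RCVNXT, MNEM_SNDBUF, MNEM_MSS, MNEM_MAX };

struct mnemval {
        uint8_t  mnemonic;
        uint32_t val;
};

static void fill_flowc(struct mnemval *mv, uint32_t tx_chan, uint32_t rss_qid,
                       uint32_t snd_seq, uint32_t rcv_seq, uint32_t snd_win,
                       uint32_t emss)
{
        mv[0] = (struct mnemval){ MNEM_PFVF,   0 };
        mv[1] = (struct mnemval){ MNEM_CH,     tx_chan };
        mv[2] = (struct mnemval){ MNEM_PORT,   tx_chan };
        mv[3] = (struct mnemval){ MNEM_IQID,   rss_qid };
        mv[4] = (struct mnemval){ MNEM_SNDNXT, snd_seq };
        mv[5] = (struct mnemval){ MNEM_RCVNXT, rcv_seq };
        mv[6] = (struct mnemval){ MNEM_SNDBUF, snd_win };
        mv[7] = (struct mnemval){ MNEM_MSS,    emss };
}

int main(void)
{
        struct mnemval mv[MNEM_MAX];

        fill_flowc(mv, 1, 17, 1000, 2000, 65535, 1448);
        for (int i = 0; i < MNEM_MAX; i++)
                printf("mnem %u val %u\n", mv[i].mnemonic, mv[i].val);
        return 0;
}
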
646 static int send_halfclose(struct c4iw_ep *ep)
648 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
651 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
655 cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
658 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
661 static void read_tcb(struct c4iw_ep *ep)
671 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
674 INIT_TP_WR(req, ep->hwtid);
675 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
676 req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
679 * keep a ref on the ep so the tcb is not unlocked before this
682 c4iw_get_ep(&ep->com);
683 if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
684 c4iw_put_ep(&ep->com);
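
read_tcb() takes a reference before issuing the asynchronous CPL_GET_TCB and drops it immediately if the send fails, leaving exactly one put outstanding for the reply handler. The "reference across an async request" shape, reduced to a sketch (single-threaded stand-ins, not the driver's API):

#include <stdio.h>

struct obj { int refcnt; };

static void get(struct obj *o) { o->refcnt++; }
static void put(struct obj *o) { if (--o->refcnt == 0) printf("freed\n"); }

static int submit_async(struct obj *o, int fail)
{
        (void)o;
        return fail ? -1 : 0;   /* stand-in for the offload send */
}

/* Take a ref for the in-flight request; undo it only if submission fails. */
static void issue_read(struct obj *o, int fail)
{
        get(o);
        if (submit_async(o, fail))
                put(o);         /* no reply will come; drop the ref now */
}

/* The reply handler owns the other put. */
static void on_reply(struct obj *o)
{
        put(o);
}

int main(void)
{
        struct obj o = { .refcnt = 1 };

        issue_read(&o, 0);      /* success: reply path will put */
        on_reply(&o);
        issue_read(&o, 1);      /* failure: put happens inline */
        put(&o);                /* initial reference */
        return 0;
}
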
687 static int send_abort_req(struct c4iw_ep *ep)
690 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
692 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
696 cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
697 ep, abort_arp_failure);
699 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
702 static int send_abort(struct c4iw_ep *ep)
704 if (!ep->com.qp || !ep->com.qp->srq) {
705 send_abort_req(ep);
708 set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
709 read_tcb(ep);
713 static int send_connect(struct c4iw_ep *ep)
728 &ep->com.local_addr;
730 &ep->com.remote_addr;
732 &ep->com.local_addr;
734 &ep->com.remote_addr;
736 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
741 netdev = ep->com.dev->rdev.lldi.ports[0];
762 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
766 pr_debug("ep %p atid %u\n", ep, ep->atid);
773 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
775 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
777 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
784 win = ep->rcv_win >> 10;
793 L2T_IDX_V(ep->l2t->idx) |
794 TX_CHAN_V(ep->tx_chan) |
795 SMAC_SEL_V(ep->smac_idx) |
796 DSCP_V(ep->tos >> 2) |
801 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
817 params = cxgb4_select_ntuple(netdev, ep->l2t);
819 if (ep->com.remote_addr.ss_family == AF_INET6)
820 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
823 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
825 if (ep->com.remote_addr.ss_family == AF_INET) {
850 ((ep->rss_qid<<14) | ep->atid)));
857 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
861 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
900 ((ep->rss_qid<<14)|ep->atid)));
909 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
911 ep->l2t));
914 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
931 set_bit(ACT_OPEN_REQ, &ep->com.history);
932 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
934 if (ret && ep->com.remote_addr.ss_family == AF_INET6)
935 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
940 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
948 pr_debug("ep %p tid %u pd_len %d\n",
949 ep, ep->hwtid, ep->plen);
951 mpalen = sizeof(*mpa) + ep->plen;
957 connect_reply_upcall(ep, -ENOMEM);
960 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
968 FW_WR_FLOWID_V(ep->hwtid) |
983 ep->mpa_attr.recv_marker_enabled = 1;
985 ep->mpa_attr.recv_marker_enabled = 0;
990 mpa->private_data_size = htons(ep->plen);
993 ep->tried_with_mpa_v1 = 1;
994 ep->retry_with_mpa_v1 = 0;
1001 pr_debug("initiator ird %u ord %u\n", ep->ird,
1002 ep->ord);
1003 mpa_v2_params.ird = htons((u16)ep->ird);
1004 mpa_v2_params.ord = htons((u16)ep->ord);
1018 if (ep->plen)
1021 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1023 if (ep->plen)
1025 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1034 ep->mpa_skb = skb;
1035 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1038 start_ep_timer(ep);
1039 __state_set(&ep->com, MPA_REQ_SENT);
1040 ep->mpa_attr.initiator = 1;
1041 ep->snd_seq += mpalen;
1045 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1053 pr_debug("ep %p tid %u pd_len %d\n",
1054 ep, ep->hwtid, ep->plen);
1057 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1066 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1074 FW_WR_FLOWID_V(ep->hwtid) |
1085 mpa->revision = ep->mpa_attr.version;
1088 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1093 mpa_v2_params.ird = htons(((u16)ep->ird) |
1096 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1105 if (ep->plen)
1118 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1120 ep->mpa_skb = skb;
1121 ep->snd_seq += mpalen;
1122 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1125 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1133 pr_debug("ep %p tid %u pd_len %d\n",
1134 ep, ep->hwtid, ep->plen);
1137 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1146 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1154 FW_WR_FLOWID_V(ep->hwtid) |
1165 if (ep->mpa_attr.crc_enabled)
1167 if (ep->mpa_attr.recv_marker_enabled)
1169 mpa->revision = ep->mpa_attr.version;
1172 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1177 mpa_v2_params.ird = htons((u16)ep->ird);
1178 mpa_v2_params.ord = htons((u16)ep->ord);
1179 if (peer2peer && (ep->mpa_attr.p2p_type !=
1194 if (ep->plen)
1208 ep->mpa_skb = skb;
1209 __state_set(&ep->com, MPA_REP_SENT);
1210 ep->snd_seq += mpalen;
1211 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1216 struct c4iw_ep *ep;
1224 ep = lookup_atid(t, atid);
1226 pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
1229 mutex_lock(&ep->com.mutex);
1230 dst_confirm(ep->dst);
1233 ep->hwtid = tid;
1234 cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
1235 insert_ep_tid(ep);
1237 ep->snd_seq = be32_to_cpu(req->snd_isn);
1238 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1239 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1241 set_emss(ep, tcp_opt);
1244 xa_erase_irq(&ep->com.dev->atids, atid);
1246 set_bit(ACT_ESTAB, &ep->com.history);
1249 ret = send_flowc(ep);
1252 if (ep->retry_with_mpa_v1)
1253 ret = send_mpa_req(ep, skb, 1);
1255 ret = send_mpa_req(ep, skb, mpa_rev);
1258 mutex_unlock(&ep->com.mutex);
1261 mutex_unlock(&ep->com.mutex);
1262 connect_reply_upcall(ep, -ENOMEM);
1263 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1267 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1271 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1275 if (ep->com.cm_id) {
1276 pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
1277 ep, ep->com.cm_id, ep->hwtid);
1278 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1279 deref_cm_id(&ep->com);
1280 set_bit(CLOSE_UPCALL, &ep->com.history);
1284 static void peer_close_upcall(struct c4iw_ep *ep)
1288 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1291 if (ep->com.cm_id) {
1292 pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
1293 ep, ep->com.cm_id, ep->hwtid);
1294 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1295 set_bit(DISCONN_UPCALL, &ep->com.history);
1299 static void peer_abort_upcall(struct c4iw_ep *ep)
1303 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1307 if (ep->com.cm_id) {
1308 pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
1309 ep->com.cm_id, ep->hwtid);
1310 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1311 deref_cm_id(&ep->com);
1312 set_bit(ABORT_UPCALL, &ep->com.history);
1316 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1320 pr_debug("ep %p tid %u status %d\n",
1321 ep, ep->hwtid, status);
1325 memcpy(&event.local_addr, &ep->com.local_addr,
1326 sizeof(ep->com.local_addr));
1327 memcpy(&event.remote_addr, &ep->com.remote_addr,
1328 sizeof(ep->com.remote_addr));
1331 if (!ep->tried_with_mpa_v1) {
1333 event.ord = ep->ird;
1334 event.ird = ep->ord;
1335 event.private_data_len = ep->plen -
1337 event.private_data = ep->mpa_pkt +
1342 event.ord = cur_max_read_depth(ep->com.dev);
1343 event.ird = cur_max_read_depth(ep->com.dev);
1344 event.private_data_len = ep->plen;
1345 event.private_data = ep->mpa_pkt +
1350 pr_debug("ep %p tid %u status %d\n", ep,
1351 ep->hwtid, status);
1352 set_bit(CONN_RPL_UPCALL, &ep->com.history);
1353 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1356 deref_cm_id(&ep->com);
1359 static int connect_request_upcall(struct c4iw_ep *ep)
1364 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1367 memcpy(&event.local_addr, &ep->com.local_addr,
1368 sizeof(ep->com.local_addr));
1369 memcpy(&event.remote_addr, &ep->com.remote_addr,
1370 sizeof(ep->com.remote_addr));
1371 event.provider_data = ep;
1372 if (!ep->tried_with_mpa_v1) {
1374 event.ord = ep->ord;
1375 event.ird = ep->ird;
1376 event.private_data_len = ep->plen -
1378 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1382 event.ord = cur_max_read_depth(ep->com.dev);
1383 event.ird = cur_max_read_depth(ep->com.dev);
1384 event.private_data_len = ep->plen;
1385 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1387 c4iw_get_ep(&ep->com);
1388 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1391 c4iw_put_ep(&ep->com);
1392 set_bit(CONNREQ_UPCALL, &ep->com.history);
1393 c4iw_put_ep(&ep->parent_ep->com);
1397 static void established_upcall(struct c4iw_ep *ep)
1401 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1404 event.ird = ep->ord;
1405 event.ord = ep->ird;
1406 if (ep->com.cm_id) {
1407 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1408 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1409 set_bit(ESTAB_UPCALL, &ep->com.history);
1413 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1419 pr_debug("ep %p tid %u credits %u\n",
1420 ep, ep->hwtid, credits);
1432 if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
1433 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
1438 cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
1441 c4iw_ofld_send(&ep->com.dev->rdev, skb);
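
update_rx_credits() returns receive-window credits to the hardware; because the window advertised at connection setup is capped by the width of the RCV_BUFSIZ field, any overage beyond RCV_BUFSIZ_M * 1024 bytes is folded into the credit return, as the fragment above shows. Worked arithmetic under an assumed 10-bit field:

#include <stdio.h>

#define RCV_BUFSIZ_M 0x3ff      /* assumed 10-bit field: up to 1023 * 1KB */

static unsigned int credits_to_return(unsigned int credits,
                                      unsigned int rcv_win)
{
        /* Window beyond what RCV_BUFSIZ could express is repaid here. */
        if (rcv_win > RCV_BUFSIZ_M * 1024)
                credits += rcv_win - RCV_BUFSIZ_M * 1024;
        return credits;
}

int main(void)
{
        /* e.g. a 2 MB window against the ~1 MB field limit */
        printf("credits = %u\n",
               credits_to_return(4096, 2 * 1024 * 1024));
        return 0;
}
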
1459 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1471 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1477 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1485 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1487 ep->mpa_pkt_len += skb->len;
1492 if (ep->mpa_pkt_len < sizeof(*mpa))
1494 mpa = (struct mpa_message *) ep->mpa_pkt;
1521 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1526 ep->plen = (u8) plen;
1532 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1545 if (stop_ep_timer(ep))
1553 __state_set(&ep->com, FPDU_MODE);
1554 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1555 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1556 ep->mpa_attr.version = mpa->revision;
1557 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1560 ep->mpa_attr.enhanced_rdma_conn =
1562 if (ep->mpa_attr.enhanced_rdma_conn) {
1564 (ep->mpa_pkt + sizeof(*mpa));
1569 pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
1570 resp_ird, resp_ord, ep->ird, ep->ord);
1577 if (ep->ird < resp_ord) {
1579 ep->com.dev->rdev.lldi.max_ordird_qp)
1580 ep->ird = resp_ord;
1583 } else if (ep->ird > resp_ord) {
1584 ep->ird = resp_ord;
1586 if (ep->ord > resp_ird) {
1588 ep->ord = resp_ird;
1594 ep->ird = resp_ord;
1595 ep->ord = resp_ird;
1602 ep->mpa_attr.p2p_type =
1606 ep->mpa_attr.p2p_type =
1612 ep->mpa_attr.p2p_type = p2p_type;
1615 ep->mpa_attr.crc_enabled,
1616 ep->mpa_attr.recv_marker_enabled,
1617 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1618 ep->mpa_attr.p2p_type, p2p_type);
1626 if ((ep->mpa_attr.version == 2) && peer2peer &&
1627 (ep->mpa_attr.p2p_type != p2p_type)) {
1628 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1632 attrs.mpa_attr = ep->mpa_attr;
1633 attrs.max_ird = ep->ird;
1634 attrs.max_ord = ep->ord;
1635 attrs.llp_stream_handle = ep;
1643 err = c4iw_modify_qp(ep->com.qp->rhp,
1644 ep->com.qp, mask, &attrs, 1);
1658 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1677 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1685 stop_ep_timer(ep);
1689 connect_reply_upcall(ep, err);
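
The MPA v2 reply path reconciles local IRD/ORD with what the responder granted: shrink where the peer accepted less, grow ird to cover the peer's ord only when relaxed negotiation and the device limit allow it, and otherwise fall back to mirroring the responder's values, as the ep->ird/ep->ord assignments above suggest. A condensed sketch of that negotiation (the relaxed flag and limit are assumptions standing in for the driver's knobs):

#include <stdio.h>

static const int relaxed = 1;                   /* assumed negotiation mode */
static const unsigned int max_ordird = 128;     /* assumed device limit */

static void negotiate(unsigned int *ird, unsigned int *ord,
                      unsigned int resp_ird, unsigned int resp_ord)
{
        int insufficient = 0;

        if (*ird < resp_ord) {                  /* peer wants more reads in */
                if (relaxed && resp_ord <= max_ordird)
                        *ird = resp_ord;        /* grow to cover the peer */
                else
                        insufficient = 1;
        } else if (*ird > resp_ord) {
                *ird = resp_ord;                /* shrink to what peer uses */
        }
        if (*ord > resp_ird) {
                if (relaxed)
                        *ord = resp_ird;        /* peer can't absorb more */
                else
                        insufficient = 1;
        }
        if (insufficient) {                     /* last resort: mirror peer */
                *ird = resp_ord;
                *ord = resp_ird;
        }
}

int main(void)
{
        unsigned int ird = 8, ord = 16;

        negotiate(&ird, &ord, /*resp_ird=*/4, /*resp_ord=*/32);
        printf("ird=%u ord=%u\n", ird, ord);    /* -> ird=32 ord=4 */
        return 0;
}
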
1705 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1711 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1717 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
1725 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1727 ep->mpa_pkt_len += skb->len;
1733 if (ep->mpa_pkt_len < sizeof(*mpa))
1737 mpa = (struct mpa_message *) ep->mpa_pkt;
1762 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1764 ep->plen = (u8) plen;
1769 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1776 ep->mpa_attr.initiator = 0;
1777 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1778 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1779 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1780 ep->mpa_attr.version = mpa->revision;
1782 ep->tried_with_mpa_v1 = 1;
1783 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1786 ep->mpa_attr.enhanced_rdma_conn =
1788 if (ep->mpa_attr.enhanced_rdma_conn) {
1790 (ep->mpa_pkt + sizeof(*mpa));
1791 ep->ird = ntohs(mpa_v2_params->ird) &
1793 ep->ird = min_t(u32, ep->ird,
1794 cur_max_read_depth(ep->com.dev));
1795 ep->ord = ntohs(mpa_v2_params->ord) &
1797 ep->ord = min_t(u32, ep->ord,
1798 cur_max_read_depth(ep->com.dev));
1800 ep->ird, ep->ord);
1805 ep->mpa_attr.p2p_type =
1809 ep->mpa_attr.p2p_type =
1815 ep->mpa_attr.p2p_type = p2p_type;
1818 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1819 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1820 ep->mpa_attr.p2p_type);
1822 __state_set(&ep->com, MPA_REQ_RCVD);
1825 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1826 if (ep->parent_ep->com.state != DEAD) {
1827 if (connect_request_upcall(ep))
1832 mutex_unlock(&ep->parent_ep->com.mutex);
1836 mutex_unlock(&ep->parent_ep->com.mutex);
1839 (void)stop_ep_timer(ep);
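
Both process_mpa_reply() and process_mpa_request() accumulate stream bytes into ep->mpa_pkt and decide between three outcomes: overflowing the buffer is fatal, an incomplete header (and then incomplete header-plus-private-data) means wait for more, and only a fully arrived message is parsed. That accumulate-and-validate shape, sketched over a toy fixed header (not the real MPA layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_MAX 256

/* A toy fixed header followed by plen bytes of private data. */
struct msg_hdr {
        uint8_t  revision;
        uint8_t  flags;
        uint16_t plen;          /* private-data length, host order here */
};

static unsigned char pkt[PKT_MAX];
static size_t pkt_len;

/* Returns: -1 fatal, 0 need more data, 1 message complete. */
static int feed(const void *data, size_t len)
{
        struct msg_hdr hdr;

        if (pkt_len + len > sizeof(pkt))
                return -1;                      /* overflow is fatal */
        memcpy(pkt + pkt_len, data, len);
        pkt_len += len;

        if (pkt_len < sizeof(hdr))
                return 0;                       /* header incomplete */
        memcpy(&hdr, pkt, sizeof(hdr));
        if (sizeof(hdr) + hdr.plen > sizeof(pkt))
                return -1;                      /* declared length too big */
        if (pkt_len < sizeof(hdr) + hdr.plen)
                return 0;                       /* private data incomplete */
        return 1;                               /* whole message present */
}

int main(void)
{
        struct msg_hdr h = { .revision = 2, .flags = 0, .plen = 4 };

        printf("%d\n", feed(&h, sizeof(h)));    /* 0: waiting for data */
        printf("%d\n", feed("abcd", 4));        /* 1: complete */
        return 0;
}
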
1846 struct c4iw_ep *ep;
1853 ep = get_ep_from_tid(dev, tid);
1854 if (!ep)
1856 pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
1859 mutex_lock(&ep->com.mutex);
1861 switch (ep->com.state) {
1863 update_rx_credits(ep, dlen);
1864 ep->rcv_seq += dlen;
1865 disconnect = process_mpa_reply(ep, skb);
1868 update_rx_credits(ep, dlen);
1869 ep->rcv_seq += dlen;
1870 disconnect = process_mpa_request(ep, skb);
1875 update_rx_credits(ep, dlen);
1878 " qpid %u ep %p state %d tid %u status %d\n",
1879 __func__, ep->com.qp->wq.sq.qid, ep,
1880 ep->com.state, ep->hwtid, status);
1882 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1890 mutex_unlock(&ep->com.mutex);
1892 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1893 c4iw_put_ep(&ep->com);
1897 static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
1901 adapter_type = ep->com.dev->rdev.lldi.adapter_type;
1910 if (ep->com.qp->ibqp.uobject)
1911 t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
1913 c4iw_flush_srqidx(ep->com.qp, srqidx);
1920 struct c4iw_ep *ep;
1925 ep = get_ep_from_tid(dev, tid);
1926 if (!ep) {
1931 if (ep->com.qp && ep->com.qp->srq) {
1933 complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
1936 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1937 mutex_lock(&ep->com.mutex);
1938 switch (ep->com.state) {
1940 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
1941 __state_set(&ep->com, DEAD);
1945 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
1948 mutex_unlock(&ep->com.mutex);
1951 close_complete_upcall(ep, -ECONNRESET);
1952 release_ep_resources(ep);
1954 c4iw_put_ep(&ep->com);
1958 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1975 ep->com.dev->rdev.lldi.ports[0],
1976 ep->l2t));
1977 sin = (struct sockaddr_in *)&ep->com.local_addr;
1980 sin = (struct sockaddr_in *)&ep->com.remote_addr;
1990 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1992 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1999 win = ep->rcv_win >> 10;
2009 L2T_IDX_V(ep->l2t->idx) |
2010 TX_CHAN_V(ep->tx_chan) |
2011 SMAC_SEL_V(ep->smac_idx) |
2012 DSCP_V(ep->tos >> 2) |
2016 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
2019 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
2028 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
2029 set_bit(ACT_OFLD_CONN, &ep->com.history);
2030 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2061 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
2063 ep->snd_win = snd_win;
2064 ep->rcv_win = rcv_win;
2066 ep->snd_win, ep->rcv_win);
2071 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
2102 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2104 if (!ep->l2t) {
2108 ep->mtu = pdev->mtu;
2109 ep->tx_chan = cxgb4_port_chan(pdev);
2110 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2113 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2116 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2117 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2119 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2123 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2125 if (!ep->l2t)
2127 ep->mtu = dst_mtu(dst);
2128 ep->tx_chan = cxgb4_port_chan(pdev);
2129 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2132 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2133 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2136 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2138 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2141 ep->retry_with_mpa_v1 = 0;
2142 ep->tried_with_mpa_v1 = 0;
2154 static int c4iw_reconnect(struct c4iw_ep *ep)
2159 &ep->com.cm_id->m_local_addr;
2161 &ep->com.cm_id->m_remote_addr;
2163 &ep->com.cm_id->m_local_addr;
2165 &ep->com.cm_id->m_remote_addr;
2169 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
2170 c4iw_init_wr_wait(ep->com.wr_waitp);
2181 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2182 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2190 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2191 if (ep->atid == -1) {
2196 err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
2201 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
2202 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2206 raddr->sin_port, ep->com.cm_id->tos);
2210 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2216 ep->com.cm_id->tos,
2221 if (!ep->dst) {
2226 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
2227 ep->com.dev->rdev.lldi.adapter_type,
2228 ep->com.cm_id->tos);
2235 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2236 ep->l2t->idx);
2238 state_set(&ep->com, CONNECTING);
2239 ep->tos = ep->com.cm_id->tos;
2242 err = send_connect(ep);
2246 cxgb4_l2t_release(ep->l2t);
2248 dst_release(ep->dst);
2250 xa_erase_irq(&ep->com.dev->atids, ep->atid);
2252 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2260 connect_reply_upcall(ep, -ECONNRESET);
2262 c4iw_put_ep(&ep->com);
2269 struct c4iw_ep *ep;
2281 ep = lookup_atid(t, atid);
2282 la = (struct sockaddr_in *)&ep->com.local_addr;
2283 ra = (struct sockaddr_in *)&ep->com.remote_addr;
2284 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2285 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2287 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
2293 ep->stats.connect_neg_adv++;
2300 set_bit(ACT_OPEN_RPL, &ep->com.history);
2313 if (ep->com.local_addr.ss_family == AF_INET &&
2315 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2323 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2324 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2325 if (ep->com.remote_addr.ss_family == AF_INET6) {
2328 &ep->com.local_addr;
2330 ep->com.dev->rdev.lldi.ports[0],
2334 xa_erase_irq(&ep->com.dev->atids, atid);
2336 dst_release(ep->dst);
2337 cxgb4_l2t_release(ep->l2t);
2338 c4iw_reconnect(ep);
2343 if (ep->com.local_addr.ss_family == AF_INET) {
2358 connect_reply_upcall(ep, status2errno(status));
2359 state_set(&ep->com, DEAD);
2361 if (ep->com.remote_addr.ss_family == AF_INET6) {
2363 (struct sockaddr_in6 *)&ep->com.local_addr;
2364 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2368 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
2369 ep->com.local_addr.ss_family);
2371 xa_erase_irq(&ep->com.dev->atids, atid);
2373 dst_release(ep->dst);
2374 cxgb4_l2t_release(ep->l2t);
2375 c4iw_put_ep(&ep->com);
2384 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2386 if (!ep) {
2390 pr_debug("ep %p status %d error %d\n", ep,
2392 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2393 c4iw_put_ep(&ep->com);
2402 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2404 if (!ep) {
2408 pr_debug("ep %p\n", ep);
2409 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2410 c4iw_put_ep(&ep->com);
2415 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2425 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2427 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2428 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2430 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
2437 win = ep->rcv_win >> 10;
2445 L2T_IDX_V(ep->l2t->idx) |
2446 TX_CHAN_V(ep->tx_chan) |
2447 SMAC_SEL_V(ep->smac_idx) |
2448 DSCP_V(ep->tos >> 2) |
2452 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2479 INIT_TP_WR(rpl5, ep->hwtid);
2482 INIT_TP_WR(rpl, ep->hwtid);
2485 ep->hwtid));
2502 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2503 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2505 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2541 pr_err("%s - listening ep not in LISTEN\n", __func__);
2555 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2563 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2580 pr_err("%s - failed to allocate ep entry!\n", __func__);
2681 struct c4iw_ep *ep;
2687 ep = get_ep_from_tid(dev, tid);
2688 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2689 ep->snd_seq = be32_to_cpu(req->snd_isn);
2690 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2691 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
2693 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
2695 set_emss(ep, tcp_opt);
2697 dst_confirm(ep->dst);
2698 mutex_lock(&ep->com.mutex);
2699 ep->com.state = MPA_REQ_WAIT;
2700 start_ep_timer(ep);
2701 set_bit(PASS_ESTAB, &ep->com.history);
2702 ret = send_flowc(ep);
2703 mutex_unlock(&ep->com.mutex);
2705 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2706 c4iw_put_ep(&ep->com);
2714 struct c4iw_ep *ep;
2721 ep = get_ep_from_tid(dev, tid);
2722 if (!ep)
2725 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2726 dst_confirm(ep->dst);
2728 set_bit(PEER_CLOSE, &ep->com.history);
2729 mutex_lock(&ep->com.mutex);
2730 switch (ep->com.state) {
2732 __state_set(&ep->com, CLOSING);
2735 __state_set(&ep->com, CLOSING);
2736 connect_reply_upcall(ep, -ECONNRESET);
2746 __state_set(&ep->com, CLOSING);
2747 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2748 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2751 __state_set(&ep->com, CLOSING);
2752 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2753 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2756 start_ep_timer(ep);
2757 __state_set(&ep->com, CLOSING);
2759 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2762 peer_close_upcall(ep);
2770 __state_set(&ep->com, MORIBUND);
2774 (void)stop_ep_timer(ep);
2775 if (ep->com.cm_id && ep->com.qp) {
2777 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2780 close_complete_upcall(ep, 0);
2781 __state_set(&ep->com, DEAD);
2789 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2791 mutex_unlock(&ep->com.mutex);
2793 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2795 release_ep_resources(ep);
2796 c4iw_put_ep(&ep->com);
2800 static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
2802 complete_cached_srq_buffers(ep, ep->srqe_idx);
2803 if (ep->com.cm_id && ep->com.qp) {
2807 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2810 peer_abort_upcall(ep);
2811 release_ep_resources(ep);
2812 c4iw_put_ep(&ep->com);
2818 struct c4iw_ep *ep;
2829 ep = get_ep_from_tid(dev, tid);
2830 if (!ep)
2837 ep->hwtid, status, neg_adv_str(status));
2838 ep->stats.abort_neg_adv++;
2845 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
2846 ep->com.state);
2847 set_bit(PEER_ABORT, &ep->com.history);
2854 if (ep->com.state != MPA_REQ_SENT)
2855 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2857 mutex_lock(&ep->com.mutex);
2858 switch (ep->com.state) {
2860 c4iw_put_ep(&ep->parent_ep->com);
2863 (void)stop_ep_timer(ep);
2866 (void)stop_ep_timer(ep);
2868 (mpa_rev == 2 && ep->tried_with_mpa_v1))
2869 connect_reply_upcall(ep, -ECONNRESET);
2881 ep->retry_with_mpa_v1 = 1;
2890 stop_ep_timer(ep);
2893 if (ep->com.qp && ep->com.qp->srq) {
2897 complete_cached_srq_buffers(ep, srqidx);
2899 /* Hold ep ref until finish_peer_abort() */
2900 c4iw_get_ep(&ep->com);
2901 __state_set(&ep->com, ABORTING);
2902 set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
2903 read_tcb(ep);
2909 if (ep->com.cm_id && ep->com.qp) {
2911 ret = c4iw_modify_qp(ep->com.qp->rhp,
2912 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2917 peer_abort_upcall(ep);
2923 mutex_unlock(&ep->com.mutex);
2926 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2929 dst_confirm(ep->dst);
2930 if (ep->com.state != ABORTING) {
2931 __state_set(&ep->com, DEAD);
2933 if (!ep->retry_with_mpa_v1)
2936 mutex_unlock(&ep->com.mutex);
2938 rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2944 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
2946 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2949 release_ep_resources(ep);
2950 else if (ep->retry_with_mpa_v1) {
2951 if (ep->com.remote_addr.ss_family == AF_INET6) {
2954 &ep->com.local_addr;
2956 ep->com.dev->rdev.lldi.ports[0],
2960 xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
2961 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
2962 ep->com.local_addr.ss_family);
2963 dst_release(ep->dst);
2964 cxgb4_l2t_release(ep->l2t);
2965 c4iw_reconnect(ep);
2969 c4iw_put_ep(&ep->com);
2970 /* Dereferencing ep, referenced in peer_abort_intr() */
2971 c4iw_put_ep(&ep->com);
2977 struct c4iw_ep *ep;
2983 ep = get_ep_from_tid(dev, tid);
2984 if (!ep)
2987 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2990 mutex_lock(&ep->com.mutex);
2991 set_bit(CLOSE_CON_RPL, &ep->com.history);
2992 switch (ep->com.state) {
2994 __state_set(&ep->com, MORIBUND);
2997 (void)stop_ep_timer(ep);
2998 if ((ep->com.cm_id) && (ep->com.qp)) {
3000 c4iw_modify_qp(ep->com.qp->rhp,
3001 ep->com.qp,
3005 close_complete_upcall(ep, 0);
3006 __state_set(&ep->com, DEAD);
3013 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3016 mutex_unlock(&ep->com.mutex);
3018 release_ep_resources(ep);
3019 c4iw_put_ep(&ep->com);
3027 struct c4iw_ep *ep;
3030 ep = get_ep_from_tid(dev, tid);
3032 if (ep) {
3033 if (ep->com.qp) {
3035 ep->com.qp->wq.sq.qid);
3037 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
3044 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3045 c4iw_put_ep(&ep->com);
3047 pr_warn("TERM received tid %u no ep/qp\n", tid);
3059 struct c4iw_ep *ep;
3065 ep = get_ep_from_tid(dev, tid);
3066 if (!ep)
3068 pr_debug("ep %p tid %u credits %u\n",
3069 ep, ep->hwtid, credits);
3071 pr_debug("0 credit ack ep %p tid %u state %u\n",
3072 ep, ep->hwtid, state_read(&ep->com));
3076 dst_confirm(ep->dst);
3077 if (ep->mpa_skb) {
3078 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
3079 ep, ep->hwtid, state_read(&ep->com),
3080 ep->mpa_attr.initiator ? 1 : 0);
3081 mutex_lock(&ep->com.mutex);
3082 kfree_skb(ep->mpa_skb);
3083 ep->mpa_skb = NULL;
3084 if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
3085 stop_ep_timer(ep);
3086 mutex_unlock(&ep->com.mutex);
3089 c4iw_put_ep(&ep->com);
3096 struct c4iw_ep *ep = to_ep(cm_id);
3098 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3100 mutex_lock(&ep->com.mutex);
3101 if (ep->com.state != MPA_REQ_RCVD) {
3102 mutex_unlock(&ep->com.mutex);
3103 c4iw_put_ep(&ep->com);
3106 set_bit(ULP_REJECT, &ep->com.history);
3110 abort = send_mpa_reject(ep, pdata, pdata_len);
3111 mutex_unlock(&ep->com.mutex);
3113 stop_ep_timer(ep);
3114 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
3115 c4iw_put_ep(&ep->com);
3124 struct c4iw_ep *ep = to_ep(cm_id);
3129 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3131 mutex_lock(&ep->com.mutex);
3132 if (ep->com.state != MPA_REQ_RCVD) {
3142 set_bit(ULP_ACCEPT, &ep->com.history);
3143 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3144 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3149 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3150 if (conn_param->ord > ep->ird) {
3152 conn_param->ord = ep->ird;
3154 ep->ird = conn_param->ird;
3155 ep->ord = conn_param->ord;
3156 send_mpa_reject(ep, conn_param->private_data,
3162 if (conn_param->ird < ep->ord) {
3164 ep->ord <= h->rdev.lldi.max_ordird_qp) {
3165 conn_param->ird = ep->ord;
3172 ep->ird = conn_param->ird;
3173 ep->ord = conn_param->ord;
3175 if (ep->mpa_attr.version == 1) {
3176 if (peer2peer && ep->ird == 0)
3177 ep->ird = 1;
3180 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3181 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3182 ep->ird = 1;
3185 pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
3187 ep->com.cm_id = cm_id;
3188 ref_cm_id(&ep->com);
3189 ep->com.qp = qp;
3190 ref_qp(ep);
3193 attrs.mpa_attr = ep->mpa_attr;
3194 attrs.max_ird = ep->ird;
3195 attrs.max_ord = ep->ord;
3196 attrs.llp_stream_handle = ep;
3206 err = c4iw_modify_qp(ep->com.qp->rhp,
3207 ep->com.qp, mask, &attrs, 1);
3211 set_bit(STOP_MPA_TIMER, &ep->com.flags);
3212 err = send_mpa_reply(ep, conn_param->private_data,
3217 __state_set(&ep->com, FPDU_MODE);
3218 established_upcall(ep);
3219 mutex_unlock(&ep->com.mutex);
3220 c4iw_put_ep(&ep->com);
3223 deref_cm_id(&ep->com);
3227 mutex_unlock(&ep->com.mutex);
3229 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3230 c4iw_put_ep(&ep->com);
3303 struct c4iw_ep *ep;
3317 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3318 if (!ep) {
3319 pr_err("%s - cannot alloc ep\n", __func__);
3324 skb_queue_head_init(&ep->com.ep_skb_list);
3325 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3330 timer_setup(&ep->timer, ep_timeout, 0);
3331 ep->plen = conn_param->private_data_len;
3332 if (ep->plen)
3333 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3334 conn_param->private_data, ep->plen);
3335 ep->ird = conn_param->ird;
3336 ep->ord = conn_param->ord;
3338 if (peer2peer && ep->ord == 0)
3339 ep->ord = 1;
3341 ep->com.cm_id = cm_id;
3342 ref_cm_id(&ep->com);
3343 cm_id->provider_data = ep;
3344 ep->com.dev = dev;
3345 ep->com.qp = get_qhp(dev, conn_param->qpn);
3346 if (!ep->com.qp) {
3351 ref_qp(ep);
3353 ep->com.qp, cm_id);
3358 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3359 if (ep->atid == -1) {
3364 err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
3368 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3369 sizeof(ep->com.local_addr));
3370 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3371 sizeof(ep->com.remote_addr));
3373 laddr = (struct sockaddr_in *)&ep->com.local_addr;
3374 raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3375 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3376 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
3395 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3418 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3425 if (!ep->dst) {
3431 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3432 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3439 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3440 ep->l2t->idx);
3442 state_set(&ep->com, CONNECTING);
3443 ep->tos = cm_id->tos;
3446 err = send_connect(ep);
3450 cxgb4_l2t_release(ep->l2t);
3452 dst_release(ep->dst);
3454 xa_erase_irq(&ep->com.dev->atids, ep->atid);
3456 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3458 skb_queue_purge(&ep->com.ep_skb_list);
3459 deref_cm_id(&ep->com);
3461 c4iw_put_ep(&ep->com);
3466 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3470 &ep->com.local_addr;
3473 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3478 c4iw_init_wr_wait(ep->com.wr_waitp);
3479 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3480 ep->stid, &sin6->sin6_addr,
3482 ep->com.dev->rdev.lldi.rxq_ids[0]);
3484 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3485 ep->com.wr_waitp,
3490 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3493 err, ep->stid,
3499 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3503 &ep->com.local_addr;
3508 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3510 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3512 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3521 c4iw_init_wr_wait(ep->com.wr_waitp);
3522 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3523 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3524 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3526 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3527 ep->com.wr_waitp,
3534 , err, ep->stid,
3543 struct c4iw_listen_ep *ep;
3547 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3548 if (!ep) {
3549 pr_err("%s - cannot alloc ep\n", __func__);
3553 skb_queue_head_init(&ep->com.ep_skb_list);
3554 pr_debug("ep %p\n", ep);
3555 ep->com.cm_id = cm_id;
3556 ref_cm_id(&ep->com);
3557 ep->com.dev = dev;
3558 ep->backlog = backlog;
3559 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3560 sizeof(ep->com.local_addr));
3566 ep->com.local_addr.ss_family == AF_INET)
3567 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3568 cm_id->m_local_addr.ss_family, ep);
3570 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3571 cm_id->m_local_addr.ss_family, ep);
3573 if (ep->stid == -1) {
3578 err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
3582 state_set(&ep->com, LISTEN);
3583 if (ep->com.local_addr.ss_family == AF_INET)
3584 err = create_server4(dev, ep);
3586 err = create_server6(dev, ep);
3588 cm_id->provider_data = ep;
3591 xa_erase_irq(&ep->com.dev->stids, ep->stid);
3593 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3594 ep->com.local_addr.ss_family);
3596 deref_cm_id(&ep->com);
3597 c4iw_put_ep(&ep->com);
3606 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3608 pr_debug("ep %p\n", ep);
3611 state_set(&ep->com, DEAD);
3612 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3613 ep->com.local_addr.ss_family == AF_INET) {
3615 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3616 ep->com.dev->rdev.lldi.rxq_ids[0], false);
3619 c4iw_init_wr_wait(ep->com.wr_waitp);
3621 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3622 ep->com.dev->rdev.lldi.rxq_ids[0],
3623 ep->com.local_addr.ss_family == AF_INET6);
3626 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
3628 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3629 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3632 xa_erase_irq(&ep->com.dev->stids, ep->stid);
3633 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3634 ep->com.local_addr.ss_family);
3636 deref_cm_id(&ep->com);
3637 c4iw_put_ep(&ep->com);
3641 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3648 mutex_lock(&ep->com.mutex);
3650 pr_debug("ep %p state %s, abrupt %d\n", ep,
3651 states[ep->com.state], abrupt);
3654 * Ref the ep here in case we have fatal errors causing the
3655 * ep to be released and freed.
3657 c4iw_get_ep(&ep->com);
3659 rdev = &ep->com.dev->rdev;
3662 close_complete_upcall(ep, -EIO);
3663 ep->com.state = DEAD;
3665 switch (ep->com.state) {
3674 ep->com.state = ABORTING;
3676 ep->com.state = CLOSING;
3682 if (ep->mpa_skb &&
3683 test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3684 clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3685 stop_ep_timer(ep);
3687 start_ep_timer(ep);
3689 set_bit(CLOSE_SENT, &ep->com.flags);
3692 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3695 (void)stop_ep_timer(ep);
3696 ep->com.state = ABORTING;
3698 ep->com.state = MORIBUND;
3704 pr_debug("ignoring disconnect ep %p state %u\n",
3705 ep, ep->com.state);
3708 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3714 set_bit(EP_DISC_ABORT, &ep->com.history);
3715 ret = send_abort(ep);
3717 set_bit(EP_DISC_CLOSE, &ep->com.history);
3718 ret = send_halfclose(ep);
3721 set_bit(EP_DISC_FAIL, &ep->com.history);
3723 stop_ep_timer(ep);
3724 close_complete_upcall(ep, -EIO);
3726 if (ep->com.qp) {
3730 ret = c4iw_modify_qp(ep->com.qp->rhp,
3731 ep->com.qp,
3741 mutex_unlock(&ep->com.mutex);
3742 c4iw_put_ep(&ep->com);
3744 release_ep_resources(ep);
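
c4iw_ep_disconnect() reduces to a state-driven choice between an abortive close (send_abort) and a graceful half-close (send_halfclose, with the timer armed), ignoring the request outright when a close is already in flight; on failure it completes the close upcall with -EIO and releases the endpoint. The decision skeleton, compressed into a sketch (several intermediate MPA/listen states omitted):

#include <stdio.h>

enum state { CONNECTING, FPDU_MODE, CLOSING, ABORTING, MORIBUND, DEAD };

/* Reduced decision skeleton; the driver handles more states and errors. */
static enum state disconnect(enum state s, int abrupt)
{
        switch (s) {
        case CONNECTING:
        case FPDU_MODE:
                s = abrupt ? ABORTING : CLOSING;
                puts(abrupt ? "send abort" : "send half-close, start timer");
                break;
        case CLOSING:
                s = abrupt ? ABORTING : MORIBUND;
                break;
        case ABORTING:
        case MORIBUND:
        case DEAD:
                puts("ignoring disconnect: close already in progress");
                break;
        }
        return s;
}

int main(void)
{
        enum state s = disconnect(FPDU_MODE, 0);        /* graceful */

        s = disconnect(s, 1);                           /* escalate to abort */
        printf("final state %d\n", s);
        return 0;
}
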
3751 struct c4iw_ep *ep;
3754 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3756 if (!ep)
3761 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3762 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3763 send_fw_act_open_req(ep, atid);
3768 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3769 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3770 send_fw_act_open_req(ep, atid);
3784 connect_reply_upcall(ep, status2errno(req->retval));
3785 state_set(&ep->com, DEAD);
3786 if (ep->com.remote_addr.ss_family == AF_INET6) {
3788 (struct sockaddr_in6 *)&ep->com.local_addr;
3789 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3794 dst_release(ep->dst);
3795 cxgb4_l2t_release(ep->l2t);
3796 c4iw_put_ep(&ep->com);
3853 struct c4iw_ep *ep;
3857 ep = get_ep_from_tid(dev, tid);
3858 if (!ep)
3871 c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
3872 c4iw_put_ep(&ep->com); /* from read_tcb() */
3876 if (++ep->rx_pdu_out_cnt >= 2) {
3880 read_tcb(ep);
3884 ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
3887 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
3889 if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
3890 finish_peer_abort(dev, ep);
3891 else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
3892 send_abort_req(ep);
4226 static void process_timeout(struct c4iw_ep *ep)
4231 mutex_lock(&ep->com.mutex);
4232 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
4233 set_bit(TIMEDOUT, &ep->com.history);
4234 switch (ep->com.state) {
4236 connect_reply_upcall(ep, -ETIMEDOUT);
4245 if (ep->com.cm_id && ep->com.qp) {
4247 c4iw_modify_qp(ep->com.qp->rhp,
4248 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4251 close_complete_upcall(ep, -ETIMEDOUT);
4257 * These states are expected if the ep timed out at the same
4264 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4265 __func__, ep, ep->hwtid, ep->com.state);
4268 mutex_unlock(&ep->com.mutex);
4270 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4271 c4iw_put_ep(&ep->com);
4276 struct c4iw_ep *ep;
4287 ep = list_entry(tmp, struct c4iw_ep, entry);
4288 process_timeout(ep);
4325 struct c4iw_ep *ep = from_timer(ep, t, timer);
4329 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
4333 if (!ep->entry.next) {
4334 list_add_tail(&ep->entry, &timeout_list);
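
ep_timeout() runs in timer context, so instead of doing upcalls there it sets the TIMEOUT bit and, if the endpoint is not already queued (the ep->entry.next check), parks it on timeout_list for process_timeout() to handle later with the mutex held. The defer-from-atomic-context shape as a sketch (single-threaded stand-in; the real code protects the list with a spinlock):

#include <stdbool.h>
#include <stdio.h>

struct toy_ep {
        const char *name;
        bool timed_out;         /* plays the role of the TIMEOUT bit */
        struct toy_ep *next;
};

static struct toy_ep *timeout_list;

/* Runs in "timer context": no sleeping work, just queue for later. */
static void timer_expired(struct toy_ep *e)
{
        if (e->timed_out)
                return;                 /* already queued or being stopped */
        e->timed_out = true;
        e->next = timeout_list;         /* defer the real work */
        timeout_list = e;
}

/* Runs later in "process context": safe to take locks, make upcalls. */
static void process_timeouts(void)
{
        while (timeout_list) {
                struct toy_ep *e = timeout_list;

                timeout_list = e->next;
                printf("handling timeout for %s\n", e->name);
        }
}

int main(void)
{
        struct toy_ep a = { .name = "ep0" }, b = { .name = "ep1" };

        timer_expired(&a);
        timer_expired(&b);
        timer_expired(&a);      /* no double-queue: TIMEOUT already set */
        process_timeouts();
        return 0;
}
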
4407 struct c4iw_ep *ep;
4410 ep = get_ep_from_tid(dev, tid);
4412 if (!ep) {
4419 ep->hwtid, req->status,
4423 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
4425 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);