Lines Matching defs:packet
208 struct hfi1_packet *packet)
210 struct ib_header *rhdr = packet->hdr;
211 u32 rte = rhf_rcv_type_err(packet->rhf);
218 if ((packet->rhf & RHF_DC_ERR) &&
222 if (packet->rhf & RHF_ICRC_ERR)
225 if (packet->etype == RHF_RCV_TYPE_BYPASS) {
232 packet->ohdr = &rhdr->u.oth;
234 packet->ohdr = &rhdr->u.l.oth;
235 packet->grh = &rhdr->u.l.grh;
241 if (packet->rhf & RHF_TID_ERR) {
243 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
247 /* Sanity check packet */
252 if (packet->grh) {
254 struct ib_grh *grh = packet->grh;
264 qp_num = ib_bth_get_qpn(packet->ohdr);
278 * packet.
290 hfi1_rc_hdrerr(rcd, packet, qp);
300 } /* Valid packet with TIDErr */
309 if (rhf_use_egr_bfr(packet->rhf))
310 ebuf = packet->ebuf;
315 opcode = ib_bth_get_opcode(packet->ohdr);
326 sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
329 lqpn = ib_bth_get_qpn(packet->ohdr);
357 packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
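The rcv_hdrerr hits above (lines 232-235) pick ohdr/grh from the 9B LRH's LNH field: the BTH either follows the LRH directly or sits behind a GRH. A minimal, self-contained sketch of that dispatch follows; the sketch_* types and the LNH values 0x2/0x3 are simplified assumptions, not the driver's real struct ib_header or its helpers.

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the 9B header layout (assumptions). */
    struct sketch_grh { uint32_t version_tclass_flow; uint32_t rest[9]; };
    struct sketch_oth { uint32_t bth[3]; };

    struct sketch_ib_header {
            uint16_t lrh[4];
            union {
                    struct sketch_oth oth;          /* LNH 0x2: LRH + BTH       */
                    struct {
                            struct sketch_grh grh;  /* LNH 0x3: LRH + GRH + BTH */
                            struct sketch_oth oth;
                    } l;
            } u;
    };

    /* LNH is the low two bits of the first LRH word (host order here). */
    static unsigned int sketch_get_lnh(const struct sketch_ib_header *hdr)
    {
            return hdr->lrh[0] & 0x3;
    }

    /* Mirrors the ohdr/grh selection seen at listing lines 232-235. */
    static void sketch_locate_oth(struct sketch_ib_header *hdr,
                                  struct sketch_oth **ohdr,
                                  struct sketch_grh **grh)
    {
            if (sketch_get_lnh(hdr) == 0x2) {
                    *ohdr = &hdr->u.oth;
                    *grh = NULL;
            } else {
                    *ohdr = &hdr->u.l.oth;
                    *grh = &hdr->u.l.grh;
            }
    }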
369 struct hfi1_packet *packet)
371 packet->rsize = get_hdrqentsize(rcd); /* words */
372 packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
373 packet->rcd = rcd;
374 packet->updegr = 0;
375 packet->etail = -1;
376 packet->rhf_addr = get_rhf_addr(rcd);
377 packet->rhf = rhf_to_cpu(packet->rhf_addr);
378 packet->rhqoff = hfi1_rcd_head(rcd);
379 packet->numpkt = 0;
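init_packet (lines 371-379) caches the receive-header-queue geometry in words and reads the first receive-header-flags word. A compilable sketch of that setup, with invented sketch_* names and a plain load standing in for rhf_to_cpu():

    #include <stdint.h>

    /* Invented stand-in for the per-call receive state (assumption). */
    struct sketch_rx_state {
            uint32_t rsize;           /* words per header-queue entry             */
            uint32_t maxcnt;          /* total words in the queue (entries*rsize) */
            uint32_t rhqoff;          /* current offset into the queue, in words  */
            uint32_t numpkt;          /* packets handled in this pass             */
            const uint64_t *rhf_addr; /* where the current RHF lives              */
            uint64_t rhf;             /* cached copy, CPU byte order              */
    };

    /* hw_head is the word offset the hardware last reported. */
    static void sketch_init_rx(struct sketch_rx_state *st,
                               uint32_t entsize_words, uint32_t entries,
                               uint32_t hw_head, const uint64_t *first_rhf)
    {
            st->rsize = entsize_words;
            st->maxcnt = entries * entsize_words;
            st->rhqoff = hw_head;
            st->numpkt = 0;
            st->rhf_addr = first_rhf;
            st->rhf = *first_rhf;   /* the real code converts from little endian */
    }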
390 * @qp: The packet's destination QP
391 * @pkt: The packet itself.
394 * Process the packet's FECN or BECN bits. By now, the packet
399 * normal packet processing to send an ACK with BECN set (or a CNP).
502 struct hfi1_packet *packet)
504 struct hfi1_ctxtdata *rcd = packet->rcd;
507 mdata->rsize = packet->rsize;
508 mdata->maxcnt = packet->maxcnt;
509 mdata->ps_head = packet->rhqoff;
565 #define prescan_rxq(rcd, packet) \
568 __prescan_rxq(packet); \
570 static void __prescan_rxq(struct hfi1_packet *packet)
572 struct hfi1_ctxtdata *rcd = packet->rcd;
575 init_ps_mdata(&mdata, packet);
580 packet->rcd->rhf_offset;
597 packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
598 hdr = packet->hdr;
602 packet->ohdr = &hdr->u.oth;
603 packet->grh = NULL;
605 packet->ohdr = &hdr->u.l.oth;
606 packet->grh = &hdr->u.l.grh;
611 if (!hfi1_may_ecn(packet))
614 bth1 = be32_to_cpu(packet->ohdr->bth[1]);
624 hfi1_process_ecn_slowpath(qp, packet, true);
629 packet->ohdr->bth[1] = cpu_to_be32(bth1);
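The __prescan_rxq hits (lines 611-629) read bth[1], hand FECN/BECN-marked packets to the ECN slow path, and write the word back. A sketch of the bit handling follows; the field positions (FECN bit 31, BECN bit 30, QPN in the low 24 bits of the second BTH dword) are the standard BTH layout, while the SKETCH_* names are invented.

    #include <stdbool.h>
    #include <stdint.h>

    /* Second BTH dword, host order: bit 31 FECN, bit 30 BECN, bits 23..0 QPN. */
    #define SKETCH_FECN     (1u << 31)
    #define SKETCH_BECN     (1u << 30)
    #define SKETCH_QPN_MASK 0x00FFFFFFu

    /* Report the congestion marks, return the destination QPN, and clear the
     * marks so the header forwarded to normal processing is clean. */
    static uint32_t sketch_consume_ecn(uint32_t *bth1, bool *fecn, bool *becn)
    {
            uint32_t qpn = *bth1 & SKETCH_QPN_MASK;

            *fecn = (*bth1 & SKETCH_FECN) != 0;
            *becn = (*bth1 & SKETCH_BECN) != 0;
            *bth1 &= ~(SKETCH_FECN | SKETCH_BECN);
            return qpn;
    }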
635 static void process_rcv_qp_work(struct hfi1_packet *packet)
638 struct hfi1_ctxtdata *rcd = packet->rcd;
648 packet->qp = qp;
649 hfi1_send_rc_ack(packet, 0);
665 static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
668 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
670 process_rcv_qp_work(packet);
674 this_cpu_inc(*packet->rcd->dd->rcv_limit);
679 static inline int check_max_packet(struct hfi1_packet *packet, int thread)
683 if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
684 ret = max_packet_exceeded(packet, thread);
688 static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
692 packet->rcd->dd->ctx0_seq_drop++;
693 /* Set up for the next packet */
694 packet->rhqoff += packet->rsize;
695 if (packet->rhqoff >= packet->maxcnt)
696 packet->rhqoff = 0;
698 packet->numpkt++;
699 ret = check_max_packet(packet, thread);
701 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
702 packet->rcd->rhf_offset;
703 packet->rhf = rhf_to_cpu(packet->rhf_addr);
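skip_rcv_packet and both process_rcv_packet variants end with the same stanza (e.g. lines 694-703): advance the offset by one entry, wrap at the end of the circular queue, and reload the RHF for the next entry. A standalone sketch of that step, with invented names and a plain pointer computation in place of rhf_to_cpu():

    #include <stdint.h>

    /* Advance the header-queue cursor by one entry, wrapping at maxcnt, and
     * return the address of the next RHF (all offsets are in 32-bit words). */
    static const uint32_t *sketch_next_entry(uint32_t *rhqoff, uint32_t rsize,
                                             uint32_t maxcnt,
                                             const uint32_t *hdrq_base,
                                             uint32_t rhf_offset)
    {
            *rhqoff += rsize;
            if (*rhqoff >= maxcnt)
                    *rhqoff = 0;
            return hdrq_base + *rhqoff + rhf_offset;
    }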
708 static void process_rcv_packet_napi(struct hfi1_packet *packet)
710 packet->etype = rhf_rcv_type(packet->rhf);
713 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
715 packet->etail = rhf_egr_index(packet->rhf);
716 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
717 &packet->updegr);
723 prefetch_range(packet->ebuf,
724 packet->tlen - ((packet->rcd->rcvhdrqentsize -
725 (rhf_hdrq_offset(packet->rhf)
728 packet->rcd->rhf_rcv_function_map[packet->etype](packet);
729 packet->numpkt++;
731 /* Set up for the next packet */
732 packet->rhqoff += packet->rsize;
733 if (packet->rhqoff >= packet->maxcnt)
734 packet->rhqoff = 0;
736 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
737 packet->rcd->rhf_offset;
738 packet->rhf = rhf_to_cpu(packet->rhf_addr);
741 static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
745 packet->etype = rhf_rcv_type(packet->rhf);
748 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
750 packet->ebuf = NULL;
751 if (rhf_use_egr_bfr(packet->rhf)) {
752 packet->etail = rhf_egr_index(packet->rhf);
753 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
754 &packet->updegr);
760 prefetch_range(packet->ebuf,
761 packet->tlen - ((get_hdrqentsize(packet->rcd) -
762 (rhf_hdrq_offset(packet->rhf)
767 * Call a type specific handler for the packet. We
774 packet->rcd->rhf_rcv_function_map[packet->etype](packet);
775 packet->numpkt++;
777 /* Set up for the next packet */
778 packet->rhqoff += packet->rsize;
779 if (packet->rhqoff >= packet->maxcnt)
780 packet->rhqoff = 0;
782 ret = check_max_packet(packet, thread);
784 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
785 packet->rcd->rhf_offset;
786 packet->rhf = rhf_to_cpu(packet->rhf_addr);
791 static inline void process_rcv_update(int last, struct hfi1_packet *packet)
799 if (!last && !(packet->numpkt & 0xf)) {
800 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
801 packet->etail, 0, 0);
802 packet->updegr = 0;
804 packet->grh = NULL;
807 static inline void finish_packet(struct hfi1_packet *packet)
810 * Nothing we need to free for the packet.
815 update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
816 packet->etail, rcv_intr_dynamic, packet->numpkt);
820 * handle_receive_interrupt_napi_fp - receive a packet
830 struct hfi1_packet packet;
832 init_packet(rcd, &packet);
833 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
836 while (packet.numpkt < budget) {
837 process_rcv_packet_napi(&packet);
838 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
841 process_rcv_update(0, &packet);
843 hfi1_set_rcd_head(rcd, packet.rhqoff);
845 finish_packet(&packet);
846 return packet.numpkt;
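handle_receive_interrupt_napi_fp (lines 830-846) is the NAPI fast path: it consumes entries until either the budget is spent or the per-entry sequence number stops matching, which signals that the hardware has not written the next entry yet. A sketch of that loop shape only; the helpers are hypothetical stand-ins for process_rcv_packet_napi() and the sequence bookkeeping.

    #include <stdbool.h>

    /* Hypothetical helpers; only the loop shape matters in this sketch. */
    bool sketch_next_entry_valid(void *rx); /* next RHF carries the expected seq? */
    void sketch_handle_one(void *rx);       /* consume one entry and advance      */

    /* NAPI-style poll: stop at the budget or when the queue runs dry.
     * Returns the number of packets consumed (never more than budget). */
    static int sketch_napi_poll(void *rx, int budget)
    {
            int numpkt = 0;

            if (!sketch_next_entry_valid(rx))
                    return 0;                       /* queue is empty */

            while (numpkt < budget) {
                    sketch_handle_one(rx);
                    numpkt++;
                    if (!sketch_next_entry_valid(rx))
                            break;                  /* caught up with hardware */
            }
            return numpkt;
    }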
855 struct hfi1_packet packet;
857 init_packet(rcd, &packet);
858 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
863 prescan_rxq(rcd, &packet);
866 last = process_rcv_packet(&packet, thread);
867 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
869 process_rcv_update(last, &packet);
871 process_rcv_qp_work(&packet);
872 hfi1_set_rcd_head(rcd, packet.rhqoff);
874 finish_packet(&packet);
882 struct hfi1_packet packet;
884 init_packet(rcd, &packet);
886 if (packet.rhqoff == hdrqtail) {
892 prescan_rxq(rcd, &packet);
895 last = process_rcv_packet(&packet, thread);
896 if (packet.rhqoff == hdrqtail)
898 process_rcv_update(last, &packet);
900 process_rcv_qp_work(&packet);
901 hfi1_set_rcd_head(rcd, packet.rhqoff);
903 finish_packet(&packet);
948 static bool __set_armed_to_active(struct hfi1_packet *packet)
950 u8 etype = rhf_rcv_type(packet->rhf);
954 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
955 packet->rhf_addr);
956 sc = hfi1_9B_get_sc5(hdr, packet->rhf);
959 packet->rcd,
960 packet->rhf_addr);
964 int hwstate = driver_lstate(packet->rcd->ppd);
966 &packet->rcd->ppd->linkstate_active_work;
969 dd_dev_info(packet->rcd->dd,
975 queue_work(packet->rcd->ppd->link_wq, lsaw);
983 * @packet: the packet structure
985 * Return true if packet processing needs to bail.
987 static bool set_armed_to_active(struct hfi1_packet *packet)
989 if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
991 return __set_armed_to_active(packet);
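set_armed_to_active (lines 987-991) keeps the common case cheap: if the link is not in the ARMED state there is nothing to do, and only then does the out-of-line helper inspect the packet and queue the transition to ACTIVE. The pattern, sketched with invented names:

    #include <stdbool.h>

    enum sketch_link_state { SKETCH_LS_DOWN, SKETCH_LS_ARMED, SKETCH_LS_ACTIVE };

    /* Out-of-line slow path: decide from the packet whether to schedule the
     * ARMED -> ACTIVE transition (hypothetical, not the driver's helper). */
    bool __sketch_armed_to_active(const void *pkt);

    /* Inline fast path: almost every packet arrives with the link already
     * ACTIVE, so the slow path is only reached during the ARMED window. */
    static inline bool sketch_armed_to_active(const void *pkt,
                                              enum sketch_link_state ls)
    {
            if (__builtin_expect(ls != SKETCH_LS_ARMED, 1))
                    return false;           /* nothing to do, keep receiving */
            return __sketch_armed_to_active(pkt);
    }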
995 * handle_receive_interrupt - receive a packet
1006 struct hfi1_packet packet;
1014 init_packet(rcd, &packet);
1017 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
1024 if (packet.rhqoff == hdrqtail) {
1035 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
1039 prescan_rxq(rcd, &packet);
1043 /* On to the next packet */
1044 packet.rhqoff += packet.rsize;
1045 packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1046 packet.rhqoff +
1048 packet.rhf = rhf_to_cpu(packet.rhf_addr);
1051 last = skip_rcv_packet(&packet, thread);
1054 if (set_armed_to_active(&packet))
1056 last = process_rcv_packet(&packet, thread);
1060 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
1063 if (packet.rhqoff == hdrqtail)
1073 rhf_rcv_seq(packet.rhf));
1083 process_rcv_update(last, &packet);
1086 process_rcv_qp_work(&packet);
1087 hfi1_set_rcd_head(rcd, packet.rhqoff);
1094 finish_packet(&packet);
1099 * handle_receive_interrupt_napi_sp - receive a packet
1112 struct hfi1_packet packet;
1114 init_packet(rcd, &packet);
1115 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
1118 while (last != RCV_PKT_DONE && packet.numpkt < budget) {
1120 /* On to the next packet */
1121 packet.rhqoff += packet.rsize;
1122 packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1123 packet.rhqoff +
1125 packet.rhf = rhf_to_cpu(packet.rhf_addr);
1128 if (set_armed_to_active(&packet))
1130 process_rcv_packet_napi(&packet);
1133 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
1141 process_rcv_update(last, &packet);
1144 hfi1_set_rcd_head(rcd, packet.rhqoff);
1151 finish_packet(&packet);
1152 return packet.numpkt;
1157 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
1180 /* Received non-SC15 packet implies neighbor_normal */
1197 * Convert a given MTU size to the on-wire MAD packet enumeration.
1259 * MTU is specified per-VL. To ensure that no packet gets
1260 * stuck (due, e.g., to the MTU for the packet's VL being
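The comments at lines 1197 and 1259-1260 concern converting a byte MTU to the on-wire MAD enumeration and the per-VL MTU limits. As a reference point, the standard IB encoding maps 256..4096 bytes to the values 1..5; the two larger OPA entries below are assumptions, as are all the sketch names.

    /* Byte MTU -> on-wire MTU enumeration. Values 1..5 are the standard IB
     * encoding; 6 and 7 are the OPA 8K/10K extensions (assumed here). */
    enum sketch_wire_mtu {
            SKETCH_MTU_256   = 1,
            SKETCH_MTU_512   = 2,
            SKETCH_MTU_1024  = 3,
            SKETCH_MTU_2048  = 4,
            SKETCH_MTU_4096  = 5,
            SKETCH_MTU_8192  = 6,
            SKETCH_MTU_10240 = 7,
    };

    static enum sketch_wire_mtu sketch_mtu_to_enum(int mtu_bytes,
                                                   enum sketch_wire_mtu fallback)
    {
            switch (mtu_bytes) {
            case   256: return SKETCH_MTU_256;
            case   512: return SKETCH_MTU_512;
            case  1024: return SKETCH_MTU_1024;
            case  2048: return SKETCH_MTU_2048;
            case  4096: return SKETCH_MTU_4096;
            case  8192: return SKETCH_MTU_8192;
            case 10240: return SKETCH_MTU_10240;
            default:    return fallback;    /* unknown size: caller's choice */
            }
    }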
1436 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
1438 packet->hdr = (struct hfi1_ib_message_header *)
1439 hfi1_get_msgheader(packet->rcd,
1440 packet->rhf_addr);
1441 packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
1444 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
1446 struct hfi1_pportdata *ppd = packet->rcd->ppd;
1449 if ((!packet->slid) || (!packet->dlid))
1452 /* Compare port lid with incoming packet dlid */
1453 if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
1454 (packet->dlid !=
1456 if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
1461 if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
1465 if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
1467 (packet->sc != 0xF))
1473 static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
1475 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1479 hfi1_setup_ib_header(packet);
1480 hdr = packet->hdr;
1484 packet->ohdr = &hdr->u.oth;
1485 packet->grh = NULL;
1489 packet->ohdr = &hdr->u.l.oth;
1490 packet->grh = &hdr->u.l.grh;
1491 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1493 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1500 /* Query commonly used fields from packet header */
1501 packet->payload = packet->ebuf;
1502 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1503 packet->slid = ib_get_slid(hdr);
1504 packet->dlid = ib_get_dlid(hdr);
1505 if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
1506 (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
1507 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1509 packet->sl = ib_get_sl(hdr);
1510 packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
1511 packet->pad = ib_bth_get_pad(packet->ohdr);
1512 packet->extra_byte = 0;
1513 packet->pkey = ib_bth_get_pkey(packet->ohdr);
1514 packet->migrated = ib_bth_is_migration(packet->ohdr);
1522 static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
1526 * compared to an IB packet.
1534 struct hfi1_ctxtdata *rcd = packet->rcd;
1539 packet->hdr = (struct hfi1_16b_header *)
1540 hfi1_get_16B_header(packet->rcd,
1541 packet->rhf_addr);
1542 l4 = hfi1_16B_get_l4(packet->hdr);
1544 packet->ohdr = packet->ebuf;
1545 packet->grh = NULL;
1546 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1547 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1549 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1551 packet->migrated = opa_bth_is_migration(packet->ohdr);
1556 packet->ohdr = packet->ebuf + grh_len;
1557 packet->grh = packet->ebuf;
1558 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1559 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1561 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1563 packet->migrated = opa_bth_is_migration(packet->ohdr);
1565 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1567 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1571 packet->mgmt = packet->ebuf;
1572 packet->ohdr = NULL;
1573 packet->grh = NULL;
1574 packet->opcode = IB_OPCODE_UD_SEND_ONLY;
1575 packet->pad = OPA_16B_L4_FM_PAD;
1576 packet->hlen = OPA_16B_L4_FM_HLEN;
1577 packet->migrated = false;
1582 /* Query commonly used fields from packet header */
1583 packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
1584 packet->slid = hfi1_16B_get_slid(packet->hdr);
1585 packet->dlid = hfi1_16B_get_dlid(packet->hdr);
1586 if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
1587 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1590 packet->sc = hfi1_16B_get_sc(packet->hdr);
1591 packet->sl = ibp->sc_to_sl[packet->sc];
1592 packet->extra_byte = SIZE_OF_LT;
1593 packet->pkey = hfi1_16B_get_pkey(packet->hdr);
1595 if (hfi1_bypass_ingress_pkt_check(packet))
1600 hfi1_cdbg(PKT, "%s: packet dropped", __func__);
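hfi1_setup_bypass_packet (lines 1534-1600) branches on the 16B header's L4 field: an IB L4 puts the BTH at the start of the eager buffer, a GRH L4 puts a 40-byte GRH first, and the FM L4 carries a management payload. A sketch of that three-way dispatch; the L4 codes here are placeholders, not the OPA-defined values.

    #include <stdint.h>

    /* Placeholder L4 codes (not the OPA-defined constants). */
    enum sketch_l4 { SKETCH_L4_IB, SKETCH_L4_GRH, SKETCH_L4_FM };
    #define SKETCH_GRH_BYTES 40     /* an IB GRH is 40 bytes */

    struct sketch_parsed {
            void *ohdr;     /* BTH and friends, or NULL for management packets */
            void *grh;      /* GRH if present                                  */
            void *mgmt;     /* management payload for the FM L4                */
    };

    /* ebuf points at the start of the eager buffer for this packet. */
    static int sketch_parse_16b(enum sketch_l4 l4, uint8_t *ebuf,
                                struct sketch_parsed *p)
    {
            p->ohdr = NULL;
            p->grh = NULL;
            p->mgmt = NULL;

            switch (l4) {
            case SKETCH_L4_IB:                      /* BTH immediately    */
                    p->ohdr = ebuf;
                    return 0;
            case SKETCH_L4_GRH:                     /* GRH, then BTH      */
                    p->grh = ebuf;
                    p->ohdr = ebuf + SKETCH_GRH_BYTES;
                    return 0;
            case SKETCH_L4_FM:                      /* management payload */
                    p->mgmt = ebuf;
                    return 0;
            }
            return -1;                              /* unknown L4: drop   */
    }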
1605 static void show_eflags_errs(struct hfi1_packet *packet)
1607 struct hfi1_ctxtdata *rcd = packet->rcd;
1608 u32 rte = rhf_rcv_type_err(packet->rhf);
1612 rcd->ctxt, packet->rhf,
1613 packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
1614 packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
1615 packet->rhf & RHF_DC_ERR ? "dc " : "",
1616 packet->rhf & RHF_TID_ERR ? "tid " : "",
1617 packet->rhf & RHF_LEN_ERR ? "len " : "",
1618 packet->rhf & RHF_ECC_ERR ? "ecc " : "",
1619 packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
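show_eflags_errs (lines 1605-1619) renders the RHF error bits as a space-separated list of tokens, one ternary per flag. A small user-space-style sketch of the same decode pattern; the bit masks below are invented, only the technique mirrors the hits above.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented error-flag masks; the real RHF bit positions differ. */
    #define SK_ERR_HDR_LEN  (1ull << 0)
    #define SK_ERR_TID      (1ull << 1)
    #define SK_ERR_LEN      (1ull << 2)
    #define SK_ERR_ECC      (1ull << 3)
    #define SK_ERR_ICRC     (1ull << 4)

    /* Build a human-readable summary, one token per error bit that is set. */
    static void sketch_show_eflags(uint64_t rhf, char *buf, size_t len)
    {
            snprintf(buf, len, "rhf 0x%llx: %s%s%s%s%s",
                     (unsigned long long)rhf,
                     rhf & SK_ERR_HDR_LEN ? "hdr_len " : "",
                     rhf & SK_ERR_TID     ? "tid "     : "",
                     rhf & SK_ERR_LEN     ? "len "     : "",
                     rhf & SK_ERR_ECC     ? "ecc "     : "",
                     rhf & SK_ERR_ICRC    ? "icrc "    : "");
    }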
1623 void handle_eflags(struct hfi1_packet *packet)
1625 struct hfi1_ctxtdata *rcd = packet->rcd;
1627 rcv_hdrerr(rcd, rcd->ppd, packet);
1628 if (rhf_err_flags(packet->rhf))
1629 show_eflags_errs(packet);
1632 static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
1636 struct hfi1_ctxtdata *rcd = packet->rcd;
1645 trace_hfi1_rcvhdr(packet);
1647 hfi1_setup_ib_header(packet);
1649 packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
1650 packet->grh = NULL;
1652 if (unlikely(rhf_err_flags(packet->rhf))) {
1653 handle_eflags(packet);
1657 qpnum = ib_bth_get_qpn(packet->ohdr);
1662 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
1666 do_work = hfi1_may_ecn(packet);
1668 do_cnp = (packet->opcode != IB_OPCODE_CNP);
1670 packet, do_cnp);
1676 * tlen is whole packet len so we need to
1679 tlen = packet->tlen;
1680 extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
1681 packet->hlen;
1687 skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
1702 ibp = rcd_to_iport(packet->rcd);
1708 * specific handlers for each packet type.
1710 static void process_receive_ib(struct hfi1_packet *packet)
1712 if (hfi1_setup_9B_packet(packet))
1715 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1718 trace_hfi1_rcvhdr(packet);
1720 if (unlikely(rhf_err_flags(packet->rhf))) {
1721 handle_eflags(packet);
1725 hfi1_ib_rcv(packet);
1728 static void process_receive_bypass(struct hfi1_packet *packet)
1730 struct hfi1_devdata *dd = packet->rcd->dd;
1732 if (hfi1_setup_bypass_packet(packet))
1735 trace_hfi1_rcvhdr(packet);
1737 if (unlikely(rhf_err_flags(packet->rhf))) {
1738 handle_eflags(packet);
1742 if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
1743 hfi1_16B_rcv(packet);
1750 u64 *flits = packet->ebuf;
1752 if (flits && !(packet->rhf & RHF_LEN_ERR)) {
1755 packet->tlen > sizeof(flits[0]) ?
1764 static void process_receive_error(struct hfi1_packet *packet)
1766 /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
1768 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
1769 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
1770 packet->rhf & RHF_DC_ERR)))
1773 hfi1_setup_ib_header(packet);
1774 handle_eflags(packet);
1776 if (unlikely(rhf_err_flags(packet->rhf)))
1777 dd_dev_err(packet->rcd->dd,
1778 "Unhandled error packet received. Dropping.\n");
1781 static void kdeth_process_expected(struct hfi1_packet *packet)
1783 hfi1_setup_9B_packet(packet);
1784 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1787 if (unlikely(rhf_err_flags(packet->rhf))) {
1788 struct hfi1_ctxtdata *rcd = packet->rcd;
1790 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1794 hfi1_kdeth_expected_rcv(packet);
1797 static void kdeth_process_eager(struct hfi1_packet *packet)
1799 hfi1_setup_9B_packet(packet);
1800 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1803 trace_hfi1_rcvhdr(packet);
1804 if (unlikely(rhf_err_flags(packet->rhf))) {
1805 struct hfi1_ctxtdata *rcd = packet->rcd;
1807 show_eflags_errs(packet);
1808 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1812 hfi1_kdeth_eager_rcv(packet);
1815 static void process_receive_invalid(struct hfi1_packet *packet)
1817 dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
1818 rhf_rcv_type(packet->rhf));
1825 struct hfi1_packet packet;
1840 init_packet(rcd, &packet);
1841 init_ps_mdata(&mdata, &packet);
1862 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
1863 hdr = packet.hdr;
1868 packet.ohdr = &hdr->u.oth;
1870 packet.ohdr = &hdr->u.l.oth;
1874 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
1875 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
1876 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
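The debug dump at the end (lines 1874-1876) pulls the opcode, destination QPN, and PSN straight out of the three BTH dwords. Those field positions are fixed by the BTH wire format, so the small decode sketch below is grounded in them; the 24-bit PSN mask is the wire width, though the driver may track a wider PSN internally.

    #include <stdint.h>

    /* BTH wire layout (after converting each dword to host order):
     *   bth[0] bits 31..24 = opcode
     *   bth[1] bits 23..0  = destination QPN
     *   bth[2] bits 23..0  = PSN
     */
    struct sketch_bth_fields {
            uint8_t  opcode;
            uint32_t qpn;
            uint32_t psn;
    };

    static struct sketch_bth_fields sketch_decode_bth(const uint32_t bth[3])
    {
            struct sketch_bth_fields f;

            f.opcode = bth[0] >> 24;
            f.qpn = bth[1] & 0x00FFFFFFu;
            f.psn = bth[2] & 0x00FFFFFFu;
            return f;
    }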