Lines matching defs:packet (struct hfi1_packet handling in the hfi1 driver's receive path)

255 		       struct hfi1_packet *packet)
257 struct ib_header *rhdr = packet->hdr;
258 u32 rte = rhf_rcv_type_err(packet->rhf);
265 if ((packet->rhf & RHF_DC_ERR) &&
269 if (packet->rhf & RHF_ICRC_ERR)
272 if (packet->etype == RHF_RCV_TYPE_BYPASS) {
279 packet->ohdr = &rhdr->u.oth;
281 packet->ohdr = &rhdr->u.l.oth;
282 packet->grh = &rhdr->u.l.grh;
288 if (packet->rhf & RHF_TID_ERR) {
290 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
294 /* Sanity check packet */
299 if (packet->grh) {
301 struct ib_grh *grh = packet->grh;
311 qp_num = ib_bth_get_qpn(packet->ohdr);
325 * packet.
337 hfi1_rc_hdrerr(rcd, packet, qp);
347 } /* Valid packet with TIDErr */
356 if (rhf_use_egr_bfr(packet->rhf))
357 ebuf = packet->ebuf;
362 opcode = ib_bth_get_opcode(packet->ohdr);
373 sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
376 lqpn = ib_bth_get_qpn(packet->ohdr);
404 packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
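
The rcv_hdrerr() fragments above select packet->ohdr and packet->grh from a union inside the 9B struct ib_header, depending on whether the LRH's LNH field says a GRH is present. Below is a minimal user-space sketch of that parse; the sk_* types and the 0x2/0x3 LNH values mirror the IBTA 9B layout, but all names are invented for illustration and byte-swapping of the big-endian header words is omitted.

#include <stdint.h>
#include <stddef.h>

struct sk_grh { uint32_t version_tclass_flow; /* ... */ };
struct sk_oth { uint32_t bth[3]; /* ... */ };

struct sk_ib_header {
	uint16_t lrh[4];                   /* local route header */
	union {
		struct sk_oth oth;         /* LNH == 0x2: BTH follows LRH */
		struct {
			struct sk_grh grh; /* LNH == 0x3: GRH, then BTH */
			struct sk_oth oth;
		} l;
	} u;
};

/* Point ohdr/grh into the header the same way the listing does. */
static void sk_parse_9b(struct sk_ib_header *hdr,
			struct sk_oth **ohdr, struct sk_grh **grh)
{
	unsigned int lnh = hdr->lrh[0] & 0x3;  /* low two LNH bits */

	if (lnh == 0x2) {
		*ohdr = &hdr->u.oth;
		*grh = NULL;
	} else {
		*ohdr = &hdr->u.l.oth;
		*grh = &hdr->u.l.grh;
	}
}
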
416 struct hfi1_packet *packet)
418 packet->rsize = get_hdrqentsize(rcd); /* words */
419 packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
420 packet->rcd = rcd;
421 packet->updegr = 0;
422 packet->etail = -1;
423 packet->rhf_addr = get_rhf_addr(rcd);
424 packet->rhf = rhf_to_cpu(packet->rhf_addr);
425 packet->rhqoff = hfi1_rcd_head(rcd);
426 packet->numpkt = 0;
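
init_packet() seeds the per-interrupt walker state: rsize and maxcnt describe the receive header queue in 32-bit words, rhqoff is the current offset into it, and rhf is the receive header flags word found at that offset. A compact sketch of the same idea, using invented rx_ring/rx_cursor types in place of hfi1_ctxtdata/hfi1_packet (the real driver also byte-swaps a 64-bit RHF rather than reading a single word):

#include <stdint.h>

struct rx_ring {
	uint32_t *hdrq;        /* receive header queue base */
	uint32_t entry_words;  /* words per entry (packet->rsize) */
	uint32_t num_entries;  /* entries in the queue */
	uint32_t head;         /* software head offset, in words */
};

struct rx_cursor {
	uint32_t rsize;        /* words per entry */
	uint32_t maxcnt;       /* total queue size in words */
	uint32_t rhqoff;       /* current offset in words */
	uint32_t *rhf_addr;    /* flags word of the current entry */
	uint64_t rhf;
	uint32_t numpkt;
};

static void rx_cursor_init(struct rx_cursor *c, const struct rx_ring *r)
{
	c->rsize = r->entry_words;
	c->maxcnt = r->num_entries * r->entry_words;
	c->rhqoff = r->head;
	c->rhf_addr = r->hdrq + c->rhqoff;
	c->rhf = *c->rhf_addr;
	c->numpkt = 0;
}
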
437 * @qp: The packet's destination QP
438 * @pkt: The packet itself.
441 * Process the packet's FECN or BECN bits. By now, the packet
446 * normal packet processing to send an ACK with BECN set (or a CNP).
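
The kernel-doc fragment above summarizes the ECN handling: a packet arriving with FECN set should trigger a return notification (an ACK or CNP carrying BECN), while a received BECN tells the local sender to back off. A sketch of the bit test on BTH word 1; the bit positions (FECN in bit 31, BECN in bit 30 of bth[1]) follow the IBTA congestion-notification layout but are assumptions here, as is everything prefixed sk_.

#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>             /* ntohl() for the big-endian BTH word */

#define SK_FECN_BIT (1u << 31)     /* assumed FECN position in bth[1] */
#define SK_BECN_BIT (1u << 30)     /* assumed BECN position in bth[1] */

/* Returns true when the receiver should answer with BECN set (ACK or CNP);
 * *slow_down_sender is set when the peer reported congestion back to us.
 */
static bool sk_handle_ecn(uint32_t bth1_be, bool *slow_down_sender)
{
	uint32_t bth1 = ntohl(bth1_be);

	*slow_down_sender = !!(bth1 & SK_BECN_BIT);
	return !!(bth1 & SK_FECN_BIT);
}
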
549 struct hfi1_packet *packet)
551 struct hfi1_ctxtdata *rcd = packet->rcd;
554 mdata->rsize = packet->rsize;
555 mdata->maxcnt = packet->maxcnt;
556 mdata->ps_head = packet->rhqoff;
612 #define prescan_rxq(rcd, packet) \
615 __prescan_rxq(packet); \
617 static void __prescan_rxq(struct hfi1_packet *packet)
619 struct hfi1_ctxtdata *rcd = packet->rcd;
622 init_ps_mdata(&mdata, packet);
627 packet->rcd->rhf_offset;
644 packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
645 hdr = packet->hdr;
649 packet->ohdr = &hdr->u.oth;
650 packet->grh = NULL;
652 packet->ohdr = &hdr->u.l.oth;
653 packet->grh = &hdr->u.l.grh;
658 if (!hfi1_may_ecn(packet))
661 bth1 = be32_to_cpu(packet->ohdr->bth[1]);
671 hfi1_process_ecn_slowpath(qp, packet, true);
676 packet->ohdr->bth[1] = cpu_to_be32(bth1);
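
__prescan_rxq() walks the header queue ahead of normal processing so congestion marks can be acted on, then cleared from bth[1], before the packets are consumed by the regular handlers. A stand-alone sketch of that non-consuming walk; the queue layout and SK_ECN_MASK are simplifications, and react() stands in for hfi1_process_ecn_slowpath().

#include <stdint.h>
#include <stddef.h>

#define SK_ECN_MASK ((1u << 31) | (1u << 30))  /* assumed FECN|BECN bits */

/* Visit every queued bth[1] word from head to tail without consuming the
 * entries; react to any congestion mark, then clear it so the normal
 * receive path does not handle the mark a second time.
 */
static void sk_prescan(uint32_t *bth1_words, size_t head, size_t tail,
		       size_t qsize, void (*react)(uint32_t bth1))
{
	for (size_t i = head; i != tail; i = (i + 1) % qsize) {
		uint32_t bth1 = bth1_words[i];

		if (bth1 & SK_ECN_MASK) {
			react(bth1);
			bth1_words[i] = bth1 & ~SK_ECN_MASK;
		}
	}
}
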
682 static void process_rcv_qp_work(struct hfi1_packet *packet)
685 struct hfi1_ctxtdata *rcd = packet->rcd;
695 packet->qp = qp;
696 hfi1_send_rc_ack(packet, 0);
712 static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
715 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
717 process_rcv_qp_work(packet);
721 this_cpu_inc(*packet->rcd->dd->rcv_limit);
726 static inline int check_max_packet(struct hfi1_packet *packet, int thread)
730 if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
731 ret = max_packet_exceeded(packet, thread);
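
check_max_packet() and max_packet_exceeded() keep a single interrupt from monopolizing the CPU: after every batch of packets the driver decides whether to hand the rest of the queue to a thread. The (numpkt & (N - 1)) == 0 test only works because the batch size is a power of two. A simplified sketch of the pattern; the batch size and return codes are assumptions standing in for MAX_PKT_RECV and RCV_PKT_OK/RCV_PKT_LIMIT.

#include <stdbool.h>

#define SK_MAX_PKT_RECV 64u        /* assumed batch size; must be 2^n */

enum { SK_PKT_OK, SK_PKT_LIMIT };

/* Called once per packet: after every full batch, ask the caller to bail
 * out to thread context instead of continuing in the interrupt handler.
 */
static int sk_check_budget(unsigned int numpkt, bool in_thread)
{
	if ((numpkt & (SK_MAX_PKT_RECV - 1)) == 0 && !in_thread)
		return SK_PKT_LIMIT;
	return SK_PKT_OK;
}
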
735 static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
739 packet->rcd->dd->ctx0_seq_drop++;
740 /* Set up for the next packet */
741 packet->rhqoff += packet->rsize;
742 if (packet->rhqoff >= packet->maxcnt)
743 packet->rhqoff = 0;
745 packet->numpkt++;
746 ret = check_max_packet(packet, thread);
748 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
749 packet->rcd->rhf_offset;
750 packet->rhf = rhf_to_cpu(packet->rhf_addr);
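
skip_rcv_packet(), process_rcv_packet() and process_rcv_packet_napi() all end with the same three steps: advance rhqoff by one entry, wrap it at maxcnt, and re-derive rhf_addr/rhf for the new position. The same advance as a stand-alone helper, with all offsets and sizes in 32-bit words as in the listing; the two-word RHF assembly at the end is only an approximation of rhf_to_cpu().

#include <stdint.h>

/* Step one entry through a circular receive header queue and fetch the
 * receive header flags (RHF) of the entry now under the cursor.
 */
static uint64_t sk_next_entry(const uint32_t *hdrq, uint32_t *rhqoff,
			      uint32_t rsize, uint32_t maxcnt,
			      uint32_t rhf_offset, const uint32_t **rhf_addr)
{
	*rhqoff += rsize;
	if (*rhqoff >= maxcnt)
		*rhqoff = 0;

	*rhf_addr = hdrq + *rhqoff + rhf_offset;
	return ((uint64_t)(*rhf_addr)[1] << 32) | (*rhf_addr)[0];
}
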
755 static void process_rcv_packet_napi(struct hfi1_packet *packet)
757 packet->etype = rhf_rcv_type(packet->rhf);
760 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
762 packet->etail = rhf_egr_index(packet->rhf);
763 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
764 &packet->updegr);
770 prefetch_range(packet->ebuf,
771 packet->tlen - ((packet->rcd->rcvhdrqentsize -
772 (rhf_hdrq_offset(packet->rhf)
775 packet->rcd->rhf_rcv_function_map[packet->etype](packet);
776 packet->numpkt++;
778 /* Set up for the next packet */
779 packet->rhqoff += packet->rsize;
780 if (packet->rhqoff >= packet->maxcnt)
781 packet->rhqoff = 0;
783 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
784 packet->rcd->rhf_offset;
785 packet->rhf = rhf_to_cpu(packet->rhf_addr);
788 static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
792 packet->etype = rhf_rcv_type(packet->rhf);
795 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
797 packet->ebuf = NULL;
798 if (rhf_use_egr_bfr(packet->rhf)) {
799 packet->etail = rhf_egr_index(packet->rhf);
800 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
801 &packet->updegr);
807 prefetch_range(packet->ebuf,
808 packet->tlen - ((get_hdrqentsize(packet->rcd) -
809 (rhf_hdrq_offset(packet->rhf)
814 * Call a type specific handler for the packet. We
821 packet->rcd->rhf_rcv_function_map[packet->etype](packet);
822 packet->numpkt++;
824 /* Set up for the next packet */
825 packet->rhqoff += packet->rsize;
826 if (packet->rhqoff >= packet->maxcnt)
827 packet->rhqoff = 0;
829 ret = check_max_packet(packet, thread);
831 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
832 packet->rcd->rhf_offset;
833 packet->rhf = rhf_to_cpu(packet->rhf_addr);
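
Both receive paths hand each packet to rcd->rhf_rcv_function_map[packet->etype], a per-context table of handlers indexed by the receive type encoded in the RHF (expected, eager, IB, bypass, error, ...). A sketch of that dispatch-table idiom; the enum values and sk_rcv_* handlers are placeholders, not the driver's table.

#include <stdio.h>

struct sk_packet { unsigned int etype; /* ... */ };

enum sk_rcv_type { SK_RCV_EXPECTED, SK_RCV_EAGER, SK_RCV_IB,
		   SK_RCV_BYPASS, SK_RCV_ERROR, SK_RCV_MAX };

static void sk_rcv_expected(struct sk_packet *p) { (void)p; puts("expected"); }
static void sk_rcv_eager(struct sk_packet *p)    { (void)p; puts("eager"); }
static void sk_rcv_ib(struct sk_packet *p)       { (void)p; puts("ib"); }
static void sk_rcv_bypass(struct sk_packet *p)   { (void)p; puts("bypass"); }
static void sk_rcv_error(struct sk_packet *p)    { (void)p; puts("error"); }

typedef void (*sk_rcv_fn)(struct sk_packet *);

static const sk_rcv_fn sk_rcv_map[SK_RCV_MAX] = {
	[SK_RCV_EXPECTED] = sk_rcv_expected,
	[SK_RCV_EAGER]    = sk_rcv_eager,
	[SK_RCV_IB]       = sk_rcv_ib,
	[SK_RCV_BYPASS]   = sk_rcv_bypass,
	[SK_RCV_ERROR]    = sk_rcv_error,
};

static void sk_dispatch(struct sk_packet *p)
{
	if (p->etype < SK_RCV_MAX && sk_rcv_map[p->etype])
		sk_rcv_map[p->etype](p);
}

Keeping the table per context allows different context types to install different handler sets without adding branches to the hot loop.
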
838 static inline void process_rcv_update(int last, struct hfi1_packet *packet)
846 if (!last && !(packet->numpkt & 0xf)) {
847 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
848 packet->etail, 0, 0);
849 packet->updegr = 0;
851 packet->grh = NULL;
854 static inline void finish_packet(struct hfi1_packet *packet)
857 * Nothing we need to free for the packet.
862 update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
863 packet->etail, rcv_intr_dynamic, packet->numpkt);
867 * handle_receive_interrupt_napi_fp - receive a packet
877 struct hfi1_packet packet;
879 init_packet(rcd, &packet);
880 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
883 while (packet.numpkt < budget) {
884 process_rcv_packet_napi(&packet);
885 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
888 process_rcv_update(0, &packet);
890 hfi1_set_rcd_head(rcd, packet.rhqoff);
892 finish_packet(&packet);
893 return packet.numpkt;
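
handle_receive_interrupt_napi_fp() is the NAPI fast path: it consumes at most budget packets, stops early when the sequence number in the next RHF shows the queue has drained, and returns the count so the NAPI core can decide whether to keep polling or re-enable the interrupt. Its control flow reduces to the sketch below, with sk_queue_empty() and sk_consume_one() as assumed helpers.

#include <stdbool.h>

bool sk_queue_empty(void);   /* checks the expected-sequence/tail condition */
void sk_consume_one(void);   /* processes one packet and advances the cursor */

/* Returns the number of packets handled; a result smaller than budget
 * means the queue drained and polling can stop.
 */
static int sk_napi_poll(int budget)
{
	int numpkt = 0;

	while (numpkt < budget) {
		if (sk_queue_empty())
			break;
		sk_consume_one();
		numpkt++;
	}
	return numpkt;
}
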
902 struct hfi1_packet packet;
904 init_packet(rcd, &packet);
905 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
910 prescan_rxq(rcd, &packet);
913 last = process_rcv_packet(&packet, thread);
914 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
916 process_rcv_update(last, &packet);
918 process_rcv_qp_work(&packet);
919 hfi1_set_rcd_head(rcd, packet.rhqoff);
921 finish_packet(&packet);
929 struct hfi1_packet packet;
931 init_packet(rcd, &packet);
933 if (packet.rhqoff == hdrqtail) {
939 prescan_rxq(rcd, &packet);
942 last = process_rcv_packet(&packet, thread);
943 if (packet.rhqoff == hdrqtail)
945 process_rcv_update(last, &packet);
947 process_rcv_qp_work(&packet);
948 hfi1_set_rcd_head(rcd, packet.rhqoff);
950 finish_packet(&packet);
995 static bool __set_armed_to_active(struct hfi1_packet *packet)
997 u8 etype = rhf_rcv_type(packet->rhf);
1001 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
1002 packet->rhf_addr);
1003 sc = hfi1_9B_get_sc5(hdr, packet->rhf);
1006 packet->rcd,
1007 packet->rhf_addr);
1011 int hwstate = driver_lstate(packet->rcd->ppd);
1013 &packet->rcd->ppd->linkstate_active_work;
1016 dd_dev_info(packet->rcd->dd,
1022 queue_work(packet->rcd->ppd->link_wq, lsaw);
1030 * @packet: the packet structure
1032 * Return true if packet processing needs to bail.
1034 static bool set_armed_to_active(struct hfi1_packet *packet)
1036 if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
1038 return __set_armed_to_active(packet);
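
set_armed_to_active() is a textbook fast-path/slow-path split: the inline wrapper tests the common case (link not in the ARMED state) with likely() and only falls through to the out-of-line __set_armed_to_active() that parses the header and queues the link-state work. The idiom in isolation, using __builtin_expect() (the GCC/Clang primitive behind likely()) and an assumed slow-path helper:

#include <stdbool.h>

#define sk_likely(x) __builtin_expect(!!(x), 1)

bool sk_slow_path(void);   /* assumed out-of-line rare-case handler */

/* Inline wrapper keeps the rare-case code out of the hot receive loop. */
static inline bool sk_maybe_transition(int link_state, int armed_state)
{
	if (sk_likely(link_state != armed_state))
		return false;       /* common case: nothing to do */
	return sk_slow_path();      /* rare case: handled out of line */
}
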
1042 * handle_receive_interrupt - receive a packet
1053 struct hfi1_packet packet;
1061 init_packet(rcd, &packet);
1064 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
1071 if (packet.rhqoff == hdrqtail) {
1082 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
1086 prescan_rxq(rcd, &packet);
1090 /* On to the next packet */
1091 packet.rhqoff += packet.rsize;
1092 packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1093 packet.rhqoff +
1095 packet.rhf = rhf_to_cpu(packet.rhf_addr);
1098 last = skip_rcv_packet(&packet, thread);
1101 if (set_armed_to_active(&packet))
1103 last = process_rcv_packet(&packet, thread);
1107 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
1110 if (packet.rhqoff == hdrqtail)
1120 rhf_rcv_seq(packet.rhf));
1130 process_rcv_update(last, &packet);
1133 process_rcv_qp_work(&packet);
1134 hfi1_set_rcd_head(rcd, packet.rhqoff);
1141 finish_packet(&packet);
1146 * handle_receive_interrupt_napi_sp - receive a packet
1159 struct hfi1_packet packet;
1161 init_packet(rcd, &packet);
1162 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
1165 while (last != RCV_PKT_DONE && packet.numpkt < budget) {
1167 /* On to the next packet */
1168 packet.rhqoff += packet.rsize;
1169 packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1170 packet.rhqoff +
1172 packet.rhf = rhf_to_cpu(packet.rhf_addr);
1175 if (set_armed_to_active(&packet))
1177 process_rcv_packet_napi(&packet);
1180 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
1188 process_rcv_update(last, &packet);
1191 hfi1_set_rcd_head(rcd, packet.rhqoff);
1198 finish_packet(&packet);
1199 return packet.numpkt;
1204 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
1227 /* Received non-SC15 packet implies neighbor_normal */
1244 * Convert a given MTU size to the on-wire MAD packet enumeration.
1306 * MTU is specified per-VL. To ensure that no packet gets
1307 * stuck (due, e.g., to the MTU for the packet's VL being
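
Two separate comments are interleaved here: mtu_to_enum() converts a byte MTU into the on-wire MAD enumeration, and the per-VL note explains why buffer sizing has to honor every VL's MTU so no packet gets stuck. The conversion itself is a small lookup; the sketch below uses the standard IB MTU codes (256->1 through 4096->5, with OPA extending the range upward) and an assumed caller-supplied default.

/* Map a byte MTU to its on-wire enumeration; unknown sizes fall back to
 * the supplied default rather than failing.
 */
static int sk_mtu_to_enum(int mtu, int default_enum)
{
	switch (mtu) {
	case  256: return 1;
	case  512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:   return default_enum;
	}
}
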
1483 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
1485 packet->hdr = (struct hfi1_ib_message_header *)
1486 hfi1_get_msgheader(packet->rcd,
1487 packet->rhf_addr);
1488 packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
1491 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
1493 struct hfi1_pportdata *ppd = packet->rcd->ppd;
1496 if ((!packet->slid) || (!packet->dlid))
1499 /* Compare port lid with incoming packet dlid */
1500 if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
1501 (packet->dlid !=
1503 if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
1508 if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
1512 if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
1514 (packet->sc != 0xF))
1520 static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
1522 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1526 hfi1_setup_ib_header(packet);
1527 hdr = packet->hdr;
1531 packet->ohdr = &hdr->u.oth;
1532 packet->grh = NULL;
1536 packet->ohdr = &hdr->u.l.oth;
1537 packet->grh = &hdr->u.l.grh;
1538 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1540 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1547 /* Query commonly used fields from packet header */
1548 packet->payload = packet->ebuf;
1549 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1550 packet->slid = ib_get_slid(hdr);
1551 packet->dlid = ib_get_dlid(hdr);
1552 if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
1553 (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
1554 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1556 packet->sl = ib_get_sl(hdr);
1557 packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
1558 packet->pad = ib_bth_get_pad(packet->ohdr);
1559 packet->extra_byte = 0;
1560 packet->pkey = ib_bth_get_pkey(packet->ohdr);
1561 packet->migrated = ib_bth_is_migration(packet->ohdr);
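
hfi1_setup_9B_packet() decodes the commonly used header fields exactly once and caches them in the packet struct so later stages (QP lookup, ECN, verbs processing) never re-parse the big-endian LRH/BTH. A user-space sketch of that one-time decode; field positions follow the IBTA 9B layout (SLID/DLID in lrh[3]/lrh[1], SL in bits 7:4 of lrh[0], opcode in the top byte and P_Key in the low 16 bits of bth[0], pad count in bits 21:20), and the sk_* names are invented.

#include <stdint.h>
#include <arpa/inet.h>   /* ntohs()/ntohl() for the big-endian header words */

struct sk_9b_fields {
	uint8_t  opcode, sl, pad;
	uint16_t slid, dlid, pkey;
};

/* lrh: four big-endian 16-bit LRH words; bth0_be: first big-endian BTH word. */
static void sk_decode_9b(const uint16_t lrh[4], uint32_t bth0_be,
			 struct sk_9b_fields *f)
{
	uint32_t bth0 = ntohl(bth0_be);

	f->slid   = ntohs(lrh[3]);
	f->dlid   = ntohs(lrh[1]);
	f->sl     = (ntohs(lrh[0]) >> 4) & 0xf;
	f->opcode = bth0 >> 24;
	f->pad    = (bth0 >> 20) & 0x3;
	f->pkey   = bth0 & 0xffff;
}
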
1569 static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
1573 * compared to an IB packet.
1581 struct hfi1_ctxtdata *rcd = packet->rcd;
1586 packet->hdr = (struct hfi1_16b_header *)
1587 hfi1_get_16B_header(packet->rcd,
1588 packet->rhf_addr);
1589 l4 = hfi1_16B_get_l4(packet->hdr);
1591 packet->ohdr = packet->ebuf;
1592 packet->grh = NULL;
1593 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1594 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1596 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1598 packet->migrated = opa_bth_is_migration(packet->ohdr);
1603 packet->ohdr = packet->ebuf + grh_len;
1604 packet->grh = packet->ebuf;
1605 packet->opcode = ib_bth_get_opcode(packet->ohdr);
1606 packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1608 packet->hlen = hdr_len_by_opcode[packet->opcode] +
1610 packet->migrated = opa_bth_is_migration(packet->ohdr);
1612 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1614 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1618 packet->mgmt = packet->ebuf;
1619 packet->ohdr = NULL;
1620 packet->grh = NULL;
1621 packet->opcode = IB_OPCODE_UD_SEND_ONLY;
1622 packet->pad = OPA_16B_L4_FM_PAD;
1623 packet->hlen = OPA_16B_L4_FM_HLEN;
1624 packet->migrated = false;
1629 /* Query commonly used fields from packet header */
1630 packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
1631 packet->slid = hfi1_16B_get_slid(packet->hdr);
1632 packet->dlid = hfi1_16B_get_dlid(packet->hdr);
1633 if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
1634 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1637 packet->sc = hfi1_16B_get_sc(packet->hdr);
1638 packet->sl = ibp->sc_to_sl[packet->sc];
1639 packet->extra_byte = SIZE_OF_LT;
1640 packet->pkey = hfi1_16B_get_pkey(packet->hdr);
1642 if (hfi1_bypass_ingress_pkt_check(packet))
1647 hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
1652 static void show_eflags_errs(struct hfi1_packet *packet)
1654 struct hfi1_ctxtdata *rcd = packet->rcd;
1655 u32 rte = rhf_rcv_type_err(packet->rhf);
1659 rcd->ctxt, packet->rhf,
1660 packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
1661 packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
1662 packet->rhf & RHF_DC_ERR ? "dc " : "",
1663 packet->rhf & RHF_TID_ERR ? "tid " : "",
1664 packet->rhf & RHF_LEN_ERR ? "len " : "",
1665 packet->rhf & RHF_ECC_ERR ? "ecc " : "",
1666 packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
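
show_eflags_errs() emits one diagnostic line, appending a short name for every error bit set in the RHF by using a string-per-flag ternary directly in the format arguments. The same trick in a stand-alone sketch; the SK_ERR_* bit positions are placeholders, not the hardware's RHF layout.

#include <stdio.h>
#include <stdint.h>

#define SK_ERR_HDR_LEN (1u << 0)
#define SK_ERR_TID     (1u << 1)
#define SK_ERR_LEN     (1u << 2)
#define SK_ERR_ICRC    (1u << 3)

static void sk_show_eflags(unsigned int ctxt, uint64_t rhf)
{
	printf("ctxt %u: rhf 0x%llx: %s%s%s%s\n",
	       ctxt, (unsigned long long)rhf,
	       rhf & SK_ERR_HDR_LEN ? "k_hdr_len " : "",
	       rhf & SK_ERR_TID     ? "tid "       : "",
	       rhf & SK_ERR_LEN     ? "len "       : "",
	       rhf & SK_ERR_ICRC    ? "icrc "      : "");
}
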
1670 void handle_eflags(struct hfi1_packet *packet)
1672 struct hfi1_ctxtdata *rcd = packet->rcd;
1674 rcv_hdrerr(rcd, rcd->ppd, packet);
1675 if (rhf_err_flags(packet->rhf))
1676 show_eflags_errs(packet);
1679 static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
1683 struct hfi1_ctxtdata *rcd = packet->rcd;
1693 trace_hfi1_rcvhdr(packet);
1695 hfi1_setup_ib_header(packet);
1697 packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
1698 packet->grh = NULL;
1700 if (unlikely(rhf_err_flags(packet->rhf))) {
1701 handle_eflags(packet);
1705 qpnum = ib_bth_get_qpn(packet->ohdr);
1710 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
1714 do_work = hfi1_may_ecn(packet);
1716 do_cnp = (packet->opcode != IB_OPCODE_CNP);
1718 packet, do_cnp);
1724 * tlen is whole packet len so we need to
1727 tlen = packet->tlen;
1728 extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
1729 packet->hlen;
1735 skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
1751 ibp = rcd_to_iport(packet->rcd);
1757 * specific handlers for each packet type.
1759 static void process_receive_ib(struct hfi1_packet *packet)
1761 if (hfi1_setup_9B_packet(packet))
1764 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1767 trace_hfi1_rcvhdr(packet);
1769 if (unlikely(rhf_err_flags(packet->rhf))) {
1770 handle_eflags(packet);
1774 hfi1_ib_rcv(packet);
1777 static void process_receive_bypass(struct hfi1_packet *packet)
1779 struct hfi1_devdata *dd = packet->rcd->dd;
1781 if (hfi1_setup_bypass_packet(packet))
1784 trace_hfi1_rcvhdr(packet);
1786 if (unlikely(rhf_err_flags(packet->rhf))) {
1787 handle_eflags(packet);
1791 if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
1792 hfi1_16B_rcv(packet);
1799 u64 *flits = packet->ebuf;
1801 if (flits && !(packet->rhf & RHF_LEN_ERR)) {
1804 packet->tlen > sizeof(flits[0]) ?
1813 static void process_receive_error(struct hfi1_packet *packet)
1815 /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
1817 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
1818 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
1819 packet->rhf & RHF_DC_ERR)))
1822 hfi1_setup_ib_header(packet);
1823 handle_eflags(packet);
1825 if (unlikely(rhf_err_flags(packet->rhf)))
1826 dd_dev_err(packet->rcd->dd,
1827 "Unhandled error packet received. Dropping.\n");
1830 static void kdeth_process_expected(struct hfi1_packet *packet)
1832 hfi1_setup_9B_packet(packet);
1833 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1836 if (unlikely(rhf_err_flags(packet->rhf))) {
1837 struct hfi1_ctxtdata *rcd = packet->rcd;
1839 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1843 hfi1_kdeth_expected_rcv(packet);
1846 static void kdeth_process_eager(struct hfi1_packet *packet)
1848 hfi1_setup_9B_packet(packet);
1849 if (unlikely(hfi1_dbg_should_fault_rx(packet)))
1852 trace_hfi1_rcvhdr(packet);
1853 if (unlikely(rhf_err_flags(packet->rhf))) {
1854 struct hfi1_ctxtdata *rcd = packet->rcd;
1856 show_eflags_errs(packet);
1857 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
1861 hfi1_kdeth_eager_rcv(packet);
1864 static void process_receive_invalid(struct hfi1_packet *packet)
1866 dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
1867 rhf_rcv_type(packet->rhf));
1874 struct hfi1_packet packet;
1889 init_packet(rcd, &packet);
1890 init_ps_mdata(&mdata, &packet);
1911 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
1912 hdr = packet.hdr;
1917 packet.ohdr = &hdr->u.oth;
1919 packet.ohdr = &hdr->u.l.oth;
1923 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
1924 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
1925 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));