/*
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}
/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
	*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
			     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
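	/*
	 * The encoding is a power-of-two multiple of PAGE_SIZE, offset by
	 * one: e.g. with 4 KB pages, a 64 KB eager buffer encodes as
	 * ilog2(64K / 4K) + 1 = 5.
	 */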
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs, preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated as to whether processing of those bits
 * should be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

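	/*
	 * Pick the congestion-control peer (rlid/rqpn) and service type by
	 * QP type: connected QPs (RC/UC) take them from the QP state, while
	 * datagram QPs take them from the packet itself.
	 */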
	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
				   packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

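/*
 * process_rcv_qp_work - flush this context's qp_wait_list: send any RC
 * ACK/NAK responses that were deferred during packet processing,
 * reschedule QPs whose sends were held off, and drop the reference taken
 * when each QP was queued.
 */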
static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static void process_rcv_packet_napi(struct hfi1_packet *packet)
{
	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->etail = rhf_egr_index(packet->rhf);
	packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				  &packet->updegr);
	/*
	 * Prefetch the contents of the eager buffer.  It is
	 * OK to send a negative length to prefetch_range().
	 * The +2 is the size of the RHF.
	 */
	prefetch_range(packet->ebuf,
		       packet->tlen - ((packet->rcd->rcvhdrqentsize -
				       (rhf_hdrq_offset(packet->rhf)
					+ 2)) * 4));

	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
}

static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}
	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt.
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

/*
 * handle_receive_interrupt_napi_fp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for receive interrupt.
 * This is the fast path interrupt handler
 * when executing in the NAPI softirq environment.
 */
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (packet.numpkt < budget) {
		process_rcv_packet_napi(&packet);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			break;

		process_rcv_update(0, &packet);
	}
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return packet.numpkt;
}

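/*
 * The receive handlers below detect "queue empty" in one of two ways:
 * with DMA_RTAIL the hardware DMAs the header queue tail to host memory
 * and the loop stops when the software head catches up with it; without
 * DMA_RTAIL, each RHF carries a sequence number that is checked against
 * the expected per-context sequence count.
 */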
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context. Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
		hfi1_rcd_get(rcd);
		hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
			hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
	}
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			rcd->do_interrupt = rcd->slow_handler;

		hfi1_rcd_put(rcd);
	}
}

static bool __set_armed_to_active(struct hfi1_packet *packet)
{
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(packet->rcd->ppd);
		struct work_struct *lsaw =
				&packet->rcd->ppd->linkstate_active_work;

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(packet->rcd->dd,
				    "Unexpected link state %s\n",
				    opa_lstate_name(hwstate));
			return false;
		}

		queue_work(packet->rcd->ppd->link_wq, lsaw);
		return true;
	}
	return false;
}

/**
 * set_armed_to_active - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{
	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
		return false;
	return __set_armed_to_active(packet);
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	if (!rcd->rcvhdrq)
		return RCV_PKT_OK;
	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}
		}

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}
		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * handle_receive_interrupt_napi_sp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler
 * when executing in the NAPI softirq environment.
 */
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_devdata *dd = rcd->dd;
	int last = RCV_PKT_OK;
	bool needset = true;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (last != RCV_PKT_DONE && packet.numpkt < budget) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			process_rcv_packet_napi(&packet);
		}

		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}

		process_rcv_update(last, &packet);
	}

	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return packet.numpkt;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state. We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size. We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		del_timer_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
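/*
 * For example, hfi1_start_led_override(ppd, 500, 500) blinks the LED at
 * 1 Hz (500 ms on, 500 ms off) until shutdown_led_override() is called.
 */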
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}

static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}

static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with permissive DLID always on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}

static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct ib_header *hdr;
	u8 lnh;

	hfi1_setup_ib_header(packet);
	hdr = packet->hdr;

	lnh = ib_get_lnh(hdr);
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
		packet->grh = NULL;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		packet->grh = &hdr->u.l.grh;
		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf;
	packet->opcode = ib_bth_get_opcode(packet->ohdr);
	packet->slid = ib_get_slid(hdr);
	packet->dlid = ib_get_dlid(hdr);
	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remaining is in
	 * the eager buffer. We chose 16 since hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
				opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
					    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

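/*
 * show_eflags_errs - decode the RHF error flags of a bad packet into a
 * human-readable log line for the packet's receive context.
 */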
static void show_eflags_errs(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
		   rcd->ctxt, packet->rhf,
		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		   packet->rhf & RHF_DC_ERR ? "dc " : "",
		   packet->rhf & RHF_TID_ERR ? "tid " : "",
		   packet->rhf & RHF_LEN_ERR ? "len " : "",
		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		   rte);
}

void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		show_eflags_errs(packet);
}

static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;
	struct net_device *netdev;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct napi_struct *napi = rcd->napi;
	struct sk_buff *skb;
	struct hfi1_netdev_rxq *rxq = container_of(napi,
			struct hfi1_netdev_rxq, napi);
	u32 extra_bytes;
	u32 tlen, qpnum;
	bool do_work, do_cnp;
	struct hfi1_ipoib_dev_priv *priv;

	trace_hfi1_rcvhdr(packet);

	hfi1_setup_ib_header(packet);

	packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
	packet->grh = NULL;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	qpnum = ib_bth_get_qpn(packet->ohdr);
	netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
	if (!netdev)
		goto drop_no_nd;

	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
	trace_ctxt_rsm_hist(rcd->ctxt);

	/* handle congestion notifications */
	do_work = hfi1_may_ecn(packet);
	if (unlikely(do_work)) {
		do_cnp = (packet->opcode != IB_OPCODE_CNP);
		(void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
						packet, do_cnp);
	}

	/*
	 * The split point is after the last byte of the DETH, so strip the
	 * padding, CRC, and ICRC. tlen is the whole packet length, so the
	 * header size must be subtracted as well.
	 */
	tlen = packet->tlen;
	extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
		      packet->hlen;
	if (unlikely(tlen < extra_bytes))
		goto drop;

	tlen -= extra_bytes;

	skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
	if (unlikely(!skb))
		goto drop;

	priv = hfi1_ipoib_priv(netdev);
	hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

	return;

drop:
	++netdev->stats.rx_dropped;
drop_no_nd:
	ibp = rcd_to_iport(packet->rcd);
	++ibp->rvp.n_pkt_drops;
}

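/*
 * Receive dispatch indexes rcd->rhf_rcv_function_map[] by the RHF receive
 * type; the normal and netdev tables at the bottom of this file select
 * which of the handlers below serves each type.
 */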
/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
static void process_receive_ib(struct hfi1_packet *packet)
{
	if (hfi1_setup_9B_packet(packet))
		return;

	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	hfi1_ib_rcv(packet);
}

static void process_receive_bypass(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;

	if (hfi1_setup_bypass_packet(packet))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
		hfi1_16B_rcv(packet);
	} else {
		dd_dev_err(dd,
			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			u64 *flits = packet->ebuf;

			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
				dd->err_info_rcvport.packet_flit1 = flits[0];
				dd->err_info_rcvport.packet_flit2 =
					packet->tlen > sizeof(flits[0]) ?
					flits[1] : 0;
			}
			dd->err_info_rcvport.status_and_code |=
				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
		}
	}
}

static void process_receive_error(struct hfi1_packet *packet)
{
	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
	if (unlikely(
		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
		  packet->rhf & RHF_DC_ERR)))
		return;

	hfi1_setup_ib_header(packet);
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");
}

static void kdeth_process_expected(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_expected_rcv(packet);
}

static void kdeth_process_eager(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);
	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		show_eflags_errs(packet);
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_eager_rcv(packet);
}

static void process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
}

#define HFI1_RCVHDR_DUMP_MAX  5

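/*
 * seqfile_dump_rcd - dump receive context state and up to
 * HFI1_RCVHDR_DUMP_MAX pending rcvhdrq entries to a seq_file.
 */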
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_packet packet;
	struct ps_mdata mdata;
	int i;

	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
		   get_dma_rtail_setting(rcd) ?
		   "dma_rtail" : "nodma_rtail",
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
		   RCV_HDR_HEAD_HEAD_MASK,
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
		   rcd->head);

	init_packet(rcd, &packet);
	init_ps_mdata(&mdata, &packet);

	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 rcd->rhf_offset;
		struct ib_header *hdr;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		u8 opcode;
		u32 psn;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype > RHF_RCV_TYPE_IB)
			goto next;

		packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
		hdr = packet.hdr;

		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			packet.ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH)
			packet.ohdr = &hdr->u.l.oth;
		else
			goto next; /* just in case */

		opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
		qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
		psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));

		seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
			   mdata.ps_head, opcode, qpn, psn);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
	[RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
	[RHF_RCV_TYPE_IB] = process_receive_ib,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};

const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
	[RHF_RCV_TYPE_EAGER] = process_receive_invalid,
	[RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};