/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

#include "hfi.h"
#include "pio.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */

/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len);
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
				u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       unsigned long arg);
static vm_fault_t vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};

static const struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};

/*
 * Types of memories mapped into user processes' space
 */
enum mmap_types {
	PIO_BUFS = 1,
	PIO_BUFS_SOP,
	PIO_CRED,
	RCV_HDRQ,
	RCV_EGRBUF,
	UREGS,
	EVENTS,
	STATUS,
	RTAIL,
	SUBCTXT_UREGS,
	SUBCTXT_RCV_HDRQ,
	SUBCTXT_EGRBUF,
	SDMA_COMP
};

/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00

#define HFI1_MMAP_TOKEN_SET(field, val)	\
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
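
/*
 * The resulting 64-bit token is handed to user space (see get_base_info())
 * and comes back as the mmap() offset for this device file.  Per the masks
 * and shifts above, it is laid out as:
 *
 *	bits  0-11	OFFSET  - offset within the mapped page
 *	bits 12-15	SUBCTXT - sub-context index
 *	bits 16-23	CTXT    - receive context number
 *	bits 24-27	TYPE    - one of enum mmap_types
 *	bits 32-63	MAGIC   - HFI1_MMAP_MAGIC, checked by is_valid_mmap()
 */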

#define dbg(fmt, ...)				\
	pr_info(fmt, ##__VA_ARGS__)

static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}

static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd || init_srcu_struct(&fd->pq_srcu))
		goto nomem;
	spin_lock_init(&fd->pq_rcu_lock);
	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);
	fd->rec_cpu_num = -1; /* no cpu affinity by default */
	fd->dd = dd;
	fp->private_data = fd;
	return 0;
nomem:
	kfree(fd);
	fp->private_data = NULL;
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
	return -ENOMEM;
}
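
/*
 * Main ioctl dispatcher for the user device file.  Every command other
 * than HFI1_IOCTL_ASSIGN_CTXT and HFI1_IOCTL_GET_VERS requires that a
 * receive context has already been assigned to this file descriptor.
 */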
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;
	int uval = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt)
			sc_return_credits(uctxt->sc);
		break;

	case HFI1_IOCTL_TID_UPDATE:
		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_FREE:
		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_TID_INVAL_READ:
		ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
		break;

	case HFI1_IOCTL_RECV_CTRL:
		ret = manage_rcvq(uctxt, fd->subctxt, arg);
		break;

	case HFI1_IOCTL_POLL_TYPE:
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;

	case HFI1_IOCTL_ACK_EVENT:
		ret = user_event_ack(uctxt, fd->subctxt, arg);
		break;

	case HFI1_IOCTL_SET_PKEY:
		ret = set_ctxt_pkey(uctxt, arg);
		break;

	case HFI1_IOCTL_CTXT_RESET:
		ret = ctxt_reset(uctxt);
		break;

	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;
	int idx;

	if (!HFI1_CAP_IS_KSET(SDMA))
		return -EINVAL;
	idx = srcu_read_lock(&fd->pq_srcu);
	pq = srcu_dereference(fd->pq, &fd->pq_srcu);
	if (!cq || !pq) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -EIO;
	}

	if (!iter_is_iovec(from) || !dim) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -EINVAL;
	}

	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
		srcu_read_unlock(&fd->pq_srcu, idx);
		return -ENOSPC;
	}

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	srcu_read_unlock(&fd->pq_srcu, idx);
	return reqs;
}

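/*
 * mmap() handler.  The mmap offset is a token built by HFI1_MMAP_TOKEN()
 * that selects what to map.  Depending on the type, the region is chip
 * MMIO (mapped with io_remap_pfn_range()), kernel memory with a known
 * physical address (mapped with remap_pfn_range()), or a vmalloc'ed
 * buffer that is faulted in on demand through vm_ops.
 */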
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			   (type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = rcvhdrq_size(uctxt);
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffer needs to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)
			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
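
/*
 * For the "vmf" mappings above (EVENTS, SUBCTXT_* and SDMA_COMP), memaddr
 * is a kernel virtual address and vm_pgoff is set to its pfn rather than
 * remapping anything up front; vma_fault() below resolves the backing
 * pages with vmalloc_to_page() when user space first touches them.
 */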

/*
 * Local (non-chip) user memory is not mapped right away; it is mapped
 * on demand as it is accessed by user-level code.
 */
static vm_fault_t vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	__poll_t pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = EPOLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = EPOLLERR;

	return pollflag;
}

static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata, uctxt);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It cannot be
	 * cleared until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
	*ev = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS |
		     HFI1_RCVCTRL_URGENT_DIS, uctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt);
	/*
	 * If a send context is allocated, reset context integrity
	 * checks to default and disable the send context.
	 */
	if (uctxt->sc) {
		sc_disable(uctxt->sc);
		set_pio_integrity(uctxt->sc);
	}

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;

	deallocate_ctxt(uctxt);
done:

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	cleanup_srcu_struct(&fdata->pq_srcu);
	kfree(fdata);
	return 0;
}

/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 *
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
	 * has been completed.
	 */
	ret = wait_event_interruptible(
		fd->uctxt->wait,
		!test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
	}

	if (ret) {
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
	}

	return ret;
}

static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	int ret;
	unsigned int swmajor;
	struct hfi1_ctxtdata *uctxt = NULL;
	struct hfi1_user_info uinfo;

	if (fd->uctxt)
		return -EINVAL;

	if (sizeof(uinfo) != len)
		return -EINVAL;

	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
		return -EFAULT;

	swmajor = uinfo.userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	/*
	 * Acquire the mutex to protect against multiple creations of what
	 * could be a shared base context.
	 */
	mutex_lock(&hfi1_mutex);
	/*
	 * Get a sub context if available (fd->uctxt will be set).
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = find_sub_ctxt(fd, &uinfo);

	/*
	 * Allocate a base context if context sharing is not required or a
	 * sub context wasn't found.
	 */
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (ret)
			deallocate_ctxt(uctxt);
		break;
	case 1:
		ret = complete_subctxt(fd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
		return 0;

	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* a non-zero match result ends the search */
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen.  It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	if (!dd->freectxts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "user ctxtdata allocation failed\n");
		return ret;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Setup sub context information if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper base context.
	 * NOTE: _set_bit() can be used here because the context creation is
	 * protected by the mutex (rather than the spin_lock), and will be the
	 * very first instance of this context.
	 */
	__set_bit(0, uctxt->in_use_ctxts);
	if (uinfo->subctxt_cnt)
		init_subctxts(uctxt, uinfo);
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(uctxt);
	return ret;
}

static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
	mutex_lock(&hfi1_mutex);
	hfi1_stats.sps_ctxts--;
	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo)
{
	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}

static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;

	return ret;
}

static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when it changes (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (hfi1_rcvhdrtail_kvaddr(uctxt))
		clear_rcvhdrtail(uctxt);

	/* Setup J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt.  Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}

static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	if (sizeof(cinfo) != len)
		return -EINVAL;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->use_mn)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
	cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
	if (copy_to_user((void __user *)arg, &cinfo, len))
		return -EFAULT;

	return 0;
}

static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt)
{
	int ret;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
	if (ret)
		return ret;

	ret = hfi1_user_exp_rcv_init(fd, uctxt);
	if (ret)
		hfi1_user_sdma_free_queues(fd, uctxt);

	return ret;
}

static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
		goto done;

	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto done;

	ret = init_user_ctxt(fd, uctxt);
	if (ret) {
		hfi1_free_ctxt_rcv_groups(uctxt);
		goto done;
	}

	user_init(uctxt);

	/* Now that the context is set up, the fd can get a reference. */
	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);

done:
	if (uctxt->subctxt_cnt) {
		/*
		 * On error, set the failed bit so sub-contexts will clean up
		 * correctly.
		 */
		if (ret)
			set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);

		/*
		 * Base context is done (successfully or not), notify anybody
		 * using a sub-context that is waiting for this completion.
		 */
		clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return ret;
}
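
/*
 * Fill in the hfi1_base_info structure returned by HFI1_IOCTL_USER_INFO.
 * The *_bufbase fields are HFI1_MMAP_TOKEN() values; user space passes
 * them back as the offset argument of an mmap(..., MAP_SHARED, fd, token)
 * call on this same file descriptor, handled by hfi1_file_mmap() above.
 */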
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned offset;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

	if (sizeof(binfo) != len)
		return -EINVAL;

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = RVT_KDETH_QP_PREFIX;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].dma);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
				sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}

	if (copy_to_user((void __user *)arg, &binfo, len))
		return -EFAULT;

	return 0;
}

/**
 * user_exp_rcv_setup - Set up the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_setup.
 *
 */
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
	if (!ret) {
		/*
		 * Copy the number of tidlist entries we used
		 * and the length of the buffer we registered.
		 */
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;

		addr = arg + offsetof(struct hfi1_tid_info, length);
		if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
					 sizeof(tinfo.length)))
			ret = -EFAULT;

		if (ret)
			hfi1_user_exp_rcv_invalid(fd, &tinfo);
	}

	return ret;
}

/**
 * user_exp_rcv_clear - Clear the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * The hfi1_user_exp_rcv_clear() can be called from the error path.  Because
 * of this, we need to use this wrapper to copy the user space information
 * before doing the clear.
 */
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
			      u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
	if (!ret) {
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			return -EFAULT;
	}

	return ret;
}

/**
 * user_exp_rcv_invalid - Invalidate the given tid rcv list
 * @fd: file data of the current driver instance
 * @arg: ioctl argument for user space information
 * @len: length of data structure associated with ioctl command
 *
 * Wrapper to validate ioctl information before doing _rcv_invalid.
 *
 */
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
				u32 len)
{
	int ret;
	unsigned long addr;
	struct hfi1_tid_info tinfo;

	if (sizeof(tinfo) != len)
		return -EINVAL;

	if (!fd->invalid_tids)
		return -EINVAL;

	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
		return -EFAULT;

	ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
	if (ret)
		return ret;

	addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
	if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
			 sizeof(tinfo.tidcnt)))
		ret = -EFAULT;

	return ret;
}

static __poll_t poll_urgent(struct file *fp,
			    struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	__poll_t pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = EPOLLIN | EPOLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t poll_next(struct file *fp,
			  struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	__poll_t pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
		pollflag = 0;
	} else {
		pollflag = EPOLLIN | EPOLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	u16 ctxt;

	if (!dd->events)
		return -EINVAL;

	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
		if (uctxt) {
			unsigned long *evs;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			evs = dd->events + uctxt_offset(uctxt);
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
			hfi1_rcd_put(uctxt);
		}
	}

	return 0;
}

/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @arg: user pointer to the start/stop action
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.
 * start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       unsigned long arg)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;
	int start_stop;

	if (subctxt)
		return 0;

	if (get_user(start_stop, (int __user *)arg))
		return -EFAULT;

	/* atomically clear or set the receive enable for this ctxt */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (hfi1_rcvhdrtail_kvaddr(uctxt))
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
	/* always; new head should be equal to new tail; see above */

	return 0;
}

/*
 * Clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long arg)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;
	unsigned long events;

	if (!dd->events)
		return 0;

	if (get_user(events, (unsigned long __user *)arg))
		return -EFAULT;

	evs = dd->events + uctxt_offset(uctxt) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}

static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
{
	int i;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 pkey;

	if (!HFI1_CAP_IS_USET(PKEY_CHECK))
		return -EPERM;

	if (get_user(pkey, (u16 __user *)arg))
		return -EFAULT;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i])
			return hfi1_set_ctxt_pkey(dd, uctxt, pkey);

	return -ENOENT;
}

/**
 * ctxt_reset - Reset the user context
 * @uctxt: valid user context
 */
static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
{
	struct send_context *sc;
	struct hfi1_devdata *dd;
	int ret = 0;

	if (!uctxt || !uctxt->dd || !uctxt->sc)
		return -EINVAL;

	/*
	 * There is no protection here. User level has to guarantee that
	 * no one will be writing to the send context while it is being
	 * re-initialized.  If user level breaks that guarantee, it will
	 * break its own context and no one else's.
	 */
	dd = uctxt->dd;
	sc = uctxt->sc;

	/*
	 * Wait until the interrupt handler has marked the context as
	 * halted or frozen.  Report error if we time out.
	 */
	wait_event_interruptible_timeout(
		sc->halt_wait, (sc->flags & SCF_HALTED),
		msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
	if (!(sc->flags & SCF_HALTED))
		return -ENOLCK;

	/*
	 * If the send context was halted due to a Freeze, wait until the
	 * device has been "unfrozen" before resetting the context.
	 */
	if (sc->flags & SCF_FROZEN) {
		wait_event_interruptible_timeout(
			dd->event_queue,
			!(READ_ONCE(dd->flags) & HFI1_FROZEN),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (dd->flags & HFI1_FROZEN)
			return -ENOLCK;

		if (dd->flags & HFI1_FORCED_FREEZE)
			/*
			 * Don't allow context reset if we are in a
			 * forced freeze
			 */
			return -ENODEV;

		sc_disable(sc);
		ret = sc_enable(sc);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
	} else {
		ret = sc_restart(sc);
	}
	if (!ret)
		sc_return_credits(sc);

	return ret;
}

static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->verbs_dev.rdi.ibdev.dev.kobj);
	if (ret)
		user_remove(dd);

	return ret;
}

/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}