Lines Matching refs:uctxt

42 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
45 struct hfi1_ctxtdata *uctxt);
46 static void user_init(struct hfi1_ctxtdata *uctxt);
56 struct hfi1_ctxtdata *uctxt);
57 static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
64 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
67 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
69 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
70 static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
71 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
184 struct hfi1_ctxtdata *uctxt = fd->uctxt;
191 !uctxt)
208 if (uctxt)
209 sc_return_credits(uctxt->sc);
225 ret = manage_rcvq(uctxt, fd->subctxt, arg);
231 uctxt->poll_type = (typeof(uctxt->poll_type))uval;
235 ret = user_event_ack(uctxt, fd->subctxt, arg);
239 ret = set_ctxt_pkey(uctxt, arg);
243 ret = ctxt_reset(uctxt);
279 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
321 struct hfi1_ctxtdata *uctxt = fd->uctxt;
333 if (!is_valid_mmap(token) || !uctxt ||
338 dd = uctxt->dd;
342 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
359 (uctxt->sc->hw_context * BIT(16))) +
367 memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
384 cr_page_offset = ((u64)uctxt->sc->hw_free -
385 (u64)dd->cr_base[uctxt->numa_id].va) &
387 memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset;
388 memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset;
401 memlen = rcvhdrq_size(uctxt);
402 memvirt = uctxt->rcvhdrq;
403 memdma = uctxt->rcvhdrq_dma;
414 memlen = uctxt->egrbufs.size;
436 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
437 memlen = uctxt->egrbufs.buffers[i].len;
438 memvirt = uctxt->egrbufs.buffers[i].addr;
439 memdma = uctxt->egrbufs.buffers[i].dma;
464 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
480 (dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
507 if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
512 memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
513 memdma = uctxt->rcvhdrqtailaddr_dma;
517 memaddr = (u64)uctxt->subctxt_uregbase;
523 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
524 memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
529 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
530 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
555 uctxt->ctxt, fd->subctxt,
611 struct hfi1_ctxtdata *uctxt;
614 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
615 if (!uctxt)
617 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
619 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
630 struct hfi1_ctxtdata *uctxt = fdata->uctxt;
638 if (!uctxt)
641 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
645 hfi1_user_sdma_free_queues(fdata, uctxt);
654 * fdata->uctxt is used in the above cleanup. It is not ready to be
657 fdata->uctxt = NULL;
658 hfi1_rcd_put(uctxt);
664 ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
668 __clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
669 if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
686 HFI1_RCVCTRL_URGENT_DIS, uctxt);
688 hfi1_clear_ctxt_jkey(dd, uctxt);
693 if (uctxt->sc) {
694 sc_disable(uctxt->sc);
695 set_pio_integrity(uctxt->sc);
698 hfi1_free_ctxt_rcv_groups(uctxt);
699 hfi1_clear_ctxt_pkey(dd, uctxt);
701 uctxt->event_flags = 0;
703 deallocate_ctxt(uctxt);
752 fd->uctxt->wait,
753 !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));
755 if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
760 fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
761 ret = init_user_ctxt(fd, fd->uctxt);
766 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
768 hfi1_rcd_put(fd->uctxt);
769 fd->uctxt = NULL;
779 struct hfi1_ctxtdata *uctxt = NULL;
782 if (fd->uctxt)
804 * Get a sub context if available (fd->uctxt will be set).
814 ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
821 ret = setup_base_ctxt(fd, uctxt);
823 deallocate_ctxt(uctxt);
839 * @uctxt: context to compare uinfo to.
846 struct hfi1_ctxtdata *uctxt)
853 if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
857 if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
858 uctxt->jkey != generate_jkey(current_uid()) ||
859 uctxt->subctxt_id != uinfo->subctxt_id ||
860 uctxt->subctxt_cnt != uinfo->subctxt_cnt)
864 if (uctxt->userversion != uinfo->userversion)
869 if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
875 subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
877 if (subctxt >= uctxt->subctxt_cnt) {
883 __set_bit(fd->subctxt, uctxt->in_use_ctxts);
886 fd->uctxt = uctxt;
887 hfi1_rcd_get(uctxt);
909 struct hfi1_ctxtdata *uctxt;
918 uctxt = hfi1_rcd_get_by_index(dd, i);
919 if (uctxt) {
920 ret = match_ctxt(fd, uinfo, uctxt);
921 hfi1_rcd_put(uctxt);
935 struct hfi1_ctxtdata *uctxt;
961 ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
967 uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
968 uctxt->numa_id);
973 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
974 if (!uctxt->sc) {
978 hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index,
979 uctxt->sc->hw_context);
980 ret = sc_enable(uctxt->sc);
993 __set_bit(0, uctxt->in_use_ctxts);
995 init_subctxts(uctxt, uinfo);
996 uctxt->userversion = uinfo->userversion;
997 uctxt->flags = hfi1_cap_mask; /* save current flag state */
998 init_waitqueue_head(&uctxt->wait);
999 strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1000 memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1001 uctxt->jkey = generate_jkey(current_uid());
1010 *rcd = uctxt;
1015 hfi1_free_ctxt(uctxt);
1019 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
1023 if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
1024 aspm_enable_all(uctxt->dd);
1027 hfi1_free_ctxt(uctxt);
1030 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
1033 uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1034 uctxt->subctxt_id = uinfo->subctxt_id;
1035 set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1038 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1041 u16 num_subctxts = uctxt->subctxt_cnt;
1043 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1044 if (!uctxt->subctxt_uregbase)
1048 uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
1050 if (!uctxt->subctxt_rcvhdr_base) {
1055 uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1057 if (!uctxt->subctxt_rcvegrbuf) {
1065 vfree(uctxt->subctxt_rcvhdr_base);
1066 uctxt->subctxt_rcvhdr_base = NULL;
1068 vfree(uctxt->subctxt_uregbase);
1069 uctxt->subctxt_uregbase = NULL;
1074 static void user_init(struct hfi1_ctxtdata *uctxt)
1079 uctxt->urgent = 0;
1080 uctxt->urgent_poll = 0;
1093 if (hfi1_rcvhdrtail_kvaddr(uctxt))
1094 clear_rcvhdrtail(uctxt);
1097 hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
1101 if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
1108 if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1110 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1112 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1120 if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
1124 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
1130 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1136 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
1138 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
1139 HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
1145 cinfo.unit = uctxt->dd->unit;
1146 cinfo.ctxt = uctxt->ctxt;
1148 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1149 uctxt->dd->rcv_entries.group_size) +
1150 uctxt->expected_count;
1151 cinfo.credits = uctxt->sc->credits;
1152 cinfo.numa_node = uctxt->numa_id;
1154 cinfo.send_ctxt = uctxt->sc->hw_context;
1156 cinfo.egrtids = uctxt->egrbufs.alloced;
1157 cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
1158 cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
1160 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1162 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
1170 struct hfi1_ctxtdata *uctxt)
1174 ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
1178 ret = hfi1_user_exp_rcv_init(fd, uctxt);
1180 hfi1_user_sdma_free_queues(fd, uctxt);
1186 struct hfi1_ctxtdata *uctxt)
1188 struct hfi1_devdata *dd = uctxt->dd;
1191 hfi1_init_ctxt(uctxt->sc);
1194 ret = hfi1_create_rcvhdrq(dd, uctxt);
1198 ret = hfi1_setup_eagerbufs(uctxt);
1203 if (uctxt->subctxt_cnt)
1204 ret = setup_subctxt(uctxt);
1208 ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
1212 ret = init_user_ctxt(fd, uctxt);
1214 hfi1_free_ctxt_rcv_groups(uctxt);
1218 user_init(uctxt);
1221 fd->uctxt = uctxt;
1222 hfi1_rcd_get(uctxt);
1225 if (uctxt->subctxt_cnt) {
1231 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
1237 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1238 wake_up(&uctxt->wait);
1247 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1248 struct hfi1_devdata *dd = uctxt->dd;
1251 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
1260 binfo.jkey = uctxt->jkey;
1267 offset = ((u64)uctxt->sc->hw_free -
1268 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
1269 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1271 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1273 uctxt->sc->base_addr);
1275 uctxt->ctxt,
1277 uctxt->sc->base_addr);
1278 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1280 uctxt->rcvhdrq);
1281 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1283 uctxt->egrbufs.rcvtids[0].dma);
1284 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1290 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1292 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
1294 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1297 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1301 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1303 if (uctxt->subctxt_cnt) {
1305 uctxt->ctxt,
1308 uctxt->ctxt,
1311 uctxt->ctxt,
1441 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1442 struct hfi1_devdata *dd = uctxt->dd;
1445 poll_wait(fp, &uctxt->wait, pt);
1448 if (uctxt->urgent != uctxt->urgent_poll) {
1450 uctxt->urgent_poll = uctxt->urgent;
1453 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1464 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1465 struct hfi1_devdata *dd = uctxt->dd;
1468 poll_wait(fp, &uctxt->wait, pt);
1471 if (hdrqempty(uctxt)) {
1472 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1473 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
1490 struct hfi1_ctxtdata *uctxt;
1499 uctxt = hfi1_rcd_get_by_index(dd, ctxt);
1500 if (uctxt) {
1507 evs = dd->events + uctxt_offset(uctxt);
1509 for (i = 1; i < uctxt->subctxt_cnt; i++)
1511 hfi1_rcd_put(uctxt);
1520 * @uctxt: the context
1528 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
1531 struct hfi1_devdata *dd = uctxt->dd;
1551 if (hfi1_rcvhdrtail_kvaddr(uctxt))
1552 clear_rcvhdrtail(uctxt);
1557 hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
1568 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
1572 struct hfi1_devdata *dd = uctxt->dd;
1582 evs = dd->events + uctxt_offset(uctxt) + subctxt;
1592 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
1595 struct hfi1_pportdata *ppd = uctxt->ppd;
1596 struct hfi1_devdata *dd = uctxt->dd;
1610 return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
1617 * @uctxt: valid user context
1619 static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
1625 if (!uctxt || !uctxt->dd || !uctxt->sc)
1634 dd = uctxt->dd;
1635 sc = uctxt->sc;
1668 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
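
The matches above pair hfi1_rcd_get() with hfi1_rcd_put(): a file descriptor takes a reference when it binds to a context (lines 886 and 887) and drops it when it detaches (lines 657 and 658, and line 768 on the error path). Below is a minimal, userspace-compilable sketch of that reference-count pairing, assuming simplified stand-in types; struct ctxtdata, struct filedata, rcd_get(), and rcd_put() are illustrative names, not the driver's real definitions.

/*
 * Hypothetical sketch of the get/put pairing seen in the listing.
 * The types and helpers are simplified stand-ins for hfi1_ctxtdata,
 * hfi1_filedata, hfi1_rcd_get() and hfi1_rcd_put().
 */
#include <stdio.h>
#include <stdlib.h>

struct ctxtdata {
	int refcount;           /* stands in for the kref inside the real context */
	unsigned int ctxt;      /* context number, as printed in the traces */
};

struct filedata {
	struct ctxtdata *uctxt; /* per-open handle pointing at a shared context */
};

static void rcd_get(struct ctxtdata *uctxt)
{
	uctxt->refcount++;
}

static void rcd_put(struct ctxtdata *uctxt)
{
	if (--uctxt->refcount == 0) {
		printf("ctxt %u released\n", uctxt->ctxt);
		free(uctxt);
	}
}

int main(void)
{
	struct ctxtdata *uctxt = calloc(1, sizeof(*uctxt));
	struct filedata fd = { 0 };

	uctxt->ctxt = 3;
	rcd_get(uctxt);   /* creation holds the initial reference */

	fd.uctxt = uctxt; /* cf. "fd->uctxt = uctxt; hfi1_rcd_get(uctxt);" */
	rcd_get(uctxt);

	fd.uctxt = NULL;  /* cf. "fdata->uctxt = NULL; hfi1_rcd_put(uctxt);" */
	rcd_put(uctxt);

	rcd_put(uctxt);   /* final put frees the context */
	return 0;
}

The point of the pairing is that a shared (sub)context can outlive any single open file: the context is only torn down after the last holder drops its reference, which matches the cleanup order visible around lines 654 to 703 above.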