Lines Matching defs:uctxt

83 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
86 struct hfi1_ctxtdata *uctxt);
87 static void user_init(struct hfi1_ctxtdata *uctxt);
97 struct hfi1_ctxtdata *uctxt);
98 static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
105 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
108 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
110 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
111 static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
112 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
225 struct hfi1_ctxtdata *uctxt = fd->uctxt;
232 !uctxt)
249 if (uctxt)
250 sc_return_credits(uctxt->sc);
266 ret = manage_rcvq(uctxt, fd->subctxt, arg);
272 uctxt->poll_type = (typeof(uctxt->poll_type))uval;
276 ret = user_event_ack(uctxt, fd->subctxt, arg);
280 ret = set_ctxt_pkey(uctxt, arg);
284 ret = ctxt_reset(uctxt);
323 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
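
The fragments at lines 225-323 come from the driver's ioctl/write entry points: a command code is routed to a per-context helper (manage_rcvq, user_event_ack, set_ctxt_pkey, ctxt_reset). A minimal userspace sketch of that dispatch shape follows; the command names, ctx struct, and stub helpers are hypothetical stand-ins, not the driver's definitions.

#include <errno.h>
#include <stdio.h>

struct ctx { int poll_type; };

enum { CMD_RECV_CTRL, CMD_POLL_TYPE, CMD_CTXT_RESET };

static int manage_rcvq_stub(struct ctx *c, unsigned long arg) { (void)c; (void)arg; return 0; }
static int ctxt_reset_stub(struct ctx *c) { (void)c; return 0; }

static int dispatch(struct ctx *c, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case CMD_RECV_CTRL:
                return manage_rcvq_stub(c, arg);   /* cf. line 266 */
        case CMD_POLL_TYPE:
                c->poll_type = (int)arg;           /* cf. line 272 */
                return 0;
        case CMD_CTXT_RESET:
                return ctxt_reset_stub(c);         /* cf. line 284 */
        default:
                return -EINVAL;                    /* unknown command */
        }
}

int main(void)
{
        struct ctx c = { 0 };
        printf("ret=%d poll_type=%d\n", dispatch(&c, CMD_POLL_TYPE, 2), c.poll_type);
        return 0;
}
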
353 struct hfi1_ctxtdata *uctxt = fd->uctxt;
364 if (!is_valid_mmap(token) || !uctxt ||
369 dd = uctxt->dd;
373 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
385 (uctxt->sc->hw_context * BIT(16))) +
393 memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
409 memvirt = dd->cr_base[uctxt->numa_id].va;
411 (((u64)uctxt->sc->hw_free -
412 (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
425 memlen = rcvhdrq_size(uctxt);
426 memvirt = uctxt->rcvhdrq;
436 memlen = uctxt->egrbufs.size;
449 for (i = 0; i < uctxt->egrbufs.numbufs; i++) {
450 memlen = uctxt->egrbufs.buffers[i].len;
451 memvirt = uctxt->egrbufs.buffers[i].addr;
476 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
492 (dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
519 if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
524 memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
528 memaddr = (u64)uctxt->subctxt_uregbase;
534 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
535 memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt;
540 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
541 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
566 uctxt->ctxt, fd->subctxt,
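
Lines 353-566 are from the mmap handler: the mmap offset is treated as a token that packs an object type plus the context and sub-context numbers, so a token minted for another context is rejected (the check at line 373). A self-contained sketch of the pack/validate/unpack idea; the magic constant and field widths here are illustrative, not the driver's actual HFI1_MMAP_TOKEN layout.

#include <assert.h>
#include <stdint.h>

#define TOK_MAGIC       0xdabbadULL            /* illustrative magic */
#define TOK_TYPE(t)     (((uint64_t)(t) & 0xf)  << 0)
#define TOK_CTXT(c)     (((uint64_t)(c) & 0xff) << 4)
#define TOK_SUBCTXT(s)  (((uint64_t)(s) & 0xf)  << 12)

static uint64_t mk_token(unsigned type, unsigned ctxt, unsigned subctxt)
{
        return (TOK_MAGIC << 16) | TOK_SUBCTXT(subctxt) | TOK_CTXT(ctxt) | TOK_TYPE(type);
}

int main(void)
{
        uint64_t tok = mk_token(3 /* e.g. RCV_HDRQ */, 7, 1);

        /* decode, as the mmap handler would before picking memaddr/memlen */
        unsigned type = tok & 0xf;
        unsigned ctxt = (tok >> 4) & 0xff;
        unsigned sub  = (tok >> 12) & 0xf;

        assert((tok >> 16) == TOK_MAGIC);      /* is_valid_mmap() analogue */
        assert(type == 3 && ctxt == 7 && sub == 1);
        return 0;
}
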
621 struct hfi1_ctxtdata *uctxt;
624 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
625 if (!uctxt)
627 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
629 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
640 struct hfi1_ctxtdata *uctxt = fdata->uctxt;
648 if (!uctxt)
651 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
655 hfi1_user_sdma_free_queues(fdata, uctxt);
664 * fdata->uctxt is used in the above cleanup. It is not ready to be
667 fdata->uctxt = NULL;
668 hfi1_rcd_put(uctxt);
674 ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
678 __clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
679 if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
696 HFI1_RCVCTRL_URGENT_DIS, uctxt);
698 hfi1_clear_ctxt_jkey(dd, uctxt);
703 if (uctxt->sc) {
704 sc_disable(uctxt->sc);
705 set_pio_integrity(uctxt->sc);
708 hfi1_free_ctxt_rcv_groups(uctxt);
709 hfi1_clear_ctxt_pkey(dd, uctxt);
711 uctxt->event_flags = 0;
713 deallocate_ctxt(uctxt);
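
Lines 640-713 are the file-close path for a shared context: each sub-context clears its bit in in_use_ctxts, and only the last closer (bitmap now empty) disables the receive queue, the send context, and the rest of the hardware state. A userspace sketch of that last-closer pattern, with a plain word standing in for the kernel's __clear_bit()/bitmap_empty():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t in_use;

static bool close_subctxt(unsigned sub)
{
        in_use &= ~(1ULL << sub);      /* __clear_bit(subctxt, in_use_ctxts) */
        if (in_use != 0)               /* !bitmap_empty(): sharers remain */
                return false;
        /* last closer: disable rcvq/jkey/send context, free rcv groups */
        return true;
}

int main(void)
{
        in_use = (1ULL << 0) | (1ULL << 1);
        printf("close sub 1: teardown=%d\n", close_subctxt(1)); /* 0: shared */
        printf("close sub 0: teardown=%d\n", close_subctxt(0)); /* 1: last */
        return 0;
}
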
762 fd->uctxt->wait,
763 !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));
765 if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
770 fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
771 ret = init_user_ctxt(fd, fd->uctxt);
776 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
778 hfi1_rcd_put(fd->uctxt);
779 fd->uctxt = NULL;
789 struct hfi1_ctxtdata *uctxt = NULL;
792 if (fd->uctxt)
814 * Get a sub context if available (fd->uctxt will be set).
824 ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
831 ret = setup_base_ctxt(fd, uctxt);
833 deallocate_ctxt(uctxt);
849 * @uctxt: context to compare uinfo to.
856 struct hfi1_ctxtdata *uctxt)
863 if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
867 if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
868 uctxt->jkey != generate_jkey(current_uid()) ||
869 uctxt->subctxt_id != uinfo->subctxt_id ||
870 uctxt->subctxt_cnt != uinfo->subctxt_cnt)
874 if (uctxt->userversion != uinfo->userversion)
879 if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
885 subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
887 if (subctxt >= uctxt->subctxt_cnt) {
893 __set_bit(fd->subctxt, uctxt->in_use_ctxts);
896 fd->uctxt = uctxt;
897 hfi1_rcd_get(uctxt);
919 struct hfi1_ctxtdata *uctxt;
928 uctxt = hfi1_rcd_get_by_index(dd, i);
929 if (uctxt) {
930 ret = match_ctxt(fd, uinfo, uctxt);
931 hfi1_rcd_put(uctxt);
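
Lines 856-931 show the sub-context matching walk: a candidate context must pass the uuid/jkey/subctxt_id/userversion checks, then find_first_zero_bit() picks the lowest free slot in in_use_ctxts and __set_bit() claims it (lines 885-893). A simplified userspace stand-in for that claim step:

#include <stdint.h>
#include <stdio.h>

static int claim_slot(uint64_t *in_use, unsigned cnt)
{
        for (unsigned i = 0; i < cnt; i++) {   /* find_first_zero_bit() */
                if (!(*in_use & (1ULL << i))) {
                        *in_use |= 1ULL << i;  /* __set_bit() */
                        return (int)i;
                }
        }
        return -1;                             /* context full: -EBUSY */
}

int main(void)
{
        uint64_t in_use = 0x1;                 /* slot 0 = base context */
        printf("claimed sub-context %d\n", claim_slot(&in_use, 4)); /* -> 1 */
        return 0;
}
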
945 struct hfi1_ctxtdata *uctxt;
971 ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
977 uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
978 uctxt->numa_id);
983 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
984 if (!uctxt->sc) {
988 hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
989 uctxt->sc->hw_context);
990 ret = sc_enable(uctxt->sc);
1003 __set_bit(0, uctxt->in_use_ctxts);
1005 init_subctxts(uctxt, uinfo);
1006 uctxt->userversion = uinfo->userversion;
1007 uctxt->flags = hfi1_cap_mask; /* save current flag state */
1008 init_waitqueue_head(&uctxt->wait);
1009 strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1010 memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1011 uctxt->jkey = generate_jkey(current_uid());
1020 *rcd = uctxt;
1025 hfi1_free_ctxt(uctxt);
1029 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
1033 if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
1034 aspm_enable_all(uctxt->dd);
1037 hfi1_free_ctxt(uctxt);
1040 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
1043 uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1044 uctxt->subctxt_id = uinfo->subctxt_id;
1045 set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1048 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1051 u16 num_subctxts = uctxt->subctxt_cnt;
1053 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1054 if (!uctxt->subctxt_uregbase)
1058 uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) *
1060 if (!uctxt->subctxt_rcvhdr_base) {
1065 uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1067 if (!uctxt->subctxt_rcvegrbuf) {
1075 vfree(uctxt->subctxt_rcvhdr_base);
1076 uctxt->subctxt_rcvhdr_base = NULL;
1078 vfree(uctxt->subctxt_uregbase);
1079 uctxt->subctxt_uregbase = NULL;
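
setup_subctxt (lines 1053-1079) allocates three shared buffers in order and, on failure, frees exactly the stages that already succeeded, in reverse. A sketch of that goto-unwind shape, with malloc/free standing in for vmalloc_user()/vfree():

#include <stdlib.h>

struct subctxt_bufs { void *uregbase, *rcvhdr_base, *rcvegrbuf; };

static int setup_bufs(struct subctxt_bufs *b, size_t hdr_sz, size_t egr_sz)
{
        b->uregbase = malloc(4096);
        if (!b->uregbase)
                return -1;
        b->rcvhdr_base = malloc(hdr_sz);
        if (!b->rcvhdr_base)
                goto bail_ureg;
        b->rcvegrbuf = malloc(egr_sz);
        if (!b->rcvegrbuf)
                goto bail_rhdr;
        return 0;

bail_rhdr:
        free(b->rcvhdr_base);
        b->rcvhdr_base = NULL;
bail_ureg:
        free(b->uregbase);
        b->uregbase = NULL;
        return -1;
}

int main(void)
{
        struct subctxt_bufs b = { 0 };
        int ret = setup_bufs(&b, 64 * 1024, 256 * 1024);

        free(b.rcvegrbuf);
        free(b.rcvhdr_base);
        free(b.uregbase);
        return ret ? 1 : 0;
}
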
1084 static void user_init(struct hfi1_ctxtdata *uctxt)
1089 uctxt->urgent = 0;
1090 uctxt->urgent_poll = 0;
1103 if (hfi1_rcvhdrtail_kvaddr(uctxt))
1104 clear_rcvhdrtail(uctxt);
1107 hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
1111 if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
1118 if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1120 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1122 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1130 if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
1134 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
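
user_init (lines 1089-1134) accumulates one rcvctrl_ops word from the context's capability bits and applies it in a single hfi1_rcvctrl() call at the end. A sketch of that accumulate-then-apply pattern; the flag names and bit values here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define CAP_HDRSUPP   (1u << 0)
#define CAP_DMA_RTAIL (1u << 1)

#define OP_CTXT_ENB   (1u << 0)
#define OP_TIDFLOW    (1u << 1)
#define OP_TAILUPD    (1u << 2)

static uint32_t build_rcvctrl_ops(uint32_t caps)
{
        uint32_t ops = OP_CTXT_ENB;        /* always enable the context */

        if (caps & CAP_HDRSUPP)
                ops |= OP_TIDFLOW;         /* header suppression on */
        if (caps & CAP_DMA_RTAIL)
                ops |= OP_TAILUPD;         /* DMA the tail pointer */
        return ops;
}

int main(void)
{
        printf("ops=0x%x\n", build_rcvctrl_ops(CAP_HDRSUPP | CAP_DMA_RTAIL));
        return 0;
}
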
1140 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1146 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
1148 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
1149 HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
1155 cinfo.unit = uctxt->dd->unit;
1156 cinfo.ctxt = uctxt->ctxt;
1158 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1159 uctxt->dd->rcv_entries.group_size) +
1160 uctxt->expected_count;
1161 cinfo.credits = uctxt->sc->credits;
1162 cinfo.numa_node = uctxt->numa_id;
1164 cinfo.send_ctxt = uctxt->sc->hw_context;
1166 cinfo.egrtids = uctxt->egrbufs.alloced;
1167 cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
1168 cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
1170 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1172 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
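
Two small computations in the ctxt_info fill (lines 1158-1168) are worth spelling out: rcvtids rounds the allocated eager-buffer count up to the RcvArray group size before adding the expected-TID count, and rcvhdrq_entsize converts a dword (4-byte) entry size to bytes with "<< 2". A sketch with sample values:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned alloced = 130, group_size = 8, expected = 2048;
        unsigned rcvtids = roundup(alloced, group_size) + expected; /* 2184 */
        unsigned entsize_dw = 32;

        printf("rcvtids=%u entsize=%u bytes\n", rcvtids, entsize_dw << 2);
        return 0;
}
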
1180 struct hfi1_ctxtdata *uctxt)
1184 ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
1188 ret = hfi1_user_exp_rcv_init(fd, uctxt);
1190 hfi1_user_sdma_free_queues(fd, uctxt);
1196 struct hfi1_ctxtdata *uctxt)
1198 struct hfi1_devdata *dd = uctxt->dd;
1201 hfi1_init_ctxt(uctxt->sc);
1204 ret = hfi1_create_rcvhdrq(dd, uctxt);
1208 ret = hfi1_setup_eagerbufs(uctxt);
1213 if (uctxt->subctxt_cnt)
1214 ret = setup_subctxt(uctxt);
1218 ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
1222 ret = init_user_ctxt(fd, uctxt);
1224 hfi1_free_ctxt_rcv_groups(uctxt);
1228 user_init(uctxt);
1231 fd->uctxt = uctxt;
1232 hfi1_rcd_get(uctxt);
1235 if (uctxt->subctxt_cnt) {
1241 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
1247 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1248 wake_up(&uctxt->wait);
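
Lines 762-779 (the waiter) and 1235-1248 (the completer) form the base/sub-context init handshake: sub-contexts sleep until the base context clears HFI1_CTXT_BASE_UNINIT, then check HFI1_CTXT_BASE_FAILED. The kernel uses wait_event()/wake_up() on uctxt->wait; this userspace analogue (compile with -lpthread) models the same protocol with a mutex and condition variable:

#include <pthread.h>
#include <stdio.h>

#define BASE_UNINIT (1u << 0)
#define BASE_FAILED (1u << 1)

static unsigned event_flags = BASE_UNINIT;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;

static void *sub_ctxt(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (event_flags & BASE_UNINIT)      /* wait_event(..., !UNINIT) */
                pthread_cond_wait(&wait_q, &lock);
        printf("base init %s\n", (event_flags & BASE_FAILED) ? "failed" : "ok");
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, sub_ctxt, NULL);

        pthread_mutex_lock(&lock);
        /* on error the base would also set BASE_FAILED before this */
        event_flags &= ~BASE_UNINIT;           /* clear_bit(BASE_UNINIT) */
        pthread_cond_broadcast(&wait_q);       /* wake_up(&uctxt->wait) */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}
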
1257 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1258 struct hfi1_devdata *dd = uctxt->dd;
1261 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
1270 binfo.jkey = uctxt->jkey;
1277 offset = ((u64)uctxt->sc->hw_free -
1278 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
1279 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1281 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1283 uctxt->sc->base_addr);
1285 uctxt->ctxt,
1287 uctxt->sc->base_addr);
1288 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1290 uctxt->rcvhdrq);
1291 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1293 uctxt->egrbufs.rcvtids[0].dma);
1294 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1300 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1302 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
1304 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1307 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1311 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1313 if (uctxt->subctxt_cnt) {
1315 uctxt->ctxt,
1318 uctxt->ctxt,
1321 uctxt->ctxt,
1451 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1452 struct hfi1_devdata *dd = uctxt->dd;
1455 poll_wait(fp, &uctxt->wait, pt);
1458 if (uctxt->urgent != uctxt->urgent_poll) {
1460 uctxt->urgent_poll = uctxt->urgent;
1463 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1474 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1475 struct hfi1_devdata *dd = uctxt->dd;
1478 poll_wait(fp, &uctxt->wait, pt);
1481 if (hdrqempty(uctxt)) {
1482 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1483 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
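
Lines 1451-1483 show the two poll paths: poll_urgent reports readable when the urgent counter has advanced past the value the poller last saw, while poll_next arms HFI1_CTXT_WAITING_RCV and re-enables the receive-available interrupt when the header queue is empty. A sketch of both conditions, with plain booleans standing in for the kernel's poll_wait()/EPOLL machinery:

#include <stdbool.h>
#include <stdio.h>

struct pctx {
        unsigned urgent, urgent_poll;   /* producer count vs. last seen */
        bool hdrq_empty, waiting_rcv, intr_enabled;
};

static bool poll_urgent(struct pctx *c)
{
        if (c->urgent != c->urgent_poll) {
                c->urgent_poll = c->urgent;   /* consume the edge */
                return true;                  /* EPOLLIN | EPOLLRDNORM */
        }
        return false;                         /* caller sleeps on the waitqueue */
}

static bool poll_next(struct pctx *c)
{
        if (c->hdrq_empty) {
                c->waiting_rcv = true;        /* HFI1_CTXT_WAITING_RCV */
                c->intr_enabled = true;       /* HFI1_RCVCTRL_INTRAVAIL_ENB */
                return false;
        }
        return true;
}

int main(void)
{
        struct pctx c = { .urgent = 1, .hdrq_empty = true };
        printf("urgent ready=%d, next ready=%d (intr armed=%d)\n",
               poll_urgent(&c), poll_next(&c), c.intr_enabled);
        return 0;
}
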
1500 struct hfi1_ctxtdata *uctxt;
1509 uctxt = hfi1_rcd_get_by_index(dd, ctxt);
1510 if (uctxt) {
1517 evs = dd->events + uctxt_offset(uctxt);
1519 for (i = 1; i < uctxt->subctxt_cnt; i++)
1521 hfi1_rcd_put(uctxt);
1530 * @uctxt: the context
1538 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
1541 struct hfi1_devdata *dd = uctxt->dd;
1561 if (hfi1_rcvhdrtail_kvaddr(uctxt))
1562 clear_rcvhdrtail(uctxt);
1567 hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
1578 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
1582 struct hfi1_devdata *dd = uctxt->dd;
1592 evs = dd->events + uctxt_offset(uctxt) + subctxt;
1602 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
1605 struct hfi1_pportdata *ppd = uctxt->ppd;
1606 struct hfi1_devdata *dd = uctxt->dd;
1620 return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
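
set_ctxt_pkey (lines 1602-1620) validates the requested pkey against the port's pkey table before programming it into the context. A sketch of that check; the table size and sample entries are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PKEYS 16

static int set_ctxt_pkey_sketch(const uint16_t *tbl, uint16_t pkey)
{
        if (pkey == 0)
                return -EINVAL;               /* the invalid pkey */
        for (int i = 0; i < NUM_PKEYS; i++)
                if (tbl[i] == pkey)
                        return 0;             /* found: program it */
        return -ENOENT;                       /* not in the port table */
}

int main(void)
{
        uint16_t tbl[NUM_PKEYS] = { 0xffff, 0x8001 };
        printf("0x8001 -> %d, 0x1234 -> %d\n",
               set_ctxt_pkey_sketch(tbl, 0x8001),
               set_ctxt_pkey_sketch(tbl, 0x1234));
        return 0;
}
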
1627 * @uctxt: valid user context
1629 static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
1635 if (!uctxt || !uctxt->dd || !uctxt->sc)
1644 dd = uctxt->dd;
1645 sc = uctxt->sc;
1678 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);