Lines Matching refs:rcd
129 struct hfi1_ctxtdata *rcd;
135 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
146 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
152 if (rcd->ctxt == HFI1_CTRL_CTXT)
153 rcd->flags |= HFI1_CAP_DMA_RTAIL;
154 rcd->fast_handler = get_dma_rtail_setting(rcd) ?
158 hfi1_set_seq_cnt(rcd, 1);
160 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
161 if (!rcd->sc) {
165 hfi1_init_ctxt(rcd->sc);
178 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
180 if (!dd->rcd)
191 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
192 hfi1_free_ctxt(dd->rcd[i]);
195 kfree(dd->rcd);
196 dd->rcd = NULL;
201 * Helper routines for the receive context reference count (rcd and uctxt).
203 static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
205 kref_init(&rcd->kref);
210 * @kref: pointer to an initialized rcd data structure
216 struct hfi1_ctxtdata *rcd =
219 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
220 rcd->dd->rcd[rcd->ctxt] = NULL;
221 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
223 hfi1_free_ctxtdata(rcd->dd, rcd);
225 kfree(rcd);
229 * hfi1_rcd_put - decrement reference for rcd
230 * @rcd: pointer to an initialized rcd data structure
234 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
236 if (rcd)
237 return kref_put(&rcd->kref, hfi1_rcd_free);
243 * hfi1_rcd_get - increment reference for rcd
244 * @rcd: pointer to an initialized rcd data structure
251 int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
253 return kref_get_unless_zero(&rcd->kref);
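The matches at 203-253 form a standard kref lifecycle: hfi1_rcd_init() sets the count to one, hfi1_rcd_get() succeeds only while the context is still live (kref_get_unless_zero), and the final hfi1_rcd_put() runs hfi1_rcd_free(). A minimal usage sketch (hypothetical caller, not from the listing):

	struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, ctxt);

	if (rcd) {
		/* rcd cannot be freed while this reference is held */
		use_context(rcd);	/* hypothetical helper */
		hfi1_rcd_put(rcd);	/* last put invokes hfi1_rcd_free() */
	}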
257 * allocate_rcd_index - allocate an rcd index from the rcd array
259 * @rcd: rcd data structure to assign
262 * Find an empty index in the rcd array, and assign the given rcd to it.
267 struct hfi1_ctxtdata *rcd, u16 *index)
274 if (!dd->rcd[ctxt])
278 rcd->ctxt = ctxt;
279 dd->rcd[ctxt] = rcd;
280 hfi1_rcd_init(rcd);
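Only lines containing "rcd" survive the filter, so the slot scan and its locking are absent above. A hedged reconstruction of allocate_rcd_index() around the matches at 267-280 (the loop bounds, the uctxt_lock bracketing, and the -EBUSY return are assumptions, not shown in the matches):

	static int allocate_rcd_index(struct hfi1_devdata *dd,
				      struct hfi1_ctxtdata *rcd, u16 *index)
	{
		unsigned long flags;
		u16 ctxt;

		spin_lock_irqsave(&dd->uctxt_lock, flags);
		for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
			if (!dd->rcd[ctxt])	/* match at 274 */
				break;

		if (ctxt < dd->num_rcv_contexts) {
			rcd->ctxt = ctxt;	/* 278 */
			dd->rcd[ctxt] = rcd;	/* 279 */
			hfi1_rcd_init(rcd);	/* 280 */
		}
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);

		if (ctxt >= dd->num_rcv_contexts)
			return -EBUSY;		/* assumed error code */

		*index = ctxt;
		return 0;
	}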
296 * @ctxt: the index of a possible rcd
316 * @ctxt: the index of a possible rcd
318 * We need to protect access to the rcd array. If access is needed to
328 struct hfi1_ctxtdata *rcd = NULL;
331 if (dd->rcd[ctxt]) {
332 rcd = dd->rcd[ctxt];
333 if (!hfi1_rcd_get(rcd))
334 rcd = NULL;
338 return rcd;
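The spin_lock calls contain no "rcd" and are filtered out; per the comment at 318, the lookup is presumably bracketed by dd->uctxt_lock. Likely shape of hfi1_rcd_get_by_index() (the lock/flags lines are assumptions):

	struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd,
						    u16 ctxt)
	{
		struct hfi1_ctxtdata *rcd = NULL;	/* 328 */
		unsigned long flags;

		spin_lock_irqsave(&dd->uctxt_lock, flags);
		if (dd->rcd[ctxt]) {			/* 331 */
			rcd = dd->rcd[ctxt];		/* 332 */
			if (!hfi1_rcd_get(rcd))		/* 333: rejects a dying rcd */
				rcd = NULL;		/* 334 */
		}
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);

		return rcd;				/* 338 */
	}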
349 struct hfi1_ctxtdata *rcd;
357 rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
358 if (rcd) {
363 ret = allocate_rcd_index(dd, rcd, &ctxt);
366 kfree(rcd);
370 INIT_LIST_HEAD(&rcd->qp_wait_list);
371 hfi1_exp_tid_group_init(rcd);
372 rcd->ppd = ppd;
373 rcd->dd = dd;
374 rcd->numa_id = numa;
375 rcd->rcv_array_groups = dd->rcv_entries.ngroups;
376 rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
377 rcd->slow_handler = handle_receive_interrupt;
378 rcd->do_interrupt = rcd->slow_handler;
379 rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
381 mutex_init(&rcd->exp_mutex);
382 spin_lock_init(&rcd->exp_lock);
383 INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
384 INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
386 hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
398 rcd->rcv_array_groups++;
410 rcd->rcv_array_groups++;
416 rcd->eager_base = base * dd->rcv_entries.group_size;
418 rcd->rcvhdrq_cnt = rcvhdrcnt;
419 rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
420 rcd->rhf_offset =
421 rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
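Note on the match at 420-421: division binds tighter than subtraction, so rhf_offset = rcvhdrqentsize - (sizeof(u64) / sizeof(u32)) = rcvhdrqentsize - 2, in 32-bit words. For example, a 32-dword header queue entry places the RHF at dword offset 30, i.e. in the last 8 bytes of the entry.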
433 max_entries = rcd->rcv_array_groups *
436 rcd->egrbufs.count = round_down(rcvtids,
438 if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
440 rcd->ctxt);
441 rcd->egrbufs.count = MAX_EAGER_ENTRIES;
445 rcd->ctxt, rcd->egrbufs.count);
455 rcd->egrbufs.buffers =
456 kcalloc_node(rcd->egrbufs.count,
457 sizeof(*rcd->egrbufs.buffers),
459 if (!rcd->egrbufs.buffers)
461 rcd->egrbufs.rcvtids =
462 kcalloc_node(rcd->egrbufs.count,
463 sizeof(*rcd->egrbufs.rcvtids),
465 if (!rcd->egrbufs.rcvtids)
467 rcd->egrbufs.size = eager_buffer_size;
473 if (rcd->egrbufs.size < hfi1_max_mtu) {
474 rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
477 rcd->ctxt, rcd->egrbufs.size);
479 rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
483 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
485 if (!rcd->opstats)
489 hfi1_kern_init_ctxt_generations(rcd);
492 *context = rcd;
498 hfi1_free_ctxt(rcd);
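The two error paths in hfi1_create_ctxtdata() differ deliberately: the failure at 366 occurs before allocate_rcd_index() publishes the context, so a bare kfree() suffices, while the later bail path at 498 goes through hfi1_free_ctxt() and hfi1_rcd_put(), so that hfi1_rcd_free() (216-225) clears dd->rcd[rcd->ctxt] under uctxt_lock before freeing. Condensed sketch (error codes and the label name are assumptions):

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (!rcd)
		return -ENOMEM;			/* assumed */
	if (allocate_rcd_index(dd, rcd, &ctxt)) {
		kfree(rcd);			/* not yet published */
		return -EBUSY;			/* assumed */
	}
	/* ... any later failure jumps to the bail path ... */
bail:
	hfi1_free_ctxt(rcd);			/* published: drop via kref */
	return -ENOMEM;				/* assumed */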
504 * @rcd: pointer to an initialized rcd data structure
512 void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
514 hfi1_rcd_put(rcd);
721 struct hfi1_ctxtdata *rcd;
728 rcd = hfi1_rcd_get_by_index(dd, i);
731 HFI1_RCVCTRL_TAILUPD_DIS, rcd);
732 hfi1_rcd_put(rcd);
743 struct hfi1_ctxtdata *rcd;
755 rcd = hfi1_rcd_get_by_index(dd, i);
756 if (!rcd)
759 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
761 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
763 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
765 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
769 hfi1_rcvctrl(dd, rcvmask, rcd);
770 sc_enable(rcd->sc);
771 hfi1_rcd_put(rcd);
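The loop at 755-771 converts per-context capability bits into receive-control operations before enabling each context. Sketch of the mapping implied by the matches (the paired HFI1_RCVCTRL_*_ENB names are inferred from the _DIS forms seen at 731 and 1078):

	rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
		HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;	/* 759 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;		/* 761 */
	/* NODROP_RHQ_FULL / NODROP_EGR_FULL add their ops likewise (763-765) */
	hfi1_rcvctrl(dd, rcvmask, rcd);					/* 769 */
	sc_enable(rcd->sc);						/* 770 */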
889 struct hfi1_ctxtdata *rcd;
919 /* dd->rcd can be NULL if early initialization failed */
920 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
927 rcd = hfi1_rcd_get_by_index(dd, i);
928 if (!rcd)
931 lastfail = hfi1_create_rcvhdrq(dd, rcd);
933 lastfail = hfi1_setup_eagerbufs(rcd);
935 lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
942 hfi1_rcd_put(rcd);
1048 struct hfi1_ctxtdata *rcd;
1073 rcd = hfi1_rcd_get_by_index(dd, i);
1078 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
1079 hfi1_rcd_put(rcd);
1122 * @rcd: the ctxtdata structure
1127 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1131 if (!rcd)
1134 if (rcd->rcvhdrq) {
1135 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
1136 rcd->rcvhdrq, rcd->rcvhdrq_dma);
1137 rcd->rcvhdrq = NULL;
1138 if (hfi1_rcvhdrtail_kvaddr(rcd)) {
1140 (void *)hfi1_rcvhdrtail_kvaddr(rcd),
1141 rcd->rcvhdrqtailaddr_dma);
1142 rcd->rcvhdrtail_kvaddr = NULL;
1147 kfree(rcd->egrbufs.rcvtids);
1148 rcd->egrbufs.rcvtids = NULL;
1150 for (e = 0; e < rcd->egrbufs.alloced; e++) {
1151 if (rcd->egrbufs.buffers[e].addr)
1153 rcd->egrbufs.buffers[e].len,
1154 rcd->egrbufs.buffers[e].addr,
1155 rcd->egrbufs.buffers[e].dma);
1157 kfree(rcd->egrbufs.buffers);
1158 rcd->egrbufs.alloced = 0;
1159 rcd->egrbufs.buffers = NULL;
1161 sc_free(rcd->sc);
1162 rcd->sc = NULL;
1164 vfree(rcd->subctxt_uregbase);
1165 vfree(rcd->subctxt_rcvegrbuf);
1166 vfree(rcd->subctxt_rcvhdr_base);
1167 kfree(rcd->opstats);
1169 rcd->subctxt_uregbase = NULL;
1170 rcd->subctxt_rcvegrbuf = NULL;
1171 rcd->subctxt_rcvhdr_base = NULL;
1172 rcd->opstats = NULL;
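Every free in hfi1_free_ctxtdata() (1134-1172) is followed by NULLing the pointer (or zeroing the count, as with egrbufs.alloced at 1158), which makes a repeat call on the same rcd a harmless no-op. Note also that the tail-pointer buffer (1138-1142) is freed inside the rcvhdrq branch because it is only ever allocated alongside the header queue (cf. 1824-1830).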
1554 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
1555 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1557 if (rcd) {
1558 hfi1_free_ctxt_rcv_groups(rcd);
1559 hfi1_free_ctxt(rcd);
1563 kfree(dd->rcd);
1564 dd->rcd = NULL;
1794 * @rcd: the context data
1800 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1804 if (!rcd->rcvhdrq) {
1807 amt = rcvhdrq_size(rcd);
1809 if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
1813 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1814 &rcd->rcvhdrq_dma,
1817 if (!rcd->rcvhdrq) {
1820 amt, rcd->ctxt);
1824 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1825 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1826 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1828 &rcd->rcvhdrqtailaddr_dma,
1830 if (!rcd->rcvhdrtail_kvaddr)
1835 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
1836 rcd->rcvhdrq_cnt);
1843 rcd->ctxt);
1844 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1845 rcd->rcvhdrq_dma);
1846 rcd->rcvhdrq = NULL;
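In hfi1_create_rcvhdrq() the tail-pointer buffer (1826-1830) is allocated only when the DMA_RTAIL capability is set for kernel or user contexts (1824-1825); its allocation length contains no "rcd" and is filtered out above (a single page would be the natural size, but that is an assumption here).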
1853 * @rcd: the context we are setting up.
1860 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1862 struct hfi1_devdata *dd = rcd->dd;
1884 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1885 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1890 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1891 rcd->egrbufs.rcvtid_size = round_mtu;
1897 if (rcd->egrbufs.size <= (1 << 20))
1898 rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1899 rounddown_pow_of_two(rcd->egrbufs.size / 8));
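Worked example for the sizing at 1897-1899, with hypothetical values: if egrbufs.size = 1 MB and round_mtu = 8 KB, then rounddown_pow_of_two(1 MB / 8) = 128 KB and rcvtid_size = max(8 KB, 128 KB) = 128 KB, so eight RcvArray entries span the whole region; with MULTI_PKT_EGR off, 1890-1891 pin rcvtid_size to the rounded MTU instead.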
1901 while (alloced_bytes < rcd->egrbufs.size &&
1902 rcd->egrbufs.alloced < rcd->egrbufs.count) {
1903 rcd->egrbufs.buffers[idx].addr =
1905 rcd->egrbufs.rcvtid_size,
1906 &rcd->egrbufs.buffers[idx].dma,
1908 if (rcd->egrbufs.buffers[idx].addr) {
1909 rcd->egrbufs.buffers[idx].len =
1910 rcd->egrbufs.rcvtid_size;
1911 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1912 rcd->egrbufs.buffers[idx].addr;
1913 rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1914 rcd->egrbufs.buffers[idx].dma;
1915 rcd->egrbufs.alloced++;
1916 alloced_bytes += rcd->egrbufs.rcvtid_size;
1928 if (rcd->egrbufs.rcvtid_size == round_mtu ||
1929 !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1931 rcd->ctxt);
1936 new_size = rcd->egrbufs.rcvtid_size / 2;
1944 rcd->egrbufs.rcvtid_size = new_size;
1952 rcd->egrbufs.alloced = 0;
1954 if (i >= rcd->egrbufs.count)
1956 rcd->egrbufs.rcvtids[i].dma =
1957 rcd->egrbufs.buffers[j].dma + offset;
1958 rcd->egrbufs.rcvtids[i].addr =
1959 rcd->egrbufs.buffers[j].addr + offset;
1960 rcd->egrbufs.alloced++;
1961 if ((rcd->egrbufs.buffers[j].dma + offset +
1963 (rcd->egrbufs.buffers[j].dma +
1964 rcd->egrbufs.buffers[j].len)) {
1971 rcd->egrbufs.rcvtid_size = new_size;
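The recovery path above (1928-1971) handles a mid-stream dma_alloc_coherent() failure: unless the buffer size is already down to round_mtu or MULTI_PKT_EGR is off (1928-1929), rcvtid_size is halved (1936) and the loop at 1952-1964 re-slices the buffers already allocated at the old size into new_size chunks, advancing to the next buffer once the offset reaches its length, so no allocated memory is discarded. E.g., one 64 KB buffer re-sliced at new_size = 32 KB contributes two rcvtid entries.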
1974 rcd->egrbufs.numbufs = idx;
1975 rcd->egrbufs.size = alloced_bytes;
1979 rcd->ctxt, rcd->egrbufs.alloced,
1980 rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
1987 rcd->egrbufs.threshold =
1988 rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
1994 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1995 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1996 rcd->expected_count = max_entries - egrtop;
1997 if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
1998 rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
2000 rcd->expected_base = rcd->eager_base + egrtop;
2002 rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
2003 rcd->eager_base, rcd->expected_base);
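Worked example for 1994-2000, with hypothetical numbers: rcv_array_groups = 128 and group_size = 8 give max_entries = 1024; with egrbufs.alloced = 500, egrtop = roundup(500, 8) = 504, so expected_count = 1024 - 504 = 520 (capped at MAX_TID_PAIR_ENTRIES * 2 if larger) and expected_base = eager_base + 504.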
2005 if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
2008 rcd->ctxt, rcd->egrbufs.rcvtid_size);
2013 for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
2014 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
2015 rcd->egrbufs.rcvtids[idx].dma, order);
2022 for (idx = 0; idx < rcd->egrbufs.alloced &&
2023 rcd->egrbufs.buffers[idx].addr;
2026 rcd->egrbufs.buffers[idx].len,
2027 rcd->egrbufs.buffers[idx].addr,
2028 rcd->egrbufs.buffers[idx].dma);
2029 rcd->egrbufs.buffers[idx].addr = NULL;
2030 rcd->egrbufs.buffers[idx].dma = 0;
2031 rcd->egrbufs.buffers[idx].len = 0;