Lines Matching refs:vring
71 struct wil_ring *vring = &wil->ring_tx[i];
72 int vring_index = vring - wil->ring_tx;
78 if (!vring->va || !txdata->enabled) {
86 while (!wil_ring_is_empty(vring)) {
94 "tx vring is not empty -> NAPI\n");
99 if (!vring->va || !txdata->enabled)
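The fragments at 86-99 poll a Tx ring until it drains, falling back to NAPI while descriptors are still outstanding. A minimal sketch of the index convention this relies on, assuming (as elsewhere in this file) that a ring is empty when software head equals software tail; the names are illustrative:

#include <stdbool.h>

struct ring {
    unsigned int swhead;    /* producer: next descriptor to fill */
    unsigned int swtail;    /* consumer: next descriptor to complete */
    unsigned int size;      /* descriptor count, a power of 2 */
};

/* empty when producer and consumer indices coincide;
 * the driver polls until this becomes true */
static bool ring_is_empty(const struct ring *r)
{
    return r->swhead == r->swtail;
}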
110 static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
113 size_t sz = vring->size * sizeof(vring->va[0]);
118 BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
120 vring->swhead = 0;
121 vring->swtail = 0;
122 vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
123 if (!vring->ctx) {
124 vring->va = NULL;
128 /* vring->va should be aligned on its size rounded up to power of 2
134 * allocation before allocating vring memory.
143 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
144 if (!vring->va) {
145 kfree(vring->ctx);
146 vring->ctx = NULL;
158 for (i = 0; i < vring->size; i++) {
160 &vring->va[i].tx.legacy;
165 wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
166 vring->va, &vring->pa, vring->ctx);
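Lines 110-166 show the allocation order: software indices are reset, the per-descriptor context array is allocated first, and only then the descriptor memory itself, one DMA-coherent block of 32-byte entries (the BUILD_BUG_ON at 118 pins that size). A user-space sketch of the same shape, with aligned_alloc standing in for dma_alloc_coherent:

#include <stdlib.h>
#include <string.h>

struct desc { unsigned char raw[32]; };  /* descriptor size pinned to 32 bytes */
struct ctx  { void *skb; };              /* per-descriptor software context */
struct ring {
    struct desc *va;
    struct ctx *ctx;
    unsigned int swhead, swtail, size;
};

static int ring_alloc(struct ring *r, unsigned int size)
{
    size_t sz = size * sizeof(r->va[0]);

    r->size = size;
    r->swhead = 0;
    r->swtail = 0;
    r->ctx = calloc(size, sizeof(r->ctx[0]));  /* ctx first, as at 122 */
    if (!r->ctx) {
        r->va = NULL;
        return -1;
    }
    /* the driver uses dma_alloc_coherent(); plain aligned memory here */
    r->va = aligned_alloc(32, sz);
    if (!r->va) {                              /* unwind, as at 145-146 */
        free(r->ctx);
        r->ctx = NULL;
        return -1;
    }
    memset(r->va, 0, sz);
    return 0;
}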
190 static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
193 size_t sz = vring->size * sizeof(vring->va[0]);
196 if (!vring->is_rx) {
197 int vring_index = vring - wil->ring_tx;
199 wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
200 vring_index, vring->size, vring->va,
201 &vring->pa, vring->ctx);
203 wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
204 vring->size, vring->va,
205 &vring->pa, vring->ctx);
208 while (!wil_ring_is_empty(vring)) {
213 if (!vring->is_rx) {
216 &vring->va[vring->swtail].tx.legacy;
218 ctx = &vring->ctx[vring->swtail];
222 vring->swtail);
223 vring->swtail = wil_ring_next_tail(vring);
230 vring->swtail = wil_ring_next_tail(vring);
234 &vring->va[vring->swhead].rx.legacy;
236 ctx = &vring->ctx[vring->swhead];
242 wil_ring_advance_head(vring, 1);
245 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
246 kfree(vring->ctx);
247 vring->pa = 0;
248 vring->va = NULL;
249 vring->ctx = NULL;
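wil_vring_free (190-249) drains whatever is still posted before releasing memory: Tx rings complete pending frames from swtail, Rx rings drop unfilled buffers from swhead, and only then are the descriptor block and context array released and the pointers cleared. A sketch continuing the allocation types above, with free() standing in for the DMA unmap and skb release:

#include <stdbool.h>
#include <stdlib.h>

/* continues the allocation sketch above: same struct ring */
static void ring_free(struct ring *r, bool is_rx)
{
    while (r->swhead != r->swtail) {             /* wil_ring_is_empty() test */
        if (!is_rx) {
            /* Tx: complete pending frames from the tail (208-230) */
            free(r->ctx[r->swtail].skb);
            r->swtail = (r->swtail + 1) % r->size;   /* wil_ring_next_tail() */
        } else {
            /* Rx: drop unfilled buffers from the head (234-242) */
            free(r->ctx[r->swhead].skb);
            r->swhead = (r->swhead + 1) % r->size;   /* wil_ring_advance_head() */
        }
    }
    free(r->va);    /* dma_free_coherent() in the driver (245) */
    free(r->ctx);
    r->va = NULL;
    r->ctx = NULL;
}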
256 static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
262 volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
292 vring->ctx[i].skb = skb;
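wil_vring_alloc_skb (256-292) fills one Rx slot: allocate an skb, DMA-map it, write the mapping into the hardware descriptor at index i, and remember the skb pointer in ctx[i] so the reap and free paths can find it. A sketch, assuming a simplified descriptor that carries just an address and a length (the real vring_rx_desc layout is hardware-defined):

#include <stdint.h>
#include <stdlib.h>

struct rx_desc { uint64_t addr; uint16_t len; };  /* simplified stand-in */
struct rx_ctx  { void *skb; };

static int ring_alloc_skb(volatile struct rx_desc *va, struct rx_ctx *ctx,
                          unsigned int i, uint16_t buf_len)
{
    void *skb = malloc(buf_len);   /* netdev_alloc_skb() in the driver */

    if (!skb)
        return -1;
    /* the driver dma_map_single()s the buffer; fake an address here */
    va[i].addr = (uintptr_t)skb;
    va[i].len = buf_len;
    ctx[i].skb = skb;              /* found again at reap/free time (292) */
    return 0;
}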
440 struct wil_ring *vring)
460 if (unlikely(wil_ring_is_empty(vring)))
463 i = (int)vring->swhead;
464 _d = &vring->va[i].rx.legacy;
470 skb = vring->ctx[i].skb;
471 vring->ctx[i].skb = NULL;
472 wil_ring_advance_head(vring, 1);
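The Rx reap at 440-472 is the inverse operation: if the ring is non-empty, take the slot at swhead, detach the skb from its context entry, clear the slot, and advance the head. Schematically, reusing struct ring from the allocation sketch:

static void *ring_reap_rx(struct ring *r)
{
    if (r->swhead == r->swtail)              /* 460: nothing to reap */
        return NULL;

    unsigned int i = r->swhead;
    void *skb = r->ctx[i].skb;               /* 470 */

    r->ctx[i].skb = NULL;                    /* 471: slot now free */
    r->swhead = (r->swhead + 1) % r->size;   /* 472: wil_ring_advance_head() */
    return skb;
}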
1051 struct wil_ring *vring = &wil->ring_rx;
1056 if (vring->va) {
1063 vring->size = 1 << order;
1064 vring->is_rx = true;
1065 rc = wil_vring_alloc(wil, vring);
1069 rc = wmi_rx_chain_add(wil, vring);
1073 rc = wil_rx_refill(wil, vring->size);
1079 wil_vring_free(wil, vring);
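Rx bring-up (1051-1079) is a three-step sequence with unwinding: refuse if the ring already exists, size it as a power of two, allocate, hand the ring to firmware, then post buffers; any failure after allocation falls through to wil_vring_free. A sketch of the control flow, reusing the helpers above, with hypothetical wrappers standing in for the WMI call and the refill:

int rx_chain_add(struct ring *r);   /* hypothetical: wraps wmi_rx_chain_add() */
int rx_refill(struct ring *r);      /* hypothetical: wraps wil_rx_refill() */

static int rx_init(struct ring *r, unsigned int order)
{
    int rc;

    if (r->va)                          /* 1056: already initialized */
        return -1;
    rc = ring_alloc(r, 1u << order);    /* 1063: size = 1 << order */
    if (rc)
        return rc;
    rc = rx_chain_add(r);               /* 1069: firmware learns the ring */
    if (rc)
        goto err_free;
    rc = rx_refill(r);                  /* 1073: post empty Rx buffers */
    if (rc)
        goto err_free;
    return 0;

err_free:
    ring_free(r, 1);                    /* 1079: unwind on any failure */
    return rc;
}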
1086 struct wil_ring *vring = &wil->ring_rx;
1090 if (vring->va)
1091 wil_vring_free(wil, vring);
1164 struct wil_ring *vring = &wil->ring_tx[id];
1179 if (vring->va) {
1186 vring->is_rx = false;
1187 vring->size = size;
1188 rc = wil_vring_alloc(wil, vring);
1195 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1213 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1227 wil_vring_free(wil, vring);
1267 struct wil_ring *vring = &wil->ring_tx[ring_id];
1274 if (!vring->va) {
1286 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1341 struct wil_ring *vring = &wil->ring_tx[id];
1348 if (vring->va) {
1355 vring->is_rx = false;
1356 vring->size = size;
1357 rc = wil_vring_alloc(wil, vring);
1364 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1383 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1394 wil_vring_free(wil, vring);
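Both Tx bring-up paths, unicast (1164-1227) and broadcast (1341-1394), follow the same contract: refuse if the ring exists, allocate it, send the ring's DMA base to firmware in a vring_cfg command (1195/1364), and store the hardware tail pointer returned in the reply (1213/1383); that hwtail is the doorbell the transmit path later writes swhead into (2007). Schematically, with a hypothetical wrapper for the WMI exchange:

/* hypothetical: sends the vring_cfg command carrying the ring's DMA base
 * and returns the hw tail pointer from the firmware reply */
int wmi_cfg_tx_ring(struct ring *r, unsigned int *hwtail);

/* reuses struct ring and the helpers sketched above */
static int tx_init(struct ring *r, unsigned int size, unsigned int *hwtail)
{
    int rc;

    if (r->va)                            /* 1179/1348: already initialized */
        return -1;
    rc = ring_alloc(r, size);
    if (rc)
        return rc;
    rc = wmi_cfg_tx_ring(r, hwtail);      /* firmware learns vring->pa */
    if (rc)
        ring_free(r, 0);                  /* 1227/1394: unwind */
    return rc;
}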
1428 "find_tx_ucast: vring[%d] not valid\n",
1453 * find the first vring eligible for this skb and use it.
1482 * use dedicated broadcast vring
1484 * Find the first vring and return it;
1561 /* find the first vring eligible for data */
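The "find first" comments at 1453, 1484 and 1561 all describe the same linear scan over ring_tx: skip any slot with no descriptor memory or with transmission disabled (the guard already seen at 78 and 1274) and return the first survivor. A sketch, with illustrative types:

#include <stddef.h>

struct tx_slot { void *va; int enabled; };

static struct tx_slot *find_first_eligible(struct tx_slot *rings, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (rings[i].va && rings[i].enabled)   /* same test as at 78 */
            return &rings[i];                  /* first usable ring wins */
    return NULL;                               /* no ring eligible */
}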
1726 struct wil_ring *vring, struct sk_buff *skb)
1745 u32 swhead = vring->swhead;
1746 int used, avail = wil_ring_avail_tx(vring);
1751 int vring_index = vring - wil->ring_tx;
1766 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
1820 _hdr_desc = &vring->va[i].tx.legacy;
1834 vring->ctx[i].mapped_as = wil_mapped_as_single;
1835 hdr_ctx = &vring->ctx[i];
1863 i = (swhead + descs_used) % vring->size;
1870 vring->ctx[i].mapped_as = wil_mapped_as_page;
1877 vring->ctx[i].mapped_as = wil_mapped_as_single;
1886 _desc = &vring->va[i].tx.legacy;
1890 first_ctx = &vring->ctx[i];
1975 vring->ctx[i].skb = skb_get(skb);
1978 used = wil_ring_used_tx(vring);
1994 wil_ring_advance_head(vring, descs_used);
1995 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
2007 wil_w(wil, vring->hwtail, vring->swhead);
2014 i = (swhead + descs_used - 1) % vring->size;
2015 d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
2016 _desc = &vring->va[i].tx.legacy;
2019 ctx = &vring->ctx[i];
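Throughout the TSO path the slot index is computed with a wrap: 1863 picks the next free descriptor, 2014 steps back to the last one written, and 2465 does the same on the completion side. Because ring sizes are powers of two, the modulo is cheap. A self-contained check of the arithmetic:

#include <assert.h>

int main(void)
{
    unsigned int size = 1u << 10;      /* ring sizes are powers of 2 */
    unsigned int swhead = size - 2;    /* start near the wrap point */
    unsigned int descs_used = 5;

    /* 1863: index of the next free slot, wrapping past the end */
    unsigned int i = (swhead + descs_used) % size;
    assert(i == 3);

    /* 2014: index of the last descriptor just written */
    unsigned int last = (swhead + descs_used - 1) % size;
    assert(last == 2);
    return 0;
}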
2220 * vring is the vring which is currently being modified by either adding
2224 * The implementation is to stop net queues if modified vring has low
2226 * availability and modified vring has high descriptor availability.
2240 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
2257 /* not enough room in the vring */
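The comment block at 2220-2226 describes hysteresis, not a single threshold: stop the net queues as soon as the ring being modified runs low on descriptors, but wake them only once every ring is back above a higher watermark, so the queues do not flap while availability hovers around one level. A sketch of the two tests, with illustrative watermarks (the driver derives both from the ring size):

/* reuses struct ring from the sketches above */
static unsigned int ring_used_tx(const struct ring *r)
{
    return (r->swhead - r->swtail + r->size) % r->size;
}

static unsigned int ring_avail_tx(const struct ring *r)
{
    return r->size - 1 - ring_used_tx(r);   /* one slot always kept empty */
}

/* illustrative watermarks: stop below 1/8 of the ring, wake above 1/4 */
static int ring_avail_low(const struct ring *r)
{
    return ring_avail_tx(r) < r->size / 8;
}

static int ring_avail_high(const struct ring *r)
{
    return ring_avail_tx(r) > r->size / 4;
}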
2339 /* find vring */
2365 /* set up vring entry */
2425 struct wil_ring *vring = &wil->ring_tx[ringid];
2434 if (unlikely(!vring->va)) {
2435 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
2440 wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
2446 used_before_complete = wil_ring_used_tx(vring);
2451 while (!wil_ring_is_empty(vring)) {
2453 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2458 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2461 _d = &vring->va[lf].tx.legacy;
2465 new_swtail = (lf + 1) % vring->size;
2466 while (vring->swtail != new_swtail) {
2471 ctx = &vring->ctx[vring->swtail];
2473 _d = &vring->va[vring->swtail].tx.legacy;
2478 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
2482 ringid, vring->swtail, dmalen,
2525 vring->swtail = wil_ring_next_tail(vring);
2531 used_new = wil_ring_used_tx(vring);
2541 wil_update_net_queues(wil, vif, vring, false);
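The completion path (2425-2541) reaps finished frames from swtail. For the head-of-line frame it first peeks at the descriptor of the frame's last fragment (swtail + nr_frags, line 2458); only when hardware has marked that descriptor done does it walk and release every descriptor of the frame, advancing swtail, before finally re-evaluating the net queues (2541). A self-contained sketch of the two nested loops, with hypothetical helpers for the hardware test and the per-descriptor release:

struct tx_ctx { unsigned int nr_frags; };
struct tx_ring {
    unsigned int swhead, swtail, size;
    struct tx_ctx *ctx;
};

/* hypothetical: reads the descriptor's "DMA done" status at index i */
int desc_done(const struct tx_ring *r, unsigned int i);
/* hypothetical: unmap the buffer at index i, free its skb if any */
void release_desc(struct tx_ring *r, unsigned int i);

static void tx_complete(struct tx_ring *r)
{
    while (r->swhead != r->swtail) {            /* !wil_ring_is_empty() */
        struct tx_ctx *c = &r->ctx[r->swtail];
        /* 2458: last fragment of the head-of-line frame */
        unsigned int lf = (r->swtail + c->nr_frags) % r->size;

        if (!desc_done(r, lf))
            break;                              /* frame still in flight */

        /* 2465-2525: release every descriptor of the frame */
        unsigned int new_swtail = (lf + 1) % r->size;
        while (r->swtail != new_swtail) {
            release_desc(r, r->swtail);
            r->swtail = (r->swtail + 1) % r->size;  /* wil_ring_next_tail() */
        }
    }
    /* 2541: the caller then re-evaluates queue state, as in
     * wil_update_net_queues() */
}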