Lines Matching defs:gq
202 static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
204 int index = cur ? gq->cur : gq->dirty;
206 if (index + num >= gq->ring_size)
207 index = (index + num) % gq->ring_size;
214 static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
216 if (gq->cur >= gq->dirty)
217 return gq->cur - gq->dirty;
219 return gq->ring_size - gq->dirty + gq->cur;
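The two index helpers above (lines 202-219) are the heart of the driver's ring bookkeeping: cur is the producer index, dirty the consumer index, both wrap at ring_size, and the in-flight count is the distance from dirty to cur. A minimal standalone restatement of that arithmetic, with hypothetical ring_next()/ring_in_flight() names standing in for rswitch_next_queue_index() and rswitch_get_num_cur_queues():

#include <assert.h>

struct ring_idx {
        unsigned int cur;       /* producer: next entry to fill */
        unsigned int dirty;     /* consumer: next entry to reclaim */
        unsigned int size;      /* number of usable entries */
};

/* Advance an index by num slots, wrapping at size (cf. rswitch_next_queue_index()). */
static unsigned int ring_next(const struct ring_idx *r, unsigned int index,
                              unsigned int num)
{
        if (index + num >= r->size)
                return (index + num) % r->size;
        return index + num;
}

/* Slots produced but not yet reclaimed (cf. rswitch_get_num_cur_queues()). */
static unsigned int ring_in_flight(const struct ring_idx *r)
{
        if (r->cur >= r->dirty)
                return r->cur - r->dirty;
        return r->size - r->dirty + r->cur;
}

int main(void)
{
        struct ring_idx r = { .cur = 6, .dirty = 5, .size = 8 };

        assert(ring_in_flight(&r) == 1);
        r.cur = ring_next(&r, r.cur, 3);        /* 6 + 3 wraps to 1 */
        assert(r.cur == 1 && ring_in_flight(&r) == 4);
        return 0;
}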
222 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
224 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
232 static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
238 index = (i + start_index) % gq->ring_size;
239 if (gq->skbs[index])
241 gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
243 if (!gq->skbs[index])
251 index = (i + start_index) % gq->ring_size;
252 dev_kfree_skb(gq->skbs[index]);
253 gq->skbs[index] = NULL;
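rswitch_gwca_queue_alloc_skb() (lines 232-253) fills a window of num slots starting at start_index, skipping slots that still hold an skb, and on allocation failure frees every slot it already visited before returning. A rough standalone illustration of that allocate-or-unwind pattern, using plain malloc() in place of netdev_alloc_skb_ip_align(); buf_alloc_window() is a hypothetical name, not a driver function:

#include <stdlib.h>

/* Fill ring slots [start, start + num) with fresh buffers, wrapping at size.
 * Slots that already hold a buffer are left alone; on failure, every slot
 * visited so far is freed again, mirroring the err: path above. */
static int buf_alloc_window(void **slots, unsigned int size,
                            unsigned int start, unsigned int num, size_t buf_sz)
{
        unsigned int i, index;

        for (i = 0; i < num; i++) {
                index = (i + start) % size;
                if (slots[index])       /* slot still holds an unconsumed buffer */
                        continue;
                slots[index] = malloc(buf_sz);
                if (!slots[index])
                        goto err;
        }
        return 0;

err:
        while (i-- > 0) {
                index = (i + start) % size;
                free(slots[index]);
                slots[index] = NULL;
        }
        return -1;
}

int main(void)
{
        void *slots[8] = { 0 };

        /* Refill four slots starting at index 6; indexes 6, 7, 0, 1 are touched. */
        return buf_alloc_window(slots, 8, 6, 4, 2048) ? 1 : 0;
}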
260 struct rswitch_gwca_queue *gq)
264 if (!gq->dir_tx) {
267 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
268 gq->rx_ring = NULL;
270 for (i = 0; i < gq->ring_size; i++)
271 dev_kfree_skb(gq->skbs[i]);
275 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
276 gq->tx_ring = NULL;
279 kfree(gq->skbs);
280 gq->skbs = NULL;
285 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
288 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
289 gq->ts_ring, gq->ring_dma);
290 gq->ts_ring = NULL;
295 struct rswitch_gwca_queue *gq,
300 gq->dir_tx = dir_tx;
301 gq->ring_size = ring_size;
302 gq->ndev = ndev;
304 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
305 if (!gq->skbs)
309 rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
311 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
313 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
315 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
317 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
320 if (!gq->rx_ring && !gq->tx_ring)
323 i = gq->index / 32;
324 bit = BIT(gq->index % 32);
333 rswitch_gwca_queue_free(ndev, gq);
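The index / 32 and BIT(index % 32) pair at lines 323-324 (and again at 857-858) maps a queue number onto 32-bit per-queue interrupt registers: the quotient selects the register word, the remainder the bit within it. A trivial standalone restatement, with a made-up queue number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int index = 37;                        /* hypothetical queue index */
        unsigned int word  = index / 32;                /* which 32-bit register */
        uint32_t mask = UINT32_C(1) << (index % 32);    /* bit inside that register */

        printf("queue %u -> register word %u, mask 0x%08x\n", index, word, mask);
        return 0;
}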
351 struct rswitch_gwca_queue *gq)
353 int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
359 memset(gq->tx_ring, 0, ring_size);
360 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
361 if (!gq->dir_tx) {
363 gq->skbs[i]->data, PKT_BUF_SZ,
375 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
378 linkfix = &priv->gwca.linkfix_table[gq->index];
380 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
382 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
383 priv->addr + GWDCC_OFFS(gq->index));
388 if (!gq->dir_tx) {
389 for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
402 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
407 index = (i + start_index) % gq->ring_size;
408 desc = &gq->ts_ring[index];
414 struct rswitch_gwca_queue *gq,
423 index = (i + start_index) % gq->ring_size;
424 desc = &gq->rx_ring[index];
425 if (!gq->dir_tx) {
427 gq->skbs[index]->data, PKT_BUF_SZ,
445 if (!gq->dir_tx) {
447 index = (i + start_index) % gq->ring_size;
448 desc = &gq->rx_ring[index];
460 struct rswitch_gwca_queue *gq)
462 int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
467 memset(gq->rx_ring, 0, ring_size);
468 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
472 desc = &gq->rx_ring[gq->ring_size]; /* Last */
473 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
476 linkfix = &priv->gwca.linkfix_table[gq->index];
478 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
480 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
482 priv->addr + GWDCC_OFFS(gq->index));
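Both format helpers treat their rings as ring_size + 1 descriptors: the extra entry at the end (line 472, "/* Last */") is not a data slot but a link descriptor whose pointer is set back to ring_dma, and the per-queue linkfix_table entry points at the same base, so the hardware walks the ring as a closed loop. A simplified, self-contained sketch of that layout; the names demo_desc, DT_DATA and DT_LINK are illustrative, not the driver's exact descriptor format:

#include <stdint.h>
#include <string.h>

#define DT_DATA 0x1     /* hypothetical: slot describes a buffer */
#define DT_LINK 0x2     /* hypothetical: slot redirects the DMA engine */

struct demo_desc {
        uint64_t dptr;          /* buffer address, or next-descriptor address for DT_LINK */
        uint8_t  die_dt;        /* descriptor type */
};

/* Lay out ring_size data descriptors plus one trailing link descriptor
 * that points back at the ring base, so the engine loops forever. */
static void demo_format_ring(struct demo_desc *ring, unsigned int ring_size,
                             uint64_t ring_dma)
{
        unsigned int i;

        memset(ring, 0, sizeof(*ring) * (ring_size + 1));
        for (i = 0; i < ring_size; i++)
                ring[i].die_dt = DT_DATA;

        ring[ring_size].dptr = ring_dma;        /* "Last" entry: link back to the start */
        ring[ring_size].die_dt = DT_LINK;
}

int main(void)
{
        struct demo_desc ring[8 + 1];

        demo_format_ring(ring, 8, 0x40000000ull);
        return ring[8].die_dt == DT_LINK ? 0 : 1;
}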
516 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
519 gq->ring_size = TS_RING_SIZE;
520 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
522 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
524 if (!gq->ts_ring)
528 desc = &gq->ts_ring[gq->ring_size];
530 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
538 struct rswitch_gwca_queue *gq;
545 gq = &priv->gwca.queues[index];
546 memset(gq, 0, sizeof(*gq));
547 gq->index = index;
549 return gq;
553 struct rswitch_gwca_queue *gq)
555 clear_bit(gq->index, priv->gwca.used);
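The get/put pair around lines 538-555 hands out per-queue state from a fixed array and tracks ownership in the priv->gwca.used bitmap: put simply clears the queue's bit (line 555), while get claims a free bit, zeroes the queue and records its index. A standalone sketch of that claim/release pattern over a single 64-bit word; the demo_* names are hypothetical and the real driver uses the kernel bitmap helpers:

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 64

static uint64_t used_map;       /* stand-in for priv->gwca.used */

/* Claim the lowest free slot; returns -1 when every queue is taken. */
static int demo_queue_get(void)
{
        int index;

        for (index = 0; index < NUM_QUEUES; index++) {
                if (!(used_map & (UINT64_C(1) << index))) {
                        used_map |= UINT64_C(1) << index;
                        return index;
                }
        }
        return -1;
}

/* Release a slot so it can be handed out again (clear_bit() in the driver). */
static void demo_queue_put(int index)
{
        used_map &= ~(UINT64_C(1) << index);
}

int main(void)
{
        int a = demo_queue_get();       /* 0 */
        int b = demo_queue_get();       /* 1 */

        demo_queue_put(a);
        printf("%d %d %d\n", a, b, demo_queue_get());   /* prints: 0 1 0 */
        return 0;
}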
698 struct rswitch_gwca_queue *gq = rdev->rx_queue;
709 boguscnt = min_t(int, gq->ring_size, *quota);
712 desc = &gq->rx_ring[gq->cur];
716 skb = gq->skbs[gq->cur];
717 gq->skbs[gq->cur] = NULL;
737 gq->cur = rswitch_next_queue_index(gq, true, 1);
738 desc = &gq->rx_ring[gq->cur];
744 num = rswitch_get_num_cur_queues(gq);
745 ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
748 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
751 gq->dirty = rswitch_next_queue_index(gq, false, num);
766 struct rswitch_gwca_queue *gq = rdev->tx_queue;
773 for (; rswitch_get_num_cur_queues(gq) > 0;
774 gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
775 desc = &gq->tx_ring[gq->dirty];
781 skb = gq->skbs[gq->dirty];
786 dev_kfree_skb_any(gq->skbs[gq->dirty]);
787 gq->skbs[gq->dirty] = NULL;
852 struct rswitch_gwca_queue *gq;
856 gq = &priv->gwca.queues[i];
857 index = gq->index / 32;
858 bit = BIT(gq->index % 32);
862 rswitch_ack_data_irq(priv, gq->index);
863 rswitch_queue_interrupt(gq->ndev);
915 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
923 desc = &gq->ts_ring[gq->cur];
945 gq->cur = rswitch_next_queue_index(gq, true, 1);
946 desc = &gq->ts_ring[gq->cur];
949 num = rswitch_get_num_cur_queues(gq);
950 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
951 gq->dirty = rswitch_next_queue_index(gq, false, num);
1503 struct rswitch_gwca_queue *gq = rdev->tx_queue;
1508 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
1520 gq->skbs[gq->cur] = skb;
1521 desc = &gq->tx_ring[gq->cur];
1549 wmb(); /* gq->cur must be incremented after die_dt was set */
1551 gq->cur = rswitch_next_queue_index(gq, true, 1);
1552 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
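The transmit path stops the queue once the in-flight count reaches ring_size - 1 (line 1508): one slot is deliberately never used, so cur == dirty can only mean "empty" and the producer never runs into the consumer. The descriptor is written first, then wmb() at line 1549 orders that write before cur is advanced and the GWTRC doorbell bit is set. A small self-contained restatement of the one-slot-reserve check; the demo_* names are illustrative:

#include <assert.h>

struct demo_ring {
        unsigned int cur;       /* producer index */
        unsigned int dirty;     /* consumer index */
        unsigned int size;
};

static unsigned int demo_in_flight(const struct demo_ring *r)
{
        return r->cur >= r->dirty ? r->cur - r->dirty
                                  : r->size - r->dirty + r->cur;
}

/* Mirrors the check at line 1508: stop queueing once size - 1 slots are used. */
static int demo_tx_has_room(const struct demo_ring *r)
{
        return demo_in_flight(r) < r->size - 1;
}

int main(void)
{
        struct demo_ring r = { .cur = 7, .dirty = 0, .size = 8 };

        assert(!demo_tx_has_room(&r));  /* 7 in flight == size - 1: queue is full */
        r.dirty = 1;                    /* one completion reclaimed a slot */
        assert(demo_tx_has_room(&r));
        return 0;
}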