Lines matching refs: rx_queue (Freescale gianfar Ethernet driver)

110 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
118 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
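The matches at 110 and 118 are gfar_init_rxbdp(): every buffer descriptor is set up the same way except the last one, which gets a wrap flag so the controller's DMA engine cycles back to rx_bd_base. A minimal userspace sketch of that ring idiom, assuming a 16-bit status word; RXBD_EMPTY, RXBD_WRAP, struct rxbd and init_rx_ring are illustrative names, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define RXBD_EMPTY 0x8000u  /* hypothetical: descriptor owned by hardware */
    #define RXBD_WRAP  0x2000u  /* hypothetical: last descriptor, ring wraps here */

    struct rxbd {
        uint16_t status;
        uint16_t length;
        uint32_t bufptr;
    };

    static void init_rx_ring(struct rxbd *base, unsigned int ring_size)
    {
        for (unsigned int i = 0; i < ring_size; i++) {
            struct rxbd *bdp = &base[i];
            bdp->status = RXBD_EMPTY;
            /* same shape as the test at 118: only the final descriptor wraps */
            if (bdp == base + ring_size - 1)
                bdp->status |= RXBD_WRAP;
        }
    }

    int main(void)
    {
        struct rxbd ring[8] = { { 0 } };
        init_rx_ring(ring, 8);
        printf("last status: 0x%04x\n", ring[7].status);  /* prints 0xa000 */
        return 0;
    }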
140 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
153 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
259 if (likely(priv->rx_queue[i]->rxcoalescing))
260 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
271 if (unlikely(priv->rx_queue[0]->rxcoalescing))
272 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
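The matches at 140-272 show per-queue MMIO programming: each queue's descriptor base and ring size get their own register slot (140, 153), and interrupt coalescing is written per queue (baddr + i) in multi-queue mode but to a single legacy register (regs->rxic) otherwise, with queue 0's settings winning. A hedged sketch of the coalescing branch; the arrays stand in for MMIO writes and every name here is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RX_QUEUES 2

    struct rx_q {
        int rxcoalescing;   /* is coalescing enabled for this queue? */
        uint32_t rxic;      /* packed frame-count/timer threshold */
    };

    static uint32_t rxic_regs[NUM_RX_QUEUES];  /* stands in for the baddr[i] slots */
    static uint32_t legacy_rxic;               /* stands in for regs->rxic */

    static void program_rx_coalescing(const struct rx_q *q, int multi_queue)
    {
        if (multi_queue) {
            /* one register per queue, cf. 259-260 */
            for (int i = 0; i < NUM_RX_QUEUES; i++)
                if (q[i].rxcoalescing)
                    rxic_regs[i] = q[i].rxic;
        } else if (q[0].rxcoalescing) {
            /* single-queue fallback: only queue 0's value, cf. 271-272 */
            legacy_rxic = q[0].rxic;
        }
    }

    int main(void)
    {
        struct rx_q q[NUM_RX_QUEUES] = { { 1, 0x10 }, { 1, 0x20 } };
        program_rx_coalescing(q, 1);
        printf("rxic[1] = 0x%x\n", (unsigned)rxic_regs[1]);
        (void)legacy_rxic;
        return 0;
    }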
289 rx_packets += priv->rx_queue[i]->stats.rx_packets;
290 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
291 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
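Lines 289-291 accumulate per-queue counters into device-wide totals, the usual pattern for a multi-queue get-stats path. A standalone sketch of the summation (struct and function names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct rx_stats {
        uint64_t rx_packets, rx_bytes, rx_dropped;
    };

    /* Sum every queue's counters into one device-wide view, cf. 289-291. */
    static struct rx_stats total_rx_stats(const struct rx_stats *q, int nqueues)
    {
        struct rx_stats t = { 0, 0, 0 };
        for (int i = 0; i < nqueues; i++) {
            t.rx_packets += q[i].rx_packets;
            t.rx_bytes   += q[i].rx_bytes;
            t.rx_dropped += q[i].rx_dropped;
        }
        return t;
    }

    int main(void)
    {
        struct rx_stats q[2] = { { 10, 1500, 0 }, { 5, 700, 1 } };
        struct rx_stats t = total_rx_stats(q, 2);
        printf("%llu pkts, %llu bytes, %llu dropped\n",
               (unsigned long long)t.rx_packets,
               (unsigned long long)t.rx_bytes,
               (unsigned long long)t.rx_dropped);
        return 0;
    }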
424 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
426 if (!priv->rx_queue[i])
429 priv->rx_queue[i]->qindex = i;
430 priv->rx_queue[i]->ndev = priv->ndev;
448 kfree(priv->rx_queue[i]);
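Lines 424-448 allocate one gfar_priv_rx_q per queue with kzalloc and, on failure, free whatever was already allocated. A userspace sketch of that allocate-or-unwind pattern using calloc/free; the names are illustrative:

    #include <stdlib.h>

    struct rx_q { int qindex; void *ndev; };

    /* Allocate nqueues queue structs; on any failure, undo and return -1. */
    static int alloc_rx_queues(struct rx_q **q, int nqueues, void *ndev)
    {
        for (int i = 0; i < nqueues; i++) {
            q[i] = calloc(1, sizeof(**q));   /* zeroed, like kzalloc at 424 */
            if (!q[i]) {
                while (--i >= 0) {           /* unwind, cf. the kfree at 448 */
                    free(q[i]);
                    q[i] = NULL;
                }
                return -1;
            }
            q[i]->qindex = i;                /* cf. 429 */
            q[i]->ndev = ndev;               /* cf. 430 */
        }
        return 0;
    }

    int main(void)
    {
        struct rx_q *q[8] = { 0 };
        if (alloc_rx_queues(q, 8, NULL))
            return 1;
        for (int i = 0; i < 8; i++)
            free(q[i]);
        return 0;
    }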
563 if (!grp->rx_queue)
564 grp->rx_queue = priv->rx_queue[i];
568 priv->rx_queue[i]->grp = grp;
1125 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1129 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1131 dev_kfree_skb(rx_queue->skb);
1133 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1134 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1143 dma_unmap_page(rx_queue->dev, rxb->dma,
1150 kfree(rx_queue->rx_buff);
1151 rx_queue->rx_buff = NULL;
1160 struct gfar_priv_rx_q *rx_queue = NULL;
1175 rx_queue = priv->rx_queue[i];
1176 if (rx_queue->rx_buff)
1177 free_skb_rx_queue(rx_queue);
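free_skb_rx_queue() at 1125-1151, invoked per queue from the loop at 1175-1177, tears a ring down in a fixed order: drop any partially assembled skb first, unmap every buffer page, then free the rx_buff tracking array and clear the pointer. A sketch of that order, with unmap_page() as a hypothetical stand-in for dma_unmap_page():

    #include <stdlib.h>

    struct rx_buff { void *page; };

    struct rx_queue {
        void *skb;                /* partially assembled frame, may be NULL */
        struct rx_buff *rx_buff;  /* one tracking entry per ring slot */
        unsigned int ring_size;
    };

    /* Stand-in for dma_unmap_page(); here the "mapping" is just ownership. */
    static void unmap_page(void *page) { free(page); }

    static void free_rx_ring(struct rx_queue *rxq)
    {
        free(rxq->skb);               /* cf. dev_kfree_skb at 1131 */
        rxq->skb = NULL;
        for (unsigned int i = 0; i < rxq->ring_size; i++)
            if (rxq->rx_buff[i].page)
                unmap_page(rxq->rx_buff[i].page);  /* cf. 1143 */
        free(rxq->rx_buff);           /* cf. 1150 */
        rxq->rx_buff = NULL;          /* cf. 1151 */
    }

    int main(void)
    {
        struct rx_queue rxq = { NULL, calloc(4, sizeof(struct rx_buff)), 4 };
        rxq.rx_buff[0].page = malloc(4096);
        free_rx_ring(&rxq);
        return 0;
    }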
1267 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1269 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1272 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1276 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1283 i = rx_queue->next_to_use;
1284 bdp = &rx_queue->rx_bd_base[i];
1285 rxb = &rx_queue->rx_buff[i];
1290 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1291 gfar_rx_alloc_err(rx_queue);
1297 gfar_init_rxbdp(rx_queue, bdp,
1304 if (unlikely(++i == rx_queue->rx_ring_size)) {
1306 bdp = rx_queue->rx_bd_base;
1307 rxb = rx_queue->rx_buff;
1311 rx_queue->next_to_use = i;
1312 rx_queue->next_to_alloc = i;
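gfar_alloc_rx_buffs() at 1276-1312 (called for the initial fill at 1359 and from the cleanup path at 2555/2628) refills descriptors starting at next_to_use, breaks out when a page allocation fails after recording it via gfar_rx_alloc_err(), and wraps both the descriptor and buffer cursors back to the base when the index reaches ring_size. A compilable sketch of that loop shape; alloc_page_buf() and the exact field set are illustrative:

    #include <stdlib.h>

    struct rxbd { void *buf; };
    struct rx_buff { void *page; };

    struct rx_queue {
        struct rxbd *rx_bd_base;
        struct rx_buff *rx_buff;
        unsigned int ring_size;
        unsigned int next_to_use, next_to_alloc;
        unsigned long alloc_errors;
    };

    static void *alloc_page_buf(void) { return malloc(4096); } /* stand-in */

    static void refill_rx_ring(struct rx_queue *rxq, unsigned int alloc_cnt)
    {
        unsigned int i = rxq->next_to_use;          /* cf. 1283 */
        struct rxbd *bdp = &rxq->rx_bd_base[i];     /* cf. 1284 */
        struct rx_buff *rxb = &rxq->rx_buff[i];     /* cf. 1285 */

        while (alloc_cnt--) {
            if (!rxb->page) {
                rxb->page = alloc_page_buf();
                if (!rxb->page) {                   /* cf. 1290-1291 */
                    rxq->alloc_errors++;
                    break;
                }
            }
            bdp->buf = rxb->page;                   /* cf. gfar_init_rxbdp at 1297 */
            bdp++;
            rxb++;
            if (++i == rxq->ring_size) {            /* wrap both cursors, cf. 1304-1307 */
                i = 0;
                bdp = rxq->rx_bd_base;
                rxb = rxq->rx_buff;
            }
        }
        rxq->next_to_use = i;                       /* cf. 1311-1312 */
        rxq->next_to_alloc = i;
    }

    int main(void)
    {
        struct rx_queue rxq = { 0 };
        rxq.ring_size = 8;
        rxq.rx_bd_base = calloc(8, sizeof(*rxq.rx_bd_base));
        rxq.rx_buff = calloc(8, sizeof(*rxq.rx_buff));
        refill_rx_ring(&rxq, 8);
        return 0;
    }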
1320 struct gfar_priv_rx_q *rx_queue = NULL;
1350 rx_queue = priv->rx_queue[i];
1352 rx_queue->next_to_clean = 0;
1353 rx_queue->next_to_use = 0;
1354 rx_queue->next_to_alloc = 0;
1359 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1361 rx_queue->rfbptr = rfbptr;
1374 struct gfar_priv_rx_q *rx_queue = NULL;
1382 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1406 rx_queue = priv->rx_queue[i];
1407 rx_queue->rx_bd_base = vaddr;
1408 rx_queue->rx_bd_dma_base = addr;
1409 rx_queue->ndev = ndev;
1410 rx_queue->dev = dev;
1411 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1412 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1430 rx_queue = priv->rx_queue[i];
1431 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1432 sizeof(*rx_queue->rx_buff),
1434 if (!rx_queue->rx_buff)
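At 1374-1434 the driver sizes one coherent DMA region for all rings (total_rx_ring_size at 1382), then walks the queues handing out slices: each queue takes the current vaddr/addr pair as its base and both cursors advance by ring_size * sizeof(struct rxbd8); a per-ring rx_buff tracking array is then kcalloc'd (1431). A sketch of the carving with plain malloc standing in for dma_alloc_coherent; the names and the simulated bus address are illustrative:

    #include <stdlib.h>
    #include <stdint.h>

    struct rxbd { uint16_t status, length; uint32_t bufptr; };

    struct rx_queue {
        struct rxbd *rx_bd_base;   /* CPU view of this ring's slice */
        uintptr_t rx_bd_dma_base;  /* device view (simulated here) */
        unsigned int ring_size;
    };

    static int carve_bd_region(struct rx_queue *q, int nqueues)
    {
        size_t total = 0;
        for (int i = 0; i < nqueues; i++)
            total += q[i].ring_size;                     /* cf. 1382 */

        struct rxbd *vaddr = calloc(total, sizeof(*vaddr)); /* one region for all rings */
        if (!vaddr)
            return -1;
        uintptr_t addr = (uintptr_t)vaddr;               /* pretend bus address */

        for (int i = 0; i < nqueues; i++) {
            q[i].rx_bd_base = vaddr;                     /* cf. 1407-1408 */
            q[i].rx_bd_dma_base = addr;
            vaddr += q[i].ring_size;                     /* cf. 1411-1412 */
            addr += q[i].ring_size * sizeof(struct rxbd);
        }
        return 0;
    }

    int main(void)
    {
        struct rx_queue q[2] = { { 0, 0, 256 }, { 0, 0, 256 } };
        return carve_bd_region(q, 2) ? 1 : 0;
    }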
1522 struct gfar_priv_rx_q *rx_queue = NULL;
1582 rx_queue = priv->rx_queue[i];
1583 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1584 gfar_write(rx_queue->rfbptr, bdp_dma);
2440 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2443 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2452 gfar_rx_alloc_err(rx_queue);
2459 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2464 gfar_reuse_rx_page(rx_queue, rxb);
2467 dma_unmap_page(rx_queue->dev, rxb->dma,
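gfar_get_next_rxbuff() at 2440-2467 syncs the buffer's DMA range for CPU access, attaches the data to the skb, and then either recycles the page back into the ring (gfar_reuse_rx_page at 2464) or unmaps it when it cannot be reused. A simplified sketch of that keep-or-release decision; can_reuse(), sync_for_cpu() and unmap_page() are hypothetical stand-ins for the driver's page-refcount test and the DMA API calls, and the half-page offset flip is an assumption about the buffer layout:

    #include <stdbool.h>
    #include <stdlib.h>

    struct rx_buff {
        void *page;
        unsigned int page_offset;  /* flips between two half-page buffers */
        int refcount;              /* simulated page refcount */
    };

    static void sync_for_cpu(struct rx_buff *rxb) { (void)rxb; } /* stand-in, cf. 2459 */
    static void unmap_page(struct rx_buff *rxb) { free(rxb->page); rxb->page = NULL; }

    /* Reusable only if we hold the sole reference (simplified criterion). */
    static bool can_reuse(const struct rx_buff *rxb) { return rxb->refcount == 1; }

    static void consume_rx_buff(struct rx_buff *rxb, unsigned int buf_size)
    {
        sync_for_cpu(rxb);               /* cf. dma_sync_single_range_for_cpu, 2459 */
        /* ... frame data would be attached to the skb here ... */
        if (can_reuse(rxb)) {
            /* flip to the other half of the page for the next frame, cf. 2464 */
            rxb->page_offset ^= buf_size;
            rxb->refcount++;
        } else {
            unmap_page(rxb);             /* cf. dma_unmap_page, 2467 */
        }
    }

    int main(void)
    {
        struct rx_buff rxb = { malloc(4096), 0, 1 };
        consume_rx_buff(&rxb, 2048);     /* reused: offset flipped */
        consume_rx_buff(&rxb, 2048);     /* refcount now 2: page released */
        return 0;
    }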
2537 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2540 struct net_device *ndev = rx_queue->ndev;
2544 struct sk_buff *skb = rx_queue->skb;
2545 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2549 i = rx_queue->next_to_clean;
2555 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2559 bdp = &rx_queue->rx_bd_base[i];
2570 rx_queue->stats.rx_dropped++;
2579 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2586 if (unlikely(++i == rx_queue->rx_ring_size))
2589 rx_queue->next_to_clean = i;
2601 rx_queue->stats.rx_dropped++;
2611 skb_record_rx_queue(skb, rx_queue->qindex);
2616 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2622 rx_queue->skb = skb;
2624 rx_queue->stats.rx_packets += total_pkts;
2625 rx_queue->stats.rx_bytes += total_bytes;
2628 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2632 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2634 gfar_write(rx_queue->rfbptr, bdp_dma);
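gfar_clean_rx_ring() at 2537-2634 is the NAPI receive path: starting at next_to_clean it consumes descriptors until either the budget is spent or a descriptor still owned by hardware is hit, refills in batches once enough slots have been cleaned, and finally saves the cursor, any partially assembled frame (2622), and the accumulated stats. A condensed sketch of that control flow; desc_ready(), process_buff() and the REFILL_BATCH threshold are illustrative stubs, not the driver's logic:

    #include <stdbool.h>
    #include <stdio.h>

    #define REFILL_BATCH 16  /* hypothetical batch threshold */

    struct rx_queue {
        unsigned int ring_size, next_to_clean;
        unsigned long rx_packets, rx_bytes;
    };

    /* Stubs standing in for descriptor checks, skb assembly and the refill. */
    static bool desc_ready(unsigned int i) { return i < 4; } /* pretend 4 frames wait */
    static unsigned int process_buff(unsigned int i) { (void)i; return 64; }
    static void refill(struct rx_queue *rxq, unsigned int cnt) { (void)rxq; (void)cnt; }

    static int clean_rx_ring(struct rx_queue *rxq, int budget)
    {
        int total_pkts = 0;
        unsigned long total_bytes = 0;
        unsigned int cleaned_cnt = 0;          /* cf. gfar_rxbd_unused at 2545 */
        unsigned int i = rxq->next_to_clean;   /* cf. 2549 */

        while (total_pkts < budget) {
            if (cleaned_cnt >= REFILL_BATCH) { /* periodic batch refill, cf. 2555 */
                refill(rxq, cleaned_cnt);
                cleaned_cnt = 0;
            }
            if (!desc_ready(i))                /* descriptor still owned by hardware */
                break;
            total_bytes += process_buff(i);    /* cf. gfar_get_next_rxbuff at 2579 */
            total_pkts++;
            cleaned_cnt++;
            if (++i == rxq->ring_size)         /* index wrap, cf. 2586 */
                i = 0;
        }
        rxq->next_to_clean = i;                /* cf. 2589 */
        rxq->rx_packets += total_pkts;         /* cf. 2624-2625 */
        rxq->rx_bytes += total_bytes;
        if (cleaned_cnt)
            refill(rxq, cleaned_cnt);          /* final top-up, cf. 2628 */
        return total_pkts;
    }

    int main(void)
    {
        struct rx_queue rxq = { 8, 0, 0, 0 };
        printf("cleaned %d packets\n", clean_rx_ring(&rxq, 64));
        return 0;
    }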
2645 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2653 work_done = gfar_clean_rx_ring(rx_queue, budget);
2705 struct gfar_priv_rx_q *rx_queue = NULL;
2727 rx_queue = priv->rx_queue[i];
2729 gfar_clean_rx_ring(rx_queue, budget_per_q);
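The single-queue poll at 2645-2653 hands the whole NAPI budget to one ring, while the multi-queue path at 2705-2729 divides it and calls gfar_clean_rx_ring() once per queue with its share. A sketch of the split; the round-up formula here is an assumption for illustration, not necessarily the driver's exact division:

    #include <stdio.h>

    /* clean_one_queue() stands in for gfar_clean_rx_ring(); returns packets done. */
    static int clean_one_queue(int q, int budget) { (void)q; return budget / 2; }

    static int poll_rx_queues(int nqueues, int budget)
    {
        /* round up so the whole budget stays usable when it doesn't divide evenly */
        int budget_per_q = (budget + nqueues - 1) / nqueues;
        int done = 0;

        for (int q = 0; q < nqueues; q++)
            done += clean_one_queue(q, budget_per_q);  /* cf. 2727-2729 */
        return done;
    }

    int main(void)
    {
        printf("%d packets cleaned\n", poll_rx_queues(3, 64));
        return 0;
    }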
3405 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3406 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3407 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3468 i, priv->rx_queue[i]->rx_ring_size);
3548 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;