Lines Matching defs:ring
363 /* RDMA/TDMA ring registers and accessors
427 unsigned int ring,
431 (DMA_RING_SIZE * ring) +
436 unsigned int ring, u32 val,
440 (DMA_RING_SIZE * ring) +
445 unsigned int ring,
449 (DMA_RING_SIZE * ring) +
454 unsigned int ring, u32 val,
458 (DMA_RING_SIZE * ring) +
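
The 427-458 matches are the bodies of the per-ring register accessors: each ring owns a DMA_RING_SIZE-byte register window, so every access is offset by (DMA_RING_SIZE * ring) plus a per-register offset. A minimal sketch of the TDMA pair, reconstructed from these fragments (the GENET_TDMA_REG_OFF base and the genet_dma_ring_regs[] offset table are assumed from the surrounding driver; the RDMA pair mirrors this at GENET_RDMA_REG_OFF):

    static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
                                               unsigned int ring,
                                               enum dma_ring_reg r)
    {
            /* each ring gets its own DMA_RING_SIZE-byte register window */
            return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
                                  (DMA_RING_SIZE * ring) +
                                  genet_dma_ring_regs[r]);
    }

    static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
                                                 unsigned int ring, u32 val,
                                                 enum dma_ring_reg r)
    {
            bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
                            (DMA_RING_SIZE * ring) +
                            genet_dma_ring_regs[r]);
    }
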
690 * We'll map them to ring 0, but don't enable the filter
834 struct bcmgenet_rx_ring *ring;
847 ring = &priv->rx_rings[i];
848 ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
850 ring = &priv->rx_rings[DESC_INDEX];
851 ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
856 static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
859 struct bcmgenet_priv *priv = ring->priv;
860 unsigned int i = ring->index;
871 static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
877 ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
878 ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
879 usecs = ring->rx_coalesce_usecs;
880 pkts = ring->rx_max_coalesced_frames;
882 if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
883 moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
888 ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
889 bcmgenet_set_rx_coalesce(ring, usecs, pkts);
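
Lines 871-889 are the core of the ethtool coalescing path: the requested usecs/frames are stored per ring, but when adaptive RX coalescing (DIM) is being switched on for a ring that was not already in DIM mode, the static values are replaced with the default moderation profile for the ring's DIM mode. A sketch of the whole function, reconstructed from the matched lines (the moder/usecs/pkts locals are assumptions):

    static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
                                              struct ethtool_coalesce *ec)
    {
            struct dim_cq_moder moder;
            u32 usecs, pkts;

            ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
            ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
            usecs = ring->rx_coalesce_usecs;
            pkts = ring->rx_max_coalesced_frames;

            /* entering DIM mode: seed from the default profile rather
             * than the user-supplied static values
             */
            if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
                    moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
                    usecs = moder.usec;
                    pkts = moder.pkts;
            }

            ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
            bcmgenet_set_rx_coalesce(ring, usecs, pkts);
    }
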
913 * transmitted, or when the ring is empty.
1678 struct bcmgenet_tx_ring *ring)
1682 tx_cb_ptr = ring->cbs;
1683 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1686 if (ring->write_ptr == ring->end_ptr)
1687 ring->write_ptr = ring->cb_ptr;
1689 ring->write_ptr++;
1695 struct bcmgenet_tx_ring *ring)
1699 tx_cb_ptr = ring->cbs;
1700 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1703 if (ring->write_ptr == ring->cb_ptr)
1704 ring->write_ptr = ring->end_ptr;
1706 ring->write_ptr--;
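
The 1678-1706 matches are the mirrored get/put helpers for TX control blocks: get hands out the CB under the ring's local write pointer and advances it with wraparound at end_ptr; put rewinds it, which lets the xmit path back out of a partially mapped skb. Reconstructed sketch:

    /* hand out the CB under write_ptr, then advance with wraparound */
    static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
                                             struct bcmgenet_tx_ring *ring)
    {
            struct enet_cb *tx_cb_ptr = ring->cbs;

            tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
            if (ring->write_ptr == ring->end_ptr)
                    ring->write_ptr = ring->cb_ptr;
            else
                    ring->write_ptr++;
            return tx_cb_ptr;
    }

    /* mirror image: rewind write_ptr when an xmit has to unwind */
    static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
                                             struct bcmgenet_tx_ring *ring)
    {
            struct enet_cb *tx_cb_ptr = ring->cbs;

            tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
            if (ring->write_ptr == ring->cb_ptr)
                    ring->write_ptr = ring->end_ptr;
            else
                    ring->write_ptr--;
            return tx_cb_ptr;
    }
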
1711 static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
1713 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1717 static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
1719 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1723 static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1725 bcmgenet_intrl2_1_writel(ring->priv,
1726 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1730 static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1732 bcmgenet_intrl2_1_writel(ring->priv,
1733 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1737 static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
1739 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1743 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
1745 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1749 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1751 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1755 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1757 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
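
Lines 1711-1757 show the two-level interrupt scheme: ring 16, the default descriptor ring, masks and unmasks the shared RXDMA_DONE/TXDMA_DONE bits in the INTRL2_0 controller, while each priority ring owns a single bit in INTRL2_1 selected by ring->index. Sketch of the per-ring RX pair (INTRL2_CPU_MASK_SET/CLEAR as the controller's mask registers are assumed from the driver):

    static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
    {
            /* unmask this ring's private bit in the INTRL2_1 controller */
            bcmgenet_intrl2_1_writel(ring->priv,
                                     1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
                                     INTRL2_CPU_MASK_CLEAR);
    }

    static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
    {
            bcmgenet_intrl2_1_writel(ring->priv,
                                     1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
                                     INTRL2_CPU_MASK_SET);
    }
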
1818 struct bcmgenet_tx_ring *ring)
1829 if (ring->index == DESC_INDEX)
1833 bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1837 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1839 txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1842 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1843 __func__, ring->index, ring->c_index, c_index, txbds_ready);
1848 &priv->tx_cbs[ring->clean_ptr]);
1856 if (likely(ring->clean_ptr < ring->end_ptr))
1857 ring->clean_ptr++;
1859 ring->clean_ptr = ring->cb_ptr;
1862 ring->free_bds += txbds_processed;
1863 ring->c_index = c_index;
1865 ring->packets += pkts_compl;
1866 ring->bytes += bytes_compl;
1868 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1875 struct bcmgenet_tx_ring *ring)
1879 spin_lock_bh(&ring->lock);
1880 released = __bcmgenet_tx_reclaim(dev, ring);
1881 spin_unlock_bh(&ring->lock);
1888 struct bcmgenet_tx_ring *ring =
1893 spin_lock(&ring->lock);
1894 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1895 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1896 txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1899 spin_unlock(&ring->lock);
1903 ring->int_enable(ring);
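
Lines 1875-1903 cover the two reclaim entry points: a locked wrapper for non-NAPI callers and the TX NAPI poll. The poll reclaims under ring->lock, wakes the stack's queue once more than MAX_SKB_FRAGS + 1 BDs are free (room for one worst-case skb), and re-enables the ring interrupt only when a pass completes no work. A sketch, assuming the surrounding driver context:

    static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
    {
            struct bcmgenet_tx_ring *ring =
                    container_of(napi, struct bcmgenet_tx_ring, napi);
            unsigned int work_done;
            struct netdev_queue *txq;

            spin_lock(&ring->lock);
            work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
            /* room for a worst-case skb again: wake the stack's queue */
            if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
                    txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
                    netif_tx_wake_queue(txq);
            }
            spin_unlock(&ring->lock);

            if (work_done == 0) {
                    napi_complete(napi);
                    ring->int_enable(ring);
                    return 0;
            }
            return budget;
    }
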
1996 struct bcmgenet_tx_ring *ring = NULL;
2010 * queue_mapping = 1, goes to ring 0. (highest priority queue)
2011 * queue_mapping = 2, goes to ring 1.
2012 * queue_mapping = 3, goes to ring 2.
2013 * queue_mapping = 4, goes to ring 3.
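
The 2010-2013 comment documents the xmit queue-to-ring translation: queue_mapping 0 carries unclassified traffic on ring 16, and queue_mapping N in 1..4 lands on priority ring N - 1. The translation itself (reconstructed; index is the local in the surrounding xmit function) is just:

    index = skb_get_queue_mapping(skb);
    if (index == 0)
            index = DESC_INDEX;     /* unclassified -> ring 16 */
    else
            index -= 1;             /* queue N -> priority ring N - 1 */
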
2020 ring = &priv->tx_rings[index];
2021 txq = netdev_get_tx_queue(dev, ring->queue);
2025 spin_lock(&ring->lock);
2026 if (ring->free_bds <= (nr_frags + 1)) {
2030 "%s: tx ring %d full when queue %d awake\n",
2031 __func__, index, ring->queue);
2050 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
2105 ring->free_bds -= nr_frags + 1;
2106 ring->prod_index += nr_frags + 1;
2107 ring->prod_index &= DMA_P_INDEX_MASK;
2111 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2116 bcmgenet_tdma_ring_writel(priv, ring->index,
2117 ring->prod_index, TDMA_PROD_INDEX);
2119 spin_unlock(&ring->lock);
2125 bcmgenet_put_txcb(priv, ring);
2129 tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
2166 /* Grab the current Rx skb from the ring and DMA-unmap it */
2169 /* Put the new Rx skb on the ring */
2182 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2185 struct bcmgenet_priv *priv = ring->priv;
2198 if (ring->index == DESC_INDEX) {
2202 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2208 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2212 if (discards > ring->old_discards) {
2213 discards = discards - ring->old_discards;
2214 ring->errors += discards;
2215 ring->old_discards += discards;
2218 if (ring->old_discards >= 0xC000) {
2219 ring->old_discards = 0;
2220 bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2226 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
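
Lines 2208-2226 derive the RX work for one poll: the free-running producer index is read from RDMA_PROD_INDEX, the hardware discard counter carried in its upper bits is folded into ring->errors and cleared before it can wrap, and the pending-packet count is the masked difference against the software consumer index. Sketch (the DMA_P_INDEX_DISCARD_CNT_* shift/mask names are assumed from the driver):

    p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

    discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
               DMA_P_INDEX_DISCARD_CNT_MASK;
    if (discards > ring->old_discards) {
            discards = discards - ring->old_discards;
            ring->errors += discards;
            ring->old_discards += discards;

            /* clear the HW counter at 75% of its 0xFFFF range so the
             * "discards > old_discards" test never sees a wrap
             */
            if (ring->old_discards >= 0xC000) {
                    ring->old_discards = 0;
                    bcmgenet_rdma_ring_writel(priv, ring->index, 0,
                                              RDMA_PROD_INDEX);
            }
    }

    p_index &= DMA_P_INDEX_MASK;
    rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
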
2236 cb = &priv->rx_cbs[ring->read_ptr];
2240 ring->dropped++;
2262 __func__, p_index, ring->c_index,
2263 ring->read_ptr, dma_length_status);
2276 ring->errors++;
2317 ring->packets++;
2318 ring->bytes += len;
2323 napi_gro_receive(&ring->napi, skb);
2328 if (likely(ring->read_ptr < ring->end_ptr))
2329 ring->read_ptr++;
2331 ring->read_ptr = ring->cb_ptr;
2333 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2334 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2337 ring->dim.bytes = bytes_processed;
2338 ring->dim.packets = rxpktprocessed;
2346 struct bcmgenet_rx_ring *ring = container_of(napi,
2351 work_done = bcmgenet_desc_rx(ring, budget);
2355 ring->int_enable(ring);
2358 if (ring->dim.use_dim) {
2359 dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2360 ring->dim.bytes, &dim_sample);
2361 net_dim(&ring->dim.dim, dim_sample);
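
Lines 2346-2361 are the RX NAPI poll: the ring interrupt is re-enabled only when the budget was not exhausted, and when DIM is active the byte/packet counters accumulated by bcmgenet_desc_rx are handed to net_dim() to drive the next moderation profile. Sketch, assuming the surrounding context:

    static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
    {
            struct bcmgenet_rx_ring *ring = container_of(napi,
                            struct bcmgenet_rx_ring, napi);
            struct dim_sample dim_sample = {};
            unsigned int work_done;

            work_done = bcmgenet_desc_rx(ring, budget);

            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    ring->int_enable(ring);
            }

            /* feed the sample so net_dim can schedule a profile change */
            if (ring->dim.use_dim) {
                    dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
                                      ring->dim.bytes, &dim_sample);
                    net_dim(&ring->dim.dim, dim_sample);
            }

            return work_done;
    }
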
2372 struct bcmgenet_rx_ring *ring =
2377 bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
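
Line 2377 is the tail of the DIM worker: net_dim() only selects a profile index, and this deferred work applies the chosen profile to the hardware through bcmgenet_set_rx_coalesce(). Sketch of the worker (the container_of chain and the DIM_START_MEASURE reset are assumed from the dim API):

    static void bcmgenet_dim_work(struct work_struct *work)
    {
            struct dim *dim = container_of(work, struct dim, work);
            struct bcmgenet_net_dim *ndim =
                            container_of(dim, struct bcmgenet_net_dim, dim);
            struct bcmgenet_rx_ring *ring =
                            container_of(ndim, struct bcmgenet_rx_ring, dim);
            struct dim_cq_moder cur_profile =
                            net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

            bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
            dim->state = DIM_START_MEASURE;
    }
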
2383 struct bcmgenet_rx_ring *ring)
2392 for (i = 0; i < ring->size; i++) {
2393 cb = ring->cbs + i;
2546 static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2549 struct bcmgenet_net_dim *dim = &ring->dim;
2558 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2560 struct bcmgenet_net_dim *dim = &ring->dim;
2564 usecs = ring->rx_coalesce_usecs;
2565 pkts = ring->rx_max_coalesced_frames;
2574 bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2577 /* Initialize a Tx ring along with corresponding hardware registers */
2582 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2586 spin_lock_init(&ring->lock);
2587 ring->priv = priv;
2588 ring->index = index;
2590 ring->queue = 0;
2591 ring->int_enable = bcmgenet_tx_ring16_int_enable;
2592 ring->int_disable = bcmgenet_tx_ring16_int_disable;
2594 ring->queue = index + 1;
2595 ring->int_enable = bcmgenet_tx_ring_int_enable;
2596 ring->int_disable = bcmgenet_tx_ring_int_disable;
2598 ring->cbs = priv->tx_cbs + start_ptr;
2599 ring->size = size;
2600 ring->clean_ptr = start_ptr;
2601 ring->c_index = 0;
2602 ring->free_bds = size;
2603 ring->write_ptr = start_ptr;
2604 ring->cb_ptr = start_ptr;
2605 ring->end_ptr = end_ptr - 1;
2606 ring->prod_index = 0;
2608 /* Set flow period for ring != 16 */
2633 netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
2637 /* Initialize a RDMA ring */
2642 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2646 ring->priv = priv;
2647 ring->index = index;
2649 ring->int_enable = bcmgenet_rx_ring16_int_enable;
2650 ring->int_disable = bcmgenet_rx_ring16_int_disable;
2652 ring->int_enable = bcmgenet_rx_ring_int_enable;
2653 ring->int_disable = bcmgenet_rx_ring_int_disable;
2655 ring->cbs = priv->rx_cbs + start_ptr;
2656 ring->size = size;
2657 ring->c_index = 0;
2658 ring->read_ptr = start_ptr;
2659 ring->cb_ptr = start_ptr;
2660 ring->end_ptr = end_ptr - 1;
2662 ret = bcmgenet_alloc_rx_buffers(priv, ring);
2666 bcmgenet_init_dim(ring, bcmgenet_dim_work);
2667 bcmgenet_init_rx_coalesce(ring);
2670 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
2699 struct bcmgenet_tx_ring *ring;
2702 ring = &priv->tx_rings[i];
2703 napi_enable(&ring->napi);
2704 ring->int_enable(ring);
2707 ring = &priv->tx_rings[DESC_INDEX];
2708 napi_enable(&ring->napi);
2709 ring->int_enable(ring);
2715 struct bcmgenet_tx_ring *ring;
2718 ring = &priv->tx_rings[i];
2719 napi_disable(&ring->napi);
2722 ring = &priv->tx_rings[DESC_INDEX];
2723 napi_disable(&ring->napi);
2729 struct bcmgenet_tx_ring *ring;
2732 ring = &priv->tx_rings[i];
2733 netif_napi_del(&ring->napi);
2736 ring = &priv->tx_rings[DESC_INDEX];
2737 netif_napi_del(&ring->napi);
2812 struct bcmgenet_rx_ring *ring;
2815 ring = &priv->rx_rings[i];
2816 napi_enable(&ring->napi);
2817 ring->int_enable(ring);
2820 ring = &priv->rx_rings[DESC_INDEX];
2821 napi_enable(&ring->napi);
2822 ring->int_enable(ring);
2828 struct bcmgenet_rx_ring *ring;
2831 ring = &priv->rx_rings[i];
2832 napi_disable(&ring->napi);
2833 cancel_work_sync(&ring->dim.dim.work);
2836 ring = &priv->rx_rings[DESC_INDEX];
2837 napi_disable(&ring->napi);
2838 cancel_work_sync(&ring->dim.dim.work);
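
Lines 2828-2838 show the RX teardown ordering: each ring's NAPI is disabled first, so no further DIM samples can be queued, and only then is the DIM worker flushed with cancel_work_sync(). Sketch (priv->hw_params->rx_queues as the priority-ring count is an assumption):

    static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
    {
            struct bcmgenet_rx_ring *ring;
            unsigned int i;

            for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                    ring = &priv->rx_rings[i];
                    napi_disable(&ring->napi);
                    cancel_work_sync(&ring->dim.dim.work);
            }

            /* the default descriptor ring gets the same treatment */
            ring = &priv->rx_rings[DESC_INDEX];
            napi_disable(&ring->napi);
            cancel_work_sync(&ring->dim.dim.work);
    }
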
2844 struct bcmgenet_rx_ring *ring;
2847 ring = &priv->rx_rings[i];
2848 netif_napi_del(&ring->napi);
2851 ring = &priv->rx_rings[DESC_INDEX];
2852 netif_napi_del(&ring->napi);
2907 /* Configure ring as descriptor ring and re-enable DMA if enabled */
3016 /* Initialize common Rx ring structures */
3029 /* Initialize common TX ring structures */
3353 /* Always enable ring 16 - descriptor ring */
3457 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3459 struct bcmgenet_priv *priv = ring->priv;
3468 txq = netdev_get_tx_queue(priv->dev, ring->queue);
3470 spin_lock(&ring->lock);
3471 if (ring->index == DESC_INDEX) {
3476 intmsk = 1 << ring->index;
3478 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3479 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3481 free_bds = ring->free_bds;
3482 spin_unlock(&ring->lock);
3491 ring->index, ring->queue,
3494 free_bds, ring->size,
3495 ring->prod_index, p_index & DMA_P_INDEX_MASK,
3496 ring->c_index, c_index & DMA_C_INDEX_MASK,
3497 ring->clean_ptr, ring->write_ptr,
3498 ring->cb_ptr, ring->end_ptr);
4078 * just the ring 16 descriptor based TX
4211 /* Always enable ring 16 - descriptor ring */