Lines matching defs:queue
182 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
185 index = macb_tx_ring_wrap(queue->bp, index);
186 index = macb_adj_dma_desc_idx(queue->bp, index);
187 return &queue->tx_ring[index];
190 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
193 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
196 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
200 offset = macb_tx_ring_wrap(queue->bp, index) *
201 macb_dma_desc_get_size(queue->bp);
203 return queue->tx_ring_dma + offset;
211 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
213 index = macb_rx_ring_wrap(queue->bp, index);
214 index = macb_adj_dma_desc_idx(queue->bp, index);
215 return &queue->rx_ring[index];
218 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
220 return queue->rx_buffers + queue->bp->rx_buffer_size *
221 macb_rx_ring_wrap(queue->bp, index);
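The accessor helpers above (macb_tx_desc, macb_tx_skb, macb_tx_dma, macb_rx_desc, macb_rx_buffer) all wrap a free-running index into the ring before touching the arrays; the 64-bit descriptor adjustment done by macb_adj_dma_desc_idx is not modeled here. A minimal standalone sketch of that wrap-then-index pattern, assuming a power-of-two ring size and using a hypothetical demo_ring type rather than the driver's structures:

#include <assert.h>

/* Hypothetical descriptor and ring, for illustration only. */
struct demo_desc { unsigned int addr, ctrl; };

struct demo_ring {
        struct demo_desc *desc;   /* ring storage                    */
        unsigned int      size;   /* number of entries, power of two */
};

/* Wrap a free-running index into the ring, as the macb_*_ring_wrap
 * helpers are assumed to do for power-of-two ring sizes. */
static unsigned int demo_ring_wrap(const struct demo_ring *r, unsigned int index)
{
        return index & (r->size - 1);
}

/* Return the descriptor for a (possibly unwrapped) index,
 * mirroring the shape of macb_tx_desc()/macb_rx_desc(). */
static struct demo_desc *demo_ring_desc(struct demo_ring *r, unsigned int index)
{
        return &r->desc[demo_ring_wrap(r, index)];
}

int main(void)
{
        struct demo_desc storage[8] = { { 0, 0 } };
        struct demo_ring ring = { storage, 8 };

        /* Indices keep counting up; the wrap happens only on access. */
        assert(demo_ring_desc(&ring, 0) == &storage[0]);
        assert(demo_ring_desc(&ring, 9) == &storage[1]);
        return 0;
}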
439 struct macb_queue *queue;
442 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
443 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
446 queue_writel(queue, RBQPH,
447 upper_32_bits(queue->rx_ring_dma));
449 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
452 queue_writel(queue, TBQPH,
453 upper_32_bits(queue->tx_ring_dma));
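The loop above programs each queue's descriptor base registers, writing the low 32 bits to RBQP/TBQP and, when the hardware supports 64-bit addressing, the high 32 bits to RBQPH/TBQPH. A small self-contained sketch of that split, mirroring the arithmetic of the kernel's lower_32_bits()/upper_32_bits() helpers; the demo_* names and the printed "register writes" are stand-ins, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address the way lower_32_bits()/upper_32_bits()
 * are used when programming RBQP/RBQPH and TBQP/TBQPH. */
static uint32_t demo_lower_32_bits(uint64_t addr)
{
        return (uint32_t)(addr & 0xffffffffu);
}

static uint32_t demo_upper_32_bits(uint64_t addr)
{
        return (uint32_t)(addr >> 32);
}

int main(void)
{
        uint64_t ring_dma = 0x0000000123456000ull;  /* example descriptor base */

        /* Low word goes to the base-pointer register, high word to the
         * *H companion register on 64-bit capable controllers. */
        printf("RBQP  <- 0x%08x\n", demo_lower_32_bits(ring_dma));
        printf("RBQPH <- 0x%08x\n", demo_upper_32_bits(ring_dma));
        return 0;
}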
599 struct macb_queue *queue;
604 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
605 queue_writel(queue, IDR,
623 struct macb_queue *queue;
660 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
661 queue_writel(queue, IER,
896 struct macb_queue *queue = container_of(work, struct macb_queue,
898 struct macb *bp = queue->bp;
906 (unsigned int)(queue - bp->queues),
907 queue->tx_tail, queue->tx_head);
909 /* Prevent the queue IRQ handlers from running: each of them may call
917 /* Make sure nobody is trying to queue up new packets */
928 /* Treat frames in TX queue including the ones that caused the error.
931 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
934 desc = macb_tx_desc(queue, tail);
936 tx_skb = macb_tx_skb(queue, tail);
944 tx_skb = macb_tx_skb(queue, tail);
956 queue->stats.tx_packets++;
958 queue->stats.tx_bytes += skb->len;
975 /* Set end of TX queue */
976 desc = macb_tx_desc(queue, 0);
983 /* Reinitialize the TX desc queue */
984 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
987 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
990 queue->tx_head = 0;
991 queue->tx_tail = 0;
995 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
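macb_tx_error_task above follows a quiesce, drain, reset, re-arm order: mask the TX interrupt sources, walk tx_tail..tx_head releasing the in-flight buffers, rewrite TBQP, zero both indices, then re-enable MACB_TX_INT_FLAGS. A compressed sketch of that ordering, with hypothetical stand-ins for the skb handling and register writes:

#include <stdio.h>

#define RING_SIZE 8u   /* power of two, as the driver's ring sizes are assumed to be */

struct demo_txq {
        unsigned int head, tail;            /* free-running indices    */
        int          inflight[RING_SIZE];   /* 1 = buffer handed to HW */
};

/* Stand-ins for the IDR/IER/TBQP writes; purely illustrative. */
static void demo_mask_tx_irqs(void)   { puts("IDR  <- TX flags"); }
static void demo_unmask_tx_irqs(void) { puts("IER  <- TX flags"); }
static void demo_rewrite_tbqp(void)   { puts("TBQP <- tx_ring_dma"); }

/* Same order as macb_tx_error_task(): quiesce, drain, reset, re-arm. */
static void demo_tx_error_recover(struct demo_txq *q)
{
        unsigned int tail;

        demo_mask_tx_irqs();                              /* quiesce */

        for (tail = q->tail; tail != q->head; tail++) {   /* drain   */
                unsigned int entry = tail & (RING_SIZE - 1);

                if (q->inflight[entry]) {
                        printf("free skb at entry %u\n", entry);
                        q->inflight[entry] = 0;
                }
        }

        demo_rewrite_tbqp();                /* reset the descriptor base */
        q->head = 0;
        q->tail = 0;

        demo_unmask_tx_irqs();              /* re-arm TX interrupts      */
}

int main(void)
{
        struct demo_txq q = { 3, 1, { 0, 1, 1, 0 } };

        demo_tx_error_recover(&q);
        return 0;
}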
1004 static void macb_tx_interrupt(struct macb_queue *queue)
1009 struct macb *bp = queue->bp;
1010 u16 queue_index = queue - bp->queues;
1016 queue_writel(queue, ISR, MACB_BIT(TCOMP));
1021 head = queue->tx_head;
1022 for (tail = queue->tx_tail; tail != head; tail++) {
1028 desc = macb_tx_desc(queue, tail);
1043 tx_skb = macb_tx_skb(queue, tail);
1050 gem_ptp_do_txstamp(queue, skb, desc) == 0) {
1060 queue->stats.tx_packets++;
1062 queue->stats.tx_bytes += skb->len;
1077 queue->tx_tail = tail;
1079 CIRC_CNT(queue->tx_head, queue->tx_tail,
1084 static void gem_rx_refill(struct macb_queue *queue)
1089 struct macb *bp = queue->bp;
1092 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1094 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1099 desc = macb_rx_desc(queue, entry);
1101 if (!queue->rx_skbuff[entry]) {
1119 queue->rx_skbuff[entry] = skb;
1137 queue->rx_prepared_head++;
1143 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1144 queue, queue->rx_prepared_head, queue->rx_tail);
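Both the TX completion walk (CIRC_CNT of tx_head vs. tx_tail) and gem_rx_refill (CIRC_SPACE of rx_prepared_head vs. rx_tail) do their accounting on free-running indices with the <linux/circ_buf.h> macros. A standalone sketch of that accounting, assuming power-of-two ring sizes; the DEMO_* macros restate the kernel ones so the example compiles on its own:

#include <assert.h>

/* Same arithmetic as the kernel's CIRC_CNT()/CIRC_SPACE() from
 * <linux/circ_buf.h>, restated here so the sketch is self-contained. */
#define DEMO_CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define DEMO_CIRC_SPACE(head, tail, size) DEMO_CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
        unsigned int size = 8;      /* ring entries, power of two    */
        unsigned int head = 5;      /* producer index (e.g. tx_head) */
        unsigned int tail = 2;      /* consumer index (e.g. tx_tail) */

        /* Entries queued but not yet reclaimed: what macb_tx_interrupt()
         * checks before waking the transmit queue. */
        assert(DEMO_CIRC_CNT(head, tail, size) == 3);

        /* Free entries left for new descriptors: what gem_rx_refill()
         * and the transmit path check before producing more. One slot
         * stays unused so head == tail unambiguously means "empty". */
        assert(DEMO_CIRC_SPACE(head, tail, size) == 4);
        return 0;
}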
1148 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1154 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1168 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1171 struct macb *bp = queue->bp;
1183 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1184 desc = macb_rx_desc(queue, entry);
1200 queue->rx_tail++;
1207 queue->stats.rx_dropped++;
1210 skb = queue->rx_skbuff[entry];
1215 queue->stats.rx_dropped++;
1219 queue->rx_skbuff[entry] = NULL;
1236 queue->stats.rx_packets++;
1238 queue->stats.rx_bytes += skb->len;
1254 gem_rx_refill(queue);
1259 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1267 struct macb *bp = queue->bp;
1269 desc = macb_rx_desc(queue, last_frag);
1288 desc = macb_rx_desc(queue, frag);
1316 macb_rx_buffer(queue, frag),
1319 desc = macb_rx_desc(queue, frag);
1341 static inline void macb_init_rx_ring(struct macb_queue *queue)
1343 struct macb *bp = queue->bp;
1348 addr = queue->rx_buffers_dma;
1350 desc = macb_rx_desc(queue, i);
1356 queue->rx_tail = 0;
1359 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1362 struct macb *bp = queue->bp;
1368 for (tail = queue->rx_tail; budget > 0; tail++) {
1369 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1385 discard_partial_frame(queue, first_frag, tail);
1397 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1414 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1421 macb_init_rx_ring(queue);
1422 queue_writel(queue, RBQP, queue->rx_ring_dma);
1431 queue->rx_tail = first_frag;
1433 queue->rx_tail = tail;
1440 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1441 struct macb *bp = queue->bp;
1451 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1466 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1469 queue_writel(queue, IER, bp->rx_intr_mask);
1481 queue_writel(queue, IDR, bp->rx_intr_mask);
1483 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1498 struct macb_queue *queue;
1502 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1503 queue_writel(queue, IDR, bp->rx_intr_mask |
1520 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1521 queue_writel(queue, IER,
1533 static void macb_tx_restart(struct macb_queue *queue)
1535 unsigned int head = queue->tx_head;
1536 unsigned int tail = queue->tx_tail;
1537 struct macb *bp = queue->bp;
1541 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1546 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1558 struct macb_queue *queue = dev_id;
1559 struct macb *bp = queue->bp;
1562 status = queue_readl(queue, ISR);
1570 queue_writel(queue, IDR, MACB_BIT(WOL));
1572 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1573 (unsigned int)(queue - bp->queues),
1576 queue_writel(queue, ISR, MACB_BIT(WOL));
1587 struct macb_queue *queue = dev_id;
1588 struct macb *bp = queue->bp;
1591 status = queue_readl(queue, ISR);
1599 queue_writel(queue, IDR, GEM_BIT(WOL));
1601 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1602 (unsigned int)(queue - bp->queues),
1605 queue_writel(queue, ISR, GEM_BIT(WOL));
1616 struct macb_queue *queue = dev_id;
1617 struct macb *bp = queue->bp;
1621 status = queue_readl(queue, ISR);
1631 queue_writel(queue, IDR, -1);
1633 queue_writel(queue, ISR, -1);
1637 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1638 (unsigned int)(queue - bp->queues),
1648 queue_writel(queue, IDR, bp->rx_intr_mask);
1650 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1652 if (napi_schedule_prep(&queue->napi)) {
1654 __napi_schedule(&queue->napi);
1659 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1660 schedule_work(&queue->tx_error_task);
1663 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1669 macb_tx_interrupt(queue);
1672 macb_tx_restart(queue);
1692 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1703 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1711 queue_writel(queue, ISR, MACB_BIT(HRESP));
1713 status = queue_readl(queue, ISR);
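macb_interrupt above runs a read, dispatch, acknowledge loop: read ISR, act on each set bit (schedule NAPI for RX completion, reclaim TX, kick the error task), clear what was handled, and re-read until nothing is pending. A hedged sketch of that loop shape with a hypothetical two-bit status register; the real handler also honours IDR masking and the write-1-to-clear ISR variants, which are omitted here:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout and "hardware", for illustration only. */
#define DEMO_BIT_RCOMP  (1u << 1)   /* receive complete  */
#define DEMO_BIT_TCOMP  (1u << 2)   /* transmit complete */

static uint32_t demo_isr_reg = DEMO_BIT_RCOMP | DEMO_BIT_TCOMP;

static uint32_t demo_read_isr(void)         { return demo_isr_reg; }
static void     demo_ack_isr(uint32_t bits) { demo_isr_reg &= ~bits; }

/* Shape of macb_interrupt(): read ISR, dispatch on the set bits,
 * acknowledge them, then re-read until nothing is pending. */
static void demo_interrupt(void)
{
        uint32_t status = demo_read_isr();

        while (status) {
                if (status & DEMO_BIT_RCOMP) {
                        printf("schedule NAPI poll for RX\n");
                        demo_ack_isr(DEMO_BIT_RCOMP);
                }
                if (status & DEMO_BIT_TCOMP) {
                        printf("reclaim completed TX descriptors\n");
                        demo_ack_isr(DEMO_BIT_TCOMP);
                }
                status = demo_read_isr();   /* pick up anything new */
        }
}

int main(void)
{
        demo_interrupt();
        return 0;
}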
1728 struct macb_queue *queue;
1733 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1734 macb_interrupt(dev->irq, queue);
1740 struct macb_queue *queue,
1745 unsigned int len, entry, i, tx_head = queue->tx_head;
1772 tx_skb = &queue->tx_skb[entry];
1803 tx_skb = &queue->tx_skb[entry];
1837 * to set the end of TX queue
1842 desc = macb_tx_desc(queue, entry);
1863 tx_skb = &queue->tx_skb[entry];
1864 desc = macb_tx_desc(queue, entry);
1875 if (i == queue->tx_head) {
1894 } while (i != queue->tx_head);
1896 queue->tx_head = tx_head;
1903 for (i = queue->tx_head; i != tx_head; i++) {
1904 tx_skb = macb_tx_skb(queue, i);
2020 struct macb_queue *queue = &bp->queues[queue_index];
2042 /* only queue eth + ip headers separately for UDP */
2056 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2081 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2086 queue->tx_head, queue->tx_tail);
2091 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2102 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
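The transmit path above checks CIRC_SPACE against the number of descriptors the frame needs before mapping it, and stops the software queue once fewer than one free slot would remain. A sketch of that stop-when-nearly-full producer pattern, with a hypothetical demo_txq and a boolean standing in for netif_stop_subqueue():

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8u   /* power of two, as the driver's ring sizes are assumed to be */

#define RING_CNT(head, tail)   (((head) - (tail)) & (RING_SIZE - 1))
#define RING_SPACE(head, tail) RING_CNT((tail), (head) + 1)

struct demo_txq {
        unsigned int head;     /* next free descriptor (producer) */
        unsigned int tail;     /* next descriptor to reclaim      */
        bool stopped;          /* stand-in for a stopped netdev queue */
};

/* Mirror of the start_xmit flow: refuse if the frame does not fit,
 * and stop the queue when the ring is about to run dry. */
static bool demo_xmit(struct demo_txq *q, unsigned int desc_needed)
{
        if (RING_SPACE(q->head, q->tail) < desc_needed) {
                q->stopped = true;          /* ~ netif_stop_subqueue() */
                return false;               /* ~ NETDEV_TX_BUSY        */
        }

        q->head += desc_needed;             /* descriptors handed to HW */

        if (RING_SPACE(q->head, q->tail) < 1)
                q->stopped = true;          /* no room for another frame */
        return true;
}

int main(void)
{
        struct demo_txq q = { 0, 0, false };

        while (demo_xmit(&q, 2))            /* queue 2-descriptor frames */
                ;
        printf("stopped=%d head=%u tail=%u\n", q.stopped, q.head, q.tail);
        return 0;
}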
2135 struct macb_queue *queue;
2140 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2141 if (!queue->rx_skbuff)
2145 skb = queue->rx_skbuff[i];
2150 desc = macb_rx_desc(queue, i);
2159 kfree(queue->rx_skbuff);
2160 queue->rx_skbuff = NULL;
2166 struct macb_queue *queue = &bp->queues[0];
2168 if (queue->rx_buffers) {
2171 queue->rx_buffers, queue->rx_buffers_dma);
2172 queue->rx_buffers = NULL;
2178 struct macb_queue *queue;
2184 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2185 kfree(queue->tx_skb);
2186 queue->tx_skb = NULL;
2187 if (queue->tx_ring) {
2190 queue->tx_ring, queue->tx_ring_dma);
2191 queue->tx_ring = NULL;
2193 if (queue->rx_ring) {
2196 queue->rx_ring, queue->rx_ring_dma);
2197 queue->rx_ring = NULL;
2204 struct macb_queue *queue;
2208 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2210 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2211 if (!queue->rx_skbuff)
2216 bp->rx_ring_size, queue->rx_skbuff);
2223 struct macb_queue *queue = &bp->queues[0];
2227 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2228 &queue->rx_buffers_dma, GFP_KERNEL);
2229 if (!queue->rx_buffers)
2234 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2240 struct macb_queue *queue;
2244 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2246 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2247 &queue->tx_ring_dma,
2249 if (!queue->tx_ring)
2252 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2253 q, size, (unsigned long)queue->tx_ring_dma,
2254 queue->tx_ring);
2257 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2258 if (!queue->tx_skb)
2262 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2263 &queue->rx_ring_dma, GFP_KERNEL);
2264 if (!queue->rx_ring)
2268 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2282 struct macb_queue *queue;
2287 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2289 desc = macb_tx_desc(queue, i);
2294 queue->tx_head = 0;
2295 queue->tx_tail = 0;
2297 queue->rx_tail = 0;
2298 queue->rx_prepared_head = 0;
2300 gem_rx_refill(queue);
2324 struct macb_queue *queue;
2343 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2344 queue_writel(queue, IDR, -1);
2345 queue_readl(queue, ISR);
2347 queue_writel(queue, ISR, -1);
2422 struct macb_queue *queue;
2430 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2432 queue_writel(queue, RBQS, buffer_size);
2620 struct macb_queue *queue;
2640 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2641 napi_enable(&queue->napi);
2658 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2659 napi_disable(&queue->napi);
2669 struct macb_queue *queue;
2675 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2676 napi_disable(&queue->napi);
2708 struct macb_queue *queue;
2730 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2731 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2803 struct macb_queue *queue;
2813 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3235 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3288 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3601 /* bit 0 is never set but queue 0 always exists */
3704 struct macb_queue *queue;
3711 /* set the queue register mapping once for all: queue0 has a special
3712 * register mapping but we don't want to test the queue index then
3719 queue = &bp->queues[q];
3720 queue->bp = bp;
3721 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
3723 queue->ISR = GEM_ISR(hw_q - 1);
3724 queue->IER = GEM_IER(hw_q - 1);
3725 queue->IDR = GEM_IDR(hw_q - 1);
3726 queue->IMR = GEM_IMR(hw_q - 1);
3727 queue->TBQP = GEM_TBQP(hw_q - 1);
3728 queue->RBQP = GEM_RBQP(hw_q - 1);
3729 queue->RBQS = GEM_RBQS(hw_q - 1);
3732 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3733 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3738 queue->ISR = MACB_ISR;
3739 queue->IER = MACB_IER;
3740 queue->IDR = MACB_IDR;
3741 queue->IMR = MACB_IMR;
3742 queue->TBQP = MACB_TBQP;
3743 queue->RBQP = MACB_RBQP;
3746 queue->TBQPH = MACB_TBQPH;
3747 queue->RBQPH = MACB_RBQPH;
3752 /* get irq: here we use the linux queue index, not the hardware
3753 * queue index. the queue irq definitions in the device tree
3755 * hardware queue mask.
3757 queue->irq = platform_get_irq(pdev, q);
3758 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3759 IRQF_SHARED, dev->name, queue);
3763 queue->irq, err);
3767 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
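In the per-queue init loop above, queue 0 keeps the classic MACB register offsets (MACB_ISR, MACB_TBQP, ...) while hardware queues 1..N get the GEM per-queue variants (GEM_ISR(hw_q - 1), ...), so the fast paths can use queue->ISR and friends without re-testing the queue index. A sketch of that resolve-once idea; the offset values below are purely hypothetical, the real ones live in the driver's macb.h:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register offsets, for illustration only. */
#define DEMO_ISR_Q0     0x024u                 /* queue 0: base ISR      */
#define DEMO_ISR_QN(n)  (0x400u + 4u * (n))    /* queues 1..N: per-queue */

struct demo_queue {
        unsigned int hw_q;    /* hardware queue number      */
        uint32_t     isr_off; /* resolved once at init time */
};

/* Resolve the per-queue ISR offset once, mirroring the way the driver
 * fills queue->ISR/IER/IDR/... during init. */
static void demo_queue_init(struct demo_queue *q, unsigned int hw_q)
{
        q->hw_q    = hw_q;
        q->isr_off = hw_q ? DEMO_ISR_QN(hw_q - 1) : DEMO_ISR_Q0;
}

int main(void)
{
        struct demo_queue q0, q2;

        demo_queue_init(&q0, 0);
        demo_queue_init(&q2, 2);

        /* Interrupt and fast-path code can now just use q->isr_off,
         * the same way queue_readl(queue, ISR) does in the driver. */
        printf("q0 ISR @ 0x%03x, q2 ISR @ 0x%03x\n", q0.isr_off, q2.isr_off);
        return 0;
}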
4700 struct macb_queue *queue = bp->queues;
4713 for (q = 0, queue = bp->queues; q < bp->num_queues;
4714 ++q, ++queue) {
4716 queue_writel(queue, IDR, -1);
4717 queue_readl(queue, ISR);
4719 queue_writel(queue, ISR, -1);
4722 * Enable WoL IRQ on queue 0
4756 for (q = 0, queue = bp->queues; q < bp->num_queues;
4757 ++q, ++queue)
4758 napi_disable(&queue->napi);
4787 struct macb_queue *queue = bp->queues;
4808 /* Clear ISR on queue 0 */
4812 /* Replace interrupt handler on queue 0 */
4835 for (q = 0, queue = bp->queues; q < bp->num_queues;
4836 ++q, ++queue)
4837 napi_enable(&queue->napi);