Lines matching references to rxq in drivers/net/ethernet/marvell/mvneta.c; each entry shows the source line number followed by the matching line.

109 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
134 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
857 struct mvneta_rx_queue *rxq,
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
876 struct mvneta_rx_queue *rxq)
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
888 struct mvneta_rx_queue *rxq,
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
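The status-update helpers above (writes at lines 864/870 and 896/916) report processed and newly filled descriptor counts back to the hardware. A plausible reading of the two writes per helper is that the counts are pushed in bounded chunks; the standalone sketch below models that technique, assuming an 8-bit per-write field (at most 255 descriptors per write). The shift value and register layout are illustrative stand-ins for the MVNETA definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define ADD_NON_OCCUPIED_SHIFT 16   /* assumed field position, illustration only */
#define MAX_PER_WRITE          255  /* assumed 8-bit field: max descriptors per write */

/* Stand-in for a register write; the real driver uses mvreg_write(). */
static void reg_write(uint32_t val)
{
	printf("status update: processed=%u, newly filled=%u\n",
	       val & 0xff, (val >> ADD_NON_OCCUPIED_SHIFT) & 0xff);
}

/* Report rx_done processed and rx_filled refilled descriptors, splitting the
 * counts into chunks that fit the assumed 8-bit register fields. */
static void rxq_desc_num_update(int rx_done, int rx_filled)
{
	while (rx_done > 0 || rx_filled > 0) {
		uint32_t done = rx_done > MAX_PER_WRITE ? MAX_PER_WRITE :
				(rx_done > 0 ? (uint32_t)rx_done : 0);
		uint32_t filled = rx_filled > MAX_PER_WRITE ? MAX_PER_WRITE :
				  (rx_filled > 0 ? (uint32_t)rx_filled : 0);

		reg_write(done | (filled << ADD_NON_OCCUPIED_SHIFT));
		rx_done -= done;
		rx_filled -= filled;
	}
}

int main(void)
{
	rxq_desc_num_update(300, 300);  /* two writes: 255 then 45 for each count */
	return 0;
}
```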
922 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
924 int rx_desc = rxq->next_desc_to_proc;
926 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
927 prefetch(rxq->descs + rxq->next_desc_to_proc);
928 return rxq->descs + rx_desc;
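mvneta_rxq_next_desc_get() (lines 922-928) returns the current descriptor, advances next_desc_to_proc, and prefetches the following entry. A standalone sketch of the index arithmetic, assuming MVNETA_QUEUE_NEXT_DESC(q, index) wraps back to 0 once the index reaches q->last_desc:

```c
#include <stdio.h>

struct ring {
	int next_to_proc;
	int last_desc;      /* ring size - 1 */
};

/* Assumed behaviour of MVNETA_QUEUE_NEXT_DESC(): advance, wrapping at the end. */
static int queue_next_desc(const struct ring *q, int index)
{
	return index < q->last_desc ? index + 1 : 0;
}

/* Return the current index and advance next_to_proc, mirroring how the driver
 * helper returns the current descriptor and prefetches the next one. */
static int next_desc_get(struct ring *q)
{
	int cur = q->next_to_proc;

	q->next_to_proc = queue_next_desc(q, cur);
	return cur;
}

int main(void)
{
	struct ring q = { .next_to_proc = 0, .last_desc = 3 };  /* 4-entry ring */

	for (int i = 0; i < 6; i++)
		printf("%d ", next_desc_get(&q));   /* prints 0 1 2 3 0 1 */
	printf("\n");
	return 0;
}
```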
946 struct mvneta_rx_queue *rxq,
951 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
956 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1001 /* Set rxq buf size */
1003 struct mvneta_rx_queue *rxq,
1008 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
1013 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1018 struct mvneta_rx_queue *rxq)
1022 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1029 struct mvneta_rx_queue *rxq)
1033 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1035 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1040 struct mvneta_rx_queue *rxq)
1044 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1048 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1053 struct mvneta_rx_queue *rxq)
1057 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1061 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1271 if (rxq->descs)
1498 int rxq, txq;
1500 for (rxq = 0; rxq < rxq_number; rxq++)
1501 if ((rxq % max_cpu) == cpu)
1502 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
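Lines 1498-1502 (and again 4343-4347) build the per-CPU RX-queue access mask: queue q is assigned to CPU q % max_cpu, and MVNETA_CPU_RXQ_ACCESS(q) is BIT(q) per line 134. A standalone sketch of the mask computation; the queue and CPU counts below are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define CPU_RXQ_ACCESS(rxq) BIT(rxq)   /* mirrors MVNETA_CPU_RXQ_ACCESS */

/* Build the RX-queue access bitmap for one CPU: queue q belongs to CPU q % max_cpu. */
static uint32_t cpu_rxq_map(int cpu, int rxq_number, int max_cpu)
{
	uint32_t rxq_map = 0;

	for (int rxq = 0; rxq < rxq_number; rxq++)
		if ((rxq % max_cpu) == cpu)
			rxq_map |= CPU_RXQ_ACCESS(rxq);

	return rxq_map;
}

int main(void)
{
	/* Illustrative: 8 RX queues spread over 2 CPUs. */
	printf("cpu0 map = 0x%02x\n", cpu_rxq_map(0, 8, 2));  /* 0x55: queues 0,2,4,6 */
	printf("cpu1 map = 0x%02x\n", cpu_rxq_map(1, 8, 2));  /* 0xaa: queues 1,3,5,7 */
	return 0;
}
```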
1686 struct mvneta_rx_queue *rxq, u32 value)
1688 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1696 struct mvneta_rx_queue *rxq, u32 value)
1704 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
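mvneta_rx_pkts_coal_set() and mvneta_rx_time_coal_set() (lines 1686-1704) program the per-queue interrupt coalescing thresholds. A standalone sketch of the likely time conversion, assuming the time-coalescing register counts port-clock cycles so the microsecond value is scaled by the clock rate; the 250 MHz rate below is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Convert a coalescing time in microseconds to clock cycles.
 * Assumption: the hardware register counts cycles of the port clock. */
static uint32_t usecs_to_coal_cycles(unsigned long clk_rate_hz, uint32_t usecs)
{
	return (uint32_t)((clk_rate_hz / 1000000) * usecs);
}

int main(void)
{
	unsigned long clk = 250000000UL;  /* illustrative 250 MHz port clock */

	/* 100 us of coalescing -> 25000 clock cycles written to the register. */
	printf("%u cycles\n", usecs_to_coal_cycles(clk, 100));
	return 0;
}
```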
1724 struct mvneta_rx_queue *rxq)
1729 i = rx_desc - rxq->descs;
1730 rxq->buf_virt_addr[i] = virt_addr;
1937 struct mvneta_rx_queue *rxq,
1943 page = page_pool_alloc_pages(rxq->page_pool,
1949 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1987 struct mvneta_rx_queue *rxq)
1991 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1993 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1998 mvneta_rxq_next_desc_get(rxq);
2010 for (i = 0; i < rxq->size; i++) {
2011 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
2012 void *data = rxq->buf_virt_addr[i];
2016 page_pool_put_full_page(rxq->page_pool, data, false);
2018 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
2019 xdp_rxq_info_unreg(&rxq->xdp_rxq);
2020 page_pool_destroy(rxq->page_pool);
2021 rxq->page_pool = NULL;
2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2044 int curr_desc = rxq->first_to_refill;
2047 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
2048 rx_desc = rxq->descs + curr_desc;
2050 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2054 rxq->id, i, rxq->refill_num);
2063 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2065 rxq->refill_num -= i;
2066 rxq->first_to_refill = curr_desc;
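mvneta_rx_refill_queue() (lines 2041-2066) walks the ring from first_to_refill, refills at most 64 descriptors per call, and stops early if buffer allocation fails; refill_num and first_to_refill are then updated so the next call resumes where this one stopped. A standalone model of that bookkeeping; alloc_ok() is a hypothetical stand-in for the buffer allocation done by mvneta_rx_refill():

```c
#include <stdbool.h>
#include <stdio.h>

struct ring {
	int size;
	int first_to_refill;   /* next descriptor index to refill */
	int refill_num;        /* descriptors still waiting for a buffer */
};

/* Simulated buffer allocation; the driver calls mvneta_rx_refill() here. */
static bool alloc_ok(int desc)
{
	(void)desc;
	return true;
}

/* Refill up to 64 descriptors per call, resuming at first_to_refill and
 * stopping early on allocation failure. Returns how many were refilled. */
static int rx_refill_queue(struct ring *q)
{
	int curr = q->first_to_refill;
	int i;

	for (i = 0; i < q->refill_num && i < 64; i++) {
		if (!alloc_ok(curr))
			break;                              /* retry on the next call */
		curr = curr < q->size - 1 ? curr + 1 : 0;   /* wrap around the ring */
	}

	q->refill_num -= i;
	q->first_to_refill = curr;
	return i;
}

int main(void)
{
	struct ring q = { .size = 128, .first_to_refill = 100, .refill_num = 80 };

	printf("refilled %d, next start %d, still pending %d\n",
	       rx_refill_queue(&q), q.first_to_refill, q.refill_num);  /* 64, 36, 16 */
	return 0;
}
```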
2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2082 page_pool_put_full_page(rxq->page_pool,
2086 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2281 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2292 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2301 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2316 struct mvneta_rx_queue *rxq,
2334 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2351 struct mvneta_rx_queue *rxq,
2367 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2389 page_pool_put_full_page(rxq->page_pool, page, true);
2427 struct mvneta_rx_queue *rxq)
2436 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2440 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2446 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2451 index = rx_desc - rxq->descs;
2452 page = (struct page *)rxq->buf_virt_addr[index];
2456 rxq->refill_num++;
2469 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2474 page_pool_put_full_page(rxq->page_pool, page,
2479 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2488 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2493 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2496 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2500 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2520 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2529 refill = mvneta_rx_refill_queue(pp, rxq);
2531 /* Update rxq management counters */
2532 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2540 struct mvneta_rx_queue *rxq)
2548 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2557 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2664 /* Update rxq management counters */
2665 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
3347 struct mvneta_rx_queue *rxq, int size)
3362 rxq->page_pool = page_pool_create(&pp_params);
3363 if (IS_ERR(rxq->page_pool)) {
3364 err = PTR_ERR(rxq->page_pool);
3365 rxq->page_pool = NULL;
3369 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
3374 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3375 rxq->page_pool);
3382 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3384 page_pool_destroy(rxq->page_pool);
3385 rxq->page_pool = NULL;
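Lines 3362-3385 show the per-queue page_pool being created and its xdp_rxq_info registered, with the registrations unwound on failure. Below is a kernel-context sketch of that create/register/unwind sequence, reusing the structure names visible in the listing; the page_pool_params values are illustrative rather than the driver's actual configuration, and the snippet assumes the driver's existing headers.

```c
/* Sketch only: create a page_pool for one RX queue, register its
 * xdp_rxq_info, and unwind in reverse order if any step fails. */
static int example_rxq_pool_setup(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = pp->dev->dev.parent,
		.dma_dir = DMA_FROM_DEVICE,	/* illustrative; XDP may need bidirectional */
		.max_len = PAGE_SIZE,		/* illustrative maximum buffer length */
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	/* Register the queue for XDP; the last argument is the frag size. */
	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, PAGE_SIZE);
	if (err < 0)
		goto err_free_pp;

	/* Tell the XDP core that buffers for this queue come from the page_pool. */
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unreg_rxq;

	return 0;

err_unreg_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}
```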
3389 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3390 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3395 err = mvneta_create_page_pool(pp, rxq, num);
3400 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3401 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3404 "%s:rxq %d, %d of %d buffs filled\n",
3405 __func__, rxq->id, i, num);
3413 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3440 struct mvneta_rx_queue *rxq)
3442 rxq->size = pp->rx_ring_size;
3445 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3446 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3447 &rxq->descs_phys, GFP_KERNEL);
3448 if (!rxq->descs)
3451 rxq->last_desc = rxq->size - 1;
3457 struct mvneta_rx_queue *rxq)
3460 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3461 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3464 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3465 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3469 mvneta_rxq_offset_set(pp, rxq, 0);
3470 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3473 mvneta_rxq_bm_disable(pp, rxq);
3474 mvneta_rxq_fill(pp, rxq, rxq->size);
3477 mvneta_rxq_offset_set(pp, rxq,
3480 mvneta_rxq_bm_enable(pp, rxq);
3482 mvneta_rxq_long_pool_set(pp, rxq);
3483 mvneta_rxq_short_pool_set(pp, rxq);
3484 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3490 struct mvneta_rx_queue *rxq)
3495 ret = mvneta_rxq_sw_init(pp, rxq);
3499 mvneta_rxq_hw_init(pp, rxq);
3506 struct mvneta_rx_queue *rxq)
3508 mvneta_rxq_drop_pkts(pp, rxq);
3510 if (rxq->descs)
3512 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3513 rxq->descs,
3514 rxq->descs_phys);
3516 rxq->descs = NULL;
3517 rxq->last_desc = 0;
3518 rxq->next_desc_to_proc = 0;
3519 rxq->descs_phys = 0;
3520 rxq->first_to_refill = 0;
3521 rxq->refill_num = 0;
3668 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
4333 /* Use the cpu associated to the rxq when it is online, in all
4343 int rxq;
4345 for (rxq = 0; rxq < rxq_number; rxq++)
4346 if ((rxq % max_cpu) == cpu)
4347 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4683 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4684 rxq->time_coal = c->rx_coalesce_usecs;
4685 rxq->pkts_coal = c->rx_max_coalesced_frames;
4686 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4687 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
5144 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
5149 val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
5221 int rxq, txq, tc, ret;
5246 for (rxq = mqprio->qopt.offset[tc];
5247 rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5248 rxq++) {
5249 if (rxq >= rxq_number)
5252 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
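The mqprio handling at lines 5246-5252 maps each VLAN priority (traffic class) to an RX queue through MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq), which per line 109 packs a 3-bit queue number per priority. A standalone sketch of how the packed register value comes together; the priority-to-queue assignment is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors MVNETA_VLAN_PRIO_RXQ_MAP (line 109): a 3-bit RX queue number is
 * stored per VLAN priority, priority p occupying bits [3p+2:3p]. */
#define VLAN_PRIO_RXQ_MAP(prio, rxq) ((uint32_t)(rxq) << ((prio) * 3))

int main(void)
{
	uint32_t val = 0;

	/* Illustrative mapping: VLAN priority p -> RX queue p, for p = 0..3. */
	for (int prio = 0; prio < 4; prio++)
		val |= VLAN_PRIO_RXQ_MAP(prio, prio);

	printf("prio->rxq map = 0x%08x\n", val);  /* 0x00000688 */
	return 0;
}
```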
5368 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5369 rxq->id = queue;
5370 rxq->size = pp->rx_ring_size;
5371 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
5372 rxq->time_coal = MVNETA_RX_COAL_USEC;
5373 rxq->buf_virt_addr
5375 rxq->size,
5376 sizeof(*rxq->buf_virt_addr),
5378 if (!rxq->buf_virt_addr)
5789 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5791 mvneta_rxq_drop_pkts(pp, rxq);
5841 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5843 rxq->next_desc_to_proc = 0;
5844 mvneta_rxq_hw_init(pp, rxq);