Lines matching references to rxq in the Marvell mvneta Ethernet driver (the leading number on each line is its line number in the driver source).
129 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
817 struct mvneta_rx_queue *rxq,
824 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
830 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
836 struct mvneta_rx_queue *rxq)
840 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
848 struct mvneta_rx_queue *rxq,
856 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
876 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
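The writes at lines 824-876 all target MVNETA_RXQ_STATUS_UPDATE_REG, which tells the hardware how many descriptors the CPU has processed and how many fresh buffers it has added back. Both counts are limited to 255 per write, so larger totals get split into chunks. A minimal userspace model of that chunking, assuming the non-occupied count sits 16 bits up in the register (mirroring the driver's MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT):

#include <stdio.h>

/* Userspace model: the status-update register accepts at most 255
 * processed and 255 newly filled descriptors per write, so larger
 * totals are pushed in chunks. The 16-bit field position is an
 * assumption mirroring MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT. */
#define ADD_NON_OCCUPIED_SHIFT 16

static void fake_reg_write(unsigned int val)
{
	printf("status update: done=%u filled=%u\n",
	       val & 0xff, (val >> ADD_NON_OCCUPIED_SHIFT) & 0xff);
}

static void desc_num_update(int rx_done, int rx_filled)
{
	while (rx_done > 0 || rx_filled > 0) {
		unsigned int done = rx_done > 0xff ? 0xff : rx_done;
		unsigned int filled = rx_filled > 0xff ? 0xff : rx_filled;

		fake_reg_write(done | (filled << ADD_NON_OCCUPIED_SHIFT));
		rx_done -= done;
		rx_filled -= filled;
	}
}

int main(void)
{
	desc_num_update(300, 300);	/* two writes: 255/255, then 45/45 */
	return 0;
}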
882 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
884 int rx_desc = rxq->next_desc_to_proc;
886 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
887 prefetch(rxq->descs + rxq->next_desc_to_proc);
888 return rxq->descs + rx_desc;
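mvneta_rxq_next_desc_get() (lines 882-888) hands back the descriptor at next_desc_to_proc, advances that index with wrap-around, and prefetches the upcoming descriptor. A compilable sketch of just the index arithmetic behind MVNETA_QUEUE_NEXT_DESC:

#include <assert.h>

/* Minimal model of the ring-index step behind MVNETA_QUEUE_NEXT_DESC():
 * advance by one, wrap to 0 after last_desc (size - 1). */
struct ring {
	int next_desc_to_proc;
	int last_desc;
};

static int ring_next(const struct ring *r, int index)
{
	return index == r->last_desc ? 0 : index + 1;
}

static int ring_get(struct ring *r)
{
	int cur = r->next_desc_to_proc;

	/* The driver additionally prefetches descs[next] at this point. */
	r->next_desc_to_proc = ring_next(r, cur);
	return cur;
}

int main(void)
{
	struct ring r = { .next_desc_to_proc = 0, .last_desc = 3 };

	assert(ring_get(&r) == 0);
	assert(ring_get(&r) == 1);
	assert(ring_get(&r) == 2);
	assert(ring_get(&r) == 3);
	assert(ring_get(&r) == 0);	/* wrapped around */
	return 0;
}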
906 struct mvneta_rx_queue *rxq,
911 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
916 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
961 /* Set rxq buf size */
963 struct mvneta_rx_queue *rxq,
968 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
973 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
978 struct mvneta_rx_queue *rxq)
982 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
984 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
989 struct mvneta_rx_queue *rxq)
993 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
995 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1000 struct mvneta_rx_queue *rxq)
1004 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1008 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1013 struct mvneta_rx_queue *rxq)
1017 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1021 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1229 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1231 if (rxq->descs)
1458 int rxq, txq;
1460 for (rxq = 0; rxq < rxq_number; rxq++)
1461 if ((rxq % max_cpu) == cpu)
1462 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
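Lines 1460-1462 (and the identical loop at lines 4152-4154 below) spread the RX queues across CPUs round-robin: CPU n claims every queue whose index is congruent to n modulo the CPU count, and records the claims as a bitmask via MVNETA_CPU_RXQ_ACCESS (line 129). A standalone model with illustrative queue and CPU counts:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define CPU_RXQ_ACCESS(rxq)	BIT(rxq)

int main(void)
{
	int rxq_number = 8, max_cpu = 4;	/* illustrative sizes */
	int cpu, rxq;

	for (cpu = 0; cpu < max_cpu; cpu++) {
		unsigned int rxq_map = 0;

		/* Same round-robin as lines 1460-1462: CPU n serves
		 * queues n, n + max_cpu, n + 2 * max_cpu, ... */
		for (rxq = 0; rxq < rxq_number; rxq++)
			if (rxq % max_cpu == cpu)
				rxq_map |= CPU_RXQ_ACCESS(rxq);

		printf("cpu%d rxq_map=0x%02x\n", cpu, rxq_map);
	}
	return 0;
}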
1646 struct mvneta_rx_queue *rxq, u32 value)
1648 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1656 struct mvneta_rx_queue *rxq, u32 value)
1664 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1684 struct mvneta_rx_queue *rxq)
1689 i = rx_desc - rxq->descs;
1690 rxq->buf_virt_addr[i] = virt_addr;
1892 struct mvneta_rx_queue *rxq,
1898 page = page_pool_alloc_pages(rxq->page_pool,
1904 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1942 struct mvneta_rx_queue *rxq)
1946 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1948 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1953 mvneta_rxq_next_desc_get(rxq);
1965 for (i = 0; i < rxq->size; i++) {
1966 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1967 void *data = rxq->buf_virt_addr[i];
1971 page_pool_put_full_page(rxq->page_pool, data, false);
1973 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
1974 xdp_rxq_info_unreg(&rxq->xdp_rxq);
1975 page_pool_destroy(rxq->page_pool);
1976 rxq->page_pool = NULL;
1996 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1999 int curr_desc = rxq->first_to_refill;
2002 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
2003 rx_desc = rxq->descs + curr_desc;
2005 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2009 rxq->id, i, rxq->refill_num);
2018 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2020 rxq->refill_num -= i;
2021 rxq->first_to_refill = curr_desc;
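mvneta_rx_refill_queue() (lines 1996-2021) works off the refill backlog in bounded slices: it starts at first_to_refill, stops after 64 slots or on the first allocation failure, and leaves refill_num holding the remaining debt for the next pass. A userspace model of that bookkeeping, with a hypothetical try_refill() standing in for mvneta_rx_refill():

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 128

struct rxq_model {
	int first_to_refill;	/* next slot to refill */
	int refill_num;		/* slots still owed a buffer */
};

/* Stand-in for mvneta_rx_refill(): pretend the allocator fails once a
 * fixed budget runs out, to exercise the early-exit path. */
static bool try_refill(int *budget)
{
	return (*budget)-- > 0;
}

static int refill_queue(struct rxq_model *q, int *budget)
{
	int curr = q->first_to_refill;
	int i;

	/* Cap the work per call at 64 slots, as the driver does. */
	for (i = 0; i < q->refill_num && i < 64; i++) {
		if (!try_refill(budget))
			break;	/* leave the rest for the next pass */
		curr = (curr + 1) % RING_SIZE;
	}
	q->refill_num -= i;
	q->first_to_refill = curr;
	return i;
}

int main(void)
{
	struct rxq_model q = { .first_to_refill = 120, .refill_num = 100 };
	int budget = 70;

	printf("refilled %d, left %d, next slot %d\n",
	       refill_queue(&q, &budget), q.refill_num, q.first_to_refill);
	return 0;
}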
2027 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2034 page_pool_put_full_page(rxq->page_pool,
2036 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
2170 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2194 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2205 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2214 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2229 struct mvneta_rx_queue *rxq,
2248 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2270 struct mvneta_rx_queue *rxq,
2286 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2300 page_pool_put_full_page(rxq->page_pool, page, true);
2306 mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2317 page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
2329 page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
2338 struct mvneta_rx_queue *rxq)
2344 .rxq = &rxq->xdp_rxq,
2351 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2358 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2363 index = rx_desc - rxq->descs;
2364 page = (struct page *)rxq->buf_virt_addr[index];
2368 rxq->refill_num++;
2381 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2386 page_pool_put_full_page(rxq->page_pool, page,
2391 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2400 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2405 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2408 skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
2412 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2433 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2442 refill = mvneta_rx_refill_queue(pp, rxq);
2444 /* Update rxq management counters */
2445 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2453 struct mvneta_rx_queue *rxq)
2461 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2470 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2578 /* Update rxq management counters */
2579 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
3201 struct mvneta_rx_queue *rxq, int size)
3216 rxq->page_pool = page_pool_create(&pp_params);
3217 if (IS_ERR(rxq->page_pool)) {
3218 err = PTR_ERR(rxq->page_pool);
3219 rxq->page_pool = NULL;
3223 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
3227 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3228 rxq->page_pool);
3235 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3237 page_pool_destroy(rxq->page_pool);
3238 rxq->page_pool = NULL;
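Lines 3216-3238 create a per-queue page_pool and register the queue with the XDP core, unwinding in reverse order if any step fails. A condensed kernel-context sketch of that sequence; the pp_params values shown are placeholders rather than the driver's exact settings, and xdp_rxq_info_reg() is the three-argument form matching this kernel snapshot:

/* Kernel-context sketch (not standalone): create a page_pool for one RX
 * queue and register it with the XDP core, unwinding in reverse on error.
 * The pp_params values are placeholders, not the driver's exact ones. */
static int rxq_pool_setup_sketch(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq, int size)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = pp->dev->dev.parent,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	/* Three-argument form, as in this kernel snapshot. */
	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
	if (err)
		goto err_destroy_pool;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unreg_rxq;

	return 0;

err_unreg_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_destroy_pool:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}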
3242 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3243 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3248 err = mvneta_create_page_pool(pp, rxq, num);
3253 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3254 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3257 "%s:rxq %d, %d of %d buffs filled\n",
3258 __func__, rxq->id, i, num);
3266 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3293 struct mvneta_rx_queue *rxq)
3295 rxq->size = pp->rx_ring_size;
3298 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3299 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3300 &rxq->descs_phys, GFP_KERNEL);
3301 if (!rxq->descs)
3304 rxq->last_desc = rxq->size - 1;
3310 struct mvneta_rx_queue *rxq)
3313 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3314 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3317 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3318 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3322 mvneta_rxq_offset_set(pp, rxq, 0);
3323 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3326 mvneta_rxq_bm_disable(pp, rxq);
3327 mvneta_rxq_fill(pp, rxq, rxq->size);
3330 mvneta_rxq_offset_set(pp, rxq,
3333 mvneta_rxq_bm_enable(pp, rxq);
3335 mvneta_rxq_long_pool_set(pp, rxq);
3336 mvneta_rxq_short_pool_set(pp, rxq);
3337 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3343 struct mvneta_rx_queue *rxq)
3348 ret = mvneta_rxq_sw_init(pp, rxq);
3352 mvneta_rxq_hw_init(pp, rxq);
3359 struct mvneta_rx_queue *rxq)
3361 mvneta_rxq_drop_pkts(pp, rxq);
3363 if (rxq->descs)
3365 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3366 rxq->descs,
3367 rxq->descs_phys);
3369 rxq->descs = NULL;
3370 rxq->last_desc = 0;
3371 rxq->next_desc_to_proc = 0;
3372 rxq->descs_phys = 0;
3373 rxq->first_to_refill = 0;
3374 rxq->refill_num = 0;
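Lines 3293-3304 allocate the descriptor ring with dma_alloc_coherent(), and lines 3359-3374 release it and reset the software state so the queue can be re-initialized later. A kernel-context sketch stitching those fragments into the alloc/free pairing:

/* Kernel-context sketch (not standalone): the alloc/free pairing behind
 * mvneta_rxq_sw_init() and mvneta_rxq_deinit(). dma_alloc_coherent()
 * hands back both the CPU pointer and the DMA address that the hardware
 * is later pointed at via MVNETA_RXQ_BASE_ADDR_REG. */
static int rxq_ring_alloc_sketch(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;
	return 0;
}

static void rxq_ring_free_sketch(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	/* Reset software state so a later re-init starts clean. */
	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
	rxq->first_to_refill = 0;
	rxq->refill_num = 0;
}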
3525 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
4140 /* Use the cpu associated to the rxq when it is online, in all
4150 int rxq;
4152 for (rxq = 0; rxq < rxq_number; rxq++)
4153 if ((rxq % max_cpu) == cpu)
4154 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4490 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4491 rxq->time_coal = c->rx_coalesce_usecs;
4492 rxq->pkts_coal = c->rx_max_coalesced_frames;
4493 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4494 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
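The ethtool path at lines 4490-4494 stores the requested coalescing values per queue and pushes them to hardware. The time threshold register counts port-clock cycles rather than microseconds, so the driver scales the value by the clock rate before the write at line 1664; a minimal model, assuming that cycles-per-microsecond scaling (the 166 MHz rate is illustrative, not the hardware's):

#include <stdio.h>

/* Model of the usec -> clock-cycle conversion behind
 * mvneta_rx_time_coal_set(): the register counts port-clock cycles,
 * so the ethtool microsecond value is scaled by cycles-per-usec.
 * The 166 MHz clock rate is illustrative. */
int main(void)
{
	unsigned long clk_rate = 166000000;	/* Hz, illustrative */
	unsigned int usecs = 100;		/* rx_coalesce_usecs */
	unsigned int cycles = (clk_rate / 1000000) * usecs;

	printf("%u usecs -> %u clock cycles\n", usecs, cycles);
	return 0;
}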
4976 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4977 rxq->id = queue;
4978 rxq->size = pp->rx_ring_size;
4979 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4980 rxq->time_coal = MVNETA_RX_COAL_USEC;
4981 rxq->buf_virt_addr
4983 rxq->size,
4984 sizeof(*rxq->buf_virt_addr),
4986 if (!rxq->buf_virt_addr)
5357 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5359 mvneta_rxq_drop_pkts(pp, rxq);
5409 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5411 rxq->next_desc_to_proc = 0;
5412 mvneta_rxq_hw_init(pp, rxq);