/kernel/linux/linux-5.10/drivers/net/ethernet/google/gve/
gve_tx.c
    31  fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,   in gve_tx_fifo_init()
    35  fifo->qpl->id);   in gve_tx_fifo_init()
    39  fifo->size = fifo->qpl->num_entries * PAGE_SIZE;   in gve_tx_fifo_init()
   162  gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);   in gve_tx_free_ring()
   163  tx->tx_fifo.qpl = NULL;   in gve_tx_free_ring()
   209  tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);   in gve_tx_alloc_ring()
   210  if (!tx->tx_fifo.qpl)   in gve_tx_alloc_ring()
   235  gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);   in gve_tx_alloc_ring()
   454  gve_dma_sync_for_device(dev, tx->tx_fifo.qpl ...   in gve_tx_add_skb()
        [more matches not shown]
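Taken together, the gve_tx_fifo_init() hits show the GQI TX FIFO being built directly on top of its queue page list: vmap() stitches the QPL's individual pages into one contiguous kernel virtual range, so the FIFO can then be treated as a flat buffer of num_entries * PAGE_SIZE bytes. A minimal sketch of that pattern, reconstructed from the fragments above (the error path and the available/head bookkeeping are assumptions, not visible in the hits):

#include <linux/vmalloc.h>	/* vmap(), VM_MAP */
#include <linux/netdevice.h>

#include "gve.h"	/* driver-internal types (gve_priv, gve_tx_fifo); path assumed */

/* Sketch of the gve_tx_fifo_init() pattern seen in the hits above. */
static int gve_tx_fifo_init_sketch(struct gve_priv *priv,
				   struct gve_tx_fifo *fifo)
{
	/* Stitch the QPL's discontiguous pages into one contiguous VA range
	 * so the FIFO can be addressed as a flat byte buffer. */
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (!fifo->base) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	/* Exactly one PAGE_SIZE of FIFO space per registered page. */
	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);	/* field assumed */
	fifo->head = 0;					/* field assumed */
	return 0;
}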
gve_main.c
   624  struct gve_queue_page_list *qpl = &priv->qpls[id];   in gve_alloc_queue_page_list()  [local]
   636  qpl->id = id;   in gve_alloc_queue_page_list()
   637  qpl->num_entries = 0;   in gve_alloc_queue_page_list()
   638  qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);   in gve_alloc_queue_page_list()
   640  if (!qpl->pages)   in gve_alloc_queue_page_list()
   642  qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),   in gve_alloc_queue_page_list()
   645  if (!qpl->page_buses)   in gve_alloc_queue_page_list()
   649  err = gve_alloc_page(priv, &priv->pdev->dev, &qpl ...   in gve_alloc_queue_page_list()
   674  struct gve_queue_page_list *qpl = &priv->qpls[id];   in gve_free_queue_page_list()  [local]
        [more matches not shown]
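The gve_alloc_queue_page_list() hits outline the QPL constructor: two parallel kvzalloc'd arrays hold the struct page pointers and their DMA bus addresses, and each page is then allocated and mapped one at a time. A sketch built from those fragments (the hit at line 649 truncates gve_alloc_page()'s trailing arguments, so the direction argument and the error handling here are assumptions):

#include <linux/slab.h>		/* kvzalloc() */
#include <linux/dma-mapping.h>

#include "gve.h"	/* driver-internal types; path assumed */

/* Sketch of gve_alloc_queue_page_list() following the hits above. */
static int gve_alloc_queue_page_list_sketch(struct gve_priv *priv, u32 id,
					    int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err, i;

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
				   GFP_KERNEL);
	if (!qpl->page_buses)
		return -ENOMEM;
	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i], DMA_BIDIRECTIONAL);
		if (err)
			return err;
		/* Count only successfully mapped pages so the free path
		 * knows how far to unwind. */
		qpl->num_entries++;
	}
	return 0;
}

The linux-6.6 copy further down has the same shape, with the open-coded pages * sizeof(...) multiplications replaced by overflow-checked kvcalloc() calls.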
gve_adminq.c
   333  .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),   in gve_adminq_create_tx_queue()
   374  .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),   in gve_adminq_create_rx_queue()
   530  gve_adminq_register_page_list(struct gve_priv *priv, struct gve_queue_page_list *qpl)  [definition of the qpl argument]
   531  struct gve_queue_page_list *qpl)   in gve_adminq_register_page_list()
   534  u32 num_entries = qpl->num_entries;   in gve_adminq_register_page_list()
   535  u32 size = num_entries * sizeof(qpl->page_buses[0]);   in gve_adminq_register_page_list()
   548  page_list[i] = cpu_to_be64(qpl->page_buses[i]);   in gve_adminq_register_page_list()
   552  .page_list_id = cpu_to_be32(qpl->id),   in gve_adminq_register_page_list()
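gve_adminq_register_page_list() is where a QPL becomes visible to the device: every page's bus address is serialized big-endian into a DMA-coherent array, and an admin command hands the device that array's address together with the QPL ID. A hedged sketch of that flow (the command fields other than page_list_id, the opcode name, and the gve_adminq_execute_cmd() helper are assumptions):

#include <linux/dma-mapping.h>

#include "gve_adminq.h"	/* admin queue types; exact layout assumed */

/* Sketch of gve_adminq_register_page_list() following the hits above. */
static int gve_adminq_register_page_list_sketch(struct gve_priv *priv,
						struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err, i;

	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	/* Device expects big-endian bus addresses, one per QPL page. */
	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);	/* name assumed */
	cmd.reg_page_list.page_list_id = cpu_to_be32(qpl->id);
	cmd.reg_page_list.num_pages = cpu_to_be32(num_entries);		/* assumed */
	cmd.reg_page_list.page_address_list_addr = cpu_to_be64(page_list_bus); /* assumed */

	err = gve_adminq_execute_cmd(priv, &cmd);	/* helper assumed */
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}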
gve_rx.c
    36  gve_unassign_qpl(priv, rx->data.qpl->id);   in gve_rx_free_ring()
    37  rx->data.qpl = NULL;   in gve_rx_free_ring()
    74  rx->data.qpl = gve_assign_rx_qpl(priv);   in gve_prefill_rx_pages()
    77  struct page *page = rx->data.qpl->pages[i];   in gve_prefill_rx_pages()
   302  dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],   in gve_rx()
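The RX hits trace the same lifecycle on the receive side: gve_prefill_rx_pages() claims a whole QPL for the ring and points each RX slot at one of its pre-mapped pages. A sketch of the prefill step (the slot count parameter and the page_info fields are simplified stand-ins):

#include <linux/mm.h>	/* page_address() */

#include "gve.h"	/* driver-internal types; path assumed */

/* Sketch of gve_prefill_rx_pages() following the hits above. */
static int gve_prefill_rx_pages_sketch(struct gve_priv *priv,
				       struct gve_rx_ring *rx, int slots)
{
	int i;

	rx->data.qpl = gve_assign_rx_qpl(priv);
	if (!rx->data.qpl)
		return -ENOMEM;

	for (i = 0; i < slots; i++) {
		struct page *page = rx->data.qpl->pages[i];

		/* The page was DMA-mapped when the QPL was built; the slot
		 * only records the page and its kernel address. */
		rx->data.page_info[i].page = page;
		rx->data.page_info[i].page_address = page_address(page);
	}
	return 0;
}

On the hot path (hit 302), gve_rx() then only needs a dma_sync_single_for_cpu() on the matching page_buses[] entry before reading the payload, since the mapping persists for the queue's lifetime.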
gve.h
    70  struct gve_queue_page_list *qpl; /* qpl assigned to this queue */  [member]
   124  struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */  [member]
   176  /* Tracks the available and used qpl IDs */
   179  unsigned long *qpl_id_map; /* bitmap of used qpl ids */
   451  /* Returns a pointer to the next available tx qpl in the list of qpls ...
   467  /* Returns a pointer to the next available rx qpl in the list of qpls ...
   484  /* Unassigns the qpl with the given id ...
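The gve.h hits document the bookkeeping behind gve_assign_tx_qpl()/gve_assign_rx_qpl()/gve_unassign_qpl(): a plain bitmap of in-use QPL IDs. A sketch of how such an assign/unassign pair can work (the qpl_cfg field path and the TX-low/RX-high partitioning are assumptions consistent with the comments above):

#include <linux/bitmap.h>

#include "gve.h"	/* driver-internal types; path assumed */

/* Sketch of bitmap-based QPL ID assignment. */
static struct gve_queue_page_list *
gve_assign_tx_qpl_sketch(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->tx_cfg.num_queues);

	if (id >= priv->tx_cfg.num_queues)
		return NULL;	/* every TX QPL already assigned */

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

static void gve_unassign_qpl_sketch(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}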
gve_adminq.h
   262  struct gve_queue_page_list *qpl);
/kernel/linux/linux-6.6/drivers/net/ethernet/google/gve/
gve_rx_dqo.c
   146  if (rx->dqo.qpl)   in gve_get_recycled_buf_state()
   170  if (!rx->dqo.qpl) {   in gve_alloc_page_dqo()
   186  buf_state->page_info.page = rx->dqo.qpl->pages[idx];   in gve_alloc_page_dqo()
   187  buf_state->addr = rx->dqo.qpl->page_buses[idx];   in gve_alloc_page_dqo()
   226  gve_free_page_dqo(priv, bs, !rx->dqo.qpl);   in gve_rx_free_ring_dqo()
   228  if (rx->dqo.qpl) {   in gve_rx_free_ring_dqo()
   229  gve_unassign_qpl(priv, rx->dqo.qpl->id);   in gve_rx_free_ring_dqo()
   230  rx->dqo.qpl = NULL;   in gve_rx_free_ring_dqo()
   312  rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num);   in gve_rx_alloc_ring_dqo()
   313  if (!rx->dqo.qpl)   in gve_rx_alloc_ring_dqo()
        [more matches not shown]
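gve_alloc_page_dqo() shows how DQO serves both addressing modes from one buffer pool: without a QPL it allocates and DMA-maps a fresh page per buffer, while in QPL mode it simply indexes into the pages already registered with the device. A sketch of that branch (the index bookkeeping and the 6.6 gve_alloc_page() argument list are assumptions):

#include <linux/dma-mapping.h>

#include "gve.h"	/* driver-internal types; path assumed */

/* Sketch of the gve_alloc_page_dqo() branch seen in the hits above. */
static int gve_alloc_page_dqo_sketch(struct gve_rx_ring *rx,
				     struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	if (!rx->dqo.qpl)
		/* Raw addressing: this buffer owns its page outright. */
		return gve_alloc_page(priv, &priv->pdev->dev,
				      &buf_state->page_info.page,
				      &buf_state->addr,
				      DMA_FROM_DEVICE, GFP_ATOMIC);

	/* QPL mode: hand out the next pre-registered page by index. */
	idx = rx->dqo.next_qpl_page_idx++;	/* field name assumed */
	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
	buf_state->addr = rx->dqo.qpl->page_buses[idx];
	return 0;
}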
gve_tx_dqo.c
    21  if (!tx->dqo.qpl)   in gve_has_free_tx_qpl_bufs()
   225  if (tx->dqo.qpl) {   in gve_tx_free_ring_dqo()
   226  gve_unassign_qpl(priv, tx->dqo.qpl->id);   in gve_tx_free_ring_dqo()
   227  tx->dqo.qpl = NULL;   in gve_tx_free_ring_dqo()
   236  tx->dqo.qpl->num_entries;   in gve_tx_qpl_buf_init()
   331  tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);   in gve_tx_alloc_ring_dqo()
   332  if (!tx->dqo.qpl)   in gve_tx_alloc_ring_dqo()
   663  *va = page_address(tx->dqo.qpl->pages[page_id]) + offset;   in gve_tx_buf_get_addr()
   664  *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;   in gve_tx_buf_get_addr()
   748  if (tx->dqo.qpl) {   in gve_tx_add_skb_dqo()
        [more matches not shown]
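Hits 663-664 are the heart of DQO-QPL TX: one QPL page is carved into several fixed-size copy buffers, and a flat buffer index is split into a page number plus a byte offset to recover both the kernel VA for copying and the bus address for the descriptor. A sketch with the split made explicit (only the final two lines appear in the hits; the shift constant's value and the index arithmetic are reconstructed assumptions, hence the _SKETCH names):

#include <linux/mm.h>	/* page_address(), PAGE_SHIFT */

#include "gve.h"	/* driver-internal types; path assumed */

#define GVE_TX_BUF_SHIFT_SKETCH		11 /* 2 KiB buffers; value assumed */
#define GVE_TX_BUFS_PER_PAGE_SKETCH	(PAGE_SIZE >> GVE_TX_BUF_SHIFT_SKETCH)

/* Sketch of gve_tx_buf_get_addr() around hits 663-664 above. */
static void gve_tx_buf_get_addr_sketch(struct gve_tx_ring *tx, s16 index,
				       void **va, dma_addr_t *dma_addr)
{
	int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_SKETCH);
	int offset = (index & (GVE_TX_BUFS_PER_PAGE_SKETCH - 1)) <<
		     GVE_TX_BUF_SHIFT_SKETCH;

	*va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
	*dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
}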
gve_adminq.c
   523  GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;   in gve_adminq_create_tx_queue()
   535  qpl_id = tx->dqo.qpl->id;   in gve_adminq_create_tx_queue()
   579  GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;   in gve_adminq_create_rx_queue()
   597  qpl_id = rx->dqo.qpl->id;   in gve_adminq_create_rx_queue()
   752  /* Override pages for qpl for DQO-QPL */   in gve_enable_supported_features()
   876  gve_adminq_register_page_list(struct gve_priv *priv, struct gve_queue_page_list *qpl)  [definition of the qpl argument]
   877  struct gve_queue_page_list *qpl)   in gve_adminq_register_page_list()
   880  u32 num_entries = qpl->num_entries;   in gve_adminq_register_page_list()
   881  u32 size = num_entries * sizeof(qpl->page_buses[0]);   in gve_adminq_register_page_list()
   894  page_list[i] = cpu_to_be64(qpl->page_buses[i]);   in gve_adminq_register_page_list()
   898  .page_list_id = cpu_to_be32(qpl ...   in gve_adminq_register_page_list()
        [more matches not shown]
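The create-queue hits show how a queue reports its backing store to the device: raw-addressing queues pass the sentinel GVE_RAW_ADDRESSING_QPL_ID instead of a real QPL ID, GQI-QPL queues report the TX FIFO's (or RX data ring's) QPL, and DQO-QPL queues take the ID from tx->dqo.qpl / rx->dqo.qpl. A sketch of the TX-side selection (gve_is_gqi() and the queue_format check exist in the driver, but this standalone helper is illustrative; in the driver the logic is inline):

#include "gve.h"	/* driver-internal types; path assumed */
#include "gve_adminq.h"	/* GVE_RAW_ADDRESSING_QPL_ID */

/* Sketch of the QPL-ID selection in gve_adminq_create_tx_queue(). */
static u32 gve_tx_qpl_id_sketch(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	if (gve_is_gqi(priv))
		/* GQI: raw addressing uses the sentinel ID (hit 523). */
		return priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

	/* DQO: only the DQO-QPL format carries a real QPL (hit 535). */
	return tx->dqo.qpl ? tx->dqo.qpl->id : GVE_RAW_ADDRESSING_QPL_ID;
}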
gve_tx.c
    41  fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,   in gve_tx_fifo_init()
    45  fifo->qpl->id);   in gve_tx_fifo_init()
    49  fifo->size = fifo->qpl->num_entries * PAGE_SIZE;   in gve_tx_fifo_init()
   221  gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);   in gve_tx_free_ring()
   222  tx->tx_fifo.qpl = NULL;   in gve_tx_free_ring()
   264  tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);   in gve_tx_alloc_ring()
   265  if (!tx->tx_fifo.qpl)   in gve_tx_alloc_ring()
   293  gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);   in gve_tx_alloc_ring()
   550  gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl ...   in gve_tx_add_skb_copy()
        [more matches not shown]
gve_main.c
   998  struct gve_queue_page_list *qpl = &priv->qpls[id];   in gve_alloc_queue_page_list()  [local]
  1010  qpl->id = id;   in gve_alloc_queue_page_list()
  1011  qpl->num_entries = 0;   in gve_alloc_queue_page_list()
  1012  qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);   in gve_alloc_queue_page_list()
  1014  if (!qpl->pages)   in gve_alloc_queue_page_list()
  1016  qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);   in gve_alloc_queue_page_list()
  1018  if (!qpl->page_buses)   in gve_alloc_queue_page_list()
  1022  err = gve_alloc_page(priv, &priv->pdev->dev, &qpl ...   in gve_alloc_queue_page_list()
  1046  struct gve_queue_page_list *qpl = &priv->qpls[id];   in gve_free_queue_page_list()  [local]
        [more matches not shown]
gve_rx.c
    39  gve_unassign_qpl(priv, rx->data.qpl->id);   in gve_rx_unfill_pages()
    40  rx->data.qpl = NULL;   in gve_rx_unfill_pages()
   130  rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);   in gve_prefill_rx_pages()
   131  if (!rx->data.qpl) {   in gve_prefill_rx_pages()
   139  struct page *page = rx->data.qpl->pages[i];   in gve_prefill_rx_pages()
   188  gve_unassign_qpl(priv, rx->data.qpl->id);   in gve_prefill_rx_pages()
   189  rx->data.qpl = NULL;   in gve_prefill_rx_pages()
   765  rx->data.qpl->page_buses[idx];   in gve_rx()
gve.h
    69  * allocs and uses a non-qpl page on the receive path of DQO QPL to free ...
   107  struct gve_queue_page_list *qpl; /* qpl assigned to this queue */  [member]
   242  /* qpl assigned to this queue */
   243  struct gve_queue_page_list *qpl;  [member]
   327  struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */  [member]
   539  /* qpl assigned to this queue */
   540  struct gve_queue_page_list *qpl;  [member]
   596  /* Tracks the available and used qpl IDs */
   599  unsigned long *qpl_id_map; /* bitmap of used qpl ids */
        [more matches not shown]
gve_adminq.h
   428  struct gve_queue_page_list *qpl);