Lines matching refs: rx

891 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
893 struct device *dmadev = rx->adapter->dmadev;
898 entry = &rx->entry[i];
899 if (!rx->xsk_pool && entry->page)
900 page_pool_put_full_page(rx->page_pool, entry->page,
902 if (rx->xsk_pool && entry->xdp)
908 if (rx->page_pool)
909 page_pool_destroy(rx->page_pool);
911 memset(rx->entry, 0, sizeof(rx->entry));
914 if (rx->page[i]) {
915 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
916 rx->page_dma[i]);
917 rx->page[i] = NULL;
918 rx->page_dma[i] = 0;
923 static int tsnep_rx_ring_create(struct tsnep_rx *rx)
925 struct device *dmadev = rx->adapter->dmadev;
933 rx->page[i] =
934 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
936 if (!rx->page[i]) {
941 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
943 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
946 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
958 rx->page_pool = page_pool_create(&pp_params);
959 if (IS_ERR(rx->page_pool)) {
960 retval = PTR_ERR(rx->page_pool);
961 rx->page_pool = NULL;
966 entry = &rx->entry[i];
967 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
974 tsnep_rx_ring_cleanup(rx);
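
Note: the ring-create matches above carve each DMA-coherent page into fixed-size descriptors. A minimal sketch of that loop, assuming TSNEP_RING_PAGE_COUNT as the page-count bound and simplifying the descriptor/write-back split and error handling (the real unwind goes through tsnep_rx_ring_cleanup()):

        for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
                /* one coherent page holds TSNEP_RING_ENTRIES_PER_PAGE descriptors */
                rx->page[i] = dma_alloc_coherent(dmadev, PAGE_SIZE,
                                                 &rx->page_dma[i], GFP_KERNEL);
                if (!rx->page[i])
                        goto failed;    /* unwound by tsnep_rx_ring_cleanup() */
                for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
                        entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
                        /* CPU view and device (DMA) view of the same descriptor */
                        entry->desc = (void *)(((u8 *)rx->page[i]) +
                                               TSNEP_DESC_SIZE * j);
                        entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
                }
        }
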
978 static void tsnep_rx_init(struct tsnep_rx *rx)
982 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
983 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
984 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
985 rx->write = 0;
986 rx->read = 0;
987 rx->owner_counter = 1;
988 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
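
Note: tsnep_rx_init programs the 64-bit descriptor base address through two 32-bit registers. Assuming DMA_ADDR_LOW/DMA_ADDR_HIGH are the usual split macros, the sequence is equivalent to:

        dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
        iowrite32(lower_32_bits(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
        iowrite32(upper_32_bits(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
        rx->write = 0;
        rx->read = 0;
        rx->owner_counter = 1;
        rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
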
991 static void tsnep_rx_enable(struct tsnep_rx *rx)
996 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
999 static void tsnep_rx_disable(struct tsnep_rx *rx)
1003 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
1004 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
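
Note: the disable path is write-then-poll: request the stop, then spin until hardware acknowledges. A sketch using readx_poll_timeout() from <linux/iopoll.h>; the acknowledge condition and the timeouts are assumptions:

        u32 val;

        iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
        /* poll every 10 ms, give up after 1 s */
        readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
                           ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
                           1000000);
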
1009 static int tsnep_rx_desc_available(struct tsnep_rx *rx)
1011 if (rx->read <= rx->write)
1012 return TSNEP_RING_SIZE - rx->write + rx->read - 1;
1014 return rx->read - rx->write - 1;
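
Note: tsnep_rx_desc_available() is the classic ring-buffer computation that keeps one slot reserved so a full ring stays distinguishable from an empty one. A self-contained user-space check of the arithmetic, with RING_SIZE standing in for TSNEP_RING_SIZE:

        #include <assert.h>

        #define RING_SIZE 256   /* stand-in for TSNEP_RING_SIZE */

        static int desc_available(int read, int write)
        {
                if (read <= write)
                        return RING_SIZE - write + read - 1;
                return read - write - 1;
        }

        int main(void)
        {
                assert(desc_available(0, 0) == RING_SIZE - 1); /* empty ring */
                assert(desc_available(10, 9) == 0);            /* full ring */
                assert(desc_available(5, 200) == 60);          /* wrapped case */
                return 0;
        }
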
1017 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
1024 page = rx->page_buffer;
1026 page_pool_put_full_page(rx->page_pool, *page, false);
1032 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
1040 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
1041 if (!rx->page_buffer[i]) {
1042 tsnep_rx_free_page_buffer(rx);
1051 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1057 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
1060 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
1062 struct tsnep_rx_entry *entry = &rx->entry[index];
1065 page = page_pool_dev_alloc_pages(rx->page_pool);
1068 tsnep_rx_set_page(rx, entry, page);
1073 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
1075 struct tsnep_rx_entry *entry = &rx->entry[index];
1076 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1078 tsnep_rx_set_page(rx, entry, read->page);
1082 static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
1084 struct tsnep_rx_entry *entry = &rx->entry[index];
1089 if (index == rx->increment_owner_counter) {
1090 rx->owner_counter++;
1091 if (rx->owner_counter == 4)
1092 rx->owner_counter = 1;
1093 rx->increment_owner_counter--;
1094 if (rx->increment_owner_counter < 0)
1095 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1098 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
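
Note: the activate matches implement the descriptor ownership scheme: a 2-bit owner counter cycling through 1..3 (0 appears reserved, given TSNEP_RESET_OWNER_COUNTER at init) plus a trigger index that walks backwards one slot per lap, so the counter changes at a different ring position on every pass. The state update, isolated; the mask name TSNEP_DESC_OWNER_COUNTER_MASK is an assumption:

        if (index == rx->increment_owner_counter) {
                rx->owner_counter++;
                if (rx->owner_counter == 4)
                        rx->owner_counter = 1;  /* 2-bit counter, 0 is skipped */
                rx->increment_owner_counter--;
                if (rx->increment_owner_counter < 0)
                        rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
        }
        entry->properties |=
                (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
                TSNEP_DESC_OWNER_COUNTER_MASK;
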
1109 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
1115 index = (rx->write + i) & TSNEP_RING_MASK;
1117 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
1118 rx->alloc_failed++;
1123 tsnep_rx_reuse_buffer(rx, index);
1128 tsnep_rx_activate(rx, index);
1132 rx->write = (rx->write + i) & TSNEP_RING_MASK;
1137 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
1141 desc_refilled = tsnep_rx_alloc(rx, count, reuse);
1143 tsnep_rx_enable(rx);
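
Note: reconstructing the control flow around the alloc/refill matches (hedged): fill as many slots as possible starting at rx->write, and if even the first allocation fails while 'reuse' is set, recycle the oldest buffer (the one at rx->read) into the write slot so the hardware always owns at least one descriptor:

        for (i = 0; i < count && !alloc_failed; i++) {
                index = (rx->write + i) & TSNEP_RING_MASK;
                if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
                        rx->alloc_failed++;
                        alloc_failed = true;
                        /* reuse only if nothing was allocated at all */
                        if (i == 0 && reuse)
                                tsnep_rx_reuse_buffer(rx, index);
                        else
                                break;
                }
                tsnep_rx_activate(rx, index);
        }
        if (i)
                rx->write = (rx->write + i) & TSNEP_RING_MASK;

tsnep_rx_refill() wraps this and, per the match at 1143, calls tsnep_rx_enable() whenever at least one descriptor was refilled.
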
1148 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1154 entry->desc->rx = __cpu_to_le64(entry->dma);
1157 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
1159 struct tsnep_rx_entry *entry = &rx->entry[index];
1160 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1162 tsnep_rx_set_xdp(rx, entry, read->xdp);
1166 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
1171 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
1173 int index = (rx->write + i) & TSNEP_RING_MASK;
1174 struct tsnep_rx_entry *entry = &rx->entry[index];
1176 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
1177 tsnep_rx_activate(rx, index);
1180 rx->alloc_failed++;
1183 tsnep_rx_reuse_buffer_zc(rx, rx->write);
1184 tsnep_rx_activate(rx, rx->write);
1189 rx->write = (rx->write + i) & TSNEP_RING_MASK;
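
Note: the zero-copy variant replaces per-page allocation with xsk_buff_alloc_batch(), which hands out up to 'count' buffers from the XSK fill queue in one call. A hedged reconstruction around the matches:

        allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
        for (i = 0; i < allocated; i++) {
                int index = (rx->write + i) & TSNEP_RING_MASK;
                struct tsnep_rx_entry *entry = &rx->entry[index];

                tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
                tsnep_rx_activate(rx, index);
        }
        if (i == 0) {
                rx->alloc_failed++;
                if (reuse) {
                        /* keep the ring alive with the oldest buffer */
                        tsnep_rx_reuse_buffer_zc(rx, rx->write);
                        tsnep_rx_activate(rx, rx->write);
                }
        }
        if (i)
                rx->write = (rx->write + i) & TSNEP_RING_MASK;
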
1194 static void tsnep_rx_free_zc(struct tsnep_rx *rx)
1199 struct tsnep_rx_entry *entry = &rx->entry[i];
1207 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
1211 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
1213 tsnep_rx_enable(rx);
1218 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
1233 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
1238 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1243 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1247 trace_xdp_exception(rx->adapter->netdev, prog, act);
1256 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
1262 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
1273 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1283 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
1288 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1292 trace_xdp_exception(rx->adapter->netdev, prog, act);
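
Note: both run_prog helpers are the standard XDP verdict switch; the matched lines are its arms. A hedged sketch of the copy-based variant, where the status-flag names TSNEP_XDP_TX/TSNEP_XDP_REDIRECT are assumptions and 'sync' is a caller-computed DMA sync length:

        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
                return false;           /* fall back to the skb path */
        case XDP_TX:
                if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
                        goto out_failure;
                *status |= TSNEP_XDP_TX;                /* assumed flag name */
                return true;
        case XDP_REDIRECT:
                if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
                        goto out_failure;
                *status |= TSNEP_XDP_REDIRECT;          /* assumed flag name */
                return true;
        default:
                bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
                fallthrough;
        case XDP_ABORTED:
        out_failure:
                trace_xdp_exception(rx->adapter->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
                /* return the page to the pool (see the match at 1256) */
                page_pool_put_page(rx->page_pool,
                                   virt_to_head_page(xdp->data), sync, true);
                return true;
        }

Judging by the match order (1273 before 1283), the zero-copy variant checks XDP_REDIRECT first, the common AF_XDP path, and its drop arm would free with xsk_buff_free() rather than returning a page to the pool.
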
1313 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
1326 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
1338 skb_record_rx_queue(skb, rx->queue_index);
1339 skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
1344 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
1349 skb = tsnep_build_skb(rx, page, length);
1353 rx->packets++;
1354 rx->bytes += length;
1356 rx->multicast++;
1360 page_pool_recycle_direct(rx->page_pool, page);
1362 rx->dropped++;
1366 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
1369 struct device *dmadev = rx->adapter->dmadev;
1381 desc_available = tsnep_rx_desc_available(rx);
1382 dma_dir = page_pool_get_dma_dir(rx->page_pool);
1383 prog = READ_ONCE(rx->adapter->xdp_prog);
1385 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1386 rx->tx_queue_index);
1387 tx = &rx->adapter->tx[rx->tx_queue_index];
1389 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
1392 while (likely(done < budget) && (rx->read != rx->write)) {
1393 entry = &rx->entry[rx->read];
1403 desc_available -= tsnep_rx_refill(rx, desc_available,
1410 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1413 rx->dropped++;
1437 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1447 consume = tsnep_xdp_run_prog(rx, prog, &xdp,
1450 rx->packets++;
1451 rx->bytes += length;
1459 tsnep_rx_page(rx, napi, entry->page, length);
1464 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1467 tsnep_rx_refill(rx, desc_available, false);
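
Note: the poll matches outline the NAPI receive loop: consume completed descriptors between rx->read and rx->write, refill mid-loop when the ring runs low (reuse=true so it can never stall completely), and finalize pending XDP work once per poll. A skeleton with the ownership check and metadata handling elided; the threshold macro name TSNEP_RING_RX_REFILL is an assumption:

        while (likely(done < budget) && (rx->read != rx->write)) {
                entry = &rx->entry[rx->read];
                /* ... stop if the hardware still owns this descriptor ... */

                /* refill before the ring runs dry; reuse=true may recycle
                 * the buffer under rx->read as a last resort */
                if (desc_available >= TSNEP_RING_RX_REFILL)
                        desc_available -= tsnep_rx_refill(rx, desc_available,
                                                          true);

                /* ... run the XDP program or build an skb from entry->page ... */

                rx->read = (rx->read + 1) & TSNEP_RING_MASK;
                desc_available++;
                done++;
        }
        if (xdp_status)
                tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
        if (desc_available)
                tsnep_rx_refill(rx, desc_available, false);
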
1472 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
1485 desc_available = tsnep_rx_desc_available(rx);
1486 prog = READ_ONCE(rx->adapter->xdp_prog);
1488 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1489 rx->tx_queue_index);
1490 tx = &rx->adapter->tx[rx->tx_queue_index];
1493 while (likely(done < budget) && (rx->read != rx->write)) {
1494 entry = &rx->entry[rx->read];
1504 desc_available -= tsnep_rx_refill_zc(rx, desc_available,
1511 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1514 rx->dropped++;
1529 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
1538 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1547 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
1550 rx->packets++;
1551 rx->bytes += length;
1559 page = page_pool_dev_alloc_pages(rx->page_pool);
1564 tsnep_rx_page(rx, napi, page, length);
1566 rx->dropped++;
1573 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1576 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
1578 if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1580 xsk_set_rx_need_wakeup(rx->xsk_pool);
1582 xsk_clear_rx_need_wakeup(rx->xsk_pool);
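
Note: the tail of the zero-copy poll implements the AF_XDP need_wakeup protocol: if the final refill could not be fully satisfied (desc_available still nonzero), user space is asked to wake the driver after replenishing the fill queue; otherwise the flag is cleared. Isolated:

        desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);

        if (xsk_uses_need_wakeup(rx->xsk_pool)) {
                if (desc_available)
                        xsk_set_rx_need_wakeup(rx->xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(rx->xsk_pool);
        }
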
1590 static bool tsnep_rx_pending(struct tsnep_rx *rx)
1594 if (rx->read != rx->write) {
1595 entry = &rx->entry[rx->read];
1605 static int tsnep_rx_open(struct tsnep_rx *rx)
1610 retval = tsnep_rx_ring_create(rx);
1614 tsnep_rx_init(rx);
1616 desc_available = tsnep_rx_desc_available(rx);
1617 if (rx->xsk_pool)
1618 retval = tsnep_rx_alloc_zc(rx, desc_available, false);
1620 retval = tsnep_rx_alloc(rx, desc_available, false);
1630 if (rx->xsk_pool) {
1631 retval = tsnep_rx_alloc_page_buffer(rx);
1639 tsnep_rx_ring_cleanup(rx);
1643 static void tsnep_rx_close(struct tsnep_rx *rx)
1645 if (rx->xsk_pool)
1646 tsnep_rx_free_page_buffer(rx);
1648 tsnep_rx_ring_cleanup(rx);
1651 static void tsnep_rx_reopen(struct tsnep_rx *rx)
1653 struct page **page = rx->page_buffer;
1656 tsnep_rx_init(rx);
1659 struct tsnep_rx_entry *entry = &rx->entry[i];
1669 tsnep_rx_set_page(rx, entry, *page);
1670 tsnep_rx_activate(rx, rx->write);
1671 rx->write++;
1679 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
1681 struct page **page = rx->page_buffer;
1685 tsnep_rx_init(rx);
1691 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
1695 struct tsnep_rx_entry *entry = &rx->entry[i];
1714 tsnep_rx_set_xdp(rx, entry,
1715 rx->xdp_batch[allocated - 1]);
1716 tsnep_rx_activate(rx, rx->write);
1717 rx->write++;
1727 if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1728 int desc_available = tsnep_rx_desc_available(rx);
1731 xsk_set_rx_need_wakeup(rx->xsk_pool);
1733 xsk_clear_rx_need_wakeup(rx->xsk_pool);
1742 if (queue->rx && tsnep_rx_pending(queue->rx))
1762 if (queue->rx) {
1763 done = queue->rx->xsk_pool ?
1764 tsnep_rx_poll_zc(queue->rx, napi, budget) :
1765 tsnep_rx_poll(queue->rx, napi, budget);
1802 if (queue->tx && queue->rx)
1804 name, queue->rx->queue_index);
1809 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
1810 name, queue->rx->queue_index);
1842 struct tsnep_rx *rx = queue->rx;
1846 if (rx) {
1847 if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1848 xdp_rxq_info_unreg(&rx->xdp_rxq);
1849 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
1850 xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
1859 struct tsnep_rx *rx = queue->rx;
1865 if (rx) {
1868 rx->tx_queue_index = tx->queue_index;
1869 else if (rx->queue_index < adapter->num_tx_queues)
1870 rx->tx_queue_index = rx->queue_index;
1872 rx->tx_queue_index = 0;
1878 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
1879 rx->queue_index, queue->napi.napi_id);
1882 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1884 rx->page_pool);
1887 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
1888 rx->queue_index, queue->napi.napi_id);
1891 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
1896 if (rx->xsk_pool)
1897 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
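
Note: each queue registers two XDP RX-queue infos, one backed by the page pool for the copy path and one for the XSK zero-copy path, and the latter is published to the xsk_pool if one is attached. A hedged reconstruction with error unwinding simplified; the MEM_TYPE_* values are the standard ones from <net/xdp.h>:

        retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
                                  rx->queue_index, queue->napi.napi_id);
        if (retval)
                return retval;
        retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                            rx->page_pool);
        if (retval)
                return retval;

        retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
                                  rx->queue_index, queue->napi.napi_id);
        if (retval)
                return retval;
        retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
                                            MEM_TYPE_XSK_BUFF_POOL, NULL);
        if (retval)
                return retval;

        if (rx->xsk_pool)
                xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
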
1923 if (queue->rx)
1924 tsnep_rx_enable(queue->rx);
1938 if (queue->rx)
1939 tsnep_rx_disable(queue->rx);
1953 if (adapter->queue[i].rx) {
1954 retval = tsnep_rx_open(adapter->queue[i].rx);
1989 if (adapter->queue[i].rx)
1990 tsnep_rx_close(adapter->queue[i].rx);
2010 if (adapter->queue[i].rx)
2011 tsnep_rx_close(adapter->queue[i].rx);
2028 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
2029 sizeof(*queue->rx->page_buffer),
2031 if (!queue->rx->page_buffer)
2033 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
2034 sizeof(*queue->rx->xdp_batch),
2036 if (!queue->rx->xdp_batch) {
2037 kfree(queue->rx->page_buffer);
2038 queue->rx->page_buffer = NULL;
2043 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
2049 queue->rx->xsk_pool = pool;
2052 tsnep_rx_reopen_xsk(queue->rx);
2066 tsnep_rx_free_zc(queue->rx);
2068 queue->rx->xsk_pool = NULL;
2072 tsnep_rx_reopen(queue->rx);
2076 kfree(queue->rx->xdp_batch);
2077 queue->rx->xdp_batch = NULL;
2078 kfree(queue->rx->page_buffer);
2079 queue->rx->page_buffer = NULL;
2134 stats->rx_packets += adapter->rx[i].packets;
2135 stats->rx_bytes += adapter->rx[i].bytes;
2136 stats->rx_dropped += adapter->rx[i].dropped;
2137 stats->multicast += adapter->rx[i].multicast;
2441 adapter->queue[0].rx = &adapter->rx[0];
2442 adapter->queue[0].rx->adapter = adapter;
2443 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
2444 adapter->queue[0].rx->queue_index = 0;
2472 adapter->queue[i].rx = &adapter->rx[i];
2473 adapter->queue[i].rx->adapter = adapter;
2474 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
2475 adapter->queue[i].rx->queue_index = i;