162306a36Sopenharmony_ci// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 262306a36Sopenharmony_ci 362306a36Sopenharmony_ci#include <linux/dma-mapping.h> 462306a36Sopenharmony_ci#include <linux/ip.h> 562306a36Sopenharmony_ci#include <linux/pci.h> 662306a36Sopenharmony_ci#include <linux/skbuff.h> 762306a36Sopenharmony_ci#include <linux/tcp.h> 862306a36Sopenharmony_ci#include <uapi/linux/udp.h> 962306a36Sopenharmony_ci#include "funeth.h" 1062306a36Sopenharmony_ci#include "funeth_ktls.h" 1162306a36Sopenharmony_ci#include "funeth_txrx.h" 1262306a36Sopenharmony_ci#include "funeth_trace.h" 1362306a36Sopenharmony_ci#include "fun_queue.h" 1462306a36Sopenharmony_ci 1562306a36Sopenharmony_ci#define FUN_XDP_CLEAN_THRES 32 1662306a36Sopenharmony_ci#define FUN_XDP_CLEAN_BATCH 16 1762306a36Sopenharmony_ci 1862306a36Sopenharmony_ci/* DMA-map a packet and return the (length, DMA_address) pairs for its 1962306a36Sopenharmony_ci * segments. If a mapping error occurs -ENOMEM is returned. The packet 2062306a36Sopenharmony_ci * consists of an skb_shared_info and one additional address/length pair. 
2162306a36Sopenharmony_ci */ 2262306a36Sopenharmony_cistatic int fun_map_pkt(struct device *dev, const struct skb_shared_info *si, 2362306a36Sopenharmony_ci void *data, unsigned int data_len, 2462306a36Sopenharmony_ci dma_addr_t *addr, unsigned int *len) 2562306a36Sopenharmony_ci{ 2662306a36Sopenharmony_ci const skb_frag_t *fp, *end; 2762306a36Sopenharmony_ci 2862306a36Sopenharmony_ci *len = data_len; 2962306a36Sopenharmony_ci *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE); 3062306a36Sopenharmony_ci if (dma_mapping_error(dev, *addr)) 3162306a36Sopenharmony_ci return -ENOMEM; 3262306a36Sopenharmony_ci 3362306a36Sopenharmony_ci if (!si) 3462306a36Sopenharmony_ci return 0; 3562306a36Sopenharmony_ci 3662306a36Sopenharmony_ci for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) { 3762306a36Sopenharmony_ci *++len = skb_frag_size(fp); 3862306a36Sopenharmony_ci *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE); 3962306a36Sopenharmony_ci if (dma_mapping_error(dev, *addr)) 4062306a36Sopenharmony_ci goto unwind; 4162306a36Sopenharmony_ci } 4262306a36Sopenharmony_ci return 0; 4362306a36Sopenharmony_ci 4462306a36Sopenharmony_ciunwind: 4562306a36Sopenharmony_ci while (fp-- > si->frags) 4662306a36Sopenharmony_ci dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); 4762306a36Sopenharmony_ci 4862306a36Sopenharmony_ci dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE); 4962306a36Sopenharmony_ci return -ENOMEM; 5062306a36Sopenharmony_ci} 5162306a36Sopenharmony_ci 5262306a36Sopenharmony_ci/* Return the address just past the end of a Tx queue's descriptor ring. 5362306a36Sopenharmony_ci * It exploits the fact that the HW writeback area is just after the end 5462306a36Sopenharmony_ci * of the descriptor ring. 
5562306a36Sopenharmony_ci */ 5662306a36Sopenharmony_cistatic void *txq_end(const struct funeth_txq *q) 5762306a36Sopenharmony_ci{ 5862306a36Sopenharmony_ci return (void *)q->hw_wb; 5962306a36Sopenharmony_ci} 6062306a36Sopenharmony_ci 6162306a36Sopenharmony_ci/* Return the amount of space within a Tx ring from the given address to the 6262306a36Sopenharmony_ci * end. 6362306a36Sopenharmony_ci */ 6462306a36Sopenharmony_cistatic unsigned int txq_to_end(const struct funeth_txq *q, void *p) 6562306a36Sopenharmony_ci{ 6662306a36Sopenharmony_ci return txq_end(q) - p; 6762306a36Sopenharmony_ci} 6862306a36Sopenharmony_ci 6962306a36Sopenharmony_ci/* Return the number of Tx descriptors occupied by a Tx request. */ 7062306a36Sopenharmony_cistatic unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req) 7162306a36Sopenharmony_ci{ 7262306a36Sopenharmony_ci return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8); 7362306a36Sopenharmony_ci} 7462306a36Sopenharmony_ci 7562306a36Sopenharmony_ci/* Write a gather list to the Tx descriptor at @req from @ngle address/length 7662306a36Sopenharmony_ci * pairs. 
 */
static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
					  struct fun_eth_tx_req *req,
					  const dma_addr_t *addrs,
					  const unsigned int *lens,
					  unsigned int ngle)
{
	struct fun_dataop_gl *gle;
	unsigned int i;

	/* Request length in 8-byte units: the request header plus one gather
	 * entry per (address, length) pair.
	 */
	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;

	/* Write entries until done or the end of the ring is reached. */
	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

	/* Wrapped: continue writing the remaining entries at the start of
	 * the ring.
	 */
	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}

	/* Points just past the last entry written, possibly wrapped. */
	return gle;
}

/* Return the TCP doff/flags 16-bit word of @th in network byte order. */
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}

/* Handle an skb from a TLS-offloaded socket.  If the device can continue
 * inline crypto for it, return the skb and its TLS payload length through
 * @tls_len.  Otherwise fall back to software encryption; returns NULL if the
 * fallback fails and the packet must be dropped.
 */
static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	/* TCP payload length; header-only segments need no crypto. */
	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		/* In-order with the record stream: HW can encrypt inline. */
		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
		/* Ahead of the expected sequence: ask the stack to resync,
		 * but not for (re)transmits of old data.
		 */
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	/* Out-of-order or resyncing: encrypt in software instead. */
	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
#else
	return NULL;
#endif
}

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has made certain enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int l4_hlen;
	unsigned int ngle;
	u16 flags;

	/* DMA-map the linear part and all fragments up front. */
	shinfo = skb_shinfo(skb);
	if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
				 skb_headlen(skb), addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	/* Fill in the fixed part of the Tx request header. */
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			/* Tunnelled TSO: program both outer and inner header
			 * updates.
			 */
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			/* UDP segmentation offload. */
			flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L4_LEN |
				FUN_ETH_UPDATE_INNER_L3_LEN;

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			else
				flags |= FUN_ETH_INNER_IPV6;

			l4_hlen = sizeof(struct udphdr);
			/* Synthesize a doff-style word for the fixed-size UDP
			 * header (length in 4-byte units in the top nibble).
			 */
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     cpu_to_be16(l4_hlen << 10), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_uso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

		/* Account for the segments and replicated headers HW will
		 * generate beyond this single skb.
		 */
		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       l4_hlen) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Plain checksum offload. */
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		/* No offloads requested. */
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}

	/* One gather entry for the linear part plus one per fragment. */
	ngle = shinfo->nr_frags + 1;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

	gle = fun_write_gl(q, req, addrs, lens, ngle);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		/* The TLS metadata follows the gather list in the request. */
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	/* Remember the skb for reclaim after HW completes the request. */
	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. Current condition is
 * that the queue must be >= 1/4 empty.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}

/* ndo_start_xmit handler: optionally route through TLS handling, write the
 * Tx descriptors, and ring the doorbell unless more packets are coming.
 */
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (tls_is_skb_tx_device_offloaded(skb)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	/* BQL accounting; only ring the doorbell when xmit_more says the
	 * train has ended.
	 */
	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in a xmit_more train,
	 * ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		/* Entry 0 was mapped with dma_map_single (linear part). */
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

		/* Remaining entries are page fragments; walk them until done
		 * or until the gather list wraps at the end of the ring.
		 */
		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

		/* Any entries left after a wrap continue at the ring start
		 * (mirrors how fun_write_gl() wrote them).
		 */
		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		/* Free every completed packet up to the snapshotted head. */
		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	/* Wake the queue if xmit stopped it and enough space is now free. */
	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget; /* exhausted budget */

	napi_complete(napi); /* exhausted pending work */
	/* Rearm the queue interrupt, reporting how far we have consumed. */
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);

			xdp_return_frame(q->info[reclaim_idx].xdpf);

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			ndesc += pkt_desc;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += ndesc;
	return npkts;
}

/* Transmit an XDP frame on queue @q.  Returns false if the frame cannot be
 * sent (queue full or DMA mapping failure); the caller retains ownership of
 * the frame in that case.
 */
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
	unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
	const struct skb_shared_info *si = NULL;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t dma[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;

	/* Opportunistically reclaim a batch when the queue is getting low. */
	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		si = xdp_get_shared_info_from_frame(xdpf);
		tot_len = xdp_get_frame_len(xdpf);
		nfrags += si->nr_frags;
		ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
				      sizeof(struct fun_dataop_gl)),
				     FUNETH_SQE_SIZE);
	}

	if (unlikely(fun_txq_avail(q) < ndesc)) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
				 lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	/* Plain Tx request: no offloads for XDP frames. */
	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);

	fun_write_gl(q, req, dma, lens, nfrags);

	/* Remember the frame so reclaim can return it. */
	q->info[idx].xdpf = xdpf;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += tot_len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, tot_len, idx, nfrags);
	q->prod_cnt += ndesc;

	return true;
}

/* ndo_xdp_xmit handler: send up to @n frames on this CPU's XDP Tx queue.
 * Returns the number of frames sent, or a negative errno.
 */
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

	/* XDP Tx queues are per-CPU. */
	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++)
		if (!fun_xdp_tx(q, frames[i]))
			break;

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
60062306a36Sopenharmony_ci */ 60162306a36Sopenharmony_cistatic void fun_txq_purge(struct funeth_txq *q) 60262306a36Sopenharmony_ci{ 60362306a36Sopenharmony_ci while (q->cons_cnt != q->prod_cnt) { 60462306a36Sopenharmony_ci unsigned int idx = q->cons_cnt & q->mask; 60562306a36Sopenharmony_ci 60662306a36Sopenharmony_ci q->cons_cnt += fun_unmap_pkt(q, idx); 60762306a36Sopenharmony_ci dev_kfree_skb_any(q->info[idx].skb); 60862306a36Sopenharmony_ci } 60962306a36Sopenharmony_ci netdev_tx_reset_queue(q->ndq); 61062306a36Sopenharmony_ci} 61162306a36Sopenharmony_ci 61262306a36Sopenharmony_cistatic void fun_xdpq_purge(struct funeth_txq *q) 61362306a36Sopenharmony_ci{ 61462306a36Sopenharmony_ci while (q->cons_cnt != q->prod_cnt) { 61562306a36Sopenharmony_ci unsigned int idx = q->cons_cnt & q->mask; 61662306a36Sopenharmony_ci 61762306a36Sopenharmony_ci q->cons_cnt += fun_unmap_pkt(q, idx); 61862306a36Sopenharmony_ci xdp_return_frame(q->info[idx].xdpf); 61962306a36Sopenharmony_ci } 62062306a36Sopenharmony_ci} 62162306a36Sopenharmony_ci 62262306a36Sopenharmony_ci/* Create a Tx queue, allocating all the host resources needed. 
 */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	/* Place the queue's memory on the node that will service it: the
	 * IRQ's node for skb queues, CPU @qidx's node for XDP queues.
	 */
	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	/* One allocation provides the descriptor ring, the shadow info
	 * array, and the HW writeback area (returned in q->hw_wb).
	 */
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1; /* index mask; assumes ndesc is a power of 2 */
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}

/* Free a Tx queue's host resources, folding its final stats into the
 * netdev-wide totals first.
 */
static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue: create the SQ on the device,
 * bind an Ethernet context to it, and for IRQ-driven queues arm the
 * doorbell with the interrupt moderation settings.
 */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0; /* reset the HW head-writeback area */
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0; /* XDP queues have no IRQ */

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	/* On success this returns the Ethernet context id bound to the SQ. */
	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}

/* Release the device portion of a Tx queue and drop any packets still
 * queued, returning the queue to the software-only state. No-op if the
 * device portion was never created.
 */
static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	/* Only after HW access is revoked is it safe to purge the ring. */
	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
75862306a36Sopenharmony_ci */ 75962306a36Sopenharmony_ciint funeth_txq_create(struct net_device *dev, unsigned int qidx, 76062306a36Sopenharmony_ci unsigned int ndesc, struct fun_irq *irq, int state, 76162306a36Sopenharmony_ci struct funeth_txq **qp) 76262306a36Sopenharmony_ci{ 76362306a36Sopenharmony_ci struct funeth_txq *q = *qp; 76462306a36Sopenharmony_ci int err; 76562306a36Sopenharmony_ci 76662306a36Sopenharmony_ci if (!q) 76762306a36Sopenharmony_ci q = fun_txq_create_sw(dev, qidx, ndesc, irq); 76862306a36Sopenharmony_ci if (!q) 76962306a36Sopenharmony_ci return -ENOMEM; 77062306a36Sopenharmony_ci 77162306a36Sopenharmony_ci if (q->init_state >= state) 77262306a36Sopenharmony_ci goto out; 77362306a36Sopenharmony_ci 77462306a36Sopenharmony_ci err = fun_txq_create_dev(q, irq); 77562306a36Sopenharmony_ci if (err) { 77662306a36Sopenharmony_ci if (!*qp) 77762306a36Sopenharmony_ci fun_txq_free_sw(q); 77862306a36Sopenharmony_ci return err; 77962306a36Sopenharmony_ci } 78062306a36Sopenharmony_ci 78162306a36Sopenharmony_ciout: 78262306a36Sopenharmony_ci *qp = q; 78362306a36Sopenharmony_ci return 0; 78462306a36Sopenharmony_ci} 78562306a36Sopenharmony_ci 78662306a36Sopenharmony_ci/* Free Tx queue resources until it reaches the target state. 78762306a36Sopenharmony_ci * The queue must be already disconnected from the stack. 78862306a36Sopenharmony_ci */ 78962306a36Sopenharmony_cistruct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state) 79062306a36Sopenharmony_ci{ 79162306a36Sopenharmony_ci if (state < FUN_QSTATE_INIT_FULL) 79262306a36Sopenharmony_ci fun_txq_free_dev(q); 79362306a36Sopenharmony_ci 79462306a36Sopenharmony_ci if (state == FUN_QSTATE_DESTROYED) { 79562306a36Sopenharmony_ci fun_txq_free_sw(q); 79662306a36Sopenharmony_ci q = NULL; 79762306a36Sopenharmony_ci } 79862306a36Sopenharmony_ci 79962306a36Sopenharmony_ci return q; 80062306a36Sopenharmony_ci} 801