162306a36Sopenharmony_ci// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later 262306a36Sopenharmony_ci/* 362306a36Sopenharmony_ci * Copyright 2008 - 2016 Freescale Semiconductor Inc. 462306a36Sopenharmony_ci * Copyright 2020 NXP 562306a36Sopenharmony_ci */ 662306a36Sopenharmony_ci 762306a36Sopenharmony_ci#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 862306a36Sopenharmony_ci 962306a36Sopenharmony_ci#include <linux/init.h> 1062306a36Sopenharmony_ci#include <linux/mod_devicetable.h> 1162306a36Sopenharmony_ci#include <linux/module.h> 1262306a36Sopenharmony_ci#include <linux/of_mdio.h> 1362306a36Sopenharmony_ci#include <linux/of_net.h> 1462306a36Sopenharmony_ci#include <linux/io.h> 1562306a36Sopenharmony_ci#include <linux/if_arp.h> 1662306a36Sopenharmony_ci#include <linux/if_vlan.h> 1762306a36Sopenharmony_ci#include <linux/icmp.h> 1862306a36Sopenharmony_ci#include <linux/ip.h> 1962306a36Sopenharmony_ci#include <linux/ipv6.h> 2062306a36Sopenharmony_ci#include <linux/platform_device.h> 2162306a36Sopenharmony_ci#include <linux/udp.h> 2262306a36Sopenharmony_ci#include <linux/tcp.h> 2362306a36Sopenharmony_ci#include <linux/net.h> 2462306a36Sopenharmony_ci#include <linux/skbuff.h> 2562306a36Sopenharmony_ci#include <linux/etherdevice.h> 2662306a36Sopenharmony_ci#include <linux/if_ether.h> 2762306a36Sopenharmony_ci#include <linux/highmem.h> 2862306a36Sopenharmony_ci#include <linux/percpu.h> 2962306a36Sopenharmony_ci#include <linux/dma-mapping.h> 3062306a36Sopenharmony_ci#include <linux/sort.h> 3162306a36Sopenharmony_ci#include <linux/phy_fixed.h> 3262306a36Sopenharmony_ci#include <linux/bpf.h> 3362306a36Sopenharmony_ci#include <linux/bpf_trace.h> 3462306a36Sopenharmony_ci#include <soc/fsl/bman.h> 3562306a36Sopenharmony_ci#include <soc/fsl/qman.h> 3662306a36Sopenharmony_ci#include "fman.h" 3762306a36Sopenharmony_ci#include "fman_port.h" 3862306a36Sopenharmony_ci#include "mac.h" 3962306a36Sopenharmony_ci#include "dpaa_eth.h" 4062306a36Sopenharmony_ci 4162306a36Sopenharmony_ci/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files 4262306a36Sopenharmony_ci * using trace events only need to #include <trace/events/sched.h> 4362306a36Sopenharmony_ci */ 4462306a36Sopenharmony_ci#define CREATE_TRACE_POINTS 4562306a36Sopenharmony_ci#include "dpaa_eth_trace.h" 4662306a36Sopenharmony_ci 4762306a36Sopenharmony_cistatic int debug = -1; 4862306a36Sopenharmony_cimodule_param(debug, int, 0444); 4962306a36Sopenharmony_ciMODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)"); 5062306a36Sopenharmony_ci 5162306a36Sopenharmony_cistatic u16 tx_timeout = 1000; 5262306a36Sopenharmony_cimodule_param(tx_timeout, ushort, 0444); 5362306a36Sopenharmony_ciMODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); 5462306a36Sopenharmony_ci 5562306a36Sopenharmony_ci#define FM_FD_STAT_RX_ERRORS \ 5662306a36Sopenharmony_ci (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \ 5762306a36Sopenharmony_ci FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \ 5862306a36Sopenharmony_ci FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \ 5962306a36Sopenharmony_ci FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \ 6062306a36Sopenharmony_ci FM_FD_ERR_PRS_HDR_ERR) 6162306a36Sopenharmony_ci 6262306a36Sopenharmony_ci#define FM_FD_STAT_TX_ERRORS \ 6362306a36Sopenharmony_ci (FM_FD_ERR_UNSUPPORTED_FORMAT | \ 6462306a36Sopenharmony_ci FM_FD_ERR_LENGTH | FM_FD_ERR_DMA) 6562306a36Sopenharmony_ci 6662306a36Sopenharmony_ci#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ 6762306a36Sopenharmony_ci NETIF_MSG_LINK | NETIF_MSG_IFUP | \ 6862306a36Sopenharmony_ci NETIF_MSG_IFDOWN | NETIF_MSG_HW) 6962306a36Sopenharmony_ci 7062306a36Sopenharmony_ci#define DPAA_INGRESS_CS_THRESHOLD 0x10000000 7162306a36Sopenharmony_ci/* Ingress congestion threshold on FMan ports 7262306a36Sopenharmony_ci * The size in bytes of the ingress tail-drop threshold on FMan ports. 7362306a36Sopenharmony_ci * Traffic piling up above this value will be rejected by QMan and discarded 7462306a36Sopenharmony_ci * by FMan. 7562306a36Sopenharmony_ci */ 7662306a36Sopenharmony_ci 7762306a36Sopenharmony_ci/* Size in bytes of the FQ taildrop threshold */ 7862306a36Sopenharmony_ci#define DPAA_FQ_TD 0x200000 7962306a36Sopenharmony_ci 8062306a36Sopenharmony_ci#define DPAA_CS_THRESHOLD_1G 0x06000000 8162306a36Sopenharmony_ci/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000 8262306a36Sopenharmony_ci * The size in bytes of the egress Congestion State notification threshold on 8362306a36Sopenharmony_ci * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a 8462306a36Sopenharmony_ci * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), 8562306a36Sopenharmony_ci * and the larger the frame size, the more acute the problem. 8662306a36Sopenharmony_ci * So we have to find a balance between these factors: 8762306a36Sopenharmony_ci * - avoiding the device staying congested for a prolonged time (risking 8862306a36Sopenharmony_ci * the netdev watchdog to fire - see also the tx_timeout module param); 8962306a36Sopenharmony_ci * - affecting performance of protocols such as TCP, which otherwise 9062306a36Sopenharmony_ci * behave well under the congestion notification mechanism; 9162306a36Sopenharmony_ci * - preventing the Tx cores from tightly-looping (as if the congestion 9262306a36Sopenharmony_ci * threshold was too low to be effective); 9362306a36Sopenharmony_ci * - running out of memory if the CS threshold is set too high. 9462306a36Sopenharmony_ci */ 9562306a36Sopenharmony_ci 9662306a36Sopenharmony_ci#define DPAA_CS_THRESHOLD_10G 0x10000000 9762306a36Sopenharmony_ci/* The size in bytes of the egress Congestion State notification threshold on 9862306a36Sopenharmony_ci * 10G ports, range 0x1000 .. 0x10000000 9962306a36Sopenharmony_ci */ 10062306a36Sopenharmony_ci 10162306a36Sopenharmony_ci/* Largest value that the FQD's OAL field can hold */ 10262306a36Sopenharmony_ci#define FSL_QMAN_MAX_OAL 127 10362306a36Sopenharmony_ci 10462306a36Sopenharmony_ci/* Default alignment for start of data in an Rx FD */ 10562306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 10662306a36Sopenharmony_ci/* aligning data start to 64 avoids DMA transaction splits, unless the buffer 10762306a36Sopenharmony_ci * is crossing a 4k page boundary 10862306a36Sopenharmony_ci */ 10962306a36Sopenharmony_ci#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16) 11062306a36Sopenharmony_ci/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary 11162306a36Sopenharmony_ci * crossings; also, all SG fragments except the last must have a size multiple 11262306a36Sopenharmony_ci * of 256 to avoid DMA transaction splits 11362306a36Sopenharmony_ci */ 11462306a36Sopenharmony_ci#define DPAA_A050385_ALIGN 256 11562306a36Sopenharmony_ci#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \ 11662306a36Sopenharmony_ci DPAA_A050385_ALIGN : 16) 11762306a36Sopenharmony_ci#else 11862306a36Sopenharmony_ci#define DPAA_FD_DATA_ALIGNMENT 16 11962306a36Sopenharmony_ci#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT 12062306a36Sopenharmony_ci#endif 12162306a36Sopenharmony_ci 12262306a36Sopenharmony_ci/* The DPAA requires 256 bytes reserved and mapped for the SGT */ 12362306a36Sopenharmony_ci#define DPAA_SGT_SIZE 256 12462306a36Sopenharmony_ci 12562306a36Sopenharmony_ci/* Values for the L3R field of the FM Parse Results 12662306a36Sopenharmony_ci */ 12762306a36Sopenharmony_ci/* L3 Type field: First IP Present IPv4 */ 12862306a36Sopenharmony_ci#define FM_L3_PARSE_RESULT_IPV4 0x8000 12962306a36Sopenharmony_ci/* L3 Type field: First IP Present IPv6 */ 13062306a36Sopenharmony_ci#define FM_L3_PARSE_RESULT_IPV6 0x4000 13162306a36Sopenharmony_ci/* Values for the L4R field of the FM Parse Results */ 13262306a36Sopenharmony_ci/* L4 Type field: UDP */ 13362306a36Sopenharmony_ci#define FM_L4_PARSE_RESULT_UDP 0x40 13462306a36Sopenharmony_ci/* L4 Type field: TCP */ 13562306a36Sopenharmony_ci#define FM_L4_PARSE_RESULT_TCP 0x20 13662306a36Sopenharmony_ci 13762306a36Sopenharmony_ci/* FD status field indicating whether the FM Parser has attempted to validate 13862306a36Sopenharmony_ci * the L4 csum of the frame. 13962306a36Sopenharmony_ci * Note that having this bit set doesn't necessarily imply that the checksum 14062306a36Sopenharmony_ci * is valid. One would have to check the parse results to find that out. 14162306a36Sopenharmony_ci */ 14262306a36Sopenharmony_ci#define FM_FD_STAT_L4CV 0x00000004 14362306a36Sopenharmony_ci 14462306a36Sopenharmony_ci#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ 14562306a36Sopenharmony_ci#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ 14662306a36Sopenharmony_ci 14762306a36Sopenharmony_ci#define FSL_DPAA_BPID_INV 0xff 14862306a36Sopenharmony_ci#define FSL_DPAA_ETH_MAX_BUF_COUNT 128 14962306a36Sopenharmony_ci#define FSL_DPAA_ETH_REFILL_THRESHOLD 80 15062306a36Sopenharmony_ci 15162306a36Sopenharmony_ci#define DPAA_TX_PRIV_DATA_SIZE 16 15262306a36Sopenharmony_ci#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) 15362306a36Sopenharmony_ci#define DPAA_TIME_STAMP_SIZE 8 15462306a36Sopenharmony_ci#define DPAA_HASH_RESULTS_SIZE 8 15562306a36Sopenharmony_ci#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \ 15662306a36Sopenharmony_ci + DPAA_HASH_RESULTS_SIZE) 15762306a36Sopenharmony_ci#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \ 15862306a36Sopenharmony_ci XDP_PACKET_HEADROOM - DPAA_HWA_SIZE) 15962306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 16062306a36Sopenharmony_ci#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE) 16162306a36Sopenharmony_ci#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \ 16262306a36Sopenharmony_ci DPAA_RX_PRIV_DATA_A050385_SIZE : \ 16362306a36Sopenharmony_ci DPAA_RX_PRIV_DATA_DEFAULT_SIZE) 16462306a36Sopenharmony_ci#else 16562306a36Sopenharmony_ci#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE 16662306a36Sopenharmony_ci#endif 16762306a36Sopenharmony_ci 16862306a36Sopenharmony_ci#define DPAA_ETH_PCD_RXQ_NUM 128 16962306a36Sopenharmony_ci 17062306a36Sopenharmony_ci#define DPAA_ENQUEUE_RETRIES 100000 17162306a36Sopenharmony_ci 17262306a36Sopenharmony_cienum port_type {RX, TX}; 17362306a36Sopenharmony_ci 17462306a36Sopenharmony_cistruct fm_port_fqs { 17562306a36Sopenharmony_ci struct dpaa_fq *tx_defq; 17662306a36Sopenharmony_ci struct dpaa_fq *tx_errq; 17762306a36Sopenharmony_ci struct dpaa_fq *rx_defq; 17862306a36Sopenharmony_ci struct dpaa_fq *rx_errq; 17962306a36Sopenharmony_ci struct dpaa_fq *rx_pcdq; 18062306a36Sopenharmony_ci}; 18162306a36Sopenharmony_ci 18262306a36Sopenharmony_ci/* All the dpa bps in use at any moment */ 18362306a36Sopenharmony_cistatic struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; 18462306a36Sopenharmony_ci 18562306a36Sopenharmony_ci#define DPAA_BP_RAW_SIZE 4096 18662306a36Sopenharmony_ci 18762306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 18862306a36Sopenharmony_ci#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \ 18962306a36Sopenharmony_ci ~(DPAA_A050385_ALIGN - 1)) 19062306a36Sopenharmony_ci#else 19162306a36Sopenharmony_ci#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) 19262306a36Sopenharmony_ci#endif 19362306a36Sopenharmony_ci 19462306a36Sopenharmony_cistatic int dpaa_max_frm; 19562306a36Sopenharmony_ci 19662306a36Sopenharmony_cistatic int dpaa_rx_extra_headroom; 19762306a36Sopenharmony_ci 19862306a36Sopenharmony_ci#define dpaa_get_max_mtu() \ 19962306a36Sopenharmony_ci (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)) 20062306a36Sopenharmony_ci 20162306a36Sopenharmony_cistatic void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed); 20262306a36Sopenharmony_ci 20362306a36Sopenharmony_cistatic int dpaa_netdev_init(struct net_device *net_dev, 20462306a36Sopenharmony_ci const struct net_device_ops *dpaa_ops, 20562306a36Sopenharmony_ci u16 tx_timeout) 20662306a36Sopenharmony_ci{ 20762306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 20862306a36Sopenharmony_ci struct device *dev = net_dev->dev.parent; 20962306a36Sopenharmony_ci struct mac_device *mac_dev = priv->mac_dev; 21062306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 21162306a36Sopenharmony_ci const u8 *mac_addr; 21262306a36Sopenharmony_ci int i, err; 21362306a36Sopenharmony_ci 21462306a36Sopenharmony_ci /* Although we access another CPU's private data here 21562306a36Sopenharmony_ci * we do it at initialization so it is safe 21662306a36Sopenharmony_ci */ 21762306a36Sopenharmony_ci for_each_possible_cpu(i) { 21862306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 21962306a36Sopenharmony_ci percpu_priv->net_dev = net_dev; 22062306a36Sopenharmony_ci } 22162306a36Sopenharmony_ci 22262306a36Sopenharmony_ci net_dev->netdev_ops = dpaa_ops; 22362306a36Sopenharmony_ci mac_addr = mac_dev->addr; 22462306a36Sopenharmony_ci 22562306a36Sopenharmony_ci net_dev->mem_start = (unsigned long)priv->mac_dev->res->start; 22662306a36Sopenharmony_ci net_dev->mem_end = (unsigned long)priv->mac_dev->res->end; 22762306a36Sopenharmony_ci 22862306a36Sopenharmony_ci net_dev->min_mtu = ETH_MIN_MTU; 22962306a36Sopenharmony_ci net_dev->max_mtu = dpaa_get_max_mtu(); 23062306a36Sopenharmony_ci 23162306a36Sopenharmony_ci net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 23262306a36Sopenharmony_ci NETIF_F_LLTX | NETIF_F_RXHASH); 23362306a36Sopenharmony_ci 23462306a36Sopenharmony_ci net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; 23562306a36Sopenharmony_ci /* The kernels enables GSO automatically, if we declare NETIF_F_SG. 23662306a36Sopenharmony_ci * For conformity, we'll still declare GSO explicitly. 23762306a36Sopenharmony_ci */ 23862306a36Sopenharmony_ci net_dev->features |= NETIF_F_GSO; 23962306a36Sopenharmony_ci net_dev->features |= NETIF_F_RXCSUM; 24062306a36Sopenharmony_ci 24162306a36Sopenharmony_ci net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 24262306a36Sopenharmony_ci /* we do not want shared skbs on TX */ 24362306a36Sopenharmony_ci net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 24462306a36Sopenharmony_ci 24562306a36Sopenharmony_ci net_dev->features |= net_dev->hw_features; 24662306a36Sopenharmony_ci net_dev->vlan_features = net_dev->features; 24762306a36Sopenharmony_ci 24862306a36Sopenharmony_ci net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | 24962306a36Sopenharmony_ci NETDEV_XDP_ACT_REDIRECT | 25062306a36Sopenharmony_ci NETDEV_XDP_ACT_NDO_XMIT; 25162306a36Sopenharmony_ci 25262306a36Sopenharmony_ci if (is_valid_ether_addr(mac_addr)) { 25362306a36Sopenharmony_ci memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); 25462306a36Sopenharmony_ci eth_hw_addr_set(net_dev, mac_addr); 25562306a36Sopenharmony_ci } else { 25662306a36Sopenharmony_ci eth_hw_addr_random(net_dev); 25762306a36Sopenharmony_ci err = mac_dev->change_addr(mac_dev->fman_mac, 25862306a36Sopenharmony_ci (const enet_addr_t *)net_dev->dev_addr); 25962306a36Sopenharmony_ci if (err) { 26062306a36Sopenharmony_ci dev_err(dev, "Failed to set random MAC address\n"); 26162306a36Sopenharmony_ci return -EINVAL; 26262306a36Sopenharmony_ci } 26362306a36Sopenharmony_ci dev_info(dev, "Using random MAC address: %pM\n", 26462306a36Sopenharmony_ci net_dev->dev_addr); 26562306a36Sopenharmony_ci } 26662306a36Sopenharmony_ci 26762306a36Sopenharmony_ci net_dev->ethtool_ops = &dpaa_ethtool_ops; 26862306a36Sopenharmony_ci 26962306a36Sopenharmony_ci net_dev->needed_headroom = priv->tx_headroom; 27062306a36Sopenharmony_ci net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); 27162306a36Sopenharmony_ci 27262306a36Sopenharmony_ci /* The rest of the config is filled in by the mac device already */ 27362306a36Sopenharmony_ci mac_dev->phylink_config.dev = &net_dev->dev; 27462306a36Sopenharmony_ci mac_dev->phylink_config.type = PHYLINK_NETDEV; 27562306a36Sopenharmony_ci mac_dev->update_speed = dpaa_eth_cgr_set_speed; 27662306a36Sopenharmony_ci mac_dev->phylink = phylink_create(&mac_dev->phylink_config, 27762306a36Sopenharmony_ci dev_fwnode(mac_dev->dev), 27862306a36Sopenharmony_ci mac_dev->phy_if, 27962306a36Sopenharmony_ci mac_dev->phylink_ops); 28062306a36Sopenharmony_ci if (IS_ERR(mac_dev->phylink)) { 28162306a36Sopenharmony_ci err = PTR_ERR(mac_dev->phylink); 28262306a36Sopenharmony_ci dev_err_probe(dev, err, "Could not create phylink\n"); 28362306a36Sopenharmony_ci return err; 28462306a36Sopenharmony_ci } 28562306a36Sopenharmony_ci 28662306a36Sopenharmony_ci /* start without the RUNNING flag, phylib controls it later */ 28762306a36Sopenharmony_ci netif_carrier_off(net_dev); 28862306a36Sopenharmony_ci 28962306a36Sopenharmony_ci err = register_netdev(net_dev); 29062306a36Sopenharmony_ci if (err < 0) { 29162306a36Sopenharmony_ci dev_err(dev, "register_netdev() = %d\n", err); 29262306a36Sopenharmony_ci phylink_destroy(mac_dev->phylink); 29362306a36Sopenharmony_ci return err; 29462306a36Sopenharmony_ci } 29562306a36Sopenharmony_ci 29662306a36Sopenharmony_ci return 0; 29762306a36Sopenharmony_ci} 29862306a36Sopenharmony_ci 29962306a36Sopenharmony_cistatic int dpaa_stop(struct net_device *net_dev) 30062306a36Sopenharmony_ci{ 30162306a36Sopenharmony_ci struct mac_device *mac_dev; 30262306a36Sopenharmony_ci struct dpaa_priv *priv; 30362306a36Sopenharmony_ci int i, error; 30462306a36Sopenharmony_ci int err = 0; 30562306a36Sopenharmony_ci 30662306a36Sopenharmony_ci priv = netdev_priv(net_dev); 30762306a36Sopenharmony_ci mac_dev = priv->mac_dev; 30862306a36Sopenharmony_ci 30962306a36Sopenharmony_ci netif_tx_stop_all_queues(net_dev); 31062306a36Sopenharmony_ci /* Allow the Fman (Tx) port to process in-flight frames before we 31162306a36Sopenharmony_ci * try switching it off. 31262306a36Sopenharmony_ci */ 31362306a36Sopenharmony_ci msleep(200); 31462306a36Sopenharmony_ci 31562306a36Sopenharmony_ci phylink_stop(mac_dev->phylink); 31662306a36Sopenharmony_ci mac_dev->disable(mac_dev->fman_mac); 31762306a36Sopenharmony_ci 31862306a36Sopenharmony_ci for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { 31962306a36Sopenharmony_ci error = fman_port_disable(mac_dev->port[i]); 32062306a36Sopenharmony_ci if (error) 32162306a36Sopenharmony_ci err = error; 32262306a36Sopenharmony_ci } 32362306a36Sopenharmony_ci 32462306a36Sopenharmony_ci phylink_disconnect_phy(mac_dev->phylink); 32562306a36Sopenharmony_ci net_dev->phydev = NULL; 32662306a36Sopenharmony_ci 32762306a36Sopenharmony_ci msleep(200); 32862306a36Sopenharmony_ci 32962306a36Sopenharmony_ci return err; 33062306a36Sopenharmony_ci} 33162306a36Sopenharmony_ci 33262306a36Sopenharmony_cistatic void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue) 33362306a36Sopenharmony_ci{ 33462306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 33562306a36Sopenharmony_ci const struct dpaa_priv *priv; 33662306a36Sopenharmony_ci 33762306a36Sopenharmony_ci priv = netdev_priv(net_dev); 33862306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 33962306a36Sopenharmony_ci 34062306a36Sopenharmony_ci netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", 34162306a36Sopenharmony_ci jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); 34262306a36Sopenharmony_ci 34362306a36Sopenharmony_ci percpu_priv->stats.tx_errors++; 34462306a36Sopenharmony_ci} 34562306a36Sopenharmony_ci 34662306a36Sopenharmony_ci/* Calculates the statistics for the given device by adding the statistics 34762306a36Sopenharmony_ci * collected by each CPU. 34862306a36Sopenharmony_ci */ 34962306a36Sopenharmony_cistatic void dpaa_get_stats64(struct net_device *net_dev, 35062306a36Sopenharmony_ci struct rtnl_link_stats64 *s) 35162306a36Sopenharmony_ci{ 35262306a36Sopenharmony_ci int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); 35362306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 35462306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 35562306a36Sopenharmony_ci u64 *netstats = (u64 *)s; 35662306a36Sopenharmony_ci u64 *cpustats; 35762306a36Sopenharmony_ci int i, j; 35862306a36Sopenharmony_ci 35962306a36Sopenharmony_ci for_each_possible_cpu(i) { 36062306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 36162306a36Sopenharmony_ci 36262306a36Sopenharmony_ci cpustats = (u64 *)&percpu_priv->stats; 36362306a36Sopenharmony_ci 36462306a36Sopenharmony_ci /* add stats from all CPUs */ 36562306a36Sopenharmony_ci for (j = 0; j < numstats; j++) 36662306a36Sopenharmony_ci netstats[j] += cpustats[j]; 36762306a36Sopenharmony_ci } 36862306a36Sopenharmony_ci} 36962306a36Sopenharmony_ci 37062306a36Sopenharmony_cistatic int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, 37162306a36Sopenharmony_ci void *type_data) 37262306a36Sopenharmony_ci{ 37362306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 37462306a36Sopenharmony_ci struct tc_mqprio_qopt *mqprio = type_data; 37562306a36Sopenharmony_ci u8 num_tc; 37662306a36Sopenharmony_ci int i; 37762306a36Sopenharmony_ci 37862306a36Sopenharmony_ci if (type != TC_SETUP_QDISC_MQPRIO) 37962306a36Sopenharmony_ci return -EOPNOTSUPP; 38062306a36Sopenharmony_ci 38162306a36Sopenharmony_ci mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 38262306a36Sopenharmony_ci num_tc = mqprio->num_tc; 38362306a36Sopenharmony_ci 38462306a36Sopenharmony_ci if (num_tc == priv->num_tc) 38562306a36Sopenharmony_ci return 0; 38662306a36Sopenharmony_ci 38762306a36Sopenharmony_ci if (!num_tc) { 38862306a36Sopenharmony_ci netdev_reset_tc(net_dev); 38962306a36Sopenharmony_ci goto out; 39062306a36Sopenharmony_ci } 39162306a36Sopenharmony_ci 39262306a36Sopenharmony_ci if (num_tc > DPAA_TC_NUM) { 39362306a36Sopenharmony_ci netdev_err(net_dev, "Too many traffic classes: max %d supported.\n", 39462306a36Sopenharmony_ci DPAA_TC_NUM); 39562306a36Sopenharmony_ci return -EINVAL; 39662306a36Sopenharmony_ci } 39762306a36Sopenharmony_ci 39862306a36Sopenharmony_ci netdev_set_num_tc(net_dev, num_tc); 39962306a36Sopenharmony_ci 40062306a36Sopenharmony_ci for (i = 0; i < num_tc; i++) 40162306a36Sopenharmony_ci netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, 40262306a36Sopenharmony_ci i * DPAA_TC_TXQ_NUM); 40362306a36Sopenharmony_ci 40462306a36Sopenharmony_ciout: 40562306a36Sopenharmony_ci priv->num_tc = num_tc ? : 1; 40662306a36Sopenharmony_ci netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); 40762306a36Sopenharmony_ci return 0; 40862306a36Sopenharmony_ci} 40962306a36Sopenharmony_ci 41062306a36Sopenharmony_cistatic struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) 41162306a36Sopenharmony_ci{ 41262306a36Sopenharmony_ci struct dpaa_eth_data *eth_data; 41362306a36Sopenharmony_ci struct device *dpaa_dev; 41462306a36Sopenharmony_ci struct mac_device *mac_dev; 41562306a36Sopenharmony_ci 41662306a36Sopenharmony_ci dpaa_dev = &pdev->dev; 41762306a36Sopenharmony_ci eth_data = dpaa_dev->platform_data; 41862306a36Sopenharmony_ci if (!eth_data) { 41962306a36Sopenharmony_ci dev_err(dpaa_dev, "eth_data missing\n"); 42062306a36Sopenharmony_ci return ERR_PTR(-ENODEV); 42162306a36Sopenharmony_ci } 42262306a36Sopenharmony_ci mac_dev = eth_data->mac_dev; 42362306a36Sopenharmony_ci if (!mac_dev) { 42462306a36Sopenharmony_ci dev_err(dpaa_dev, "mac_dev missing\n"); 42562306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 42662306a36Sopenharmony_ci } 42762306a36Sopenharmony_ci 42862306a36Sopenharmony_ci return mac_dev; 42962306a36Sopenharmony_ci} 43062306a36Sopenharmony_ci 43162306a36Sopenharmony_cistatic int dpaa_set_mac_address(struct net_device *net_dev, void *addr) 43262306a36Sopenharmony_ci{ 43362306a36Sopenharmony_ci const struct dpaa_priv *priv; 43462306a36Sopenharmony_ci struct mac_device *mac_dev; 43562306a36Sopenharmony_ci struct sockaddr old_addr; 43662306a36Sopenharmony_ci int err; 43762306a36Sopenharmony_ci 43862306a36Sopenharmony_ci priv = netdev_priv(net_dev); 43962306a36Sopenharmony_ci 44062306a36Sopenharmony_ci memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); 44162306a36Sopenharmony_ci 44262306a36Sopenharmony_ci err = eth_mac_addr(net_dev, addr); 44362306a36Sopenharmony_ci if (err < 0) { 44462306a36Sopenharmony_ci netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); 44562306a36Sopenharmony_ci return err; 44662306a36Sopenharmony_ci } 44762306a36Sopenharmony_ci 44862306a36Sopenharmony_ci mac_dev = priv->mac_dev; 44962306a36Sopenharmony_ci 45062306a36Sopenharmony_ci err = mac_dev->change_addr(mac_dev->fman_mac, 45162306a36Sopenharmony_ci (const enet_addr_t *)net_dev->dev_addr); 45262306a36Sopenharmony_ci if (err < 0) { 45362306a36Sopenharmony_ci netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", 45462306a36Sopenharmony_ci err); 45562306a36Sopenharmony_ci /* reverting to previous address */ 45662306a36Sopenharmony_ci eth_mac_addr(net_dev, &old_addr); 45762306a36Sopenharmony_ci 45862306a36Sopenharmony_ci return err; 45962306a36Sopenharmony_ci } 46062306a36Sopenharmony_ci 46162306a36Sopenharmony_ci return 0; 46262306a36Sopenharmony_ci} 46362306a36Sopenharmony_ci 46462306a36Sopenharmony_cistatic void dpaa_set_rx_mode(struct net_device *net_dev) 46562306a36Sopenharmony_ci{ 46662306a36Sopenharmony_ci const struct dpaa_priv *priv; 46762306a36Sopenharmony_ci int err; 46862306a36Sopenharmony_ci 46962306a36Sopenharmony_ci priv = netdev_priv(net_dev); 47062306a36Sopenharmony_ci 47162306a36Sopenharmony_ci if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { 47262306a36Sopenharmony_ci priv->mac_dev->promisc = !priv->mac_dev->promisc; 47362306a36Sopenharmony_ci err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, 47462306a36Sopenharmony_ci priv->mac_dev->promisc); 47562306a36Sopenharmony_ci if (err < 0) 47662306a36Sopenharmony_ci netif_err(priv, drv, net_dev, 47762306a36Sopenharmony_ci "mac_dev->set_promisc() = %d\n", 47862306a36Sopenharmony_ci err); 47962306a36Sopenharmony_ci } 48062306a36Sopenharmony_ci 48162306a36Sopenharmony_ci if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { 48262306a36Sopenharmony_ci priv->mac_dev->allmulti = !priv->mac_dev->allmulti; 48362306a36Sopenharmony_ci err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac, 48462306a36Sopenharmony_ci priv->mac_dev->allmulti); 48562306a36Sopenharmony_ci if (err < 0) 48662306a36Sopenharmony_ci netif_err(priv, drv, net_dev, 48762306a36Sopenharmony_ci "mac_dev->set_allmulti() = %d\n", 48862306a36Sopenharmony_ci err); 48962306a36Sopenharmony_ci } 49062306a36Sopenharmony_ci 49162306a36Sopenharmony_ci err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); 49262306a36Sopenharmony_ci if (err < 0) 49362306a36Sopenharmony_ci netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", 49462306a36Sopenharmony_ci err); 49562306a36Sopenharmony_ci} 49662306a36Sopenharmony_ci 49762306a36Sopenharmony_cistatic struct dpaa_bp *dpaa_bpid2pool(int bpid) 49862306a36Sopenharmony_ci{ 49962306a36Sopenharmony_ci if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS)) 50062306a36Sopenharmony_ci return NULL; 50162306a36Sopenharmony_ci 50262306a36Sopenharmony_ci return dpaa_bp_array[bpid]; 50362306a36Sopenharmony_ci} 50462306a36Sopenharmony_ci 50562306a36Sopenharmony_ci/* checks if this bpool is already allocated */ 50662306a36Sopenharmony_cistatic bool dpaa_bpid2pool_use(int bpid) 50762306a36Sopenharmony_ci{ 50862306a36Sopenharmony_ci if (dpaa_bpid2pool(bpid)) { 50962306a36Sopenharmony_ci refcount_inc(&dpaa_bp_array[bpid]->refs); 51062306a36Sopenharmony_ci return true; 51162306a36Sopenharmony_ci } 51262306a36Sopenharmony_ci 51362306a36Sopenharmony_ci return false; 51462306a36Sopenharmony_ci} 51562306a36Sopenharmony_ci 51662306a36Sopenharmony_ci/* called only once per bpid by dpaa_bp_alloc_pool() */ 51762306a36Sopenharmony_cistatic void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) 51862306a36Sopenharmony_ci{ 51962306a36Sopenharmony_ci dpaa_bp_array[bpid] = dpaa_bp; 52062306a36Sopenharmony_ci refcount_set(&dpaa_bp->refs, 1); 52162306a36Sopenharmony_ci} 52262306a36Sopenharmony_ci 52362306a36Sopenharmony_cistatic int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) 52462306a36Sopenharmony_ci{ 52562306a36Sopenharmony_ci int err; 52662306a36Sopenharmony_ci 52762306a36Sopenharmony_ci if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { 52862306a36Sopenharmony_ci pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n", 52962306a36Sopenharmony_ci __func__); 53062306a36Sopenharmony_ci return -EINVAL; 53162306a36Sopenharmony_ci } 53262306a36Sopenharmony_ci 53362306a36Sopenharmony_ci /* If the pool is already specified, we only create one per bpid */ 53462306a36Sopenharmony_ci if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && 53562306a36Sopenharmony_ci dpaa_bpid2pool_use(dpaa_bp->bpid)) 53662306a36Sopenharmony_ci return 0; 53762306a36Sopenharmony_ci 53862306a36Sopenharmony_ci if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { 53962306a36Sopenharmony_ci dpaa_bp->pool = bman_new_pool(); 54062306a36Sopenharmony_ci if (!dpaa_bp->pool) { 54162306a36Sopenharmony_ci pr_err("%s: bman_new_pool() failed\n", 54262306a36Sopenharmony_ci __func__); 54362306a36Sopenharmony_ci return -ENODEV; 54462306a36Sopenharmony_ci } 54562306a36Sopenharmony_ci 54662306a36Sopenharmony_ci dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); 54762306a36Sopenharmony_ci } 54862306a36Sopenharmony_ci 54962306a36Sopenharmony_ci if (dpaa_bp->seed_cb) { 55062306a36Sopenharmony_ci err = dpaa_bp->seed_cb(dpaa_bp); 55162306a36Sopenharmony_ci if (err) 55262306a36Sopenharmony_ci goto pool_seed_failed; 55362306a36Sopenharmony_ci } 55462306a36Sopenharmony_ci 55562306a36Sopenharmony_ci dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); 55662306a36Sopenharmony_ci 55762306a36Sopenharmony_ci return 0; 55862306a36Sopenharmony_ci 55962306a36Sopenharmony_cipool_seed_failed: 56062306a36Sopenharmony_ci pr_err("%s: pool seeding failed\n", __func__); 56162306a36Sopenharmony_ci bman_free_pool(dpaa_bp->pool); 56262306a36Sopenharmony_ci 56362306a36Sopenharmony_ci return err; 56462306a36Sopenharmony_ci} 56562306a36Sopenharmony_ci 56662306a36Sopenharmony_ci/* remove and free all the buffers from the given buffer pool */ 56762306a36Sopenharmony_cistatic void dpaa_bp_drain(struct dpaa_bp *bp) 56862306a36Sopenharmony_ci{ 56962306a36Sopenharmony_ci u8 num = 8; 57062306a36Sopenharmony_ci int ret; 57162306a36Sopenharmony_ci 57262306a36Sopenharmony_ci do { 57362306a36Sopenharmony_ci struct bm_buffer bmb[8]; 57462306a36Sopenharmony_ci int i; 57562306a36Sopenharmony_ci 57662306a36Sopenharmony_ci ret = bman_acquire(bp->pool, bmb, num); 57762306a36Sopenharmony_ci if (ret < 0) { 57862306a36Sopenharmony_ci if (num == 8) { 57962306a36Sopenharmony_ci /* we have less than 8 buffers left; 58062306a36Sopenharmony_ci * drain them one by one 58162306a36Sopenharmony_ci */ 58262306a36Sopenharmony_ci num = 1; 58362306a36Sopenharmony_ci ret = 1; 58462306a36Sopenharmony_ci continue; 58562306a36Sopenharmony_ci } else { 58662306a36Sopenharmony_ci /* Pool is fully drained */ 58762306a36Sopenharmony_ci break; 58862306a36Sopenharmony_ci } 58962306a36Sopenharmony_ci } 59062306a36Sopenharmony_ci 59162306a36Sopenharmony_ci if (bp->free_buf_cb) 59262306a36Sopenharmony_ci for (i = 0; i < num; i++) 59362306a36Sopenharmony_ci bp->free_buf_cb(bp, &bmb[i]); 59462306a36Sopenharmony_ci } while (ret > 0); 59562306a36Sopenharmony_ci} 59662306a36Sopenharmony_ci 59762306a36Sopenharmony_cistatic void dpaa_bp_free(struct dpaa_bp *dpaa_bp) 59862306a36Sopenharmony_ci{ 59962306a36Sopenharmony_ci struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); 60062306a36Sopenharmony_ci 60162306a36Sopenharmony_ci /* the mapping between bpid and dpaa_bp is done very late in the 60262306a36Sopenharmony_ci * allocation procedure; if something failed before the mapping, the bp 60362306a36Sopenharmony_ci * was not configured, therefore we don't need the below instructions 60462306a36Sopenharmony_ci */ 60562306a36Sopenharmony_ci if (!bp) 60662306a36Sopenharmony_ci return; 60762306a36Sopenharmony_ci 60862306a36Sopenharmony_ci if (!refcount_dec_and_test(&bp->refs)) 60962306a36Sopenharmony_ci return; 61062306a36Sopenharmony_ci 61162306a36Sopenharmony_ci if (bp->free_buf_cb) 61262306a36Sopenharmony_ci dpaa_bp_drain(bp); 61362306a36Sopenharmony_ci 61462306a36Sopenharmony_ci dpaa_bp_array[bp->bpid] = NULL; 61562306a36Sopenharmony_ci bman_free_pool(bp->pool); 61662306a36Sopenharmony_ci} 61762306a36Sopenharmony_ci 61862306a36Sopenharmony_cistatic void dpaa_bps_free(struct dpaa_priv *priv) 61962306a36Sopenharmony_ci{ 62062306a36Sopenharmony_ci dpaa_bp_free(priv->dpaa_bp); 62162306a36Sopenharmony_ci} 62262306a36Sopenharmony_ci 62362306a36Sopenharmony_ci/* Use multiple WQs for FQ assignment: 62462306a36Sopenharmony_ci * - Tx Confirmation queues go to WQ1. 62562306a36Sopenharmony_ci * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance 62662306a36Sopenharmony_ci * to be scheduled, in case there are many more FQs in WQ6). 62762306a36Sopenharmony_ci * - Rx Default goes to WQ6. 62862306a36Sopenharmony_ci * - Tx queues go to different WQs depending on their priority. Equal 62962306a36Sopenharmony_ci * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and 63062306a36Sopenharmony_ci * WQ0 (highest priority). 63162306a36Sopenharmony_ci * This ensures that Tx-confirmed buffers are timely released. In particular, 63262306a36Sopenharmony_ci * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they 63362306a36Sopenharmony_ci * are greatly outnumbered by other FQs in the system, while 63462306a36Sopenharmony_ci * dequeue scheduling is round-robin. 63562306a36Sopenharmony_ci */ 63662306a36Sopenharmony_cistatic inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) 63762306a36Sopenharmony_ci{ 63862306a36Sopenharmony_ci switch (fq->fq_type) { 63962306a36Sopenharmony_ci case FQ_TYPE_TX_CONFIRM: 64062306a36Sopenharmony_ci case FQ_TYPE_TX_CONF_MQ: 64162306a36Sopenharmony_ci fq->wq = 1; 64262306a36Sopenharmony_ci break; 64362306a36Sopenharmony_ci case FQ_TYPE_RX_ERROR: 64462306a36Sopenharmony_ci case FQ_TYPE_TX_ERROR: 64562306a36Sopenharmony_ci fq->wq = 5; 64662306a36Sopenharmony_ci break; 64762306a36Sopenharmony_ci case FQ_TYPE_RX_DEFAULT: 64862306a36Sopenharmony_ci case FQ_TYPE_RX_PCD: 64962306a36Sopenharmony_ci fq->wq = 6; 65062306a36Sopenharmony_ci break; 65162306a36Sopenharmony_ci case FQ_TYPE_TX: 65262306a36Sopenharmony_ci switch (idx / DPAA_TC_TXQ_NUM) { 65362306a36Sopenharmony_ci case 0: 65462306a36Sopenharmony_ci /* Low priority (best effort) */ 65562306a36Sopenharmony_ci fq->wq = 6; 65662306a36Sopenharmony_ci break; 65762306a36Sopenharmony_ci case 1: 65862306a36Sopenharmony_ci /* Medium priority */ 65962306a36Sopenharmony_ci fq->wq = 2; 66062306a36Sopenharmony_ci break; 66162306a36Sopenharmony_ci case 2: 66262306a36Sopenharmony_ci /* High priority */ 66362306a36Sopenharmony_ci fq->wq = 1; 66462306a36Sopenharmony_ci break; 66562306a36Sopenharmony_ci case 3: 66662306a36Sopenharmony_ci /* Very high priority */ 66762306a36Sopenharmony_ci fq->wq = 0; 66862306a36Sopenharmony_ci break; 66962306a36Sopenharmony_ci default: 67062306a36Sopenharmony_ci WARN(1, "Too many TX FQs: more than %d!\n", 67162306a36Sopenharmony_ci DPAA_ETH_TXQ_NUM); 67262306a36Sopenharmony_ci } 67362306a36Sopenharmony_ci break; 67462306a36Sopenharmony_ci default: 67562306a36Sopenharmony_ci WARN(1, "Invalid FQ type %d for FQID %d!\n", 67662306a36Sopenharmony_ci fq->fq_type, fq->fqid); 67762306a36Sopenharmony_ci } 67862306a36Sopenharmony_ci} 67962306a36Sopenharmony_ci 68062306a36Sopenharmony_cistatic struct dpaa_fq *dpaa_fq_alloc(struct device *dev, 68162306a36Sopenharmony_ci u32 start, u32 count, 68262306a36Sopenharmony_ci struct list_head *list, 68362306a36Sopenharmony_ci enum dpaa_fq_type fq_type) 68462306a36Sopenharmony_ci{ 68562306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq; 68662306a36Sopenharmony_ci int i; 68762306a36Sopenharmony_ci 68862306a36Sopenharmony_ci dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq), 68962306a36Sopenharmony_ci GFP_KERNEL); 69062306a36Sopenharmony_ci if (!dpaa_fq) 69162306a36Sopenharmony_ci return NULL; 69262306a36Sopenharmony_ci 69362306a36Sopenharmony_ci for (i = 0; i < count; i++) { 69462306a36Sopenharmony_ci dpaa_fq[i].fq_type = fq_type; 69562306a36Sopenharmony_ci dpaa_fq[i].fqid = start ? start + i : 0; 69662306a36Sopenharmony_ci list_add_tail(&dpaa_fq[i].list, list); 69762306a36Sopenharmony_ci } 69862306a36Sopenharmony_ci 69962306a36Sopenharmony_ci for (i = 0; i < count; i++) 70062306a36Sopenharmony_ci dpaa_assign_wq(dpaa_fq + i, i); 70162306a36Sopenharmony_ci 70262306a36Sopenharmony_ci return dpaa_fq; 70362306a36Sopenharmony_ci} 70462306a36Sopenharmony_ci 70562306a36Sopenharmony_cistatic int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, 70662306a36Sopenharmony_ci struct fm_port_fqs *port_fqs) 70762306a36Sopenharmony_ci{ 70862306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq; 70962306a36Sopenharmony_ci u32 fq_base, fq_base_aligned, i; 71062306a36Sopenharmony_ci 71162306a36Sopenharmony_ci dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR); 71262306a36Sopenharmony_ci if (!dpaa_fq) 71362306a36Sopenharmony_ci goto fq_alloc_failed; 71462306a36Sopenharmony_ci 71562306a36Sopenharmony_ci port_fqs->rx_errq = &dpaa_fq[0]; 71662306a36Sopenharmony_ci 71762306a36Sopenharmony_ci dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT); 71862306a36Sopenharmony_ci if (!dpaa_fq) 71962306a36Sopenharmony_ci goto fq_alloc_failed; 72062306a36Sopenharmony_ci 72162306a36Sopenharmony_ci port_fqs->rx_defq = &dpaa_fq[0]; 72262306a36Sopenharmony_ci 72362306a36Sopenharmony_ci /* the PCD FQIDs range needs to be aligned for correct operation */ 72462306a36Sopenharmony_ci if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM)) 72562306a36Sopenharmony_ci goto fq_alloc_failed; 72662306a36Sopenharmony_ci 72762306a36Sopenharmony_ci fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM); 72862306a36Sopenharmony_ci 72962306a36Sopenharmony_ci for (i = fq_base; i < fq_base_aligned; i++) 73062306a36Sopenharmony_ci qman_release_fqid(i); 73162306a36Sopenharmony_ci 73262306a36Sopenharmony_ci for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM; 73362306a36Sopenharmony_ci i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++) 73462306a36Sopenharmony_ci qman_release_fqid(i); 73562306a36Sopenharmony_ci 73662306a36Sopenharmony_ci dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM, 73762306a36Sopenharmony_ci list, FQ_TYPE_RX_PCD); 73862306a36Sopenharmony_ci if (!dpaa_fq) 73962306a36Sopenharmony_ci goto fq_alloc_failed; 74062306a36Sopenharmony_ci 74162306a36Sopenharmony_ci port_fqs->rx_pcdq = &dpaa_fq[0]; 74262306a36Sopenharmony_ci 74362306a36Sopenharmony_ci if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) 74462306a36Sopenharmony_ci goto fq_alloc_failed; 74562306a36Sopenharmony_ci 74662306a36Sopenharmony_ci dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); 74762306a36Sopenharmony_ci if (!dpaa_fq) 74862306a36Sopenharmony_ci goto fq_alloc_failed; 74962306a36Sopenharmony_ci 75062306a36Sopenharmony_ci port_fqs->tx_errq = &dpaa_fq[0]; 75162306a36Sopenharmony_ci 75262306a36Sopenharmony_ci dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM); 75362306a36Sopenharmony_ci if (!dpaa_fq) 75462306a36Sopenharmony_ci goto fq_alloc_failed; 75562306a36Sopenharmony_ci 75662306a36Sopenharmony_ci port_fqs->tx_defq = &dpaa_fq[0]; 75762306a36Sopenharmony_ci 75862306a36Sopenharmony_ci if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) 75962306a36Sopenharmony_ci goto fq_alloc_failed; 76062306a36Sopenharmony_ci 76162306a36Sopenharmony_ci return 0; 76262306a36Sopenharmony_ci 76362306a36Sopenharmony_cifq_alloc_failed: 76462306a36Sopenharmony_ci dev_err(dev, "dpaa_fq_alloc() failed\n"); 76562306a36Sopenharmony_ci return -ENOMEM; 76662306a36Sopenharmony_ci} 76762306a36Sopenharmony_ci 76862306a36Sopenharmony_cistatic u32 rx_pool_channel; 76962306a36Sopenharmony_cistatic DEFINE_SPINLOCK(rx_pool_channel_init); 77062306a36Sopenharmony_ci 77162306a36Sopenharmony_cistatic int dpaa_get_channel(void) 77262306a36Sopenharmony_ci{ 77362306a36Sopenharmony_ci spin_lock(&rx_pool_channel_init); 77462306a36Sopenharmony_ci if (!rx_pool_channel) { 77562306a36Sopenharmony_ci u32 pool; 77662306a36Sopenharmony_ci int ret; 77762306a36Sopenharmony_ci 77862306a36Sopenharmony_ci ret = qman_alloc_pool(&pool); 77962306a36Sopenharmony_ci 78062306a36Sopenharmony_ci if (!ret) 78162306a36Sopenharmony_ci rx_pool_channel = pool; 78262306a36Sopenharmony_ci } 78362306a36Sopenharmony_ci spin_unlock(&rx_pool_channel_init); 78462306a36Sopenharmony_ci if (!rx_pool_channel) 78562306a36Sopenharmony_ci return -ENOMEM; 78662306a36Sopenharmony_ci return rx_pool_channel; 78762306a36Sopenharmony_ci} 78862306a36Sopenharmony_ci 78962306a36Sopenharmony_cistatic void dpaa_release_channel(void) 79062306a36Sopenharmony_ci{ 79162306a36Sopenharmony_ci qman_release_pool(rx_pool_channel); 79262306a36Sopenharmony_ci} 79362306a36Sopenharmony_ci 79462306a36Sopenharmony_cistatic void dpaa_eth_add_channel(u16 channel, struct device *dev) 79562306a36Sopenharmony_ci{ 79662306a36Sopenharmony_ci u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); 79762306a36Sopenharmony_ci const cpumask_t *cpus = qman_affine_cpus(); 79862306a36Sopenharmony_ci struct qman_portal *portal; 79962306a36Sopenharmony_ci int cpu; 80062306a36Sopenharmony_ci 80162306a36Sopenharmony_ci for_each_cpu_and(cpu, cpus, cpu_online_mask) { 80262306a36Sopenharmony_ci portal = qman_get_affine_portal(cpu); 80362306a36Sopenharmony_ci qman_p_static_dequeue_add(portal, pool); 80462306a36Sopenharmony_ci qman_start_using_portal(portal, dev); 80562306a36Sopenharmony_ci } 80662306a36Sopenharmony_ci} 80762306a36Sopenharmony_ci 80862306a36Sopenharmony_ci/* Congestion group state change notification callback. 80962306a36Sopenharmony_ci * Stops the device's egress queues while they are congested and 81062306a36Sopenharmony_ci * wakes them upon exiting congested state. 81162306a36Sopenharmony_ci * Also updates some CGR-related stats. 81262306a36Sopenharmony_ci */ 81362306a36Sopenharmony_cistatic void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, 81462306a36Sopenharmony_ci int congested) 81562306a36Sopenharmony_ci{ 81662306a36Sopenharmony_ci struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr, 81762306a36Sopenharmony_ci struct dpaa_priv, cgr_data.cgr); 81862306a36Sopenharmony_ci 81962306a36Sopenharmony_ci if (congested) { 82062306a36Sopenharmony_ci priv->cgr_data.congestion_start_jiffies = jiffies; 82162306a36Sopenharmony_ci netif_tx_stop_all_queues(priv->net_dev); 82262306a36Sopenharmony_ci priv->cgr_data.cgr_congested_count++; 82362306a36Sopenharmony_ci } else { 82462306a36Sopenharmony_ci priv->cgr_data.congested_jiffies += 82562306a36Sopenharmony_ci (jiffies - priv->cgr_data.congestion_start_jiffies); 82662306a36Sopenharmony_ci netif_tx_wake_all_queues(priv->net_dev); 82762306a36Sopenharmony_ci } 82862306a36Sopenharmony_ci} 82962306a36Sopenharmony_ci 83062306a36Sopenharmony_cistatic int dpaa_eth_cgr_init(struct dpaa_priv *priv) 83162306a36Sopenharmony_ci{ 83262306a36Sopenharmony_ci struct qm_mcc_initcgr initcgr; 83362306a36Sopenharmony_ci u32 cs_th; 83462306a36Sopenharmony_ci int err; 83562306a36Sopenharmony_ci 83662306a36Sopenharmony_ci err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); 83762306a36Sopenharmony_ci if (err < 0) { 83862306a36Sopenharmony_ci if (netif_msg_drv(priv)) 83962306a36Sopenharmony_ci pr_err("%s: Error %d allocating CGR ID\n", 84062306a36Sopenharmony_ci __func__, err); 84162306a36Sopenharmony_ci goto out_error; 84262306a36Sopenharmony_ci } 84362306a36Sopenharmony_ci priv->cgr_data.cgr.cb = dpaa_eth_cgscn; 84462306a36Sopenharmony_ci 84562306a36Sopenharmony_ci /* Enable Congestion State Change Notifications and CS taildrop */ 84662306a36Sopenharmony_ci memset(&initcgr, 0, sizeof(initcgr)); 84762306a36Sopenharmony_ci initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES); 84862306a36Sopenharmony_ci initcgr.cgr.cscn_en = QM_CGR_EN; 84962306a36Sopenharmony_ci 85062306a36Sopenharmony_ci /* Set different thresholds based on the configured MAC speed. 85162306a36Sopenharmony_ci * This may turn suboptimal if the MAC is reconfigured at another 85262306a36Sopenharmony_ci * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up 85362306a36Sopenharmony_ci * callback. 85462306a36Sopenharmony_ci */ 85562306a36Sopenharmony_ci if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD) 85662306a36Sopenharmony_ci cs_th = DPAA_CS_THRESHOLD_10G; 85762306a36Sopenharmony_ci else 85862306a36Sopenharmony_ci cs_th = DPAA_CS_THRESHOLD_1G; 85962306a36Sopenharmony_ci qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 86062306a36Sopenharmony_ci 86162306a36Sopenharmony_ci initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); 86262306a36Sopenharmony_ci initcgr.cgr.cstd_en = QM_CGR_EN; 86362306a36Sopenharmony_ci 86462306a36Sopenharmony_ci err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, 86562306a36Sopenharmony_ci &initcgr); 86662306a36Sopenharmony_ci if (err < 0) { 86762306a36Sopenharmony_ci if (netif_msg_drv(priv)) 86862306a36Sopenharmony_ci pr_err("%s: Error %d creating CGR with ID %d\n", 86962306a36Sopenharmony_ci __func__, err, priv->cgr_data.cgr.cgrid); 87062306a36Sopenharmony_ci qman_release_cgrid(priv->cgr_data.cgr.cgrid); 87162306a36Sopenharmony_ci goto out_error; 87262306a36Sopenharmony_ci } 87362306a36Sopenharmony_ci if (netif_msg_drv(priv)) 87462306a36Sopenharmony_ci pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", 87562306a36Sopenharmony_ci priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, 87662306a36Sopenharmony_ci priv->cgr_data.cgr.chan); 87762306a36Sopenharmony_ci 87862306a36Sopenharmony_ciout_error: 87962306a36Sopenharmony_ci return err; 88062306a36Sopenharmony_ci} 88162306a36Sopenharmony_ci 88262306a36Sopenharmony_cistatic void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed) 88362306a36Sopenharmony_ci{ 88462306a36Sopenharmony_ci struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev); 88562306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 88662306a36Sopenharmony_ci struct qm_mcc_initcgr opts = { }; 88762306a36Sopenharmony_ci u32 cs_th; 88862306a36Sopenharmony_ci int err; 88962306a36Sopenharmony_ci 89062306a36Sopenharmony_ci opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); 89162306a36Sopenharmony_ci switch (speed) { 89262306a36Sopenharmony_ci case SPEED_10000: 89362306a36Sopenharmony_ci cs_th = DPAA_CS_THRESHOLD_10G; 89462306a36Sopenharmony_ci break; 89562306a36Sopenharmony_ci case SPEED_1000: 89662306a36Sopenharmony_ci default: 89762306a36Sopenharmony_ci cs_th = DPAA_CS_THRESHOLD_1G; 89862306a36Sopenharmony_ci break; 89962306a36Sopenharmony_ci } 90062306a36Sopenharmony_ci qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1); 90162306a36Sopenharmony_ci 90262306a36Sopenharmony_ci err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts); 90362306a36Sopenharmony_ci if (err) 90462306a36Sopenharmony_ci netdev_err(net_dev, "could not update speed: %d\n", err); 90562306a36Sopenharmony_ci} 90662306a36Sopenharmony_ci 90762306a36Sopenharmony_cistatic inline void dpaa_setup_ingress(const struct dpaa_priv *priv, 90862306a36Sopenharmony_ci struct dpaa_fq *fq, 90962306a36Sopenharmony_ci const struct qman_fq *template) 91062306a36Sopenharmony_ci{ 91162306a36Sopenharmony_ci fq->fq_base = *template; 91262306a36Sopenharmony_ci fq->net_dev = priv->net_dev; 91362306a36Sopenharmony_ci 91462306a36Sopenharmony_ci fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; 91562306a36Sopenharmony_ci fq->channel = priv->channel; 91662306a36Sopenharmony_ci} 91762306a36Sopenharmony_ci 91862306a36Sopenharmony_cistatic inline void dpaa_setup_egress(const struct dpaa_priv *priv, 91962306a36Sopenharmony_ci struct dpaa_fq *fq, 92062306a36Sopenharmony_ci struct fman_port *port, 92162306a36Sopenharmony_ci const struct qman_fq *template) 92262306a36Sopenharmony_ci{ 92362306a36Sopenharmony_ci fq->fq_base = *template; 92462306a36Sopenharmony_ci fq->net_dev = priv->net_dev; 92562306a36Sopenharmony_ci 92662306a36Sopenharmony_ci if (port) { 92762306a36Sopenharmony_ci fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; 92862306a36Sopenharmony_ci fq->channel = (u16)fman_port_get_qman_channel_id(port); 92962306a36Sopenharmony_ci } else { 93062306a36Sopenharmony_ci fq->flags = QMAN_FQ_FLAG_NO_MODIFY; 93162306a36Sopenharmony_ci } 93262306a36Sopenharmony_ci} 93362306a36Sopenharmony_ci 93462306a36Sopenharmony_cistatic void dpaa_fq_setup(struct dpaa_priv *priv, 93562306a36Sopenharmony_ci const struct dpaa_fq_cbs *fq_cbs, 93662306a36Sopenharmony_ci struct fman_port *tx_port) 93762306a36Sopenharmony_ci{ 93862306a36Sopenharmony_ci int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; 93962306a36Sopenharmony_ci const cpumask_t *affine_cpus = qman_affine_cpus(); 94062306a36Sopenharmony_ci u16 channels[NR_CPUS]; 94162306a36Sopenharmony_ci struct dpaa_fq *fq; 94262306a36Sopenharmony_ci 94362306a36Sopenharmony_ci for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) 94462306a36Sopenharmony_ci channels[num_portals++] = qman_affine_channel(cpu); 94562306a36Sopenharmony_ci 94662306a36Sopenharmony_ci if (num_portals == 0) 94762306a36Sopenharmony_ci dev_err(priv->net_dev->dev.parent, 94862306a36Sopenharmony_ci "No Qman software (affine) channels found\n"); 94962306a36Sopenharmony_ci 95062306a36Sopenharmony_ci /* Initialize each FQ in the list */ 95162306a36Sopenharmony_ci list_for_each_entry(fq, &priv->dpaa_fq_list, list) { 95262306a36Sopenharmony_ci switch (fq->fq_type) { 95362306a36Sopenharmony_ci case FQ_TYPE_RX_DEFAULT: 95462306a36Sopenharmony_ci dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); 95562306a36Sopenharmony_ci break; 95662306a36Sopenharmony_ci case FQ_TYPE_RX_ERROR: 95762306a36Sopenharmony_ci dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); 95862306a36Sopenharmony_ci break; 95962306a36Sopenharmony_ci case FQ_TYPE_RX_PCD: 96062306a36Sopenharmony_ci if (!num_portals) 96162306a36Sopenharmony_ci continue; 96262306a36Sopenharmony_ci dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); 96362306a36Sopenharmony_ci fq->channel = channels[portal_cnt++ % num_portals]; 96462306a36Sopenharmony_ci break; 96562306a36Sopenharmony_ci case FQ_TYPE_TX: 96662306a36Sopenharmony_ci dpaa_setup_egress(priv, fq, tx_port, 96762306a36Sopenharmony_ci &fq_cbs->egress_ern); 96862306a36Sopenharmony_ci /* If we have more Tx queues than the number of cores, 96962306a36Sopenharmony_ci * just ignore the extra ones. 97062306a36Sopenharmony_ci */ 97162306a36Sopenharmony_ci if (egress_cnt < DPAA_ETH_TXQ_NUM) 97262306a36Sopenharmony_ci priv->egress_fqs[egress_cnt++] = &fq->fq_base; 97362306a36Sopenharmony_ci break; 97462306a36Sopenharmony_ci case FQ_TYPE_TX_CONF_MQ: 97562306a36Sopenharmony_ci priv->conf_fqs[conf_cnt++] = &fq->fq_base; 97662306a36Sopenharmony_ci fallthrough; 97762306a36Sopenharmony_ci case FQ_TYPE_TX_CONFIRM: 97862306a36Sopenharmony_ci dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); 97962306a36Sopenharmony_ci break; 98062306a36Sopenharmony_ci case FQ_TYPE_TX_ERROR: 98162306a36Sopenharmony_ci dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); 98262306a36Sopenharmony_ci break; 98362306a36Sopenharmony_ci default: 98462306a36Sopenharmony_ci dev_warn(priv->net_dev->dev.parent, 98562306a36Sopenharmony_ci "Unknown FQ type detected!\n"); 98662306a36Sopenharmony_ci break; 98762306a36Sopenharmony_ci } 98862306a36Sopenharmony_ci } 98962306a36Sopenharmony_ci 99062306a36Sopenharmony_ci /* Make sure all CPUs receive a corresponding Tx queue. */ 99162306a36Sopenharmony_ci while (egress_cnt < DPAA_ETH_TXQ_NUM) { 99262306a36Sopenharmony_ci list_for_each_entry(fq, &priv->dpaa_fq_list, list) { 99362306a36Sopenharmony_ci if (fq->fq_type != FQ_TYPE_TX) 99462306a36Sopenharmony_ci continue; 99562306a36Sopenharmony_ci priv->egress_fqs[egress_cnt++] = &fq->fq_base; 99662306a36Sopenharmony_ci if (egress_cnt == DPAA_ETH_TXQ_NUM) 99762306a36Sopenharmony_ci break; 99862306a36Sopenharmony_ci } 99962306a36Sopenharmony_ci } 100062306a36Sopenharmony_ci} 100162306a36Sopenharmony_ci 100262306a36Sopenharmony_cistatic inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, 100362306a36Sopenharmony_ci struct qman_fq *tx_fq) 100462306a36Sopenharmony_ci{ 100562306a36Sopenharmony_ci int i; 100662306a36Sopenharmony_ci 100762306a36Sopenharmony_ci for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) 100862306a36Sopenharmony_ci if (priv->egress_fqs[i] == tx_fq) 100962306a36Sopenharmony_ci return i; 101062306a36Sopenharmony_ci 101162306a36Sopenharmony_ci return -EINVAL; 101262306a36Sopenharmony_ci} 101362306a36Sopenharmony_ci 101462306a36Sopenharmony_cistatic int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) 101562306a36Sopenharmony_ci{ 101662306a36Sopenharmony_ci const struct dpaa_priv *priv; 101762306a36Sopenharmony_ci struct qman_fq *confq = NULL; 101862306a36Sopenharmony_ci struct qm_mcc_initfq initfq; 101962306a36Sopenharmony_ci struct device *dev; 102062306a36Sopenharmony_ci struct qman_fq *fq; 102162306a36Sopenharmony_ci int queue_id; 102262306a36Sopenharmony_ci int err; 102362306a36Sopenharmony_ci 102462306a36Sopenharmony_ci priv = netdev_priv(dpaa_fq->net_dev); 102562306a36Sopenharmony_ci dev = dpaa_fq->net_dev->dev.parent; 102662306a36Sopenharmony_ci 102762306a36Sopenharmony_ci if (dpaa_fq->fqid == 0) 102862306a36Sopenharmony_ci dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; 102962306a36Sopenharmony_ci 103062306a36Sopenharmony_ci dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); 103162306a36Sopenharmony_ci 103262306a36Sopenharmony_ci err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); 103362306a36Sopenharmony_ci if (err) { 103462306a36Sopenharmony_ci dev_err(dev, "qman_create_fq() failed\n"); 103562306a36Sopenharmony_ci return err; 103662306a36Sopenharmony_ci } 103762306a36Sopenharmony_ci fq = &dpaa_fq->fq_base; 103862306a36Sopenharmony_ci 103962306a36Sopenharmony_ci if (dpaa_fq->init) { 104062306a36Sopenharmony_ci memset(&initfq, 0, sizeof(initfq)); 104162306a36Sopenharmony_ci 104262306a36Sopenharmony_ci initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL); 104362306a36Sopenharmony_ci /* Note: we may get to keep an empty FQ in cache */ 104462306a36Sopenharmony_ci initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE); 104562306a36Sopenharmony_ci 104662306a36Sopenharmony_ci /* Try to reduce the number of portal interrupts for 104762306a36Sopenharmony_ci * Tx Confirmation FQs. 104862306a36Sopenharmony_ci */ 104962306a36Sopenharmony_ci if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) 105062306a36Sopenharmony_ci initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK); 105162306a36Sopenharmony_ci 105262306a36Sopenharmony_ci /* FQ placement */ 105362306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ); 105462306a36Sopenharmony_ci 105562306a36Sopenharmony_ci qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); 105662306a36Sopenharmony_ci 105762306a36Sopenharmony_ci /* Put all egress queues in a congestion group of their own. 105862306a36Sopenharmony_ci * Sensu stricto, the Tx confirmation queues are Rx FQs, 105962306a36Sopenharmony_ci * rather than Tx - but they nonetheless account for the 106062306a36Sopenharmony_ci * memory footprint on behalf of egress traffic. We therefore 106162306a36Sopenharmony_ci * place them in the netdev's CGR, along with the Tx FQs. 106262306a36Sopenharmony_ci */ 106362306a36Sopenharmony_ci if (dpaa_fq->fq_type == FQ_TYPE_TX || 106462306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || 106562306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { 106662306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); 106762306a36Sopenharmony_ci initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); 106862306a36Sopenharmony_ci initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; 106962306a36Sopenharmony_ci /* Set a fixed overhead accounting, in an attempt to 107062306a36Sopenharmony_ci * reduce the impact of fixed-size skb shells and the 107162306a36Sopenharmony_ci * driver's needed headroom on system memory. This is 107262306a36Sopenharmony_ci * especially the case when the egress traffic is 107362306a36Sopenharmony_ci * composed of small datagrams. 107462306a36Sopenharmony_ci * Unfortunately, QMan's OAL value is capped to an 107562306a36Sopenharmony_ci * insufficient value, but even that is better than 107662306a36Sopenharmony_ci * no overhead accounting at all. 107762306a36Sopenharmony_ci */ 107862306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); 107962306a36Sopenharmony_ci qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 108062306a36Sopenharmony_ci qm_fqd_set_oal(&initfq.fqd, 108162306a36Sopenharmony_ci min(sizeof(struct sk_buff) + 108262306a36Sopenharmony_ci priv->tx_headroom, 108362306a36Sopenharmony_ci (size_t)FSL_QMAN_MAX_OAL)); 108462306a36Sopenharmony_ci } 108562306a36Sopenharmony_ci 108662306a36Sopenharmony_ci if (td_enable) { 108762306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH); 108862306a36Sopenharmony_ci qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); 108962306a36Sopenharmony_ci initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE); 109062306a36Sopenharmony_ci } 109162306a36Sopenharmony_ci 109262306a36Sopenharmony_ci if (dpaa_fq->fq_type == FQ_TYPE_TX) { 109362306a36Sopenharmony_ci queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); 109462306a36Sopenharmony_ci if (queue_id >= 0) 109562306a36Sopenharmony_ci confq = priv->conf_fqs[queue_id]; 109662306a36Sopenharmony_ci if (confq) { 109762306a36Sopenharmony_ci initfq.we_mask |= 109862306a36Sopenharmony_ci cpu_to_be16(QM_INITFQ_WE_CONTEXTA); 109962306a36Sopenharmony_ci /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) 110062306a36Sopenharmony_ci * A2V=1 (contextA A2 field is valid) 110162306a36Sopenharmony_ci * A0V=1 (contextA A0 field is valid) 110262306a36Sopenharmony_ci * B0V=1 (contextB field is valid) 110362306a36Sopenharmony_ci * ContextA A2: EBD=1 (deallocate buffers inside FMan) 110462306a36Sopenharmony_ci * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) 110562306a36Sopenharmony_ci */ 110662306a36Sopenharmony_ci qm_fqd_context_a_set64(&initfq.fqd, 110762306a36Sopenharmony_ci 0x1e00000080000000ULL); 110862306a36Sopenharmony_ci } 110962306a36Sopenharmony_ci } 111062306a36Sopenharmony_ci 111162306a36Sopenharmony_ci /* Put all the ingress queues in our "ingress CGR". */ 111262306a36Sopenharmony_ci if (priv->use_ingress_cgr && 111362306a36Sopenharmony_ci (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || 111462306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_RX_ERROR || 111562306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) { 111662306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID); 111762306a36Sopenharmony_ci initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); 111862306a36Sopenharmony_ci initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; 111962306a36Sopenharmony_ci /* Set a fixed overhead accounting, just like for the 112062306a36Sopenharmony_ci * egress CGR. 112162306a36Sopenharmony_ci */ 112262306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC); 112362306a36Sopenharmony_ci qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); 112462306a36Sopenharmony_ci qm_fqd_set_oal(&initfq.fqd, 112562306a36Sopenharmony_ci min(sizeof(struct sk_buff) + 112662306a36Sopenharmony_ci priv->tx_headroom, 112762306a36Sopenharmony_ci (size_t)FSL_QMAN_MAX_OAL)); 112862306a36Sopenharmony_ci } 112962306a36Sopenharmony_ci 113062306a36Sopenharmony_ci /* Initialization common to all ingress queues */ 113162306a36Sopenharmony_ci if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { 113262306a36Sopenharmony_ci initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA); 113362306a36Sopenharmony_ci initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE | 113462306a36Sopenharmony_ci QM_FQCTRL_CTXASTASHING); 113562306a36Sopenharmony_ci initfq.fqd.context_a.stashing.exclusive = 113662306a36Sopenharmony_ci QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | 113762306a36Sopenharmony_ci QM_STASHING_EXCL_ANNOTATION; 113862306a36Sopenharmony_ci qm_fqd_set_stashing(&initfq.fqd, 1, 2, 113962306a36Sopenharmony_ci DIV_ROUND_UP(sizeof(struct qman_fq), 114062306a36Sopenharmony_ci 64)); 114162306a36Sopenharmony_ci } 114262306a36Sopenharmony_ci 114362306a36Sopenharmony_ci err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); 114462306a36Sopenharmony_ci if (err < 0) { 114562306a36Sopenharmony_ci dev_err(dev, "qman_init_fq(%u) = %d\n", 114662306a36Sopenharmony_ci qman_fq_fqid(fq), err); 114762306a36Sopenharmony_ci qman_destroy_fq(fq); 114862306a36Sopenharmony_ci return err; 114962306a36Sopenharmony_ci } 115062306a36Sopenharmony_ci } 115162306a36Sopenharmony_ci 115262306a36Sopenharmony_ci dpaa_fq->fqid = qman_fq_fqid(fq); 115362306a36Sopenharmony_ci 115462306a36Sopenharmony_ci if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || 115562306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_RX_PCD) { 115662306a36Sopenharmony_ci err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev, 115762306a36Sopenharmony_ci dpaa_fq->fqid, 0); 115862306a36Sopenharmony_ci if (err) { 115962306a36Sopenharmony_ci dev_err(dev, "xdp_rxq_info_reg() = %d\n", err); 116062306a36Sopenharmony_ci return err; 116162306a36Sopenharmony_ci } 116262306a36Sopenharmony_ci 116362306a36Sopenharmony_ci err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq, 116462306a36Sopenharmony_ci MEM_TYPE_PAGE_ORDER0, NULL); 116562306a36Sopenharmony_ci if (err) { 116662306a36Sopenharmony_ci dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n", 116762306a36Sopenharmony_ci err); 116862306a36Sopenharmony_ci xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); 116962306a36Sopenharmony_ci return err; 117062306a36Sopenharmony_ci } 117162306a36Sopenharmony_ci } 117262306a36Sopenharmony_ci 117362306a36Sopenharmony_ci return 0; 117462306a36Sopenharmony_ci} 117562306a36Sopenharmony_ci 117662306a36Sopenharmony_cistatic int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq) 117762306a36Sopenharmony_ci{ 117862306a36Sopenharmony_ci const struct dpaa_priv *priv; 117962306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq; 118062306a36Sopenharmony_ci int err, error; 118162306a36Sopenharmony_ci 118262306a36Sopenharmony_ci err = 0; 118362306a36Sopenharmony_ci 118462306a36Sopenharmony_ci dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 118562306a36Sopenharmony_ci priv = netdev_priv(dpaa_fq->net_dev); 118662306a36Sopenharmony_ci 118762306a36Sopenharmony_ci if (dpaa_fq->init) { 118862306a36Sopenharmony_ci err = qman_retire_fq(fq, NULL); 118962306a36Sopenharmony_ci if (err < 0 && netif_msg_drv(priv)) 119062306a36Sopenharmony_ci dev_err(dev, "qman_retire_fq(%u) = %d\n", 119162306a36Sopenharmony_ci qman_fq_fqid(fq), err); 119262306a36Sopenharmony_ci 119362306a36Sopenharmony_ci error = qman_oos_fq(fq); 119462306a36Sopenharmony_ci if (error < 0 && netif_msg_drv(priv)) { 119562306a36Sopenharmony_ci dev_err(dev, "qman_oos_fq(%u) = %d\n", 119662306a36Sopenharmony_ci qman_fq_fqid(fq), error); 119762306a36Sopenharmony_ci if (err >= 0) 119862306a36Sopenharmony_ci err = error; 119962306a36Sopenharmony_ci } 120062306a36Sopenharmony_ci } 120162306a36Sopenharmony_ci 120262306a36Sopenharmony_ci if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || 120362306a36Sopenharmony_ci dpaa_fq->fq_type == FQ_TYPE_RX_PCD) && 120462306a36Sopenharmony_ci xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq)) 120562306a36Sopenharmony_ci xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); 120662306a36Sopenharmony_ci 120762306a36Sopenharmony_ci qman_destroy_fq(fq); 120862306a36Sopenharmony_ci list_del(&dpaa_fq->list); 120962306a36Sopenharmony_ci 121062306a36Sopenharmony_ci return err; 121162306a36Sopenharmony_ci} 121262306a36Sopenharmony_ci 121362306a36Sopenharmony_cistatic int dpaa_fq_free(struct device *dev, struct list_head *list) 121462306a36Sopenharmony_ci{ 121562306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq, *tmp; 121662306a36Sopenharmony_ci int err, error; 121762306a36Sopenharmony_ci 121862306a36Sopenharmony_ci err = 0; 121962306a36Sopenharmony_ci list_for_each_entry_safe(dpaa_fq, tmp, list, list) { 122062306a36Sopenharmony_ci error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq); 122162306a36Sopenharmony_ci if (error < 0 && err >= 0) 122262306a36Sopenharmony_ci err = error; 122362306a36Sopenharmony_ci } 122462306a36Sopenharmony_ci 122562306a36Sopenharmony_ci return err; 122662306a36Sopenharmony_ci} 122762306a36Sopenharmony_ci 122862306a36Sopenharmony_cistatic int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, 122962306a36Sopenharmony_ci struct dpaa_fq *defq, 123062306a36Sopenharmony_ci struct dpaa_buffer_layout *buf_layout) 123162306a36Sopenharmony_ci{ 123262306a36Sopenharmony_ci struct fman_buffer_prefix_content buf_prefix_content; 123362306a36Sopenharmony_ci struct fman_port_params params; 123462306a36Sopenharmony_ci int err; 123562306a36Sopenharmony_ci 123662306a36Sopenharmony_ci memset(¶ms, 0, sizeof(params)); 123762306a36Sopenharmony_ci memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); 123862306a36Sopenharmony_ci 123962306a36Sopenharmony_ci buf_prefix_content.priv_data_size = buf_layout->priv_data_size; 124062306a36Sopenharmony_ci buf_prefix_content.pass_prs_result = true; 124162306a36Sopenharmony_ci buf_prefix_content.pass_hash_result = true; 124262306a36Sopenharmony_ci buf_prefix_content.pass_time_stamp = true; 124362306a36Sopenharmony_ci buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; 124462306a36Sopenharmony_ci 124562306a36Sopenharmony_ci params.specific_params.non_rx_params.err_fqid = errq->fqid; 124662306a36Sopenharmony_ci params.specific_params.non_rx_params.dflt_fqid = defq->fqid; 124762306a36Sopenharmony_ci 124862306a36Sopenharmony_ci err = fman_port_config(port, ¶ms); 124962306a36Sopenharmony_ci if (err) { 125062306a36Sopenharmony_ci pr_err("%s: fman_port_config failed\n", __func__); 125162306a36Sopenharmony_ci return err; 125262306a36Sopenharmony_ci } 125362306a36Sopenharmony_ci 125462306a36Sopenharmony_ci err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 125562306a36Sopenharmony_ci if (err) { 125662306a36Sopenharmony_ci pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 125762306a36Sopenharmony_ci __func__); 125862306a36Sopenharmony_ci return err; 125962306a36Sopenharmony_ci } 126062306a36Sopenharmony_ci 126162306a36Sopenharmony_ci err = fman_port_init(port); 126262306a36Sopenharmony_ci if (err) 126362306a36Sopenharmony_ci pr_err("%s: fm_port_init failed\n", __func__); 126462306a36Sopenharmony_ci 126562306a36Sopenharmony_ci return err; 126662306a36Sopenharmony_ci} 126762306a36Sopenharmony_ci 126862306a36Sopenharmony_cistatic int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp, 126962306a36Sopenharmony_ci struct dpaa_fq *errq, 127062306a36Sopenharmony_ci struct dpaa_fq *defq, struct dpaa_fq *pcdq, 127162306a36Sopenharmony_ci struct dpaa_buffer_layout *buf_layout) 127262306a36Sopenharmony_ci{ 127362306a36Sopenharmony_ci struct fman_buffer_prefix_content buf_prefix_content; 127462306a36Sopenharmony_ci struct fman_port_rx_params *rx_p; 127562306a36Sopenharmony_ci struct fman_port_params params; 127662306a36Sopenharmony_ci int err; 127762306a36Sopenharmony_ci 127862306a36Sopenharmony_ci memset(¶ms, 0, sizeof(params)); 127962306a36Sopenharmony_ci memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); 128062306a36Sopenharmony_ci 128162306a36Sopenharmony_ci buf_prefix_content.priv_data_size = buf_layout->priv_data_size; 128262306a36Sopenharmony_ci buf_prefix_content.pass_prs_result = true; 128362306a36Sopenharmony_ci buf_prefix_content.pass_hash_result = true; 128462306a36Sopenharmony_ci buf_prefix_content.pass_time_stamp = true; 128562306a36Sopenharmony_ci buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT; 128662306a36Sopenharmony_ci 128762306a36Sopenharmony_ci rx_p = ¶ms.specific_params.rx_params; 128862306a36Sopenharmony_ci rx_p->err_fqid = errq->fqid; 128962306a36Sopenharmony_ci rx_p->dflt_fqid = defq->fqid; 129062306a36Sopenharmony_ci if (pcdq) { 129162306a36Sopenharmony_ci rx_p->pcd_base_fqid = pcdq->fqid; 129262306a36Sopenharmony_ci rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; 129362306a36Sopenharmony_ci } 129462306a36Sopenharmony_ci 129562306a36Sopenharmony_ci rx_p->ext_buf_pools.num_of_pools_used = 1; 129662306a36Sopenharmony_ci rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid; 129762306a36Sopenharmony_ci rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size; 129862306a36Sopenharmony_ci 129962306a36Sopenharmony_ci err = fman_port_config(port, ¶ms); 130062306a36Sopenharmony_ci if (err) { 130162306a36Sopenharmony_ci pr_err("%s: fman_port_config failed\n", __func__); 130262306a36Sopenharmony_ci return err; 130362306a36Sopenharmony_ci } 130462306a36Sopenharmony_ci 130562306a36Sopenharmony_ci err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); 130662306a36Sopenharmony_ci if (err) { 130762306a36Sopenharmony_ci pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", 130862306a36Sopenharmony_ci __func__); 130962306a36Sopenharmony_ci return err; 131062306a36Sopenharmony_ci } 131162306a36Sopenharmony_ci 131262306a36Sopenharmony_ci err = fman_port_init(port); 131362306a36Sopenharmony_ci if (err) 131462306a36Sopenharmony_ci pr_err("%s: fm_port_init failed\n", __func__); 131562306a36Sopenharmony_ci 131662306a36Sopenharmony_ci return err; 131762306a36Sopenharmony_ci} 131862306a36Sopenharmony_ci 131962306a36Sopenharmony_cistatic int dpaa_eth_init_ports(struct mac_device *mac_dev, 132062306a36Sopenharmony_ci struct dpaa_bp *bp, 132162306a36Sopenharmony_ci struct fm_port_fqs *port_fqs, 132262306a36Sopenharmony_ci struct dpaa_buffer_layout *buf_layout, 132362306a36Sopenharmony_ci struct device *dev) 132462306a36Sopenharmony_ci{ 132562306a36Sopenharmony_ci struct fman_port *rxport = mac_dev->port[RX]; 132662306a36Sopenharmony_ci struct fman_port *txport = mac_dev->port[TX]; 132762306a36Sopenharmony_ci int err; 132862306a36Sopenharmony_ci 132962306a36Sopenharmony_ci err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, 133062306a36Sopenharmony_ci port_fqs->tx_defq, &buf_layout[TX]); 133162306a36Sopenharmony_ci if (err) 133262306a36Sopenharmony_ci return err; 133362306a36Sopenharmony_ci 133462306a36Sopenharmony_ci err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq, 133562306a36Sopenharmony_ci port_fqs->rx_defq, port_fqs->rx_pcdq, 133662306a36Sopenharmony_ci &buf_layout[RX]); 133762306a36Sopenharmony_ci 133862306a36Sopenharmony_ci return err; 133962306a36Sopenharmony_ci} 134062306a36Sopenharmony_ci 134162306a36Sopenharmony_cistatic int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, 134262306a36Sopenharmony_ci struct bm_buffer *bmb, int cnt) 134362306a36Sopenharmony_ci{ 134462306a36Sopenharmony_ci int err; 134562306a36Sopenharmony_ci 134662306a36Sopenharmony_ci err = bman_release(dpaa_bp->pool, bmb, cnt); 134762306a36Sopenharmony_ci /* Should never occur, address anyway to avoid leaking the buffers */ 134862306a36Sopenharmony_ci if (WARN_ON(err) && dpaa_bp->free_buf_cb) 134962306a36Sopenharmony_ci while (cnt-- > 0) 135062306a36Sopenharmony_ci dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); 135162306a36Sopenharmony_ci 135262306a36Sopenharmony_ci return cnt; 135362306a36Sopenharmony_ci} 135462306a36Sopenharmony_ci 135562306a36Sopenharmony_cistatic void dpaa_release_sgt_members(struct qm_sg_entry *sgt) 135662306a36Sopenharmony_ci{ 135762306a36Sopenharmony_ci struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX]; 135862306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 135962306a36Sopenharmony_ci int i = 0, j; 136062306a36Sopenharmony_ci 136162306a36Sopenharmony_ci memset(bmb, 0, sizeof(bmb)); 136262306a36Sopenharmony_ci 136362306a36Sopenharmony_ci do { 136462306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 136562306a36Sopenharmony_ci if (!dpaa_bp) 136662306a36Sopenharmony_ci return; 136762306a36Sopenharmony_ci 136862306a36Sopenharmony_ci j = 0; 136962306a36Sopenharmony_ci do { 137062306a36Sopenharmony_ci WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 137162306a36Sopenharmony_ci 137262306a36Sopenharmony_ci bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i])); 137362306a36Sopenharmony_ci 137462306a36Sopenharmony_ci j++; i++; 137562306a36Sopenharmony_ci } while (j < ARRAY_SIZE(bmb) && 137662306a36Sopenharmony_ci !qm_sg_entry_is_final(&sgt[i - 1]) && 137762306a36Sopenharmony_ci sgt[i - 1].bpid == sgt[i].bpid); 137862306a36Sopenharmony_ci 137962306a36Sopenharmony_ci dpaa_bman_release(dpaa_bp, bmb, j); 138062306a36Sopenharmony_ci } while (!qm_sg_entry_is_final(&sgt[i - 1])); 138162306a36Sopenharmony_ci} 138262306a36Sopenharmony_ci 138362306a36Sopenharmony_cistatic void dpaa_fd_release(const struct net_device *net_dev, 138462306a36Sopenharmony_ci const struct qm_fd *fd) 138562306a36Sopenharmony_ci{ 138662306a36Sopenharmony_ci struct qm_sg_entry *sgt; 138762306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 138862306a36Sopenharmony_ci struct bm_buffer bmb; 138962306a36Sopenharmony_ci dma_addr_t addr; 139062306a36Sopenharmony_ci void *vaddr; 139162306a36Sopenharmony_ci 139262306a36Sopenharmony_ci bmb.data = 0; 139362306a36Sopenharmony_ci bm_buffer_set64(&bmb, qm_fd_addr(fd)); 139462306a36Sopenharmony_ci 139562306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(fd->bpid); 139662306a36Sopenharmony_ci if (!dpaa_bp) 139762306a36Sopenharmony_ci return; 139862306a36Sopenharmony_ci 139962306a36Sopenharmony_ci if (qm_fd_get_format(fd) == qm_fd_sg) { 140062306a36Sopenharmony_ci vaddr = phys_to_virt(qm_fd_addr(fd)); 140162306a36Sopenharmony_ci sgt = vaddr + qm_fd_get_offset(fd); 140262306a36Sopenharmony_ci 140362306a36Sopenharmony_ci dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd), 140462306a36Sopenharmony_ci DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 140562306a36Sopenharmony_ci 140662306a36Sopenharmony_ci dpaa_release_sgt_members(sgt); 140762306a36Sopenharmony_ci 140862306a36Sopenharmony_ci addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, 140962306a36Sopenharmony_ci virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE, 141062306a36Sopenharmony_ci DMA_FROM_DEVICE); 141162306a36Sopenharmony_ci if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) { 141262306a36Sopenharmony_ci netdev_err(net_dev, "DMA mapping failed\n"); 141362306a36Sopenharmony_ci return; 141462306a36Sopenharmony_ci } 141562306a36Sopenharmony_ci bm_buffer_set64(&bmb, addr); 141662306a36Sopenharmony_ci } 141762306a36Sopenharmony_ci 141862306a36Sopenharmony_ci dpaa_bman_release(dpaa_bp, &bmb, 1); 141962306a36Sopenharmony_ci} 142062306a36Sopenharmony_ci 142162306a36Sopenharmony_cistatic void count_ern(struct dpaa_percpu_priv *percpu_priv, 142262306a36Sopenharmony_ci const union qm_mr_entry *msg) 142362306a36Sopenharmony_ci{ 142462306a36Sopenharmony_ci switch (msg->ern.rc & QM_MR_RC_MASK) { 142562306a36Sopenharmony_ci case QM_MR_RC_CGR_TAILDROP: 142662306a36Sopenharmony_ci percpu_priv->ern_cnt.cg_tdrop++; 142762306a36Sopenharmony_ci break; 142862306a36Sopenharmony_ci case QM_MR_RC_WRED: 142962306a36Sopenharmony_ci percpu_priv->ern_cnt.wred++; 143062306a36Sopenharmony_ci break; 143162306a36Sopenharmony_ci case QM_MR_RC_ERROR: 143262306a36Sopenharmony_ci percpu_priv->ern_cnt.err_cond++; 143362306a36Sopenharmony_ci break; 143462306a36Sopenharmony_ci case QM_MR_RC_ORPWINDOW_EARLY: 143562306a36Sopenharmony_ci percpu_priv->ern_cnt.early_window++; 143662306a36Sopenharmony_ci break; 143762306a36Sopenharmony_ci case QM_MR_RC_ORPWINDOW_LATE: 143862306a36Sopenharmony_ci percpu_priv->ern_cnt.late_window++; 143962306a36Sopenharmony_ci break; 144062306a36Sopenharmony_ci case QM_MR_RC_FQ_TAILDROP: 144162306a36Sopenharmony_ci percpu_priv->ern_cnt.fq_tdrop++; 144262306a36Sopenharmony_ci break; 144362306a36Sopenharmony_ci case QM_MR_RC_ORPWINDOW_RETIRED: 144462306a36Sopenharmony_ci percpu_priv->ern_cnt.fq_retired++; 144562306a36Sopenharmony_ci break; 144662306a36Sopenharmony_ci case QM_MR_RC_ORP_ZERO: 144762306a36Sopenharmony_ci percpu_priv->ern_cnt.orp_zero++; 144862306a36Sopenharmony_ci break; 144962306a36Sopenharmony_ci } 145062306a36Sopenharmony_ci} 145162306a36Sopenharmony_ci 145262306a36Sopenharmony_ci/* Turn on HW checksum computation for this outgoing frame. 145362306a36Sopenharmony_ci * If the current protocol is not something we support in this regard 145462306a36Sopenharmony_ci * (or if the stack has already computed the SW checksum), we do nothing. 145562306a36Sopenharmony_ci * 145662306a36Sopenharmony_ci * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value 145762306a36Sopenharmony_ci * otherwise. 145862306a36Sopenharmony_ci * 145962306a36Sopenharmony_ci * Note that this function may modify the fd->cmd field and the skb data buffer 146062306a36Sopenharmony_ci * (the Parse Results area). 146162306a36Sopenharmony_ci */ 146262306a36Sopenharmony_cistatic int dpaa_enable_tx_csum(struct dpaa_priv *priv, 146362306a36Sopenharmony_ci struct sk_buff *skb, 146462306a36Sopenharmony_ci struct qm_fd *fd, 146562306a36Sopenharmony_ci void *parse_results) 146662306a36Sopenharmony_ci{ 146762306a36Sopenharmony_ci struct fman_prs_result *parse_result; 146862306a36Sopenharmony_ci u16 ethertype = ntohs(skb->protocol); 146962306a36Sopenharmony_ci struct ipv6hdr *ipv6h = NULL; 147062306a36Sopenharmony_ci struct iphdr *iph; 147162306a36Sopenharmony_ci int retval = 0; 147262306a36Sopenharmony_ci u8 l4_proto; 147362306a36Sopenharmony_ci 147462306a36Sopenharmony_ci if (skb->ip_summed != CHECKSUM_PARTIAL) 147562306a36Sopenharmony_ci return 0; 147662306a36Sopenharmony_ci 147762306a36Sopenharmony_ci /* Note: L3 csum seems to be already computed in sw, but we can't choose 147862306a36Sopenharmony_ci * L4 alone from the FM configuration anyway. 147962306a36Sopenharmony_ci */ 148062306a36Sopenharmony_ci 148162306a36Sopenharmony_ci /* Fill in some fields of the Parse Results array, so the FMan 148262306a36Sopenharmony_ci * can find them as if they came from the FMan Parser. 148362306a36Sopenharmony_ci */ 148462306a36Sopenharmony_ci parse_result = (struct fman_prs_result *)parse_results; 148562306a36Sopenharmony_ci 148662306a36Sopenharmony_ci /* If we're dealing with VLAN, get the real Ethernet type */ 148762306a36Sopenharmony_ci if (ethertype == ETH_P_8021Q) 148862306a36Sopenharmony_ci ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); 148962306a36Sopenharmony_ci 149062306a36Sopenharmony_ci /* Fill in the relevant L3 parse result fields 149162306a36Sopenharmony_ci * and read the L4 protocol type 149262306a36Sopenharmony_ci */ 149362306a36Sopenharmony_ci switch (ethertype) { 149462306a36Sopenharmony_ci case ETH_P_IP: 149562306a36Sopenharmony_ci parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); 149662306a36Sopenharmony_ci iph = ip_hdr(skb); 149762306a36Sopenharmony_ci WARN_ON(!iph); 149862306a36Sopenharmony_ci l4_proto = iph->protocol; 149962306a36Sopenharmony_ci break; 150062306a36Sopenharmony_ci case ETH_P_IPV6: 150162306a36Sopenharmony_ci parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); 150262306a36Sopenharmony_ci ipv6h = ipv6_hdr(skb); 150362306a36Sopenharmony_ci WARN_ON(!ipv6h); 150462306a36Sopenharmony_ci l4_proto = ipv6h->nexthdr; 150562306a36Sopenharmony_ci break; 150662306a36Sopenharmony_ci default: 150762306a36Sopenharmony_ci /* We shouldn't even be here */ 150862306a36Sopenharmony_ci if (net_ratelimit()) 150962306a36Sopenharmony_ci netif_alert(priv, tx_err, priv->net_dev, 151062306a36Sopenharmony_ci "Can't compute HW csum for L3 proto 0x%x\n", 151162306a36Sopenharmony_ci ntohs(skb->protocol)); 151262306a36Sopenharmony_ci retval = -EIO; 151362306a36Sopenharmony_ci goto return_error; 151462306a36Sopenharmony_ci } 151562306a36Sopenharmony_ci 151662306a36Sopenharmony_ci /* Fill in the relevant L4 parse result fields */ 151762306a36Sopenharmony_ci switch (l4_proto) { 151862306a36Sopenharmony_ci case IPPROTO_UDP: 151962306a36Sopenharmony_ci parse_result->l4r = FM_L4_PARSE_RESULT_UDP; 152062306a36Sopenharmony_ci break; 152162306a36Sopenharmony_ci case IPPROTO_TCP: 152262306a36Sopenharmony_ci parse_result->l4r = FM_L4_PARSE_RESULT_TCP; 152362306a36Sopenharmony_ci break; 152462306a36Sopenharmony_ci default: 152562306a36Sopenharmony_ci if (net_ratelimit()) 152662306a36Sopenharmony_ci netif_alert(priv, tx_err, priv->net_dev, 152762306a36Sopenharmony_ci "Can't compute HW csum for L4 proto 0x%x\n", 152862306a36Sopenharmony_ci l4_proto); 152962306a36Sopenharmony_ci retval = -EIO; 153062306a36Sopenharmony_ci goto return_error; 153162306a36Sopenharmony_ci } 153262306a36Sopenharmony_ci 153362306a36Sopenharmony_ci /* At index 0 is IPOffset_1 as defined in the Parse Results */ 153462306a36Sopenharmony_ci parse_result->ip_off[0] = (u8)skb_network_offset(skb); 153562306a36Sopenharmony_ci parse_result->l4_off = (u8)skb_transport_offset(skb); 153662306a36Sopenharmony_ci 153762306a36Sopenharmony_ci /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ 153862306a36Sopenharmony_ci fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC); 153962306a36Sopenharmony_ci 154062306a36Sopenharmony_ci /* On P1023 and similar platforms fd->cmd interpretation could 154162306a36Sopenharmony_ci * be disabled by setting CONTEXT_A bit ICMD; currently this bit 154262306a36Sopenharmony_ci * is not set so we do not need to check; in the future, if/when 154362306a36Sopenharmony_ci * using context_a we need to check this bit 154462306a36Sopenharmony_ci */ 154562306a36Sopenharmony_ci 154662306a36Sopenharmony_cireturn_error: 154762306a36Sopenharmony_ci return retval; 154862306a36Sopenharmony_ci} 154962306a36Sopenharmony_ci 155062306a36Sopenharmony_cistatic int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) 155162306a36Sopenharmony_ci{ 155262306a36Sopenharmony_ci struct net_device *net_dev = dpaa_bp->priv->net_dev; 155362306a36Sopenharmony_ci struct bm_buffer bmb[8]; 155462306a36Sopenharmony_ci dma_addr_t addr; 155562306a36Sopenharmony_ci struct page *p; 155662306a36Sopenharmony_ci u8 i; 155762306a36Sopenharmony_ci 155862306a36Sopenharmony_ci for (i = 0; i < 8; i++) { 155962306a36Sopenharmony_ci p = dev_alloc_pages(0); 156062306a36Sopenharmony_ci if (unlikely(!p)) { 156162306a36Sopenharmony_ci netdev_err(net_dev, "dev_alloc_pages() failed\n"); 156262306a36Sopenharmony_ci goto release_previous_buffs; 156362306a36Sopenharmony_ci } 156462306a36Sopenharmony_ci 156562306a36Sopenharmony_ci addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0, 156662306a36Sopenharmony_ci DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 156762306a36Sopenharmony_ci if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev, 156862306a36Sopenharmony_ci addr))) { 156962306a36Sopenharmony_ci netdev_err(net_dev, "DMA map failed\n"); 157062306a36Sopenharmony_ci goto release_previous_buffs; 157162306a36Sopenharmony_ci } 157262306a36Sopenharmony_ci 157362306a36Sopenharmony_ci bmb[i].data = 0; 157462306a36Sopenharmony_ci bm_buffer_set64(&bmb[i], addr); 157562306a36Sopenharmony_ci } 157662306a36Sopenharmony_ci 157762306a36Sopenharmony_cirelease_bufs: 157862306a36Sopenharmony_ci return dpaa_bman_release(dpaa_bp, bmb, i); 157962306a36Sopenharmony_ci 158062306a36Sopenharmony_cirelease_previous_buffs: 158162306a36Sopenharmony_ci WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n"); 158262306a36Sopenharmony_ci 158362306a36Sopenharmony_ci bm_buffer_set64(&bmb[i], 0); 158462306a36Sopenharmony_ci /* Avoid releasing a completely null buffer; bman_release() requires 158562306a36Sopenharmony_ci * at least one buffer. 158662306a36Sopenharmony_ci */ 158762306a36Sopenharmony_ci if (likely(i)) 158862306a36Sopenharmony_ci goto release_bufs; 158962306a36Sopenharmony_ci 159062306a36Sopenharmony_ci return 0; 159162306a36Sopenharmony_ci} 159262306a36Sopenharmony_ci 159362306a36Sopenharmony_cistatic int dpaa_bp_seed(struct dpaa_bp *dpaa_bp) 159462306a36Sopenharmony_ci{ 159562306a36Sopenharmony_ci int i; 159662306a36Sopenharmony_ci 159762306a36Sopenharmony_ci /* Give each CPU an allotment of "config_count" buffers */ 159862306a36Sopenharmony_ci for_each_possible_cpu(i) { 159962306a36Sopenharmony_ci int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); 160062306a36Sopenharmony_ci int j; 160162306a36Sopenharmony_ci 160262306a36Sopenharmony_ci /* Although we access another CPU's counters here 160362306a36Sopenharmony_ci * we do it at boot time so it is safe 160462306a36Sopenharmony_ci */ 160562306a36Sopenharmony_ci for (j = 0; j < dpaa_bp->config_count; j += 8) 160662306a36Sopenharmony_ci *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp); 160762306a36Sopenharmony_ci } 160862306a36Sopenharmony_ci return 0; 160962306a36Sopenharmony_ci} 161062306a36Sopenharmony_ci 161162306a36Sopenharmony_ci/* Add buffers/(pages) for Rx processing whenever bpool count falls below 161262306a36Sopenharmony_ci * REFILL_THRESHOLD. 161362306a36Sopenharmony_ci */ 161462306a36Sopenharmony_cistatic int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) 161562306a36Sopenharmony_ci{ 161662306a36Sopenharmony_ci int count = *countptr; 161762306a36Sopenharmony_ci int new_bufs; 161862306a36Sopenharmony_ci 161962306a36Sopenharmony_ci if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { 162062306a36Sopenharmony_ci do { 162162306a36Sopenharmony_ci new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); 162262306a36Sopenharmony_ci if (unlikely(!new_bufs)) { 162362306a36Sopenharmony_ci /* Avoid looping forever if we've temporarily 162462306a36Sopenharmony_ci * run out of memory. We'll try again at the 162562306a36Sopenharmony_ci * next NAPI cycle. 162662306a36Sopenharmony_ci */ 162762306a36Sopenharmony_ci break; 162862306a36Sopenharmony_ci } 162962306a36Sopenharmony_ci count += new_bufs; 163062306a36Sopenharmony_ci } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); 163162306a36Sopenharmony_ci 163262306a36Sopenharmony_ci *countptr = count; 163362306a36Sopenharmony_ci if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) 163462306a36Sopenharmony_ci return -ENOMEM; 163562306a36Sopenharmony_ci } 163662306a36Sopenharmony_ci 163762306a36Sopenharmony_ci return 0; 163862306a36Sopenharmony_ci} 163962306a36Sopenharmony_ci 164062306a36Sopenharmony_cistatic int dpaa_eth_refill_bpools(struct dpaa_priv *priv) 164162306a36Sopenharmony_ci{ 164262306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 164362306a36Sopenharmony_ci int *countptr; 164462306a36Sopenharmony_ci 164562306a36Sopenharmony_ci dpaa_bp = priv->dpaa_bp; 164662306a36Sopenharmony_ci if (!dpaa_bp) 164762306a36Sopenharmony_ci return -EINVAL; 164862306a36Sopenharmony_ci countptr = this_cpu_ptr(dpaa_bp->percpu_count); 164962306a36Sopenharmony_ci 165062306a36Sopenharmony_ci return dpaa_eth_refill_bpool(dpaa_bp, countptr); 165162306a36Sopenharmony_ci} 165262306a36Sopenharmony_ci 165362306a36Sopenharmony_ci/* Cleanup function for outgoing frame descriptors that were built on Tx path, 165462306a36Sopenharmony_ci * either contiguous frames or scatter/gather ones. 165562306a36Sopenharmony_ci * Skb freeing is not handled here. 165662306a36Sopenharmony_ci * 165762306a36Sopenharmony_ci * This function may be called on error paths in the Tx function, so guard 165862306a36Sopenharmony_ci * against cases when not all fd relevant fields were filled in. To avoid 165962306a36Sopenharmony_ci * reading the invalid transmission timestamp for the error paths set ts to 166062306a36Sopenharmony_ci * false. 166162306a36Sopenharmony_ci * 166262306a36Sopenharmony_ci * Return the skb backpointer, since for S/G frames the buffer containing it 166362306a36Sopenharmony_ci * gets freed here. 166462306a36Sopenharmony_ci * 166562306a36Sopenharmony_ci * No skb backpointer is set when transmitting XDP frames. Cleanup the buffer 166662306a36Sopenharmony_ci * and return NULL in this case. 166762306a36Sopenharmony_ci */ 166862306a36Sopenharmony_cistatic struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, 166962306a36Sopenharmony_ci const struct qm_fd *fd, bool ts) 167062306a36Sopenharmony_ci{ 167162306a36Sopenharmony_ci const enum dma_data_direction dma_dir = DMA_TO_DEVICE; 167262306a36Sopenharmony_ci struct device *dev = priv->net_dev->dev.parent; 167362306a36Sopenharmony_ci struct skb_shared_hwtstamps shhwtstamps; 167462306a36Sopenharmony_ci dma_addr_t addr = qm_fd_addr(fd); 167562306a36Sopenharmony_ci void *vaddr = phys_to_virt(addr); 167662306a36Sopenharmony_ci const struct qm_sg_entry *sgt; 167762306a36Sopenharmony_ci struct dpaa_eth_swbp *swbp; 167862306a36Sopenharmony_ci struct sk_buff *skb; 167962306a36Sopenharmony_ci u64 ns; 168062306a36Sopenharmony_ci int i; 168162306a36Sopenharmony_ci 168262306a36Sopenharmony_ci if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { 168362306a36Sopenharmony_ci dma_unmap_page(priv->tx_dma_dev, addr, 168462306a36Sopenharmony_ci qm_fd_get_offset(fd) + DPAA_SGT_SIZE, 168562306a36Sopenharmony_ci dma_dir); 168662306a36Sopenharmony_ci 168762306a36Sopenharmony_ci /* The sgt buffer has been allocated with netdev_alloc_frag(), 168862306a36Sopenharmony_ci * it's from lowmem. 168962306a36Sopenharmony_ci */ 169062306a36Sopenharmony_ci sgt = vaddr + qm_fd_get_offset(fd); 169162306a36Sopenharmony_ci 169262306a36Sopenharmony_ci /* sgt[0] is from lowmem, was dma_map_single()-ed */ 169362306a36Sopenharmony_ci dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]), 169462306a36Sopenharmony_ci qm_sg_entry_get_len(&sgt[0]), dma_dir); 169562306a36Sopenharmony_ci 169662306a36Sopenharmony_ci /* remaining pages were mapped with skb_frag_dma_map() */ 169762306a36Sopenharmony_ci for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) && 169862306a36Sopenharmony_ci !qm_sg_entry_is_final(&sgt[i - 1]); i++) { 169962306a36Sopenharmony_ci WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 170062306a36Sopenharmony_ci 170162306a36Sopenharmony_ci dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]), 170262306a36Sopenharmony_ci qm_sg_entry_get_len(&sgt[i]), dma_dir); 170362306a36Sopenharmony_ci } 170462306a36Sopenharmony_ci } else { 170562306a36Sopenharmony_ci dma_unmap_single(priv->tx_dma_dev, addr, 170662306a36Sopenharmony_ci qm_fd_get_offset(fd) + qm_fd_get_length(fd), 170762306a36Sopenharmony_ci dma_dir); 170862306a36Sopenharmony_ci } 170962306a36Sopenharmony_ci 171062306a36Sopenharmony_ci swbp = (struct dpaa_eth_swbp *)vaddr; 171162306a36Sopenharmony_ci skb = swbp->skb; 171262306a36Sopenharmony_ci 171362306a36Sopenharmony_ci /* No skb backpointer is set when running XDP. An xdp_frame 171462306a36Sopenharmony_ci * backpointer is saved instead. 171562306a36Sopenharmony_ci */ 171662306a36Sopenharmony_ci if (!skb) { 171762306a36Sopenharmony_ci xdp_return_frame(swbp->xdpf); 171862306a36Sopenharmony_ci return NULL; 171962306a36Sopenharmony_ci } 172062306a36Sopenharmony_ci 172162306a36Sopenharmony_ci /* DMA unmapping is required before accessing the HW provided info */ 172262306a36Sopenharmony_ci if (ts && priv->tx_tstamp && 172362306a36Sopenharmony_ci skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 172462306a36Sopenharmony_ci memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 172562306a36Sopenharmony_ci 172662306a36Sopenharmony_ci if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr, 172762306a36Sopenharmony_ci &ns)) { 172862306a36Sopenharmony_ci shhwtstamps.hwtstamp = ns_to_ktime(ns); 172962306a36Sopenharmony_ci skb_tstamp_tx(skb, &shhwtstamps); 173062306a36Sopenharmony_ci } else { 173162306a36Sopenharmony_ci dev_warn(dev, "fman_port_get_tstamp failed!\n"); 173262306a36Sopenharmony_ci } 173362306a36Sopenharmony_ci } 173462306a36Sopenharmony_ci 173562306a36Sopenharmony_ci if (qm_fd_get_format(fd) == qm_fd_sg) 173662306a36Sopenharmony_ci /* Free the page that we allocated on Tx for the SGT */ 173762306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 173862306a36Sopenharmony_ci 173962306a36Sopenharmony_ci return skb; 174062306a36Sopenharmony_ci} 174162306a36Sopenharmony_ci 174262306a36Sopenharmony_cistatic u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) 174362306a36Sopenharmony_ci{ 174462306a36Sopenharmony_ci /* The parser has run and performed L4 checksum validation. 174562306a36Sopenharmony_ci * We know there were no parser errors (and implicitly no 174662306a36Sopenharmony_ci * L4 csum error), otherwise we wouldn't be here. 174762306a36Sopenharmony_ci */ 174862306a36Sopenharmony_ci if ((priv->net_dev->features & NETIF_F_RXCSUM) && 174962306a36Sopenharmony_ci (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) 175062306a36Sopenharmony_ci return CHECKSUM_UNNECESSARY; 175162306a36Sopenharmony_ci 175262306a36Sopenharmony_ci /* We're here because either the parser didn't run or the L4 checksum 175362306a36Sopenharmony_ci * was not verified. This may include the case of a UDP frame with 175462306a36Sopenharmony_ci * checksum zero or an L4 proto other than TCP/UDP 175562306a36Sopenharmony_ci */ 175662306a36Sopenharmony_ci return CHECKSUM_NONE; 175762306a36Sopenharmony_ci} 175862306a36Sopenharmony_ci 175962306a36Sopenharmony_ci#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a))) 176062306a36Sopenharmony_ci 176162306a36Sopenharmony_ci/* Build a linear skb around the received buffer. 176262306a36Sopenharmony_ci * We are guaranteed there is enough room at the end of the data buffer to 176362306a36Sopenharmony_ci * accommodate the shared info area of the skb. 176462306a36Sopenharmony_ci */ 176562306a36Sopenharmony_cistatic struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, 176662306a36Sopenharmony_ci const struct qm_fd *fd) 176762306a36Sopenharmony_ci{ 176862306a36Sopenharmony_ci ssize_t fd_off = qm_fd_get_offset(fd); 176962306a36Sopenharmony_ci dma_addr_t addr = qm_fd_addr(fd); 177062306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 177162306a36Sopenharmony_ci struct sk_buff *skb; 177262306a36Sopenharmony_ci void *vaddr; 177362306a36Sopenharmony_ci 177462306a36Sopenharmony_ci vaddr = phys_to_virt(addr); 177562306a36Sopenharmony_ci WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); 177662306a36Sopenharmony_ci 177762306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(fd->bpid); 177862306a36Sopenharmony_ci if (!dpaa_bp) 177962306a36Sopenharmony_ci goto free_buffer; 178062306a36Sopenharmony_ci 178162306a36Sopenharmony_ci skb = build_skb(vaddr, dpaa_bp->size + 178262306a36Sopenharmony_ci SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 178362306a36Sopenharmony_ci if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) 178462306a36Sopenharmony_ci goto free_buffer; 178562306a36Sopenharmony_ci skb_reserve(skb, fd_off); 178662306a36Sopenharmony_ci skb_put(skb, qm_fd_get_length(fd)); 178762306a36Sopenharmony_ci 178862306a36Sopenharmony_ci skb->ip_summed = rx_csum_offload(priv, fd); 178962306a36Sopenharmony_ci 179062306a36Sopenharmony_ci return skb; 179162306a36Sopenharmony_ci 179262306a36Sopenharmony_cifree_buffer: 179362306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 179462306a36Sopenharmony_ci return NULL; 179562306a36Sopenharmony_ci} 179662306a36Sopenharmony_ci 179762306a36Sopenharmony_ci/* Build an skb with the data of the first S/G entry in the linear portion and 179862306a36Sopenharmony_ci * the rest of the frame as skb fragments. 179962306a36Sopenharmony_ci * 180062306a36Sopenharmony_ci * The page fragment holding the S/G Table is recycled here. 180162306a36Sopenharmony_ci */ 180262306a36Sopenharmony_cistatic struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, 180362306a36Sopenharmony_ci const struct qm_fd *fd) 180462306a36Sopenharmony_ci{ 180562306a36Sopenharmony_ci ssize_t fd_off = qm_fd_get_offset(fd); 180662306a36Sopenharmony_ci dma_addr_t addr = qm_fd_addr(fd); 180762306a36Sopenharmony_ci const struct qm_sg_entry *sgt; 180862306a36Sopenharmony_ci struct page *page, *head_page; 180962306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 181062306a36Sopenharmony_ci void *vaddr, *sg_vaddr; 181162306a36Sopenharmony_ci int frag_off, frag_len; 181262306a36Sopenharmony_ci struct sk_buff *skb; 181362306a36Sopenharmony_ci dma_addr_t sg_addr; 181462306a36Sopenharmony_ci int page_offset; 181562306a36Sopenharmony_ci unsigned int sz; 181662306a36Sopenharmony_ci int *count_ptr; 181762306a36Sopenharmony_ci int i, j; 181862306a36Sopenharmony_ci 181962306a36Sopenharmony_ci vaddr = phys_to_virt(addr); 182062306a36Sopenharmony_ci WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); 182162306a36Sopenharmony_ci 182262306a36Sopenharmony_ci /* Iterate through the SGT entries and add data buffers to the skb */ 182362306a36Sopenharmony_ci sgt = vaddr + fd_off; 182462306a36Sopenharmony_ci skb = NULL; 182562306a36Sopenharmony_ci for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { 182662306a36Sopenharmony_ci /* Extension bit is not supported */ 182762306a36Sopenharmony_ci WARN_ON(qm_sg_entry_is_ext(&sgt[i])); 182862306a36Sopenharmony_ci 182962306a36Sopenharmony_ci sg_addr = qm_sg_addr(&sgt[i]); 183062306a36Sopenharmony_ci sg_vaddr = phys_to_virt(sg_addr); 183162306a36Sopenharmony_ci WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES)); 183262306a36Sopenharmony_ci 183362306a36Sopenharmony_ci dma_unmap_page(priv->rx_dma_dev, sg_addr, 183462306a36Sopenharmony_ci DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 183562306a36Sopenharmony_ci 183662306a36Sopenharmony_ci /* We may use multiple Rx pools */ 183762306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 183862306a36Sopenharmony_ci if (!dpaa_bp) 183962306a36Sopenharmony_ci goto free_buffers; 184062306a36Sopenharmony_ci 184162306a36Sopenharmony_ci if (!skb) { 184262306a36Sopenharmony_ci sz = dpaa_bp->size + 184362306a36Sopenharmony_ci SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 184462306a36Sopenharmony_ci skb = build_skb(sg_vaddr, sz); 184562306a36Sopenharmony_ci if (WARN_ON(!skb)) 184662306a36Sopenharmony_ci goto free_buffers; 184762306a36Sopenharmony_ci 184862306a36Sopenharmony_ci skb->ip_summed = rx_csum_offload(priv, fd); 184962306a36Sopenharmony_ci 185062306a36Sopenharmony_ci /* Make sure forwarded skbs will have enough space 185162306a36Sopenharmony_ci * on Tx, if extra headers are added. 185262306a36Sopenharmony_ci */ 185362306a36Sopenharmony_ci WARN_ON(fd_off != priv->rx_headroom); 185462306a36Sopenharmony_ci skb_reserve(skb, fd_off); 185562306a36Sopenharmony_ci skb_put(skb, qm_sg_entry_get_len(&sgt[i])); 185662306a36Sopenharmony_ci } else { 185762306a36Sopenharmony_ci /* Not the first S/G entry; all data from buffer will 185862306a36Sopenharmony_ci * be added in an skb fragment; fragment index is offset 185962306a36Sopenharmony_ci * by one since first S/G entry was incorporated in the 186062306a36Sopenharmony_ci * linear part of the skb. 186162306a36Sopenharmony_ci * 186262306a36Sopenharmony_ci * Caution: 'page' may be a tail page. 186362306a36Sopenharmony_ci */ 186462306a36Sopenharmony_ci page = virt_to_page(sg_vaddr); 186562306a36Sopenharmony_ci head_page = virt_to_head_page(sg_vaddr); 186662306a36Sopenharmony_ci 186762306a36Sopenharmony_ci /* Compute offset in (possibly tail) page */ 186862306a36Sopenharmony_ci page_offset = ((unsigned long)sg_vaddr & 186962306a36Sopenharmony_ci (PAGE_SIZE - 1)) + 187062306a36Sopenharmony_ci (page_address(page) - page_address(head_page)); 187162306a36Sopenharmony_ci /* page_offset only refers to the beginning of sgt[i]; 187262306a36Sopenharmony_ci * but the buffer itself may have an internal offset. 187362306a36Sopenharmony_ci */ 187462306a36Sopenharmony_ci frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; 187562306a36Sopenharmony_ci frag_len = qm_sg_entry_get_len(&sgt[i]); 187662306a36Sopenharmony_ci /* skb_add_rx_frag() does no checking on the page; if 187762306a36Sopenharmony_ci * we pass it a tail page, we'll end up with 187862306a36Sopenharmony_ci * bad page accounting and eventually with segafults. 187962306a36Sopenharmony_ci */ 188062306a36Sopenharmony_ci skb_add_rx_frag(skb, i - 1, head_page, frag_off, 188162306a36Sopenharmony_ci frag_len, dpaa_bp->size); 188262306a36Sopenharmony_ci } 188362306a36Sopenharmony_ci 188462306a36Sopenharmony_ci /* Update the pool count for the current {cpu x bpool} */ 188562306a36Sopenharmony_ci count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 188662306a36Sopenharmony_ci (*count_ptr)--; 188762306a36Sopenharmony_ci 188862306a36Sopenharmony_ci if (qm_sg_entry_is_final(&sgt[i])) 188962306a36Sopenharmony_ci break; 189062306a36Sopenharmony_ci } 189162306a36Sopenharmony_ci WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); 189262306a36Sopenharmony_ci 189362306a36Sopenharmony_ci /* free the SG table buffer */ 189462306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 189562306a36Sopenharmony_ci 189662306a36Sopenharmony_ci return skb; 189762306a36Sopenharmony_ci 189862306a36Sopenharmony_cifree_buffers: 189962306a36Sopenharmony_ci /* free all the SG entries */ 190062306a36Sopenharmony_ci for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { 190162306a36Sopenharmony_ci sg_addr = qm_sg_addr(&sgt[j]); 190262306a36Sopenharmony_ci sg_vaddr = phys_to_virt(sg_addr); 190362306a36Sopenharmony_ci /* all pages 0..i were unmaped */ 190462306a36Sopenharmony_ci if (j > i) 190562306a36Sopenharmony_ci dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), 190662306a36Sopenharmony_ci DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 190762306a36Sopenharmony_ci free_pages((unsigned long)sg_vaddr, 0); 190862306a36Sopenharmony_ci /* counters 0..i-1 were decremented */ 190962306a36Sopenharmony_ci if (j >= i) { 191062306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); 191162306a36Sopenharmony_ci if (dpaa_bp) { 191262306a36Sopenharmony_ci count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 191362306a36Sopenharmony_ci (*count_ptr)--; 191462306a36Sopenharmony_ci } 191562306a36Sopenharmony_ci } 191662306a36Sopenharmony_ci 191762306a36Sopenharmony_ci if (qm_sg_entry_is_final(&sgt[j])) 191862306a36Sopenharmony_ci break; 191962306a36Sopenharmony_ci } 192062306a36Sopenharmony_ci /* free the SGT fragment */ 192162306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 192262306a36Sopenharmony_ci 192362306a36Sopenharmony_ci return NULL; 192462306a36Sopenharmony_ci} 192562306a36Sopenharmony_ci 192662306a36Sopenharmony_cistatic int skb_to_contig_fd(struct dpaa_priv *priv, 192762306a36Sopenharmony_ci struct sk_buff *skb, struct qm_fd *fd, 192862306a36Sopenharmony_ci int *offset) 192962306a36Sopenharmony_ci{ 193062306a36Sopenharmony_ci struct net_device *net_dev = priv->net_dev; 193162306a36Sopenharmony_ci enum dma_data_direction dma_dir; 193262306a36Sopenharmony_ci struct dpaa_eth_swbp *swbp; 193362306a36Sopenharmony_ci unsigned char *buff_start; 193462306a36Sopenharmony_ci dma_addr_t addr; 193562306a36Sopenharmony_ci int err; 193662306a36Sopenharmony_ci 193762306a36Sopenharmony_ci /* We are guaranteed to have at least tx_headroom bytes 193862306a36Sopenharmony_ci * available, so just use that for offset. 193962306a36Sopenharmony_ci */ 194062306a36Sopenharmony_ci fd->bpid = FSL_DPAA_BPID_INV; 194162306a36Sopenharmony_ci buff_start = skb->data - priv->tx_headroom; 194262306a36Sopenharmony_ci dma_dir = DMA_TO_DEVICE; 194362306a36Sopenharmony_ci 194462306a36Sopenharmony_ci swbp = (struct dpaa_eth_swbp *)buff_start; 194562306a36Sopenharmony_ci swbp->skb = skb; 194662306a36Sopenharmony_ci 194762306a36Sopenharmony_ci /* Enable L3/L4 hardware checksum computation. 194862306a36Sopenharmony_ci * 194962306a36Sopenharmony_ci * We must do this before dma_map_single(DMA_TO_DEVICE), because we may 195062306a36Sopenharmony_ci * need to write into the skb. 195162306a36Sopenharmony_ci */ 195262306a36Sopenharmony_ci err = dpaa_enable_tx_csum(priv, skb, fd, 195362306a36Sopenharmony_ci buff_start + DPAA_TX_PRIV_DATA_SIZE); 195462306a36Sopenharmony_ci if (unlikely(err < 0)) { 195562306a36Sopenharmony_ci if (net_ratelimit()) 195662306a36Sopenharmony_ci netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", 195762306a36Sopenharmony_ci err); 195862306a36Sopenharmony_ci return err; 195962306a36Sopenharmony_ci } 196062306a36Sopenharmony_ci 196162306a36Sopenharmony_ci /* Fill in the rest of the FD fields */ 196262306a36Sopenharmony_ci qm_fd_set_contig(fd, priv->tx_headroom, skb->len); 196362306a36Sopenharmony_ci fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); 196462306a36Sopenharmony_ci 196562306a36Sopenharmony_ci /* Map the entire buffer size that may be seen by FMan, but no more */ 196662306a36Sopenharmony_ci addr = dma_map_single(priv->tx_dma_dev, buff_start, 196762306a36Sopenharmony_ci priv->tx_headroom + skb->len, dma_dir); 196862306a36Sopenharmony_ci if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 196962306a36Sopenharmony_ci if (net_ratelimit()) 197062306a36Sopenharmony_ci netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); 197162306a36Sopenharmony_ci return -EINVAL; 197262306a36Sopenharmony_ci } 197362306a36Sopenharmony_ci qm_fd_addr_set64(fd, addr); 197462306a36Sopenharmony_ci 197562306a36Sopenharmony_ci return 0; 197662306a36Sopenharmony_ci} 197762306a36Sopenharmony_ci 197862306a36Sopenharmony_cistatic int skb_to_sg_fd(struct dpaa_priv *priv, 197962306a36Sopenharmony_ci struct sk_buff *skb, struct qm_fd *fd) 198062306a36Sopenharmony_ci{ 198162306a36Sopenharmony_ci const enum dma_data_direction dma_dir = DMA_TO_DEVICE; 198262306a36Sopenharmony_ci const int nr_frags = skb_shinfo(skb)->nr_frags; 198362306a36Sopenharmony_ci struct net_device *net_dev = priv->net_dev; 198462306a36Sopenharmony_ci struct dpaa_eth_swbp *swbp; 198562306a36Sopenharmony_ci struct qm_sg_entry *sgt; 198662306a36Sopenharmony_ci void *buff_start; 198762306a36Sopenharmony_ci skb_frag_t *frag; 198862306a36Sopenharmony_ci dma_addr_t addr; 198962306a36Sopenharmony_ci size_t frag_len; 199062306a36Sopenharmony_ci struct page *p; 199162306a36Sopenharmony_ci int i, j, err; 199262306a36Sopenharmony_ci 199362306a36Sopenharmony_ci /* get a page to store the SGTable */ 199462306a36Sopenharmony_ci p = dev_alloc_pages(0); 199562306a36Sopenharmony_ci if (unlikely(!p)) { 199662306a36Sopenharmony_ci netdev_err(net_dev, "dev_alloc_pages() failed\n"); 199762306a36Sopenharmony_ci return -ENOMEM; 199862306a36Sopenharmony_ci } 199962306a36Sopenharmony_ci buff_start = page_address(p); 200062306a36Sopenharmony_ci 200162306a36Sopenharmony_ci /* Enable L3/L4 hardware checksum computation. 200262306a36Sopenharmony_ci * 200362306a36Sopenharmony_ci * We must do this before dma_map_single(DMA_TO_DEVICE), because we may 200462306a36Sopenharmony_ci * need to write into the skb. 200562306a36Sopenharmony_ci */ 200662306a36Sopenharmony_ci err = dpaa_enable_tx_csum(priv, skb, fd, 200762306a36Sopenharmony_ci buff_start + DPAA_TX_PRIV_DATA_SIZE); 200862306a36Sopenharmony_ci if (unlikely(err < 0)) { 200962306a36Sopenharmony_ci if (net_ratelimit()) 201062306a36Sopenharmony_ci netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", 201162306a36Sopenharmony_ci err); 201262306a36Sopenharmony_ci goto csum_failed; 201362306a36Sopenharmony_ci } 201462306a36Sopenharmony_ci 201562306a36Sopenharmony_ci /* SGT[0] is used by the linear part */ 201662306a36Sopenharmony_ci sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom); 201762306a36Sopenharmony_ci frag_len = skb_headlen(skb); 201862306a36Sopenharmony_ci qm_sg_entry_set_len(&sgt[0], frag_len); 201962306a36Sopenharmony_ci sgt[0].bpid = FSL_DPAA_BPID_INV; 202062306a36Sopenharmony_ci sgt[0].offset = 0; 202162306a36Sopenharmony_ci addr = dma_map_single(priv->tx_dma_dev, skb->data, 202262306a36Sopenharmony_ci skb_headlen(skb), dma_dir); 202362306a36Sopenharmony_ci if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 202462306a36Sopenharmony_ci netdev_err(priv->net_dev, "DMA mapping failed\n"); 202562306a36Sopenharmony_ci err = -EINVAL; 202662306a36Sopenharmony_ci goto sg0_map_failed; 202762306a36Sopenharmony_ci } 202862306a36Sopenharmony_ci qm_sg_entry_set64(&sgt[0], addr); 202962306a36Sopenharmony_ci 203062306a36Sopenharmony_ci /* populate the rest of SGT entries */ 203162306a36Sopenharmony_ci for (i = 0; i < nr_frags; i++) { 203262306a36Sopenharmony_ci frag = &skb_shinfo(skb)->frags[i]; 203362306a36Sopenharmony_ci frag_len = skb_frag_size(frag); 203462306a36Sopenharmony_ci WARN_ON(!skb_frag_page(frag)); 203562306a36Sopenharmony_ci addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0, 203662306a36Sopenharmony_ci frag_len, dma_dir); 203762306a36Sopenharmony_ci if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 203862306a36Sopenharmony_ci netdev_err(priv->net_dev, "DMA mapping failed\n"); 203962306a36Sopenharmony_ci err = -EINVAL; 204062306a36Sopenharmony_ci goto sg_map_failed; 204162306a36Sopenharmony_ci } 204262306a36Sopenharmony_ci 204362306a36Sopenharmony_ci qm_sg_entry_set_len(&sgt[i + 1], frag_len); 204462306a36Sopenharmony_ci sgt[i + 1].bpid = FSL_DPAA_BPID_INV; 204562306a36Sopenharmony_ci sgt[i + 1].offset = 0; 204662306a36Sopenharmony_ci 204762306a36Sopenharmony_ci /* keep the offset in the address */ 204862306a36Sopenharmony_ci qm_sg_entry_set64(&sgt[i + 1], addr); 204962306a36Sopenharmony_ci } 205062306a36Sopenharmony_ci 205162306a36Sopenharmony_ci /* Set the final bit in the last used entry of the SGT */ 205262306a36Sopenharmony_ci qm_sg_entry_set_f(&sgt[nr_frags], frag_len); 205362306a36Sopenharmony_ci 205462306a36Sopenharmony_ci /* set fd offset to priv->tx_headroom */ 205562306a36Sopenharmony_ci qm_fd_set_sg(fd, priv->tx_headroom, skb->len); 205662306a36Sopenharmony_ci 205762306a36Sopenharmony_ci /* DMA map the SGT page */ 205862306a36Sopenharmony_ci swbp = (struct dpaa_eth_swbp *)buff_start; 205962306a36Sopenharmony_ci swbp->skb = skb; 206062306a36Sopenharmony_ci 206162306a36Sopenharmony_ci addr = dma_map_page(priv->tx_dma_dev, p, 0, 206262306a36Sopenharmony_ci priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); 206362306a36Sopenharmony_ci if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 206462306a36Sopenharmony_ci netdev_err(priv->net_dev, "DMA mapping failed\n"); 206562306a36Sopenharmony_ci err = -EINVAL; 206662306a36Sopenharmony_ci goto sgt_map_failed; 206762306a36Sopenharmony_ci } 206862306a36Sopenharmony_ci 206962306a36Sopenharmony_ci fd->bpid = FSL_DPAA_BPID_INV; 207062306a36Sopenharmony_ci fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); 207162306a36Sopenharmony_ci qm_fd_addr_set64(fd, addr); 207262306a36Sopenharmony_ci 207362306a36Sopenharmony_ci return 0; 207462306a36Sopenharmony_ci 207562306a36Sopenharmony_cisgt_map_failed: 207662306a36Sopenharmony_cisg_map_failed: 207762306a36Sopenharmony_ci for (j = 0; j < i; j++) 207862306a36Sopenharmony_ci dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]), 207962306a36Sopenharmony_ci qm_sg_entry_get_len(&sgt[j]), dma_dir); 208062306a36Sopenharmony_cisg0_map_failed: 208162306a36Sopenharmony_cicsum_failed: 208262306a36Sopenharmony_ci free_pages((unsigned long)buff_start, 0); 208362306a36Sopenharmony_ci 208462306a36Sopenharmony_ci return err; 208562306a36Sopenharmony_ci} 208662306a36Sopenharmony_ci 208762306a36Sopenharmony_cistatic inline int dpaa_xmit(struct dpaa_priv *priv, 208862306a36Sopenharmony_ci struct rtnl_link_stats64 *percpu_stats, 208962306a36Sopenharmony_ci int queue, 209062306a36Sopenharmony_ci struct qm_fd *fd) 209162306a36Sopenharmony_ci{ 209262306a36Sopenharmony_ci struct qman_fq *egress_fq; 209362306a36Sopenharmony_ci int err, i; 209462306a36Sopenharmony_ci 209562306a36Sopenharmony_ci egress_fq = priv->egress_fqs[queue]; 209662306a36Sopenharmony_ci if (fd->bpid == FSL_DPAA_BPID_INV) 209762306a36Sopenharmony_ci fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); 209862306a36Sopenharmony_ci 209962306a36Sopenharmony_ci /* Trace this Tx fd */ 210062306a36Sopenharmony_ci trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); 210162306a36Sopenharmony_ci 210262306a36Sopenharmony_ci for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { 210362306a36Sopenharmony_ci err = qman_enqueue(egress_fq, fd); 210462306a36Sopenharmony_ci if (err != -EBUSY) 210562306a36Sopenharmony_ci break; 210662306a36Sopenharmony_ci } 210762306a36Sopenharmony_ci 210862306a36Sopenharmony_ci if (unlikely(err < 0)) { 210962306a36Sopenharmony_ci percpu_stats->tx_fifo_errors++; 211062306a36Sopenharmony_ci return err; 211162306a36Sopenharmony_ci } 211262306a36Sopenharmony_ci 211362306a36Sopenharmony_ci percpu_stats->tx_packets++; 211462306a36Sopenharmony_ci percpu_stats->tx_bytes += qm_fd_get_length(fd); 211562306a36Sopenharmony_ci 211662306a36Sopenharmony_ci return 0; 211762306a36Sopenharmony_ci} 211862306a36Sopenharmony_ci 211962306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 212062306a36Sopenharmony_cistatic int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s) 212162306a36Sopenharmony_ci{ 212262306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 212362306a36Sopenharmony_ci struct sk_buff *new_skb, *skb = *s; 212462306a36Sopenharmony_ci unsigned char *start, i; 212562306a36Sopenharmony_ci 212662306a36Sopenharmony_ci /* check linear buffer alignment */ 212762306a36Sopenharmony_ci if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) 212862306a36Sopenharmony_ci goto workaround; 212962306a36Sopenharmony_ci 213062306a36Sopenharmony_ci /* linear buffers just need to have an aligned start */ 213162306a36Sopenharmony_ci if (!skb_is_nonlinear(skb)) 213262306a36Sopenharmony_ci return 0; 213362306a36Sopenharmony_ci 213462306a36Sopenharmony_ci /* linear data size for nonlinear skbs needs to be aligned */ 213562306a36Sopenharmony_ci if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN)) 213662306a36Sopenharmony_ci goto workaround; 213762306a36Sopenharmony_ci 213862306a36Sopenharmony_ci for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 213962306a36Sopenharmony_ci skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 214062306a36Sopenharmony_ci 214162306a36Sopenharmony_ci /* all fragments need to have aligned start addresses */ 214262306a36Sopenharmony_ci if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN)) 214362306a36Sopenharmony_ci goto workaround; 214462306a36Sopenharmony_ci 214562306a36Sopenharmony_ci /* all but last fragment need to have aligned sizes */ 214662306a36Sopenharmony_ci if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) && 214762306a36Sopenharmony_ci (i < skb_shinfo(skb)->nr_frags - 1)) 214862306a36Sopenharmony_ci goto workaround; 214962306a36Sopenharmony_ci } 215062306a36Sopenharmony_ci 215162306a36Sopenharmony_ci return 0; 215262306a36Sopenharmony_ci 215362306a36Sopenharmony_ciworkaround: 215462306a36Sopenharmony_ci /* copy all the skb content into a new linear buffer */ 215562306a36Sopenharmony_ci new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + 215662306a36Sopenharmony_ci priv->tx_headroom); 215762306a36Sopenharmony_ci if (!new_skb) 215862306a36Sopenharmony_ci return -ENOMEM; 215962306a36Sopenharmony_ci 216062306a36Sopenharmony_ci /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */ 216162306a36Sopenharmony_ci skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); 216262306a36Sopenharmony_ci 216362306a36Sopenharmony_ci /* Workaround for DPAA_A050385 requires data start to be aligned */ 216462306a36Sopenharmony_ci start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); 216562306a36Sopenharmony_ci if (start - new_skb->data) 216662306a36Sopenharmony_ci skb_reserve(new_skb, start - new_skb->data); 216762306a36Sopenharmony_ci 216862306a36Sopenharmony_ci skb_put(new_skb, skb->len); 216962306a36Sopenharmony_ci skb_copy_bits(skb, 0, new_skb->data, skb->len); 217062306a36Sopenharmony_ci skb_copy_header(new_skb, skb); 217162306a36Sopenharmony_ci new_skb->dev = skb->dev; 217262306a36Sopenharmony_ci 217362306a36Sopenharmony_ci /* Copy relevant timestamp info from the old skb to the new */ 217462306a36Sopenharmony_ci if (priv->tx_tstamp) { 217562306a36Sopenharmony_ci skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags; 217662306a36Sopenharmony_ci skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps; 217762306a36Sopenharmony_ci skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey; 217862306a36Sopenharmony_ci if (skb->sk) 217962306a36Sopenharmony_ci skb_set_owner_w(new_skb, skb->sk); 218062306a36Sopenharmony_ci } 218162306a36Sopenharmony_ci 218262306a36Sopenharmony_ci /* We move the headroom when we align it so we have to reset the 218362306a36Sopenharmony_ci * network and transport header offsets relative to the new data 218462306a36Sopenharmony_ci * pointer. The checksum offload relies on these offsets. 218562306a36Sopenharmony_ci */ 218662306a36Sopenharmony_ci skb_set_network_header(new_skb, skb_network_offset(skb)); 218762306a36Sopenharmony_ci skb_set_transport_header(new_skb, skb_transport_offset(skb)); 218862306a36Sopenharmony_ci 218962306a36Sopenharmony_ci dev_kfree_skb(skb); 219062306a36Sopenharmony_ci *s = new_skb; 219162306a36Sopenharmony_ci 219262306a36Sopenharmony_ci return 0; 219362306a36Sopenharmony_ci} 219462306a36Sopenharmony_ci 219562306a36Sopenharmony_cistatic int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, 219662306a36Sopenharmony_ci struct xdp_frame **init_xdpf) 219762306a36Sopenharmony_ci{ 219862306a36Sopenharmony_ci struct xdp_frame *new_xdpf, *xdpf = *init_xdpf; 219962306a36Sopenharmony_ci void *new_buff, *aligned_data; 220062306a36Sopenharmony_ci struct page *p; 220162306a36Sopenharmony_ci u32 data_shift; 220262306a36Sopenharmony_ci int headroom; 220362306a36Sopenharmony_ci 220462306a36Sopenharmony_ci /* Check the data alignment and make sure the headroom is large 220562306a36Sopenharmony_ci * enough to store the xdpf backpointer. Use an aligned headroom 220662306a36Sopenharmony_ci * value. 220762306a36Sopenharmony_ci * 220862306a36Sopenharmony_ci * Due to alignment constraints, we give XDP access to the full 256 220962306a36Sopenharmony_ci * byte frame headroom. If the XDP program uses all of it, copy the 221062306a36Sopenharmony_ci * data to a new buffer and make room for storing the backpointer. 221162306a36Sopenharmony_ci */ 221262306a36Sopenharmony_ci if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) && 221362306a36Sopenharmony_ci xdpf->headroom >= priv->tx_headroom) { 221462306a36Sopenharmony_ci xdpf->headroom = priv->tx_headroom; 221562306a36Sopenharmony_ci return 0; 221662306a36Sopenharmony_ci } 221762306a36Sopenharmony_ci 221862306a36Sopenharmony_ci /* Try to move the data inside the buffer just enough to align it and 221962306a36Sopenharmony_ci * store the xdpf backpointer. If the available headroom isn't large 222062306a36Sopenharmony_ci * enough, resort to allocating a new buffer and copying the data. 222162306a36Sopenharmony_ci */ 222262306a36Sopenharmony_ci aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT); 222362306a36Sopenharmony_ci data_shift = xdpf->data - aligned_data; 222462306a36Sopenharmony_ci 222562306a36Sopenharmony_ci /* The XDP frame's headroom needs to be large enough to accommodate 222662306a36Sopenharmony_ci * shifting the data as well as storing the xdpf backpointer. 222762306a36Sopenharmony_ci */ 222862306a36Sopenharmony_ci if (xdpf->headroom >= data_shift + priv->tx_headroom) { 222962306a36Sopenharmony_ci memmove(aligned_data, xdpf->data, xdpf->len); 223062306a36Sopenharmony_ci xdpf->data = aligned_data; 223162306a36Sopenharmony_ci xdpf->headroom = priv->tx_headroom; 223262306a36Sopenharmony_ci return 0; 223362306a36Sopenharmony_ci } 223462306a36Sopenharmony_ci 223562306a36Sopenharmony_ci /* The new xdp_frame is stored in the new buffer. Reserve enough space 223662306a36Sopenharmony_ci * in the headroom for storing it along with the driver's private 223762306a36Sopenharmony_ci * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to 223862306a36Sopenharmony_ci * guarantee the data's alignment in the buffer. 223962306a36Sopenharmony_ci */ 224062306a36Sopenharmony_ci headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom, 224162306a36Sopenharmony_ci DPAA_FD_DATA_ALIGNMENT); 224262306a36Sopenharmony_ci 224362306a36Sopenharmony_ci /* Assure the extended headroom and data don't overflow the buffer, 224462306a36Sopenharmony_ci * while maintaining the mandatory tailroom. 224562306a36Sopenharmony_ci */ 224662306a36Sopenharmony_ci if (headroom + xdpf->len > DPAA_BP_RAW_SIZE - 224762306a36Sopenharmony_ci SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 224862306a36Sopenharmony_ci return -ENOMEM; 224962306a36Sopenharmony_ci 225062306a36Sopenharmony_ci p = dev_alloc_pages(0); 225162306a36Sopenharmony_ci if (unlikely(!p)) 225262306a36Sopenharmony_ci return -ENOMEM; 225362306a36Sopenharmony_ci 225462306a36Sopenharmony_ci /* Copy the data to the new buffer at a properly aligned offset */ 225562306a36Sopenharmony_ci new_buff = page_address(p); 225662306a36Sopenharmony_ci memcpy(new_buff + headroom, xdpf->data, xdpf->len); 225762306a36Sopenharmony_ci 225862306a36Sopenharmony_ci /* Create an XDP frame around the new buffer in a similar fashion 225962306a36Sopenharmony_ci * to xdp_convert_buff_to_frame. 226062306a36Sopenharmony_ci */ 226162306a36Sopenharmony_ci new_xdpf = new_buff; 226262306a36Sopenharmony_ci new_xdpf->data = new_buff + headroom; 226362306a36Sopenharmony_ci new_xdpf->len = xdpf->len; 226462306a36Sopenharmony_ci new_xdpf->headroom = priv->tx_headroom; 226562306a36Sopenharmony_ci new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; 226662306a36Sopenharmony_ci new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; 226762306a36Sopenharmony_ci 226862306a36Sopenharmony_ci /* Release the initial buffer */ 226962306a36Sopenharmony_ci xdp_return_frame_rx_napi(xdpf); 227062306a36Sopenharmony_ci 227162306a36Sopenharmony_ci *init_xdpf = new_xdpf; 227262306a36Sopenharmony_ci return 0; 227362306a36Sopenharmony_ci} 227462306a36Sopenharmony_ci#endif 227562306a36Sopenharmony_ci 227662306a36Sopenharmony_cistatic netdev_tx_t 227762306a36Sopenharmony_cidpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 227862306a36Sopenharmony_ci{ 227962306a36Sopenharmony_ci const int queue_mapping = skb_get_queue_mapping(skb); 228062306a36Sopenharmony_ci bool nonlinear = skb_is_nonlinear(skb); 228162306a36Sopenharmony_ci struct rtnl_link_stats64 *percpu_stats; 228262306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 228362306a36Sopenharmony_ci struct netdev_queue *txq; 228462306a36Sopenharmony_ci struct dpaa_priv *priv; 228562306a36Sopenharmony_ci struct qm_fd fd; 228662306a36Sopenharmony_ci int offset = 0; 228762306a36Sopenharmony_ci int err = 0; 228862306a36Sopenharmony_ci 228962306a36Sopenharmony_ci priv = netdev_priv(net_dev); 229062306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 229162306a36Sopenharmony_ci percpu_stats = &percpu_priv->stats; 229262306a36Sopenharmony_ci 229362306a36Sopenharmony_ci qm_fd_clear_fd(&fd); 229462306a36Sopenharmony_ci 229562306a36Sopenharmony_ci if (!nonlinear) { 229662306a36Sopenharmony_ci /* We're going to store the skb backpointer at the beginning 229762306a36Sopenharmony_ci * of the data buffer, so we need a privately owned skb 229862306a36Sopenharmony_ci * 229962306a36Sopenharmony_ci * We've made sure skb is not shared in dev->priv_flags, 230062306a36Sopenharmony_ci * we need to verify the skb head is not cloned 230162306a36Sopenharmony_ci */ 230262306a36Sopenharmony_ci if (skb_cow_head(skb, priv->tx_headroom)) 230362306a36Sopenharmony_ci goto enomem; 230462306a36Sopenharmony_ci 230562306a36Sopenharmony_ci WARN_ON(skb_is_nonlinear(skb)); 230662306a36Sopenharmony_ci } 230762306a36Sopenharmony_ci 230862306a36Sopenharmony_ci /* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES; 230962306a36Sopenharmony_ci * make sure we don't feed FMan with more fragments than it supports. 231062306a36Sopenharmony_ci */ 231162306a36Sopenharmony_ci if (unlikely(nonlinear && 231262306a36Sopenharmony_ci (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) { 231362306a36Sopenharmony_ci /* If the egress skb contains more fragments than we support 231462306a36Sopenharmony_ci * we have no choice but to linearize it ourselves. 231562306a36Sopenharmony_ci */ 231662306a36Sopenharmony_ci if (__skb_linearize(skb)) 231762306a36Sopenharmony_ci goto enomem; 231862306a36Sopenharmony_ci 231962306a36Sopenharmony_ci nonlinear = skb_is_nonlinear(skb); 232062306a36Sopenharmony_ci } 232162306a36Sopenharmony_ci 232262306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 232362306a36Sopenharmony_ci if (unlikely(fman_has_errata_a050385())) { 232462306a36Sopenharmony_ci if (dpaa_a050385_wa_skb(net_dev, &skb)) 232562306a36Sopenharmony_ci goto enomem; 232662306a36Sopenharmony_ci nonlinear = skb_is_nonlinear(skb); 232762306a36Sopenharmony_ci } 232862306a36Sopenharmony_ci#endif 232962306a36Sopenharmony_ci 233062306a36Sopenharmony_ci if (nonlinear) { 233162306a36Sopenharmony_ci /* Just create a S/G fd based on the skb */ 233262306a36Sopenharmony_ci err = skb_to_sg_fd(priv, skb, &fd); 233362306a36Sopenharmony_ci percpu_priv->tx_frag_skbuffs++; 233462306a36Sopenharmony_ci } else { 233562306a36Sopenharmony_ci /* Create a contig FD from this skb */ 233662306a36Sopenharmony_ci err = skb_to_contig_fd(priv, skb, &fd, &offset); 233762306a36Sopenharmony_ci } 233862306a36Sopenharmony_ci if (unlikely(err < 0)) 233962306a36Sopenharmony_ci goto skb_to_fd_failed; 234062306a36Sopenharmony_ci 234162306a36Sopenharmony_ci txq = netdev_get_tx_queue(net_dev, queue_mapping); 234262306a36Sopenharmony_ci 234362306a36Sopenharmony_ci /* LLTX requires to do our own update of trans_start */ 234462306a36Sopenharmony_ci txq_trans_cond_update(txq); 234562306a36Sopenharmony_ci 234662306a36Sopenharmony_ci if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 234762306a36Sopenharmony_ci fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); 234862306a36Sopenharmony_ci skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 234962306a36Sopenharmony_ci } 235062306a36Sopenharmony_ci 235162306a36Sopenharmony_ci if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) 235262306a36Sopenharmony_ci return NETDEV_TX_OK; 235362306a36Sopenharmony_ci 235462306a36Sopenharmony_ci dpaa_cleanup_tx_fd(priv, &fd, false); 235562306a36Sopenharmony_ciskb_to_fd_failed: 235662306a36Sopenharmony_cienomem: 235762306a36Sopenharmony_ci percpu_stats->tx_errors++; 235862306a36Sopenharmony_ci dev_kfree_skb(skb); 235962306a36Sopenharmony_ci return NETDEV_TX_OK; 236062306a36Sopenharmony_ci} 236162306a36Sopenharmony_ci 236262306a36Sopenharmony_cistatic void dpaa_rx_error(struct net_device *net_dev, 236362306a36Sopenharmony_ci const struct dpaa_priv *priv, 236462306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv, 236562306a36Sopenharmony_ci const struct qm_fd *fd, 236662306a36Sopenharmony_ci u32 fqid) 236762306a36Sopenharmony_ci{ 236862306a36Sopenharmony_ci if (net_ratelimit()) 236962306a36Sopenharmony_ci netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", 237062306a36Sopenharmony_ci be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); 237162306a36Sopenharmony_ci 237262306a36Sopenharmony_ci percpu_priv->stats.rx_errors++; 237362306a36Sopenharmony_ci 237462306a36Sopenharmony_ci if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) 237562306a36Sopenharmony_ci percpu_priv->rx_errors.dme++; 237662306a36Sopenharmony_ci if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) 237762306a36Sopenharmony_ci percpu_priv->rx_errors.fpe++; 237862306a36Sopenharmony_ci if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) 237962306a36Sopenharmony_ci percpu_priv->rx_errors.fse++; 238062306a36Sopenharmony_ci if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) 238162306a36Sopenharmony_ci percpu_priv->rx_errors.phe++; 238262306a36Sopenharmony_ci 238362306a36Sopenharmony_ci dpaa_fd_release(net_dev, fd); 238462306a36Sopenharmony_ci} 238562306a36Sopenharmony_ci 238662306a36Sopenharmony_cistatic void dpaa_tx_error(struct net_device *net_dev, 238762306a36Sopenharmony_ci const struct dpaa_priv *priv, 238862306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv, 238962306a36Sopenharmony_ci const struct qm_fd *fd, 239062306a36Sopenharmony_ci u32 fqid) 239162306a36Sopenharmony_ci{ 239262306a36Sopenharmony_ci struct sk_buff *skb; 239362306a36Sopenharmony_ci 239462306a36Sopenharmony_ci if (net_ratelimit()) 239562306a36Sopenharmony_ci netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 239662306a36Sopenharmony_ci be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); 239762306a36Sopenharmony_ci 239862306a36Sopenharmony_ci percpu_priv->stats.tx_errors++; 239962306a36Sopenharmony_ci 240062306a36Sopenharmony_ci skb = dpaa_cleanup_tx_fd(priv, fd, false); 240162306a36Sopenharmony_ci dev_kfree_skb(skb); 240262306a36Sopenharmony_ci} 240362306a36Sopenharmony_ci 240462306a36Sopenharmony_cistatic int dpaa_eth_poll(struct napi_struct *napi, int budget) 240562306a36Sopenharmony_ci{ 240662306a36Sopenharmony_ci struct dpaa_napi_portal *np = 240762306a36Sopenharmony_ci container_of(napi, struct dpaa_napi_portal, napi); 240862306a36Sopenharmony_ci int cleaned; 240962306a36Sopenharmony_ci 241062306a36Sopenharmony_ci np->xdp_act = 0; 241162306a36Sopenharmony_ci 241262306a36Sopenharmony_ci cleaned = qman_p_poll_dqrr(np->p, budget); 241362306a36Sopenharmony_ci 241462306a36Sopenharmony_ci if (np->xdp_act & XDP_REDIRECT) 241562306a36Sopenharmony_ci xdp_do_flush(); 241662306a36Sopenharmony_ci 241762306a36Sopenharmony_ci if (cleaned < budget) { 241862306a36Sopenharmony_ci napi_complete_done(napi, cleaned); 241962306a36Sopenharmony_ci qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); 242062306a36Sopenharmony_ci } else if (np->down) { 242162306a36Sopenharmony_ci qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); 242262306a36Sopenharmony_ci } 242362306a36Sopenharmony_ci 242462306a36Sopenharmony_ci return cleaned; 242562306a36Sopenharmony_ci} 242662306a36Sopenharmony_ci 242762306a36Sopenharmony_cistatic void dpaa_tx_conf(struct net_device *net_dev, 242862306a36Sopenharmony_ci const struct dpaa_priv *priv, 242962306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv, 243062306a36Sopenharmony_ci const struct qm_fd *fd, 243162306a36Sopenharmony_ci u32 fqid) 243262306a36Sopenharmony_ci{ 243362306a36Sopenharmony_ci struct sk_buff *skb; 243462306a36Sopenharmony_ci 243562306a36Sopenharmony_ci if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { 243662306a36Sopenharmony_ci if (net_ratelimit()) 243762306a36Sopenharmony_ci netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 243862306a36Sopenharmony_ci be32_to_cpu(fd->status) & 243962306a36Sopenharmony_ci FM_FD_STAT_TX_ERRORS); 244062306a36Sopenharmony_ci 244162306a36Sopenharmony_ci percpu_priv->stats.tx_errors++; 244262306a36Sopenharmony_ci } 244362306a36Sopenharmony_ci 244462306a36Sopenharmony_ci percpu_priv->tx_confirm++; 244562306a36Sopenharmony_ci 244662306a36Sopenharmony_ci skb = dpaa_cleanup_tx_fd(priv, fd, true); 244762306a36Sopenharmony_ci 244862306a36Sopenharmony_ci consume_skb(skb); 244962306a36Sopenharmony_ci} 245062306a36Sopenharmony_ci 245162306a36Sopenharmony_cistatic inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, 245262306a36Sopenharmony_ci struct qman_portal *portal, bool sched_napi) 245362306a36Sopenharmony_ci{ 245462306a36Sopenharmony_ci if (sched_napi) { 245562306a36Sopenharmony_ci /* Disable QMan IRQ and invoke NAPI */ 245662306a36Sopenharmony_ci qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); 245762306a36Sopenharmony_ci 245862306a36Sopenharmony_ci percpu_priv->np.p = portal; 245962306a36Sopenharmony_ci napi_schedule(&percpu_priv->np.napi); 246062306a36Sopenharmony_ci percpu_priv->in_interrupt++; 246162306a36Sopenharmony_ci return 1; 246262306a36Sopenharmony_ci } 246362306a36Sopenharmony_ci return 0; 246462306a36Sopenharmony_ci} 246562306a36Sopenharmony_ci 246662306a36Sopenharmony_cistatic enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, 246762306a36Sopenharmony_ci struct qman_fq *fq, 246862306a36Sopenharmony_ci const struct qm_dqrr_entry *dq, 246962306a36Sopenharmony_ci bool sched_napi) 247062306a36Sopenharmony_ci{ 247162306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 247262306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 247362306a36Sopenharmony_ci struct net_device *net_dev; 247462306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 247562306a36Sopenharmony_ci struct dpaa_priv *priv; 247662306a36Sopenharmony_ci 247762306a36Sopenharmony_ci net_dev = dpaa_fq->net_dev; 247862306a36Sopenharmony_ci priv = netdev_priv(net_dev); 247962306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); 248062306a36Sopenharmony_ci if (!dpaa_bp) 248162306a36Sopenharmony_ci return qman_cb_dqrr_consume; 248262306a36Sopenharmony_ci 248362306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 248462306a36Sopenharmony_ci 248562306a36Sopenharmony_ci if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) 248662306a36Sopenharmony_ci return qman_cb_dqrr_stop; 248762306a36Sopenharmony_ci 248862306a36Sopenharmony_ci dpaa_eth_refill_bpools(priv); 248962306a36Sopenharmony_ci dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 249062306a36Sopenharmony_ci 249162306a36Sopenharmony_ci return qman_cb_dqrr_consume; 249262306a36Sopenharmony_ci} 249362306a36Sopenharmony_ci 249462306a36Sopenharmony_cistatic int dpaa_xdp_xmit_frame(struct net_device *net_dev, 249562306a36Sopenharmony_ci struct xdp_frame *xdpf) 249662306a36Sopenharmony_ci{ 249762306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 249862306a36Sopenharmony_ci struct rtnl_link_stats64 *percpu_stats; 249962306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 250062306a36Sopenharmony_ci struct dpaa_eth_swbp *swbp; 250162306a36Sopenharmony_ci struct netdev_queue *txq; 250262306a36Sopenharmony_ci void *buff_start; 250362306a36Sopenharmony_ci struct qm_fd fd; 250462306a36Sopenharmony_ci dma_addr_t addr; 250562306a36Sopenharmony_ci int err; 250662306a36Sopenharmony_ci 250762306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 250862306a36Sopenharmony_ci percpu_stats = &percpu_priv->stats; 250962306a36Sopenharmony_ci 251062306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 251162306a36Sopenharmony_ci if (unlikely(fman_has_errata_a050385())) { 251262306a36Sopenharmony_ci if (dpaa_a050385_wa_xdpf(priv, &xdpf)) { 251362306a36Sopenharmony_ci err = -ENOMEM; 251462306a36Sopenharmony_ci goto out_error; 251562306a36Sopenharmony_ci } 251662306a36Sopenharmony_ci } 251762306a36Sopenharmony_ci#endif 251862306a36Sopenharmony_ci 251962306a36Sopenharmony_ci if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) { 252062306a36Sopenharmony_ci err = -EINVAL; 252162306a36Sopenharmony_ci goto out_error; 252262306a36Sopenharmony_ci } 252362306a36Sopenharmony_ci 252462306a36Sopenharmony_ci buff_start = xdpf->data - xdpf->headroom; 252562306a36Sopenharmony_ci 252662306a36Sopenharmony_ci /* Leave empty the skb backpointer at the start of the buffer. 252762306a36Sopenharmony_ci * Save the XDP frame for easy cleanup on confirmation. 252862306a36Sopenharmony_ci */ 252962306a36Sopenharmony_ci swbp = (struct dpaa_eth_swbp *)buff_start; 253062306a36Sopenharmony_ci swbp->skb = NULL; 253162306a36Sopenharmony_ci swbp->xdpf = xdpf; 253262306a36Sopenharmony_ci 253362306a36Sopenharmony_ci qm_fd_clear_fd(&fd); 253462306a36Sopenharmony_ci fd.bpid = FSL_DPAA_BPID_INV; 253562306a36Sopenharmony_ci fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO); 253662306a36Sopenharmony_ci qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len); 253762306a36Sopenharmony_ci 253862306a36Sopenharmony_ci addr = dma_map_single(priv->tx_dma_dev, buff_start, 253962306a36Sopenharmony_ci xdpf->headroom + xdpf->len, 254062306a36Sopenharmony_ci DMA_TO_DEVICE); 254162306a36Sopenharmony_ci if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { 254262306a36Sopenharmony_ci err = -EINVAL; 254362306a36Sopenharmony_ci goto out_error; 254462306a36Sopenharmony_ci } 254562306a36Sopenharmony_ci 254662306a36Sopenharmony_ci qm_fd_addr_set64(&fd, addr); 254762306a36Sopenharmony_ci 254862306a36Sopenharmony_ci /* Bump the trans_start */ 254962306a36Sopenharmony_ci txq = netdev_get_tx_queue(net_dev, smp_processor_id()); 255062306a36Sopenharmony_ci txq_trans_cond_update(txq); 255162306a36Sopenharmony_ci 255262306a36Sopenharmony_ci err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd); 255362306a36Sopenharmony_ci if (err) { 255462306a36Sopenharmony_ci dma_unmap_single(priv->tx_dma_dev, addr, 255562306a36Sopenharmony_ci qm_fd_get_offset(&fd) + qm_fd_get_length(&fd), 255662306a36Sopenharmony_ci DMA_TO_DEVICE); 255762306a36Sopenharmony_ci goto out_error; 255862306a36Sopenharmony_ci } 255962306a36Sopenharmony_ci 256062306a36Sopenharmony_ci return 0; 256162306a36Sopenharmony_ci 256262306a36Sopenharmony_ciout_error: 256362306a36Sopenharmony_ci percpu_stats->tx_errors++; 256462306a36Sopenharmony_ci return err; 256562306a36Sopenharmony_ci} 256662306a36Sopenharmony_ci 256762306a36Sopenharmony_cistatic u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr, 256862306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len) 256962306a36Sopenharmony_ci{ 257062306a36Sopenharmony_ci ssize_t fd_off = qm_fd_get_offset(fd); 257162306a36Sopenharmony_ci struct bpf_prog *xdp_prog; 257262306a36Sopenharmony_ci struct xdp_frame *xdpf; 257362306a36Sopenharmony_ci struct xdp_buff xdp; 257462306a36Sopenharmony_ci u32 xdp_act; 257562306a36Sopenharmony_ci int err; 257662306a36Sopenharmony_ci 257762306a36Sopenharmony_ci xdp_prog = READ_ONCE(priv->xdp_prog); 257862306a36Sopenharmony_ci if (!xdp_prog) 257962306a36Sopenharmony_ci return XDP_PASS; 258062306a36Sopenharmony_ci 258162306a36Sopenharmony_ci xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE, 258262306a36Sopenharmony_ci &dpaa_fq->xdp_rxq); 258362306a36Sopenharmony_ci xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM, 258462306a36Sopenharmony_ci XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true); 258562306a36Sopenharmony_ci 258662306a36Sopenharmony_ci /* We reserve a fixed headroom of 256 bytes under the erratum and we 258762306a36Sopenharmony_ci * offer it all to XDP programs to use. If no room is left for the 258862306a36Sopenharmony_ci * xdpf backpointer on TX, we will need to copy the data. 258962306a36Sopenharmony_ci * Disable metadata support since data realignments might be required 259062306a36Sopenharmony_ci * and the information can be lost. 259162306a36Sopenharmony_ci */ 259262306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 259362306a36Sopenharmony_ci if (unlikely(fman_has_errata_a050385())) { 259462306a36Sopenharmony_ci xdp_set_data_meta_invalid(&xdp); 259562306a36Sopenharmony_ci xdp.data_hard_start = vaddr; 259662306a36Sopenharmony_ci xdp.frame_sz = DPAA_BP_RAW_SIZE; 259762306a36Sopenharmony_ci } 259862306a36Sopenharmony_ci#endif 259962306a36Sopenharmony_ci 260062306a36Sopenharmony_ci xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); 260162306a36Sopenharmony_ci 260262306a36Sopenharmony_ci /* Update the length and the offset of the FD */ 260362306a36Sopenharmony_ci qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data); 260462306a36Sopenharmony_ci 260562306a36Sopenharmony_ci switch (xdp_act) { 260662306a36Sopenharmony_ci case XDP_PASS: 260762306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 260862306a36Sopenharmony_ci *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 : 260962306a36Sopenharmony_ci xdp.data - xdp.data_meta; 261062306a36Sopenharmony_ci#else 261162306a36Sopenharmony_ci *xdp_meta_len = xdp.data - xdp.data_meta; 261262306a36Sopenharmony_ci#endif 261362306a36Sopenharmony_ci break; 261462306a36Sopenharmony_ci case XDP_TX: 261562306a36Sopenharmony_ci /* We can access the full headroom when sending the frame 261662306a36Sopenharmony_ci * back out 261762306a36Sopenharmony_ci */ 261862306a36Sopenharmony_ci xdp.data_hard_start = vaddr; 261962306a36Sopenharmony_ci xdp.frame_sz = DPAA_BP_RAW_SIZE; 262062306a36Sopenharmony_ci xdpf = xdp_convert_buff_to_frame(&xdp); 262162306a36Sopenharmony_ci if (unlikely(!xdpf)) { 262262306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 262362306a36Sopenharmony_ci break; 262462306a36Sopenharmony_ci } 262562306a36Sopenharmony_ci 262662306a36Sopenharmony_ci if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf)) 262762306a36Sopenharmony_ci xdp_return_frame_rx_napi(xdpf); 262862306a36Sopenharmony_ci 262962306a36Sopenharmony_ci break; 263062306a36Sopenharmony_ci case XDP_REDIRECT: 263162306a36Sopenharmony_ci /* Allow redirect to use the full headroom */ 263262306a36Sopenharmony_ci xdp.data_hard_start = vaddr; 263362306a36Sopenharmony_ci xdp.frame_sz = DPAA_BP_RAW_SIZE; 263462306a36Sopenharmony_ci 263562306a36Sopenharmony_ci err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); 263662306a36Sopenharmony_ci if (err) { 263762306a36Sopenharmony_ci trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); 263862306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 263962306a36Sopenharmony_ci } 264062306a36Sopenharmony_ci break; 264162306a36Sopenharmony_ci default: 264262306a36Sopenharmony_ci bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); 264362306a36Sopenharmony_ci fallthrough; 264462306a36Sopenharmony_ci case XDP_ABORTED: 264562306a36Sopenharmony_ci trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); 264662306a36Sopenharmony_ci fallthrough; 264762306a36Sopenharmony_ci case XDP_DROP: 264862306a36Sopenharmony_ci /* Free the buffer */ 264962306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 265062306a36Sopenharmony_ci break; 265162306a36Sopenharmony_ci } 265262306a36Sopenharmony_ci 265362306a36Sopenharmony_ci return xdp_act; 265462306a36Sopenharmony_ci} 265562306a36Sopenharmony_ci 265662306a36Sopenharmony_cistatic enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, 265762306a36Sopenharmony_ci struct qman_fq *fq, 265862306a36Sopenharmony_ci const struct qm_dqrr_entry *dq, 265962306a36Sopenharmony_ci bool sched_napi) 266062306a36Sopenharmony_ci{ 266162306a36Sopenharmony_ci bool ts_valid = false, hash_valid = false; 266262306a36Sopenharmony_ci struct skb_shared_hwtstamps *shhwtstamps; 266362306a36Sopenharmony_ci unsigned int skb_len, xdp_meta_len = 0; 266462306a36Sopenharmony_ci struct rtnl_link_stats64 *percpu_stats; 266562306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 266662306a36Sopenharmony_ci const struct qm_fd *fd = &dq->fd; 266762306a36Sopenharmony_ci dma_addr_t addr = qm_fd_addr(fd); 266862306a36Sopenharmony_ci struct dpaa_napi_portal *np; 266962306a36Sopenharmony_ci enum qm_fd_format fd_format; 267062306a36Sopenharmony_ci struct net_device *net_dev; 267162306a36Sopenharmony_ci u32 fd_status, hash_offset; 267262306a36Sopenharmony_ci struct qm_sg_entry *sgt; 267362306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 267462306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq; 267562306a36Sopenharmony_ci struct dpaa_priv *priv; 267662306a36Sopenharmony_ci struct sk_buff *skb; 267762306a36Sopenharmony_ci int *count_ptr; 267862306a36Sopenharmony_ci u32 xdp_act; 267962306a36Sopenharmony_ci void *vaddr; 268062306a36Sopenharmony_ci u32 hash; 268162306a36Sopenharmony_ci u64 ns; 268262306a36Sopenharmony_ci 268362306a36Sopenharmony_ci dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); 268462306a36Sopenharmony_ci fd_status = be32_to_cpu(fd->status); 268562306a36Sopenharmony_ci fd_format = qm_fd_get_format(fd); 268662306a36Sopenharmony_ci net_dev = dpaa_fq->net_dev; 268762306a36Sopenharmony_ci priv = netdev_priv(net_dev); 268862306a36Sopenharmony_ci dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); 268962306a36Sopenharmony_ci if (!dpaa_bp) 269062306a36Sopenharmony_ci return qman_cb_dqrr_consume; 269162306a36Sopenharmony_ci 269262306a36Sopenharmony_ci /* Trace the Rx fd */ 269362306a36Sopenharmony_ci trace_dpaa_rx_fd(net_dev, fq, &dq->fd); 269462306a36Sopenharmony_ci 269562306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 269662306a36Sopenharmony_ci percpu_stats = &percpu_priv->stats; 269762306a36Sopenharmony_ci np = &percpu_priv->np; 269862306a36Sopenharmony_ci 269962306a36Sopenharmony_ci if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))) 270062306a36Sopenharmony_ci return qman_cb_dqrr_stop; 270162306a36Sopenharmony_ci 270262306a36Sopenharmony_ci /* Make sure we didn't run out of buffers */ 270362306a36Sopenharmony_ci if (unlikely(dpaa_eth_refill_bpools(priv))) { 270462306a36Sopenharmony_ci /* Unable to refill the buffer pool due to insufficient 270562306a36Sopenharmony_ci * system memory. Just release the frame back into the pool, 270662306a36Sopenharmony_ci * otherwise we'll soon end up with an empty buffer pool. 270762306a36Sopenharmony_ci */ 270862306a36Sopenharmony_ci dpaa_fd_release(net_dev, &dq->fd); 270962306a36Sopenharmony_ci return qman_cb_dqrr_consume; 271062306a36Sopenharmony_ci } 271162306a36Sopenharmony_ci 271262306a36Sopenharmony_ci if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { 271362306a36Sopenharmony_ci if (net_ratelimit()) 271462306a36Sopenharmony_ci netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", 271562306a36Sopenharmony_ci fd_status & FM_FD_STAT_RX_ERRORS); 271662306a36Sopenharmony_ci 271762306a36Sopenharmony_ci percpu_stats->rx_errors++; 271862306a36Sopenharmony_ci dpaa_fd_release(net_dev, fd); 271962306a36Sopenharmony_ci return qman_cb_dqrr_consume; 272062306a36Sopenharmony_ci } 272162306a36Sopenharmony_ci 272262306a36Sopenharmony_ci dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, 272362306a36Sopenharmony_ci DMA_FROM_DEVICE); 272462306a36Sopenharmony_ci 272562306a36Sopenharmony_ci /* prefetch the first 64 bytes of the frame or the SGT start */ 272662306a36Sopenharmony_ci vaddr = phys_to_virt(addr); 272762306a36Sopenharmony_ci prefetch(vaddr + qm_fd_get_offset(fd)); 272862306a36Sopenharmony_ci 272962306a36Sopenharmony_ci /* The only FD types that we may receive are contig and S/G */ 273062306a36Sopenharmony_ci WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); 273162306a36Sopenharmony_ci 273262306a36Sopenharmony_ci /* Account for either the contig buffer or the SGT buffer (depending on 273362306a36Sopenharmony_ci * which case we were in) having been removed from the pool. 273462306a36Sopenharmony_ci */ 273562306a36Sopenharmony_ci count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 273662306a36Sopenharmony_ci (*count_ptr)--; 273762306a36Sopenharmony_ci 273862306a36Sopenharmony_ci /* Extract the timestamp stored in the headroom before running XDP */ 273962306a36Sopenharmony_ci if (priv->rx_tstamp) { 274062306a36Sopenharmony_ci if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) 274162306a36Sopenharmony_ci ts_valid = true; 274262306a36Sopenharmony_ci else 274362306a36Sopenharmony_ci WARN_ONCE(1, "fman_port_get_tstamp failed!\n"); 274462306a36Sopenharmony_ci } 274562306a36Sopenharmony_ci 274662306a36Sopenharmony_ci /* Extract the hash stored in the headroom before running XDP */ 274762306a36Sopenharmony_ci if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && 274862306a36Sopenharmony_ci !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], 274962306a36Sopenharmony_ci &hash_offset)) { 275062306a36Sopenharmony_ci hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset)); 275162306a36Sopenharmony_ci hash_valid = true; 275262306a36Sopenharmony_ci } 275362306a36Sopenharmony_ci 275462306a36Sopenharmony_ci if (likely(fd_format == qm_fd_contig)) { 275562306a36Sopenharmony_ci xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr, 275662306a36Sopenharmony_ci dpaa_fq, &xdp_meta_len); 275762306a36Sopenharmony_ci np->xdp_act |= xdp_act; 275862306a36Sopenharmony_ci if (xdp_act != XDP_PASS) { 275962306a36Sopenharmony_ci percpu_stats->rx_packets++; 276062306a36Sopenharmony_ci percpu_stats->rx_bytes += qm_fd_get_length(fd); 276162306a36Sopenharmony_ci return qman_cb_dqrr_consume; 276262306a36Sopenharmony_ci } 276362306a36Sopenharmony_ci skb = contig_fd_to_skb(priv, fd); 276462306a36Sopenharmony_ci } else { 276562306a36Sopenharmony_ci /* XDP doesn't support S/G frames. Return the fragments to the 276662306a36Sopenharmony_ci * buffer pool and release the SGT. 276762306a36Sopenharmony_ci */ 276862306a36Sopenharmony_ci if (READ_ONCE(priv->xdp_prog)) { 276962306a36Sopenharmony_ci WARN_ONCE(1, "S/G frames not supported under XDP\n"); 277062306a36Sopenharmony_ci sgt = vaddr + qm_fd_get_offset(fd); 277162306a36Sopenharmony_ci dpaa_release_sgt_members(sgt); 277262306a36Sopenharmony_ci free_pages((unsigned long)vaddr, 0); 277362306a36Sopenharmony_ci return qman_cb_dqrr_consume; 277462306a36Sopenharmony_ci } 277562306a36Sopenharmony_ci skb = sg_fd_to_skb(priv, fd); 277662306a36Sopenharmony_ci } 277762306a36Sopenharmony_ci if (!skb) 277862306a36Sopenharmony_ci return qman_cb_dqrr_consume; 277962306a36Sopenharmony_ci 278062306a36Sopenharmony_ci if (xdp_meta_len) 278162306a36Sopenharmony_ci skb_metadata_set(skb, xdp_meta_len); 278262306a36Sopenharmony_ci 278362306a36Sopenharmony_ci /* Set the previously extracted timestamp */ 278462306a36Sopenharmony_ci if (ts_valid) { 278562306a36Sopenharmony_ci shhwtstamps = skb_hwtstamps(skb); 278662306a36Sopenharmony_ci memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 278762306a36Sopenharmony_ci shhwtstamps->hwtstamp = ns_to_ktime(ns); 278862306a36Sopenharmony_ci } 278962306a36Sopenharmony_ci 279062306a36Sopenharmony_ci skb->protocol = eth_type_trans(skb, net_dev); 279162306a36Sopenharmony_ci 279262306a36Sopenharmony_ci /* Set the previously extracted hash */ 279362306a36Sopenharmony_ci if (hash_valid) { 279462306a36Sopenharmony_ci enum pkt_hash_types type; 279562306a36Sopenharmony_ci 279662306a36Sopenharmony_ci /* if L4 exists, it was used in the hash generation */ 279762306a36Sopenharmony_ci type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? 279862306a36Sopenharmony_ci PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; 279962306a36Sopenharmony_ci skb_set_hash(skb, hash, type); 280062306a36Sopenharmony_ci } 280162306a36Sopenharmony_ci 280262306a36Sopenharmony_ci skb_len = skb->len; 280362306a36Sopenharmony_ci 280462306a36Sopenharmony_ci if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { 280562306a36Sopenharmony_ci percpu_stats->rx_dropped++; 280662306a36Sopenharmony_ci return qman_cb_dqrr_consume; 280762306a36Sopenharmony_ci } 280862306a36Sopenharmony_ci 280962306a36Sopenharmony_ci percpu_stats->rx_packets++; 281062306a36Sopenharmony_ci percpu_stats->rx_bytes += skb_len; 281162306a36Sopenharmony_ci 281262306a36Sopenharmony_ci return qman_cb_dqrr_consume; 281362306a36Sopenharmony_ci} 281462306a36Sopenharmony_ci 281562306a36Sopenharmony_cistatic enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, 281662306a36Sopenharmony_ci struct qman_fq *fq, 281762306a36Sopenharmony_ci const struct qm_dqrr_entry *dq, 281862306a36Sopenharmony_ci bool sched_napi) 281962306a36Sopenharmony_ci{ 282062306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 282162306a36Sopenharmony_ci struct net_device *net_dev; 282262306a36Sopenharmony_ci struct dpaa_priv *priv; 282362306a36Sopenharmony_ci 282462306a36Sopenharmony_ci net_dev = ((struct dpaa_fq *)fq)->net_dev; 282562306a36Sopenharmony_ci priv = netdev_priv(net_dev); 282662306a36Sopenharmony_ci 282762306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 282862306a36Sopenharmony_ci 282962306a36Sopenharmony_ci if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) 283062306a36Sopenharmony_ci return qman_cb_dqrr_stop; 283162306a36Sopenharmony_ci 283262306a36Sopenharmony_ci dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 283362306a36Sopenharmony_ci 283462306a36Sopenharmony_ci return qman_cb_dqrr_consume; 283562306a36Sopenharmony_ci} 283662306a36Sopenharmony_ci 283762306a36Sopenharmony_cistatic enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, 283862306a36Sopenharmony_ci struct qman_fq *fq, 283962306a36Sopenharmony_ci const struct qm_dqrr_entry *dq, 284062306a36Sopenharmony_ci bool sched_napi) 284162306a36Sopenharmony_ci{ 284262306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 284362306a36Sopenharmony_ci struct net_device *net_dev; 284462306a36Sopenharmony_ci struct dpaa_priv *priv; 284562306a36Sopenharmony_ci 284662306a36Sopenharmony_ci net_dev = ((struct dpaa_fq *)fq)->net_dev; 284762306a36Sopenharmony_ci priv = netdev_priv(net_dev); 284862306a36Sopenharmony_ci 284962306a36Sopenharmony_ci /* Trace the fd */ 285062306a36Sopenharmony_ci trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); 285162306a36Sopenharmony_ci 285262306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 285362306a36Sopenharmony_ci 285462306a36Sopenharmony_ci if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) 285562306a36Sopenharmony_ci return qman_cb_dqrr_stop; 285662306a36Sopenharmony_ci 285762306a36Sopenharmony_ci dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); 285862306a36Sopenharmony_ci 285962306a36Sopenharmony_ci return qman_cb_dqrr_consume; 286062306a36Sopenharmony_ci} 286162306a36Sopenharmony_ci 286262306a36Sopenharmony_cistatic void egress_ern(struct qman_portal *portal, 286362306a36Sopenharmony_ci struct qman_fq *fq, 286462306a36Sopenharmony_ci const union qm_mr_entry *msg) 286562306a36Sopenharmony_ci{ 286662306a36Sopenharmony_ci const struct qm_fd *fd = &msg->ern.fd; 286762306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 286862306a36Sopenharmony_ci const struct dpaa_priv *priv; 286962306a36Sopenharmony_ci struct net_device *net_dev; 287062306a36Sopenharmony_ci struct sk_buff *skb; 287162306a36Sopenharmony_ci 287262306a36Sopenharmony_ci net_dev = ((struct dpaa_fq *)fq)->net_dev; 287362306a36Sopenharmony_ci priv = netdev_priv(net_dev); 287462306a36Sopenharmony_ci percpu_priv = this_cpu_ptr(priv->percpu_priv); 287562306a36Sopenharmony_ci 287662306a36Sopenharmony_ci percpu_priv->stats.tx_dropped++; 287762306a36Sopenharmony_ci percpu_priv->stats.tx_fifo_errors++; 287862306a36Sopenharmony_ci count_ern(percpu_priv, msg); 287962306a36Sopenharmony_ci 288062306a36Sopenharmony_ci skb = dpaa_cleanup_tx_fd(priv, fd, false); 288162306a36Sopenharmony_ci dev_kfree_skb_any(skb); 288262306a36Sopenharmony_ci} 288362306a36Sopenharmony_ci 288462306a36Sopenharmony_cistatic const struct dpaa_fq_cbs dpaa_fq_cbs = { 288562306a36Sopenharmony_ci .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, 288662306a36Sopenharmony_ci .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, 288762306a36Sopenharmony_ci .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, 288862306a36Sopenharmony_ci .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, 288962306a36Sopenharmony_ci .egress_ern = { .cb = { .ern = egress_ern } } 289062306a36Sopenharmony_ci}; 289162306a36Sopenharmony_ci 289262306a36Sopenharmony_cistatic void dpaa_eth_napi_enable(struct dpaa_priv *priv) 289362306a36Sopenharmony_ci{ 289462306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 289562306a36Sopenharmony_ci int i; 289662306a36Sopenharmony_ci 289762306a36Sopenharmony_ci for_each_online_cpu(i) { 289862306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 289962306a36Sopenharmony_ci 290062306a36Sopenharmony_ci percpu_priv->np.down = false; 290162306a36Sopenharmony_ci napi_enable(&percpu_priv->np.napi); 290262306a36Sopenharmony_ci } 290362306a36Sopenharmony_ci} 290462306a36Sopenharmony_ci 290562306a36Sopenharmony_cistatic void dpaa_eth_napi_disable(struct dpaa_priv *priv) 290662306a36Sopenharmony_ci{ 290762306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 290862306a36Sopenharmony_ci int i; 290962306a36Sopenharmony_ci 291062306a36Sopenharmony_ci for_each_online_cpu(i) { 291162306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, i); 291262306a36Sopenharmony_ci 291362306a36Sopenharmony_ci percpu_priv->np.down = true; 291462306a36Sopenharmony_ci napi_disable(&percpu_priv->np.napi); 291562306a36Sopenharmony_ci } 291662306a36Sopenharmony_ci} 291762306a36Sopenharmony_ci 291862306a36Sopenharmony_cistatic int dpaa_open(struct net_device *net_dev) 291962306a36Sopenharmony_ci{ 292062306a36Sopenharmony_ci struct mac_device *mac_dev; 292162306a36Sopenharmony_ci struct dpaa_priv *priv; 292262306a36Sopenharmony_ci int err, i; 292362306a36Sopenharmony_ci 292462306a36Sopenharmony_ci priv = netdev_priv(net_dev); 292562306a36Sopenharmony_ci mac_dev = priv->mac_dev; 292662306a36Sopenharmony_ci dpaa_eth_napi_enable(priv); 292762306a36Sopenharmony_ci 292862306a36Sopenharmony_ci err = phylink_of_phy_connect(mac_dev->phylink, 292962306a36Sopenharmony_ci mac_dev->dev->of_node, 0); 293062306a36Sopenharmony_ci if (err) 293162306a36Sopenharmony_ci goto phy_init_failed; 293262306a36Sopenharmony_ci 293362306a36Sopenharmony_ci for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { 293462306a36Sopenharmony_ci err = fman_port_enable(mac_dev->port[i]); 293562306a36Sopenharmony_ci if (err) 293662306a36Sopenharmony_ci goto mac_start_failed; 293762306a36Sopenharmony_ci } 293862306a36Sopenharmony_ci 293962306a36Sopenharmony_ci err = priv->mac_dev->enable(mac_dev->fman_mac); 294062306a36Sopenharmony_ci if (err < 0) { 294162306a36Sopenharmony_ci netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err); 294262306a36Sopenharmony_ci goto mac_start_failed; 294362306a36Sopenharmony_ci } 294462306a36Sopenharmony_ci phylink_start(mac_dev->phylink); 294562306a36Sopenharmony_ci 294662306a36Sopenharmony_ci netif_tx_start_all_queues(net_dev); 294762306a36Sopenharmony_ci 294862306a36Sopenharmony_ci return 0; 294962306a36Sopenharmony_ci 295062306a36Sopenharmony_cimac_start_failed: 295162306a36Sopenharmony_ci for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) 295262306a36Sopenharmony_ci fman_port_disable(mac_dev->port[i]); 295362306a36Sopenharmony_ci phylink_disconnect_phy(mac_dev->phylink); 295462306a36Sopenharmony_ci 295562306a36Sopenharmony_ciphy_init_failed: 295662306a36Sopenharmony_ci dpaa_eth_napi_disable(priv); 295762306a36Sopenharmony_ci 295862306a36Sopenharmony_ci return err; 295962306a36Sopenharmony_ci} 296062306a36Sopenharmony_ci 296162306a36Sopenharmony_cistatic int dpaa_eth_stop(struct net_device *net_dev) 296262306a36Sopenharmony_ci{ 296362306a36Sopenharmony_ci struct dpaa_priv *priv; 296462306a36Sopenharmony_ci int err; 296562306a36Sopenharmony_ci 296662306a36Sopenharmony_ci err = dpaa_stop(net_dev); 296762306a36Sopenharmony_ci 296862306a36Sopenharmony_ci priv = netdev_priv(net_dev); 296962306a36Sopenharmony_ci dpaa_eth_napi_disable(priv); 297062306a36Sopenharmony_ci 297162306a36Sopenharmony_ci return err; 297262306a36Sopenharmony_ci} 297362306a36Sopenharmony_ci 297462306a36Sopenharmony_cistatic bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu) 297562306a36Sopenharmony_ci{ 297662306a36Sopenharmony_ci int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom; 297762306a36Sopenharmony_ci 297862306a36Sopenharmony_ci /* We do not support S/G fragments when XDP is enabled. 297962306a36Sopenharmony_ci * Limit the MTU in relation to the buffer size. 298062306a36Sopenharmony_ci */ 298162306a36Sopenharmony_ci if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) { 298262306a36Sopenharmony_ci dev_warn(priv->net_dev->dev.parent, 298362306a36Sopenharmony_ci "The maximum MTU for XDP is %d\n", 298462306a36Sopenharmony_ci max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN); 298562306a36Sopenharmony_ci return false; 298662306a36Sopenharmony_ci } 298762306a36Sopenharmony_ci 298862306a36Sopenharmony_ci return true; 298962306a36Sopenharmony_ci} 299062306a36Sopenharmony_ci 299162306a36Sopenharmony_cistatic int dpaa_change_mtu(struct net_device *net_dev, int new_mtu) 299262306a36Sopenharmony_ci{ 299362306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 299462306a36Sopenharmony_ci 299562306a36Sopenharmony_ci if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) 299662306a36Sopenharmony_ci return -EINVAL; 299762306a36Sopenharmony_ci 299862306a36Sopenharmony_ci net_dev->mtu = new_mtu; 299962306a36Sopenharmony_ci return 0; 300062306a36Sopenharmony_ci} 300162306a36Sopenharmony_ci 300262306a36Sopenharmony_cistatic int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf) 300362306a36Sopenharmony_ci{ 300462306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 300562306a36Sopenharmony_ci struct bpf_prog *old_prog; 300662306a36Sopenharmony_ci int err; 300762306a36Sopenharmony_ci bool up; 300862306a36Sopenharmony_ci 300962306a36Sopenharmony_ci /* S/G fragments are not supported in XDP-mode */ 301062306a36Sopenharmony_ci if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) { 301162306a36Sopenharmony_ci NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); 301262306a36Sopenharmony_ci return -EINVAL; 301362306a36Sopenharmony_ci } 301462306a36Sopenharmony_ci 301562306a36Sopenharmony_ci up = netif_running(net_dev); 301662306a36Sopenharmony_ci 301762306a36Sopenharmony_ci if (up) 301862306a36Sopenharmony_ci dpaa_eth_stop(net_dev); 301962306a36Sopenharmony_ci 302062306a36Sopenharmony_ci old_prog = xchg(&priv->xdp_prog, bpf->prog); 302162306a36Sopenharmony_ci if (old_prog) 302262306a36Sopenharmony_ci bpf_prog_put(old_prog); 302362306a36Sopenharmony_ci 302462306a36Sopenharmony_ci if (up) { 302562306a36Sopenharmony_ci err = dpaa_open(net_dev); 302662306a36Sopenharmony_ci if (err) { 302762306a36Sopenharmony_ci NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed"); 302862306a36Sopenharmony_ci return err; 302962306a36Sopenharmony_ci } 303062306a36Sopenharmony_ci } 303162306a36Sopenharmony_ci 303262306a36Sopenharmony_ci return 0; 303362306a36Sopenharmony_ci} 303462306a36Sopenharmony_ci 303562306a36Sopenharmony_cistatic int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp) 303662306a36Sopenharmony_ci{ 303762306a36Sopenharmony_ci switch (xdp->command) { 303862306a36Sopenharmony_ci case XDP_SETUP_PROG: 303962306a36Sopenharmony_ci return dpaa_setup_xdp(net_dev, xdp); 304062306a36Sopenharmony_ci default: 304162306a36Sopenharmony_ci return -EINVAL; 304262306a36Sopenharmony_ci } 304362306a36Sopenharmony_ci} 304462306a36Sopenharmony_ci 304562306a36Sopenharmony_cistatic int dpaa_xdp_xmit(struct net_device *net_dev, int n, 304662306a36Sopenharmony_ci struct xdp_frame **frames, u32 flags) 304762306a36Sopenharmony_ci{ 304862306a36Sopenharmony_ci struct xdp_frame *xdpf; 304962306a36Sopenharmony_ci int i, nxmit = 0; 305062306a36Sopenharmony_ci 305162306a36Sopenharmony_ci if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 305262306a36Sopenharmony_ci return -EINVAL; 305362306a36Sopenharmony_ci 305462306a36Sopenharmony_ci if (!netif_running(net_dev)) 305562306a36Sopenharmony_ci return -ENETDOWN; 305662306a36Sopenharmony_ci 305762306a36Sopenharmony_ci for (i = 0; i < n; i++) { 305862306a36Sopenharmony_ci xdpf = frames[i]; 305962306a36Sopenharmony_ci if (dpaa_xdp_xmit_frame(net_dev, xdpf)) 306062306a36Sopenharmony_ci break; 306162306a36Sopenharmony_ci nxmit++; 306262306a36Sopenharmony_ci } 306362306a36Sopenharmony_ci 306462306a36Sopenharmony_ci return nxmit; 306562306a36Sopenharmony_ci} 306662306a36Sopenharmony_ci 306762306a36Sopenharmony_cistatic int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 306862306a36Sopenharmony_ci{ 306962306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(dev); 307062306a36Sopenharmony_ci struct hwtstamp_config config; 307162306a36Sopenharmony_ci 307262306a36Sopenharmony_ci if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 307362306a36Sopenharmony_ci return -EFAULT; 307462306a36Sopenharmony_ci 307562306a36Sopenharmony_ci switch (config.tx_type) { 307662306a36Sopenharmony_ci case HWTSTAMP_TX_OFF: 307762306a36Sopenharmony_ci /* Couldn't disable rx/tx timestamping separately. 307862306a36Sopenharmony_ci * Do nothing here. 307962306a36Sopenharmony_ci */ 308062306a36Sopenharmony_ci priv->tx_tstamp = false; 308162306a36Sopenharmony_ci break; 308262306a36Sopenharmony_ci case HWTSTAMP_TX_ON: 308362306a36Sopenharmony_ci priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); 308462306a36Sopenharmony_ci priv->tx_tstamp = true; 308562306a36Sopenharmony_ci break; 308662306a36Sopenharmony_ci default: 308762306a36Sopenharmony_ci return -ERANGE; 308862306a36Sopenharmony_ci } 308962306a36Sopenharmony_ci 309062306a36Sopenharmony_ci if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 309162306a36Sopenharmony_ci /* Couldn't disable rx/tx timestamping separately. 309262306a36Sopenharmony_ci * Do nothing here. 309362306a36Sopenharmony_ci */ 309462306a36Sopenharmony_ci priv->rx_tstamp = false; 309562306a36Sopenharmony_ci } else { 309662306a36Sopenharmony_ci priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); 309762306a36Sopenharmony_ci priv->rx_tstamp = true; 309862306a36Sopenharmony_ci /* TS is set for all frame types, not only those requested */ 309962306a36Sopenharmony_ci config.rx_filter = HWTSTAMP_FILTER_ALL; 310062306a36Sopenharmony_ci } 310162306a36Sopenharmony_ci 310262306a36Sopenharmony_ci return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 310362306a36Sopenharmony_ci -EFAULT : 0; 310462306a36Sopenharmony_ci} 310562306a36Sopenharmony_ci 310662306a36Sopenharmony_cistatic int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) 310762306a36Sopenharmony_ci{ 310862306a36Sopenharmony_ci int ret = -EINVAL; 310962306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 311062306a36Sopenharmony_ci 311162306a36Sopenharmony_ci if (cmd == SIOCGMIIREG) { 311262306a36Sopenharmony_ci if (net_dev->phydev) 311362306a36Sopenharmony_ci return phylink_mii_ioctl(priv->mac_dev->phylink, rq, 311462306a36Sopenharmony_ci cmd); 311562306a36Sopenharmony_ci } 311662306a36Sopenharmony_ci 311762306a36Sopenharmony_ci if (cmd == SIOCSHWTSTAMP) 311862306a36Sopenharmony_ci return dpaa_ts_ioctl(net_dev, rq, cmd); 311962306a36Sopenharmony_ci 312062306a36Sopenharmony_ci return ret; 312162306a36Sopenharmony_ci} 312262306a36Sopenharmony_ci 312362306a36Sopenharmony_cistatic const struct net_device_ops dpaa_ops = { 312462306a36Sopenharmony_ci .ndo_open = dpaa_open, 312562306a36Sopenharmony_ci .ndo_start_xmit = dpaa_start_xmit, 312662306a36Sopenharmony_ci .ndo_stop = dpaa_eth_stop, 312762306a36Sopenharmony_ci .ndo_tx_timeout = dpaa_tx_timeout, 312862306a36Sopenharmony_ci .ndo_get_stats64 = dpaa_get_stats64, 312962306a36Sopenharmony_ci .ndo_change_carrier = fixed_phy_change_carrier, 313062306a36Sopenharmony_ci .ndo_set_mac_address = dpaa_set_mac_address, 313162306a36Sopenharmony_ci .ndo_validate_addr = eth_validate_addr, 313262306a36Sopenharmony_ci .ndo_set_rx_mode = dpaa_set_rx_mode, 313362306a36Sopenharmony_ci .ndo_eth_ioctl = dpaa_ioctl, 313462306a36Sopenharmony_ci .ndo_setup_tc = dpaa_setup_tc, 313562306a36Sopenharmony_ci .ndo_change_mtu = dpaa_change_mtu, 313662306a36Sopenharmony_ci .ndo_bpf = dpaa_xdp, 313762306a36Sopenharmony_ci .ndo_xdp_xmit = dpaa_xdp_xmit, 313862306a36Sopenharmony_ci}; 313962306a36Sopenharmony_ci 314062306a36Sopenharmony_cistatic int dpaa_napi_add(struct net_device *net_dev) 314162306a36Sopenharmony_ci{ 314262306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 314362306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 314462306a36Sopenharmony_ci int cpu; 314562306a36Sopenharmony_ci 314662306a36Sopenharmony_ci for_each_possible_cpu(cpu) { 314762306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); 314862306a36Sopenharmony_ci 314962306a36Sopenharmony_ci netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll); 315062306a36Sopenharmony_ci } 315162306a36Sopenharmony_ci 315262306a36Sopenharmony_ci return 0; 315362306a36Sopenharmony_ci} 315462306a36Sopenharmony_ci 315562306a36Sopenharmony_cistatic void dpaa_napi_del(struct net_device *net_dev) 315662306a36Sopenharmony_ci{ 315762306a36Sopenharmony_ci struct dpaa_priv *priv = netdev_priv(net_dev); 315862306a36Sopenharmony_ci struct dpaa_percpu_priv *percpu_priv; 315962306a36Sopenharmony_ci int cpu; 316062306a36Sopenharmony_ci 316162306a36Sopenharmony_ci for_each_possible_cpu(cpu) { 316262306a36Sopenharmony_ci percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); 316362306a36Sopenharmony_ci 316462306a36Sopenharmony_ci netif_napi_del(&percpu_priv->np.napi); 316562306a36Sopenharmony_ci } 316662306a36Sopenharmony_ci} 316762306a36Sopenharmony_ci 316862306a36Sopenharmony_cistatic inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, 316962306a36Sopenharmony_ci struct bm_buffer *bmb) 317062306a36Sopenharmony_ci{ 317162306a36Sopenharmony_ci dma_addr_t addr = bm_buf_addr(bmb); 317262306a36Sopenharmony_ci 317362306a36Sopenharmony_ci dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, 317462306a36Sopenharmony_ci DMA_FROM_DEVICE); 317562306a36Sopenharmony_ci 317662306a36Sopenharmony_ci skb_free_frag(phys_to_virt(addr)); 317762306a36Sopenharmony_ci} 317862306a36Sopenharmony_ci 317962306a36Sopenharmony_ci/* Alloc the dpaa_bp struct and configure default values */ 318062306a36Sopenharmony_cistatic struct dpaa_bp *dpaa_bp_alloc(struct device *dev) 318162306a36Sopenharmony_ci{ 318262306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp; 318362306a36Sopenharmony_ci 318462306a36Sopenharmony_ci dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); 318562306a36Sopenharmony_ci if (!dpaa_bp) 318662306a36Sopenharmony_ci return ERR_PTR(-ENOMEM); 318762306a36Sopenharmony_ci 318862306a36Sopenharmony_ci dpaa_bp->bpid = FSL_DPAA_BPID_INV; 318962306a36Sopenharmony_ci dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); 319062306a36Sopenharmony_ci if (!dpaa_bp->percpu_count) 319162306a36Sopenharmony_ci return ERR_PTR(-ENOMEM); 319262306a36Sopenharmony_ci 319362306a36Sopenharmony_ci dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; 319462306a36Sopenharmony_ci 319562306a36Sopenharmony_ci dpaa_bp->seed_cb = dpaa_bp_seed; 319662306a36Sopenharmony_ci dpaa_bp->free_buf_cb = dpaa_bp_free_pf; 319762306a36Sopenharmony_ci 319862306a36Sopenharmony_ci return dpaa_bp; 319962306a36Sopenharmony_ci} 320062306a36Sopenharmony_ci 320162306a36Sopenharmony_ci/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. 320262306a36Sopenharmony_ci * We won't be sending congestion notifications to FMan; for now, we just use 320362306a36Sopenharmony_ci * this CGR to generate enqueue rejections to FMan in order to drop the frames 320462306a36Sopenharmony_ci * before they reach our ingress queues and eat up memory. 320562306a36Sopenharmony_ci */ 320662306a36Sopenharmony_cistatic int dpaa_ingress_cgr_init(struct dpaa_priv *priv) 320762306a36Sopenharmony_ci{ 320862306a36Sopenharmony_ci struct qm_mcc_initcgr initcgr; 320962306a36Sopenharmony_ci u32 cs_th; 321062306a36Sopenharmony_ci int err; 321162306a36Sopenharmony_ci 321262306a36Sopenharmony_ci err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); 321362306a36Sopenharmony_ci if (err < 0) { 321462306a36Sopenharmony_ci if (netif_msg_drv(priv)) 321562306a36Sopenharmony_ci pr_err("Error %d allocating CGR ID\n", err); 321662306a36Sopenharmony_ci goto out_error; 321762306a36Sopenharmony_ci } 321862306a36Sopenharmony_ci 321962306a36Sopenharmony_ci /* Enable CS TD, but disable Congestion State Change Notifications. */ 322062306a36Sopenharmony_ci memset(&initcgr, 0, sizeof(initcgr)); 322162306a36Sopenharmony_ci initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); 322262306a36Sopenharmony_ci initcgr.cgr.cscn_en = QM_CGR_EN; 322362306a36Sopenharmony_ci cs_th = DPAA_INGRESS_CS_THRESHOLD; 322462306a36Sopenharmony_ci qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); 322562306a36Sopenharmony_ci 322662306a36Sopenharmony_ci initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); 322762306a36Sopenharmony_ci initcgr.cgr.cstd_en = QM_CGR_EN; 322862306a36Sopenharmony_ci 322962306a36Sopenharmony_ci /* This CGR will be associated with the SWP affined to the current CPU. 323062306a36Sopenharmony_ci * However, we'll place all our ingress FQs in it. 323162306a36Sopenharmony_ci */ 323262306a36Sopenharmony_ci err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, 323362306a36Sopenharmony_ci &initcgr); 323462306a36Sopenharmony_ci if (err < 0) { 323562306a36Sopenharmony_ci if (netif_msg_drv(priv)) 323662306a36Sopenharmony_ci pr_err("Error %d creating ingress CGR with ID %d\n", 323762306a36Sopenharmony_ci err, priv->ingress_cgr.cgrid); 323862306a36Sopenharmony_ci qman_release_cgrid(priv->ingress_cgr.cgrid); 323962306a36Sopenharmony_ci goto out_error; 324062306a36Sopenharmony_ci } 324162306a36Sopenharmony_ci if (netif_msg_drv(priv)) 324262306a36Sopenharmony_ci pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", 324362306a36Sopenharmony_ci priv->ingress_cgr.cgrid, priv->mac_dev->addr); 324462306a36Sopenharmony_ci 324562306a36Sopenharmony_ci priv->use_ingress_cgr = true; 324662306a36Sopenharmony_ci 324762306a36Sopenharmony_ciout_error: 324862306a36Sopenharmony_ci return err; 324962306a36Sopenharmony_ci} 325062306a36Sopenharmony_ci 325162306a36Sopenharmony_cistatic u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl, 325262306a36Sopenharmony_ci enum port_type port) 325362306a36Sopenharmony_ci{ 325462306a36Sopenharmony_ci u16 headroom; 325562306a36Sopenharmony_ci 325662306a36Sopenharmony_ci /* The frame headroom must accommodate: 325762306a36Sopenharmony_ci * - the driver private data area 325862306a36Sopenharmony_ci * - parse results, hash results, timestamp if selected 325962306a36Sopenharmony_ci * If either hash results or time stamp are selected, both will 326062306a36Sopenharmony_ci * be copied to/from the frame headroom, as TS is located between PR and 326162306a36Sopenharmony_ci * HR in the IC and IC copy size has a granularity of 16bytes 326262306a36Sopenharmony_ci * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) 326362306a36Sopenharmony_ci * 326462306a36Sopenharmony_ci * Also make sure the headroom is a multiple of data_align bytes 326562306a36Sopenharmony_ci */ 326662306a36Sopenharmony_ci headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE); 326762306a36Sopenharmony_ci 326862306a36Sopenharmony_ci if (port == RX) { 326962306a36Sopenharmony_ci#ifdef CONFIG_DPAA_ERRATUM_A050385 327062306a36Sopenharmony_ci if (unlikely(fman_has_errata_a050385())) 327162306a36Sopenharmony_ci headroom = XDP_PACKET_HEADROOM; 327262306a36Sopenharmony_ci#endif 327362306a36Sopenharmony_ci 327462306a36Sopenharmony_ci return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT); 327562306a36Sopenharmony_ci } else { 327662306a36Sopenharmony_ci return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); 327762306a36Sopenharmony_ci } 327862306a36Sopenharmony_ci} 327962306a36Sopenharmony_ci 328062306a36Sopenharmony_cistatic int dpaa_eth_probe(struct platform_device *pdev) 328162306a36Sopenharmony_ci{ 328262306a36Sopenharmony_ci struct net_device *net_dev = NULL; 328362306a36Sopenharmony_ci struct dpaa_bp *dpaa_bp = NULL; 328462306a36Sopenharmony_ci struct dpaa_fq *dpaa_fq, *tmp; 328562306a36Sopenharmony_ci struct dpaa_priv *priv = NULL; 328662306a36Sopenharmony_ci struct fm_port_fqs port_fqs; 328762306a36Sopenharmony_ci struct mac_device *mac_dev; 328862306a36Sopenharmony_ci int err = 0, channel; 328962306a36Sopenharmony_ci struct device *dev; 329062306a36Sopenharmony_ci 329162306a36Sopenharmony_ci dev = &pdev->dev; 329262306a36Sopenharmony_ci 329362306a36Sopenharmony_ci err = bman_is_probed(); 329462306a36Sopenharmony_ci if (!err) 329562306a36Sopenharmony_ci return -EPROBE_DEFER; 329662306a36Sopenharmony_ci if (err < 0) { 329762306a36Sopenharmony_ci dev_err(dev, "failing probe due to bman probe error\n"); 329862306a36Sopenharmony_ci return -ENODEV; 329962306a36Sopenharmony_ci } 330062306a36Sopenharmony_ci err = qman_is_probed(); 330162306a36Sopenharmony_ci if (!err) 330262306a36Sopenharmony_ci return -EPROBE_DEFER; 330362306a36Sopenharmony_ci if (err < 0) { 330462306a36Sopenharmony_ci dev_err(dev, "failing probe due to qman probe error\n"); 330562306a36Sopenharmony_ci return -ENODEV; 330662306a36Sopenharmony_ci } 330762306a36Sopenharmony_ci err = bman_portals_probed(); 330862306a36Sopenharmony_ci if (!err) 330962306a36Sopenharmony_ci return -EPROBE_DEFER; 331062306a36Sopenharmony_ci if (err < 0) { 331162306a36Sopenharmony_ci dev_err(dev, 331262306a36Sopenharmony_ci "failing probe due to bman portals probe error\n"); 331362306a36Sopenharmony_ci return -ENODEV; 331462306a36Sopenharmony_ci } 331562306a36Sopenharmony_ci err = qman_portals_probed(); 331662306a36Sopenharmony_ci if (!err) 331762306a36Sopenharmony_ci return -EPROBE_DEFER; 331862306a36Sopenharmony_ci if (err < 0) { 331962306a36Sopenharmony_ci dev_err(dev, 332062306a36Sopenharmony_ci "failing probe due to qman portals probe error\n"); 332162306a36Sopenharmony_ci return -ENODEV; 332262306a36Sopenharmony_ci } 332362306a36Sopenharmony_ci 332462306a36Sopenharmony_ci /* Allocate this early, so we can store relevant information in 332562306a36Sopenharmony_ci * the private area 332662306a36Sopenharmony_ci */ 332762306a36Sopenharmony_ci net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); 332862306a36Sopenharmony_ci if (!net_dev) { 332962306a36Sopenharmony_ci dev_err(dev, "alloc_etherdev_mq() failed\n"); 333062306a36Sopenharmony_ci return -ENOMEM; 333162306a36Sopenharmony_ci } 333262306a36Sopenharmony_ci 333362306a36Sopenharmony_ci /* Do this here, so we can be verbose early */ 333462306a36Sopenharmony_ci SET_NETDEV_DEV(net_dev, dev->parent); 333562306a36Sopenharmony_ci dev_set_drvdata(dev, net_dev); 333662306a36Sopenharmony_ci 333762306a36Sopenharmony_ci priv = netdev_priv(net_dev); 333862306a36Sopenharmony_ci priv->net_dev = net_dev; 333962306a36Sopenharmony_ci 334062306a36Sopenharmony_ci priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); 334162306a36Sopenharmony_ci 334262306a36Sopenharmony_ci mac_dev = dpaa_mac_dev_get(pdev); 334362306a36Sopenharmony_ci if (IS_ERR(mac_dev)) { 334462306a36Sopenharmony_ci netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); 334562306a36Sopenharmony_ci err = PTR_ERR(mac_dev); 334662306a36Sopenharmony_ci goto free_netdev; 334762306a36Sopenharmony_ci } 334862306a36Sopenharmony_ci 334962306a36Sopenharmony_ci /* Devices used for DMA mapping */ 335062306a36Sopenharmony_ci priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]); 335162306a36Sopenharmony_ci priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]); 335262306a36Sopenharmony_ci err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40)); 335362306a36Sopenharmony_ci if (!err) 335462306a36Sopenharmony_ci err = dma_coerce_mask_and_coherent(priv->tx_dma_dev, 335562306a36Sopenharmony_ci DMA_BIT_MASK(40)); 335662306a36Sopenharmony_ci if (err) { 335762306a36Sopenharmony_ci netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); 335862306a36Sopenharmony_ci goto free_netdev; 335962306a36Sopenharmony_ci } 336062306a36Sopenharmony_ci 336162306a36Sopenharmony_ci /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, 336262306a36Sopenharmony_ci * we choose conservatively and let the user explicitly set a higher 336362306a36Sopenharmony_ci * MTU via ifconfig. Otherwise, the user may end up with different MTUs 336462306a36Sopenharmony_ci * in the same LAN. 336562306a36Sopenharmony_ci * If on the other hand fsl_fm_max_frm has been chosen below 1500, 336662306a36Sopenharmony_ci * start with the maximum allowed. 336762306a36Sopenharmony_ci */ 336862306a36Sopenharmony_ci net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); 336962306a36Sopenharmony_ci 337062306a36Sopenharmony_ci netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", 337162306a36Sopenharmony_ci net_dev->mtu); 337262306a36Sopenharmony_ci 337362306a36Sopenharmony_ci priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ 337462306a36Sopenharmony_ci priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ 337562306a36Sopenharmony_ci 337662306a36Sopenharmony_ci /* bp init */ 337762306a36Sopenharmony_ci dpaa_bp = dpaa_bp_alloc(dev); 337862306a36Sopenharmony_ci if (IS_ERR(dpaa_bp)) { 337962306a36Sopenharmony_ci err = PTR_ERR(dpaa_bp); 338062306a36Sopenharmony_ci goto free_dpaa_bps; 338162306a36Sopenharmony_ci } 338262306a36Sopenharmony_ci /* the raw size of the buffers used for reception */ 338362306a36Sopenharmony_ci dpaa_bp->raw_size = DPAA_BP_RAW_SIZE; 338462306a36Sopenharmony_ci /* avoid runtime computations by keeping the usable size here */ 338562306a36Sopenharmony_ci dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size); 338662306a36Sopenharmony_ci dpaa_bp->priv = priv; 338762306a36Sopenharmony_ci 338862306a36Sopenharmony_ci err = dpaa_bp_alloc_pool(dpaa_bp); 338962306a36Sopenharmony_ci if (err < 0) 339062306a36Sopenharmony_ci goto free_dpaa_bps; 339162306a36Sopenharmony_ci priv->dpaa_bp = dpaa_bp; 339262306a36Sopenharmony_ci 339362306a36Sopenharmony_ci INIT_LIST_HEAD(&priv->dpaa_fq_list); 339462306a36Sopenharmony_ci 339562306a36Sopenharmony_ci memset(&port_fqs, 0, sizeof(port_fqs)); 339662306a36Sopenharmony_ci 339762306a36Sopenharmony_ci err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); 339862306a36Sopenharmony_ci if (err < 0) { 339962306a36Sopenharmony_ci dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); 340062306a36Sopenharmony_ci goto free_dpaa_bps; 340162306a36Sopenharmony_ci } 340262306a36Sopenharmony_ci 340362306a36Sopenharmony_ci priv->mac_dev = mac_dev; 340462306a36Sopenharmony_ci 340562306a36Sopenharmony_ci channel = dpaa_get_channel(); 340662306a36Sopenharmony_ci if (channel < 0) { 340762306a36Sopenharmony_ci dev_err(dev, "dpaa_get_channel() failed\n"); 340862306a36Sopenharmony_ci err = channel; 340962306a36Sopenharmony_ci goto free_dpaa_bps; 341062306a36Sopenharmony_ci } 341162306a36Sopenharmony_ci 341262306a36Sopenharmony_ci priv->channel = (u16)channel; 341362306a36Sopenharmony_ci 341462306a36Sopenharmony_ci /* Walk the CPUs with affine portals 341562306a36Sopenharmony_ci * and add this pool channel to each's dequeue mask. 341662306a36Sopenharmony_ci */ 341762306a36Sopenharmony_ci dpaa_eth_add_channel(priv->channel, &pdev->dev); 341862306a36Sopenharmony_ci 341962306a36Sopenharmony_ci dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); 342062306a36Sopenharmony_ci 342162306a36Sopenharmony_ci /* Create a congestion group for this netdev, with 342262306a36Sopenharmony_ci * dynamically-allocated CGR ID. 342362306a36Sopenharmony_ci * Must be executed after probing the MAC, but before 342462306a36Sopenharmony_ci * assigning the egress FQs to the CGRs. 342562306a36Sopenharmony_ci */ 342662306a36Sopenharmony_ci err = dpaa_eth_cgr_init(priv); 342762306a36Sopenharmony_ci if (err < 0) { 342862306a36Sopenharmony_ci dev_err(dev, "Error initializing CGR\n"); 342962306a36Sopenharmony_ci goto free_dpaa_bps; 343062306a36Sopenharmony_ci } 343162306a36Sopenharmony_ci 343262306a36Sopenharmony_ci err = dpaa_ingress_cgr_init(priv); 343362306a36Sopenharmony_ci if (err < 0) { 343462306a36Sopenharmony_ci dev_err(dev, "Error initializing ingress CGR\n"); 343562306a36Sopenharmony_ci goto delete_egress_cgr; 343662306a36Sopenharmony_ci } 343762306a36Sopenharmony_ci 343862306a36Sopenharmony_ci /* Add the FQs to the interface, and make them active */ 343962306a36Sopenharmony_ci list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { 344062306a36Sopenharmony_ci err = dpaa_fq_init(dpaa_fq, false); 344162306a36Sopenharmony_ci if (err < 0) 344262306a36Sopenharmony_ci goto free_dpaa_fqs; 344362306a36Sopenharmony_ci } 344462306a36Sopenharmony_ci 344562306a36Sopenharmony_ci priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX); 344662306a36Sopenharmony_ci priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX); 344762306a36Sopenharmony_ci 344862306a36Sopenharmony_ci /* All real interfaces need their ports initialized */ 344962306a36Sopenharmony_ci err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs, 345062306a36Sopenharmony_ci &priv->buf_layout[0], dev); 345162306a36Sopenharmony_ci if (err) 345262306a36Sopenharmony_ci goto free_dpaa_fqs; 345362306a36Sopenharmony_ci 345462306a36Sopenharmony_ci /* Rx traffic distribution based on keygen hashing defaults to on */ 345562306a36Sopenharmony_ci priv->keygen_in_use = true; 345662306a36Sopenharmony_ci 345762306a36Sopenharmony_ci priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); 345862306a36Sopenharmony_ci if (!priv->percpu_priv) { 345962306a36Sopenharmony_ci dev_err(dev, "devm_alloc_percpu() failed\n"); 346062306a36Sopenharmony_ci err = -ENOMEM; 346162306a36Sopenharmony_ci goto free_dpaa_fqs; 346262306a36Sopenharmony_ci } 346362306a36Sopenharmony_ci 346462306a36Sopenharmony_ci priv->num_tc = 1; 346562306a36Sopenharmony_ci netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); 346662306a36Sopenharmony_ci 346762306a36Sopenharmony_ci /* Initialize NAPI */ 346862306a36Sopenharmony_ci err = dpaa_napi_add(net_dev); 346962306a36Sopenharmony_ci if (err < 0) 347062306a36Sopenharmony_ci goto delete_dpaa_napi; 347162306a36Sopenharmony_ci 347262306a36Sopenharmony_ci err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); 347362306a36Sopenharmony_ci if (err < 0) 347462306a36Sopenharmony_ci goto delete_dpaa_napi; 347562306a36Sopenharmony_ci 347662306a36Sopenharmony_ci dpaa_eth_sysfs_init(&net_dev->dev); 347762306a36Sopenharmony_ci 347862306a36Sopenharmony_ci netif_info(priv, probe, net_dev, "Probed interface %s\n", 347962306a36Sopenharmony_ci net_dev->name); 348062306a36Sopenharmony_ci 348162306a36Sopenharmony_ci return 0; 348262306a36Sopenharmony_ci 348362306a36Sopenharmony_cidelete_dpaa_napi: 348462306a36Sopenharmony_ci dpaa_napi_del(net_dev); 348562306a36Sopenharmony_cifree_dpaa_fqs: 348662306a36Sopenharmony_ci dpaa_fq_free(dev, &priv->dpaa_fq_list); 348762306a36Sopenharmony_ci qman_delete_cgr_safe(&priv->ingress_cgr); 348862306a36Sopenharmony_ci qman_release_cgrid(priv->ingress_cgr.cgrid); 348962306a36Sopenharmony_cidelete_egress_cgr: 349062306a36Sopenharmony_ci qman_delete_cgr_safe(&priv->cgr_data.cgr); 349162306a36Sopenharmony_ci qman_release_cgrid(priv->cgr_data.cgr.cgrid); 349262306a36Sopenharmony_cifree_dpaa_bps: 349362306a36Sopenharmony_ci dpaa_bps_free(priv); 349462306a36Sopenharmony_cifree_netdev: 349562306a36Sopenharmony_ci dev_set_drvdata(dev, NULL); 349662306a36Sopenharmony_ci free_netdev(net_dev); 349762306a36Sopenharmony_ci 349862306a36Sopenharmony_ci return err; 349962306a36Sopenharmony_ci} 350062306a36Sopenharmony_ci 350162306a36Sopenharmony_cistatic void dpaa_remove(struct platform_device *pdev) 350262306a36Sopenharmony_ci{ 350362306a36Sopenharmony_ci struct net_device *net_dev; 350462306a36Sopenharmony_ci struct dpaa_priv *priv; 350562306a36Sopenharmony_ci struct device *dev; 350662306a36Sopenharmony_ci int err; 350762306a36Sopenharmony_ci 350862306a36Sopenharmony_ci dev = &pdev->dev; 350962306a36Sopenharmony_ci net_dev = dev_get_drvdata(dev); 351062306a36Sopenharmony_ci 351162306a36Sopenharmony_ci priv = netdev_priv(net_dev); 351262306a36Sopenharmony_ci 351362306a36Sopenharmony_ci dpaa_eth_sysfs_remove(dev); 351462306a36Sopenharmony_ci 351562306a36Sopenharmony_ci dev_set_drvdata(dev, NULL); 351662306a36Sopenharmony_ci unregister_netdev(net_dev); 351762306a36Sopenharmony_ci phylink_destroy(priv->mac_dev->phylink); 351862306a36Sopenharmony_ci 351962306a36Sopenharmony_ci err = dpaa_fq_free(dev, &priv->dpaa_fq_list); 352062306a36Sopenharmony_ci if (err) 352162306a36Sopenharmony_ci dev_err(dev, "Failed to free FQs on remove (%pE)\n", 352262306a36Sopenharmony_ci ERR_PTR(err)); 352362306a36Sopenharmony_ci 352462306a36Sopenharmony_ci qman_delete_cgr_safe(&priv->ingress_cgr); 352562306a36Sopenharmony_ci qman_release_cgrid(priv->ingress_cgr.cgrid); 352662306a36Sopenharmony_ci qman_delete_cgr_safe(&priv->cgr_data.cgr); 352762306a36Sopenharmony_ci qman_release_cgrid(priv->cgr_data.cgr.cgrid); 352862306a36Sopenharmony_ci 352962306a36Sopenharmony_ci dpaa_napi_del(net_dev); 353062306a36Sopenharmony_ci 353162306a36Sopenharmony_ci dpaa_bps_free(priv); 353262306a36Sopenharmony_ci 353362306a36Sopenharmony_ci free_netdev(net_dev); 353462306a36Sopenharmony_ci} 353562306a36Sopenharmony_ci 353662306a36Sopenharmony_cistatic const struct platform_device_id dpaa_devtype[] = { 353762306a36Sopenharmony_ci { 353862306a36Sopenharmony_ci .name = "dpaa-ethernet", 353962306a36Sopenharmony_ci .driver_data = 0, 354062306a36Sopenharmony_ci }, { 354162306a36Sopenharmony_ci } 354262306a36Sopenharmony_ci}; 354362306a36Sopenharmony_ciMODULE_DEVICE_TABLE(platform, dpaa_devtype); 354462306a36Sopenharmony_ci 354562306a36Sopenharmony_cistatic struct platform_driver dpaa_driver = { 354662306a36Sopenharmony_ci .driver = { 354762306a36Sopenharmony_ci .name = KBUILD_MODNAME, 354862306a36Sopenharmony_ci }, 354962306a36Sopenharmony_ci .id_table = dpaa_devtype, 355062306a36Sopenharmony_ci .probe = dpaa_eth_probe, 355162306a36Sopenharmony_ci .remove_new = dpaa_remove 355262306a36Sopenharmony_ci}; 355362306a36Sopenharmony_ci 355462306a36Sopenharmony_cistatic int __init dpaa_load(void) 355562306a36Sopenharmony_ci{ 355662306a36Sopenharmony_ci int err; 355762306a36Sopenharmony_ci 355862306a36Sopenharmony_ci pr_debug("FSL DPAA Ethernet driver\n"); 355962306a36Sopenharmony_ci 356062306a36Sopenharmony_ci /* initialize dpaa_eth mirror values */ 356162306a36Sopenharmony_ci dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); 356262306a36Sopenharmony_ci dpaa_max_frm = fman_get_max_frm(); 356362306a36Sopenharmony_ci 356462306a36Sopenharmony_ci err = platform_driver_register(&dpaa_driver); 356562306a36Sopenharmony_ci if (err < 0) 356662306a36Sopenharmony_ci pr_err("Error, platform_driver_register() = %d\n", err); 356762306a36Sopenharmony_ci 356862306a36Sopenharmony_ci return err; 356962306a36Sopenharmony_ci} 357062306a36Sopenharmony_cimodule_init(dpaa_load); 357162306a36Sopenharmony_ci 357262306a36Sopenharmony_cistatic void __exit dpaa_unload(void) 357362306a36Sopenharmony_ci{ 357462306a36Sopenharmony_ci platform_driver_unregister(&dpaa_driver); 357562306a36Sopenharmony_ci 357662306a36Sopenharmony_ci /* Only one channel is used and needs to be released after all 357762306a36Sopenharmony_ci * interfaces are removed 357862306a36Sopenharmony_ci */ 357962306a36Sopenharmony_ci dpaa_release_channel(); 358062306a36Sopenharmony_ci} 358162306a36Sopenharmony_cimodule_exit(dpaa_unload); 358262306a36Sopenharmony_ci 358362306a36Sopenharmony_ciMODULE_LICENSE("Dual BSD/GPL"); 358462306a36Sopenharmony_ciMODULE_DESCRIPTION("FSL DPAA Ethernet driver"); 3585