Lines Matching defs:dev

78 struct device *dev = priv->net_dev->dev.parent;
88 dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
93 dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
100 dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
143 struct device *dev = priv->net_dev->dev.parent;
164 dma_unmap_page(dev, addr, priv->rx_buf_size,
203 struct device *dev = priv->net_dev->dev.parent;
222 dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
284 struct device *dev = priv->net_dev->dev.parent;
294 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
468 dma_unmap_page(priv->net_dev->dev.parent, addr,
478 addr = dma_map_page(priv->net_dev->dev.parent,
481 if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
589 struct device *dev = priv->net_dev->dev.parent;
598 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
617 dma_unmap_page(dev, addr, priv->rx_buf_size,
626 dma_unmap_page(dev, addr, priv->rx_buf_size,
660 struct device *dev = priv->net_dev->dev.parent;
671 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
677 dma_unmap_page(dev, addr, priv->rx_buf_size,
681 dma_unmap_page(dev, addr, priv->rx_buf_size,
903 struct device *dev = priv->net_dev->dev.parent;
933 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
977 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
978 if (unlikely(dma_mapping_error(dev, addr))) {
994 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
1011 struct device *dev = priv->net_dev->dev.parent;
1026 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
1027 if (unlikely(dma_mapping_error(dev, addr))) {
1045 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1046 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1061 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1074 struct device *dev = priv->net_dev->dev.parent;
1096 addr = dma_map_single(dev, buffer_start,
1099 if (unlikely(dma_mapping_error(dev, addr)))
1124 struct device *dev = priv->net_dev->dev.parent;
1146 dma_unmap_single(dev, fd_addr,
1151 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1159 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1164 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1173 dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
1178 dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1184 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1191 dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
1197 dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1203 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1259 struct device *dev = priv->net_dev->dev.parent;
1300 tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1301 if (dma_mapping_error(dev, tso_hdr_dma)) {
1321 addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1322 if (dma_mapping_error(dev, addr)) {
1348 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1349 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1378 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1382 dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1680 struct device *dev = priv->net_dev->dev.parent;
1701 addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1703 if (unlikely(dma_mapping_error(dev, addr)))
1731 if (unlikely(dma_mapping_error(dev, addr)))
1814 channel->bp->dev->obj_desc.id,
2343 struct device *dev = net_dev->dev.parent;
2348 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2355 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2588 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2590 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2624 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2626 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2630 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2685 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2687 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2701 dev->mtu = new_mtu;
2731 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2733 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2739 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2745 up = netif_running(dev);
2749 dev_close(dev);
2757 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2777 err = dev_open(dev, NULL);
2788 dev_open(dev, NULL);
2793 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2797 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2799 return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
2811 struct device *dev = net_dev->dev.parent;
2840 addr = dma_map_single(dev, buffer_start,
2843 if (unlikely(dma_mapping_error(dev, addr)))
3056 struct device *dev = priv->net_dev->dev.parent;
3059 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
3063 dev_dbg(dev, "Waiting for DPCON\n");
3066 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
3073 dev_err(dev, "dpcon_open() failed\n");
3079 dev_err(dev, "dpcon_reset() failed\n");
3085 dev_err(dev, "dpcon_enable() failed\n");
3111 struct device *dev = priv->net_dev->dev.parent;
3127 dev_err(dev, "dpcon_get_attributes() failed\n");
3159 struct device *dev = priv->net_dev->dev.parent;
3178 dev_dbg(dev, "waiting for affine channel\n");
3180 dev_info(dev,
3195 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3197 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3215 dev_err(dev, "dpcon_set_notification failed()\n");
3235 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3243 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3251 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3255 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3263 struct device *dev = priv->net_dev->dev.parent;
3270 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3278 struct device *dev = priv->net_dev->dev.parent;
3288 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3295 struct device *dev = priv->net_dev->dev.parent;
3323 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3365 struct device *dev = priv->net_dev->dev.parent;
3371 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3377 dev_err(dev, "DPBP device allocation failed\n");
3390 dev_err(dev, "dpbp_open() failed\n");
3396 dev_err(dev, "dpbp_reset() failed\n");
3402 dev_err(dev, "dpbp_enable() failed\n");
3409 dev_err(dev, "dpbp_get_attributes() failed\n");
3413 bp->dev = dpbp_dev;
3460 dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
3461 dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
3462 fsl_mc_object_free(bp->dev);
3480 struct device *dev = priv->net_dev->dev.parent;
3510 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3520 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3530 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3535 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3552 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3609 struct device *dev = priv->net_dev->dev.parent;
3616 dev_err(dev, "dpni_get_link_cfg() failed\n");
3625 dev_err(dev, "dpni_set_link_cfg() failed\n");
3678 struct device *dev = priv->net_dev->dev.parent;
3692 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3708 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3715 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3718 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3719 dev_err(dev, "QoS table DMA mapping failed\n");
3726 dev_err(dev, "dpni_set_qos_table failed\n");
3739 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3741 if (dma_mapping_error(dev, key_params.key_iova)) {
3742 dev_err(dev, "Qos table entry DMA mapping failed\n");
3757 dma_sync_single_for_device(dev, key_params.key_iova,
3763 dev_err(dev, "dpni_add_qos_entry failed\n");
3775 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3779 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3790 struct device *dev = &ls_dev->dev;
3795 net_dev = dev_get_drvdata(dev);
3801 dev_err(dev, "dpni_open() failed\n");
3809 dev_err(dev, "dpni_get_api_version() failed\n");
3813 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3825 dev_err(dev, "dpni_reset() failed\n");
3832 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3853 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3884 struct device *dev = priv->net_dev->dev.parent;
3892 dev_err(dev, "dpni_get_queue(RX) failed\n");
3907 dev_err(dev, "dpni_set_queue(RX) failed\n");
3919 dev_err(dev, "xdp_rxq_info_reg failed\n");
3926 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3936 struct device *dev = priv->net_dev->dev.parent;
3946 dev_err(dev, "dpni_get_queue(TX) failed\n");
3959 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3974 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3984 struct device *dev = priv->net_dev->dev.parent;
3993 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
4006 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
4083 struct device *dev = priv->net_dev->dev.parent;
4097 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4108 struct device *dev = priv->net_dev->dev.parent;
4123 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4140 struct device *dev = priv->net_dev->dev.parent;
4155 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4223 struct device *dev = net_dev->dev.parent;
4247 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4264 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4269 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4271 if (dma_mapping_error(dev, key_iova)) {
4272 dev_err(dev, "DMA mapping failed\n");
4286 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4319 struct device *dev = priv->net_dev->dev.parent;
4324 dev_dbg(dev, "Rx cls not supported by current MC version\n");
4329 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4334 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4363 struct device *dev = net_dev->dev.parent;
4369 pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4374 dev_err(dev, "dpni_set_pools() failed\n");
4383 dev_err(dev, "Failed to configure hashing\n");
4390 dev_err(dev, "Failed to configure Rx classification key\n");
4399 dev_err(dev, "dpni_set_errors_behavior failed\n");
4416 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4426 dev_err(dev, "dpni_get_qdid() failed\n");
4437 struct device *dev = net_dev->dev.parent;
4442 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4472 struct device *dev = net_dev->dev.parent;
4479 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4487 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4499 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4509 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4514 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4536 struct device *dev = net_dev->dev.parent;
4555 dev_err(dev, "dpni_add_mac_addr() failed\n");
4564 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4572 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4577 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4636 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4644 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4708 struct device *dev = (struct device *)arg;
4709 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4710 struct net_device *net_dev = dev_get_drvdata(dev);
4750 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4755 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4758 dev_name(&ls_dev->dev), &ls_dev->dev);
4760 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4768 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4775 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4782 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4814 struct device *dev;
4819 dev = &dpni_dev->dev;
4824 dev_err(dev, "alloc_etherdev_mq() failed\n");
4828 SET_NETDEV_DEV(net_dev, dev);
4829 dev_set_drvdata(dev, net_dev);
4837 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4859 dev_dbg(dev, "waiting for MC portal\n");
4862 dev_err(dev, "MC portal allocation failed\n");
4892 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4898 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4905 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4912 dev_err(dev, "alloc_percpu(fds) failed\n");
4940 dev_dbg(dev, "PFC not supported\n");
4954 dev_err(dev, "Error starting polling thread\n");
4976 dev_err(dev, "register_netdev() failed\n");
4985 dev_info(dev, "Probed interface %s\n", net_dev->name);
5026 dev_set_drvdata(dev, NULL);
5034 struct device *dev;
5038 dev = &ls_dev->dev;
5039 net_dev = dev_get_drvdata(dev);
5077 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
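
The matches above all follow one idiom: the DMA-capable device is the fsl-mc parent of the net_device (priv->net_dev->dev.parent), every dma_map_single()/dma_map_page() call is immediately checked with dma_mapping_error(), and failures are reported through dev_err() on that same parent. Below is a minimal sketch of that pattern; dpaa2_eth_map_buf() is a hypothetical helper written for illustration, not a function in the driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

/* Hypothetical helper mirroring the map/check idiom seen at e.g.
 * lines 1026-1027 and 1061 above. The net_device's own embedded
 * device has no DMA ops; the parent (the DPNI fsl-mc device) owns
 * the IOMMU mapping, which is why every call site in the listing
 * derives dev from net_dev->dev.parent.
 */
static dma_addr_t dpaa2_eth_map_buf(struct net_device *net_dev,
				    void *buf, size_t len)
{
	struct device *dev = net_dev->dev.parent;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		dev_err(dev, "dma_map_single() failed\n");
		return DMA_MAPPING_ERROR;
	}

	return addr;
}

The matching dma_unmap_single() (e.g. line 1061) must be given the same parent device, length, and direction as the map call, which is why the unmap sites above read sizes back out of the swa (software annotation) fields such as swa->sg.sgt_size and swa->xdp.dma_size.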