/kernel/linux/linux-6.6/drivers/net/wwan/
  qcom_bam_dmux.c
    88   struct net_device *netdevs[BAM_DMUX_NUM_CH];  member
    139  struct net_device *netdev = dmux->netdevs[i];  in bam_dmux_tx_wake_queues()
    153  struct net_device *netdev = dmux->netdevs[i];  in bam_dmux_tx_stop_queues()
    441  if (dmux->netdevs[ch])  in bam_dmux_register_netdev_work()
    464  dmux->netdevs[ch] = netdev;  in bam_dmux_register_netdev_work()
    507  struct net_device *netdev = dmux->netdevs[hdr->ch];  in bam_dmux_cmd_data()
    544  struct net_device *netdev = dmux->netdevs[hdr->ch];  in bam_dmux_cmd_open()
    563  struct net_device *netdev = dmux->netdevs[hdr->ch];  in bam_dmux_cmd_close()
    860  if (dmux->netdevs[i])  in bam_dmux_remove()
    861  unregister_netdevice_queue(dmux->netdevs[  in bam_dmux_remove()
    [all...]
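The qcom_bam_dmux hits all revolve around one pattern: a fixed-size array of struct net_device pointers, indexed by DMUX channel, written once when a channel's netdev is registered and consulted on every TX wake/stop, RX command, and teardown path. Below is a minimal sketch of that bookkeeping, not the driver's real API: the names my_dmux, my_dmux_attach_netdev, my_dmux_tx_wake_queues, and MY_NUM_CH are invented, and the spinlock only stands in for whatever serialization the real code gets from its work item.

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    #define MY_NUM_CH 8    /* stand-in for BAM_DMUX_NUM_CH */

    /* Hypothetical container: one netdev slot per multiplexed channel.
     * The spinlock is assumed to be initialized at probe time. */
    struct my_dmux {
        spinlock_t lock;
        struct net_device *netdevs[MY_NUM_CH];
    };

    /* Record a freshly registered netdev for a channel, refusing duplicates
     * (mirrors the "if (dmux->netdevs[ch])" guard seen at line 441 above). */
    static int my_dmux_attach_netdev(struct my_dmux *dmux, unsigned int ch,
                                     struct net_device *netdev)
    {
        if (ch >= MY_NUM_CH)
            return -EINVAL;

        spin_lock(&dmux->lock);
        if (dmux->netdevs[ch]) {
            spin_unlock(&dmux->lock);
            return -EBUSY;
        }
        dmux->netdevs[ch] = netdev;
        spin_unlock(&dmux->lock);
        return 0;
    }

    /* Wake every registered channel's TX queue, e.g. after flow control lifts. */
    static void my_dmux_tx_wake_queues(struct my_dmux *dmux)
    {
        int i;

        for (i = 0; i < MY_NUM_CH; i++) {
            struct net_device *netdev = dmux->netdevs[i];

            if (netdev)
                netif_wake_queue(netdev);
        }
    }

The real driver does the registration from a work item (bam_dmux_register_netdev_work() in the hits above); the lock here is only one possible way to close the same duplicate-registration race.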
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
  netdev_rx.c
    167  dd_dev_info(dd, "No receive contexts available for netdevs.\n");  in hfi1_num_netdev_contexts()
    172  dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");  in hfi1_num_netdev_contexts()
    302  * hfi1_netdev_rx_init - Incrememnts netdevs counter. When called first time,
    313  if (atomic_fetch_inc(&priv->netdevs))  in hfi1_netdev_rx_init()
    324  * hfi1_netdev_rx_destroy - Decrements netdevs counter, when it reaches 0
    334  if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {  in hfi1_netdev_rx_destroy()
    366  atomic_set(&priv->netdevs, 0);  in hfi1_netdev_alloc()
  netdev.h
    46  * @enabled: atomic counter of netdevs enabling receive queues.
    48  * @netdevs: atomic counter of netdevs using dummy netdev.
    59  /* count of netdevs on top */
    60  atomic_t netdevs;  member
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
  netdev_rx.c
    167  dd_dev_info(dd, "No receive contexts available for netdevs.\n");  in hfi1_num_netdev_contexts()
    172  dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");  in hfi1_num_netdev_contexts()
    301  * hfi1_netdev_rx_init - Incrememnts netdevs counter. When called first time,
    312  if (atomic_fetch_inc(&rx->netdevs))  in hfi1_netdev_rx_init()
    322  * hfi1_netdev_rx_destroy - Decrements netdevs counter, when it reaches 0
    332  if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {  in hfi1_netdev_rx_destroy()
    367  atomic_set(&rx->netdevs, 0);  in hfi1_alloc_rx()
  netdev.h
    46  * @enabled: atomic counter of netdevs enabling receive queues.
    48  * @netdevs: atomic counter of netdevs using dummy netdev.
    60  /* count of netdevs on top */
    61  atomic_t netdevs;  member
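Both the v5.10 and v6.6 hfi1 entries above show a second shape for the member: here netdevs is not an array but an atomic_t counting how many netdevs sit on top of the shared dummy netdev, with atomic_fetch_inc() giving first-caller-initializes semantics and atomic_fetch_add_unless(.., -1, 0) giving last-caller-cleans-up without underflow. A stripped-down sketch of that refcount shape follows; my_rx, my_rx_init, and my_rx_destroy are invented names, and the real setup/teardown bodies are elided.

    #include <linux/atomic.h>

    /* Hypothetical shared RX state that several netdevs stack on top of. */
    struct my_rx {
        atomic_t netdevs;    /* count of netdevs currently using this RX state */
    };

    /* First caller brings the shared RX machinery up; later callers only bump
     * the counter (compare hfi1_netdev_rx_init() in the hits above). */
    static int my_rx_init(struct my_rx *rx)
    {
        if (atomic_fetch_inc(&rx->netdevs))
            return 0;    /* already initialized by an earlier netdev */

        /* ... allocate receive contexts, request IRQs, etc. ... */
        return 0;
    }

    /* Drop one reference; only the 1 -> 0 transition tears the RX state down.
     * atomic_fetch_add_unless(v, -1, 0) refuses to decrement below zero, so an
     * unbalanced destroy cannot wrap the counter (compare hfi1_netdev_rx_destroy()). */
    static void my_rx_destroy(struct my_rx *rx)
    {
        if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
            /* ... free receive contexts, release IRQs, etc. ... */
        }
    }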
/kernel/linux/linux-5.10/kernel/bpf/
  offload.c
    37   struct list_head netdevs;  member
    617  list_add(&ondev->offdev_netdevs, &offdev->netdevs);  in bpf_offload_dev_netdev_register()
    646  altdev = list_first_entry_or_null(&offdev->netdevs,  in bpf_offload_dev_netdev_unregister()
    695  INIT_LIST_HEAD(&offdev->netdevs);  in bpf_offload_dev_create()
    703  WARN_ON(!list_empty(&offdev->netdevs));  in bpf_offload_dev_destroy()
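The kernel/bpf/offload.c hits show a third shape: netdevs is a struct list_head on the offload device, each bound netdev contributes a node (offdev_netdevs in the real code), list_first_entry_or_null() picks a fallback device on unregister, and WARN_ON(!list_empty(...)) guards destroy. The same structure appears again in the v6.6 entry further down. A self-contained sketch of that list bookkeeping, with invented names (my_offdev, my_ondev, and the helpers around them):

    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    /* Hypothetical offload device: owns a list of bound netdev records. */
    struct my_offdev {
        struct list_head netdevs;
    };

    /* Hypothetical per-netdev record hanging off the offload device. */
    struct my_ondev {
        struct list_head offdev_netdevs;    /* linkage into my_offdev.netdevs */
        struct net_device *netdev;
    };

    static struct my_offdev *my_offdev_create(void)
    {
        struct my_offdev *offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);

        if (!offdev)
            return NULL;
        INIT_LIST_HEAD(&offdev->netdevs);    /* compare bpf_offload_dev_create() */
        return offdev;
    }

    static void my_offdev_add(struct my_offdev *offdev, struct my_ondev *ondev)
    {
        list_add(&ondev->offdev_netdevs, &offdev->netdevs);
    }

    /* On unregister, another still-bound netdev (if any) can take over;
     * list_first_entry_or_null() is the lookup the real code uses for that. */
    static struct my_ondev *my_offdev_pick_alternative(struct my_offdev *offdev,
                                                       struct my_ondev *leaving)
    {
        list_del(&leaving->offdev_netdevs);
        return list_first_entry_or_null(&offdev->netdevs,
                                        struct my_ondev, offdev_netdevs);
    }

    static void my_offdev_destroy(struct my_offdev *offdev)
    {
        WARN_ON(!list_empty(&offdev->netdevs));    /* all netdevs must be gone */
        kfree(offdev);
    }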
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
  hns_roce_main.c
    112  netdev = hr_dev->iboe.netdevs[port];  in handle_en_event()
    151  if (dev == iboe->netdevs[port]) {  in hns_roce_netdev_event()
    172  hr_dev->iboe.netdevs[i]->dev_addr);  in hns_roce_setup_mtu_mac()
    252  net_dev = hr_dev->iboe.netdevs[port];  in hns_roce_query_port()
    546  if (!hr_dev->iboe.netdevs[i])  in hns_roce_register_device()
    549  ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],  in hns_roce_register_device()
  hns_roce_device.h
    672  struct net_device *netdevs[HNS_ROCE_MAX_PORTS];  member
  hns_roce_qp.c
    1085  active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);  in check_mtu_validate()
  hns_roce_hw_v2.c
    6223  hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;  in hns_roce_hw_v2_get_cfg()
    6227  hr_dev->iboe.netdevs[0]->dev_addr);  in hns_roce_hw_v2_get_cfg()
  hns_roce_hw_v1.c
    4559  hr_dev->iboe.netdevs[port_cnt] = netdev;  in hns_roce_get_cfg()
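Across the hns_roce files (repeated for v6.6 further down) the member is again a fixed array, but indexed by RoCE port rather than by channel: the hw_v1/hw_v2 probe paths fill iboe.netdevs[port] from the Ethernet handle, and the verbs side reads it back for MAC setup, port queries, MTU validation, and ib_device_set_netdev() registration. A condensed sketch of the fill-then-publish flow; my_roce_dev, my_roce_bind_port, my_roce_publish_netdevs, and MY_MAX_PORTS are placeholders, and error handling is trimmed.

    #include <linux/netdevice.h>
    #include <rdma/ib_verbs.h>

    #define MY_MAX_PORTS 2    /* stand-in for HNS_ROCE_MAX_PORTS */

    /* Hypothetical RoCE device state: one Ethernet netdev per IB port. */
    struct my_roce_dev {
        struct ib_device ib_dev;
        struct net_device *netdevs[MY_MAX_PORTS];
    };

    /* Probe-time: remember the lower Ethernet device backing each port
     * (compare hns_roce_get_cfg() / hns_roce_hw_v2_get_cfg() above). */
    static void my_roce_bind_port(struct my_roce_dev *hr, unsigned int port,
                                  struct net_device *netdev)
    {
        hr->netdevs[port] = netdev;
    }

    /* Registration-time: publish every bound port to the RDMA core so the
     * GID table and port state can track the netdev
     * (compare hns_roce_register_device() above). */
    static int my_roce_publish_netdevs(struct my_roce_dev *hr)
    {
        unsigned int i;
        int ret;

        for (i = 0; i < MY_MAX_PORTS; i++) {
            if (!hr->netdevs[i])
                continue;
            /* IB ports are numbered from 1, hence i + 1. */
            ret = ib_device_set_netdev(&hr->ib_dev, hr->netdevs[i], i + 1);
            if (ret)
                return ret;
        }
        return 0;
    }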
/kernel/linux/linux-6.6/drivers/net/ethernet/fungible/funeth/
  funeth_main.c
    1798  ed->netdevs[portid] = netdev;  in fun_create_netdev()
    1805  ed->netdevs[portid] = NULL;  in fun_create_netdev()
    1848  ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);  in fun_create_ports()
    1849  if (!ed->netdevs)  in fun_create_ports()
    1863  fun_destroy_netdev(ed->netdevs[--i]);  in fun_create_ports()
    1864  kfree(ed->netdevs);  in fun_create_ports()
    1865  ed->netdevs = NULL;  in fun_create_ports()
    1875  fun_destroy_netdev(ed->netdevs[i]);  in fun_destroy_ports()
    1877  kfree(ed->netdevs);  in fun_destroy_ports()
    [all...]
  funeth.h
    48  struct net_device **netdevs;  member
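funeth takes the dynamic variant of the same idea: the number of ports is only known at probe time, so ed->netdevs is a kcalloc()'d array of pointers that is filled as ports are created and unwound in reverse on failure. A sketch of that allocate/fill/unwind pattern; my_eth_dev, my_create_one, my_destroy_one, and my_create_ports are illustrative names, not the driver's, and the per-port constructor is reduced to the bare netdev calls.

    #include <linux/errno.h>
    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    /* Hypothetical per-adapter state with a per-port netdev table sized at probe. */
    struct my_eth_dev {
        unsigned int num_ports;
        struct net_device **netdevs;
    };

    static void my_destroy_one(struct net_device *netdev)
    {
        if (!netdev)
            return;
        unregister_netdev(netdev);
        free_netdev(netdev);
    }

    /* Hypothetical per-port constructor; the real fun_create_netdev() also
     * sets up queues, devlink ports, and so on. */
    static struct net_device *my_create_one(struct my_eth_dev *ed, unsigned int port)
    {
        struct net_device *netdev = alloc_etherdev(0);

        if (!netdev)
            return NULL;
        if (register_netdev(netdev)) {
            free_netdev(netdev);
            return NULL;
        }
        return netdev;
    }

    /* Allocate the table, create each port, and unwind in reverse on error
     * (mirrors the shape of fun_create_ports() in the hits above). */
    static int my_create_ports(struct my_eth_dev *ed, unsigned int nports)
    {
        unsigned int i;

        ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
        if (!ed->netdevs)
            return -ENOMEM;

        for (i = 0; i < nports; i++) {
            ed->netdevs[i] = my_create_one(ed, i);
            if (!ed->netdevs[i])
                goto unwind;
        }
        ed->num_ports = nports;
        return 0;

    unwind:
        while (i--)
            my_destroy_one(ed->netdevs[i]);
        kfree(ed->netdevs);
        ed->netdevs = NULL;
        return -ENOMEM;
    }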
/kernel/linux/linux-6.6/kernel/bpf/
  offload.c
    38   struct list_head netdevs;  member
    100  list_add(&ondev->offdev_netdevs, &offdev->netdevs);  in __bpf_offload_dev_netdev_register()
    163  altdev = list_first_entry_or_null(&offdev->netdevs,  in __bpf_offload_dev_netdev_unregister()
    784  INIT_LIST_HEAD(&offdev->netdevs);  in bpf_offload_dev_create()
    792  WARN_ON(!list_empty(&offdev->netdevs));  in bpf_offload_dev_destroy()
/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/
  hns_roce_main.c
    98   netdev = hr_dev->iboe.netdevs[port];  in handle_en_event()
    137  if (dev == iboe->netdevs[port]) {  in hns_roce_netdev_event()
    155  hr_dev->iboe.netdevs[i]->dev_addr);  in hns_roce_setup_mtu_mac()
    242  net_dev = hr_dev->iboe.netdevs[port];  in hns_roce_query_port()
    725  if (!hr_dev->iboe.netdevs[i])  in hns_roce_register_device()
    728  ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],  in hns_roce_register_device()
  hns_roce_device.h
    639  struct net_device *netdevs[HNS_ROCE_MAX_PORTS];  member
  hns_roce_qp.c
    1261  active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);  in check_mtu_validate()
  hns_roce_hw_v2.c
    6693  hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;  in hns_roce_hw_v2_get_cfg()
    6697  hr_dev->iboe.netdevs[0]->dev_addr);  in hns_roce_hw_v2_get_cfg()
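The check_mtu_validate() hits in hns_roce_qp.c (line 1085 in v5.10, 1261 in v6.6) use the per-port netdev for one specific check: the Ethernet device's mtu is translated into an IB MTU enum and the QP's requested path MTU must not exceed it. The helper below is not the kernel's iboe_get_mtu(), only a simplified stand-in for the idea of snapping a byte MTU down to the nearest IB MTU step; the in-tree helper also deducts RoCE header and ICRC overhead before comparing, at least in recent kernels.

    #include <rdma/ib_verbs.h>

    /* Simplified illustration only: map a netdev byte MTU to the largest
     * IB MTU enum that fits. */
    static enum ib_mtu my_netdev_mtu_to_ib_mtu(int mtu)
    {
        if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
            return IB_MTU_4096;
        if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048))
            return IB_MTU_2048;
        if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))
            return IB_MTU_1024;
        if (mtu >= ib_mtu_enum_to_int(IB_MTU_512))
            return IB_MTU_512;
        if (mtu >= ib_mtu_enum_to_int(IB_MTU_256))
            return IB_MTU_256;
        return 0;    /* MTU too small for any IB MTU step */
    }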
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/
  main.c
    792   ndev = iboe->netdevs[port - 1];  in eth_link_query_port()
    1314  ndev = mdev->iboe.netdevs[mqp->port - 1];  in mlx4_ib_add_mc()
    1965  ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;  in mlx4_ib_mcg_detach()
    2329  iboe->netdevs[port - 1] =  in mlx4_ib_scan_netdevs()
    2332  if (dev == iboe->netdevs[port - 1] &&  in mlx4_ib_scan_netdevs()
    2337  if (dev == iboe->netdevs[port - 1] &&  in mlx4_ib_scan_netdevs()
    3144  struct net_device *curr_netdev = ibdev->iboe.netdevs[i];  in handle_bonded_port_state_event()
  mlx4_ib.h
    546  struct net_device *netdevs[MLX4_MAX_PORTS];  member
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/
  main.c
    790   ndev = iboe->netdevs[port - 1];  in eth_link_query_port()
    1314  ndev = mdev->iboe.netdevs[mqp->port - 1];  in mlx4_ib_add_mc()
    1961  ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;  in mlx4_ib_mcg_detach()
    2342  iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;  in mlx4_ib_scan_netdev()
    3142  struct net_device *curr_netdev = ibdev->iboe.netdevs[i];  in handle_bonded_port_state_event()
  mlx4_ib.h
    547  struct net_device *netdevs[MLX4_MAX_PORTS];  member
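The mlx4 entries round out the listing with the same per-port array, indexed as port - 1 because IB port numbers start at 1 while the array starts at 0, and with the slot kept up to date from a netdev notifier (set on register, cleared to NULL on NETDEV_UNREGISTER, as the v6.6 mlx4_ib_scan_netdev() hit shows). A small sketch of that notifier-driven bookkeeping; my_iboe, my_iboe_get_netdev, my_iboe_netdev_event, and MY_MAX_PORTS are illustrative names rather than mlx4's actual code, and the notifier registration itself is omitted.

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    #define MY_MAX_PORTS 2    /* stand-in for MLX4_MAX_PORTS */

    /* Hypothetical IBoE state: one slot per port, NULL while no netdev is bound. */
    struct my_iboe {
        struct notifier_block nb;
        struct net_device *netdevs[MY_MAX_PORTS];
    };

    /* Map an IB port number (1-based) to its current lower netdev, if any. */
    static struct net_device *my_iboe_get_netdev(struct my_iboe *iboe, u32 port)
    {
        return iboe->netdevs[port - 1];
    }

    /* Netdev notifier: keep the per-port slot in sync with register/unregister
     * events (compare mlx4_ib_scan_netdev() in the v6.6 hits above). */
    static int my_iboe_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
        struct my_iboe *iboe = container_of(nb, struct my_iboe, nb);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        unsigned int slot;

        /* The real driver first checks that dev really belongs to this HCA
         * and derives the slot from dev->dev_port; assume that here. */
        slot = dev->dev_port;
        if (slot >= MY_MAX_PORTS)
            return NOTIFY_DONE;

        iboe->netdevs[slot] = (event != NETDEV_UNREGISTER) ? dev : NULL;
        return NOTIFY_DONE;
    }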