Searched refs:rxqueue (Results 1 - 8 of 8) sorted by relevance
/kernel/linux/linux-5.10/drivers/hsi/controllers/
omap_ssi_port.c
    397  queue = &omap_port->rxqueue[msg->channel];  in ssi_async()
    571  ssi_flush_queue(&omap_port->rxqueue[i], NULL);  in ssi_flush()
    698  if (list_empty(&omap_port->rxqueue[i]))  in ssi_cleanup_queues()
    700  msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,  in ssi_cleanup_queues()
    706  ssi_flush_queue(&omap_port->rxqueue[i], cl);  in ssi_cleanup_queues()
    708  if (!list_empty(&omap_port->rxqueue[i]))  in ssi_cleanup_queues()
    848  if (list_empty(&omap_port->rxqueue[i]))  in ssi_error()
    850  msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,  in ssi_error()
    857  ssi_transfer(omap_port, &omap_port->rxqueue[i]);  in ssi_error()
    977  ssi_pio_complete(port, &omap_port->rxqueue[c  in ssi_pio_thread()
    [all...]
omap_ssi.h
    61  * @rxqueue: RX message queues
    86  struct list_head rxqueue[SSI_MAX_CHANNELS];  member
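The omap_ssi.h hits show the shape of the data structure: one struct list_head per channel, used as a FIFO of pending RX messages. Below is a minimal sketch of that per-channel queue pattern, not the driver's actual code; demo_port, demo_msg and DEMO_MAX_CHANNELS are made-up names, while struct list_head and the list_* helpers are the real kernel API used by omap_ssi_port.c.

/*
 * Hypothetical model of rxqueue[SSI_MAX_CHANNELS]: one list head per
 * channel, messages queued at the tail and completed from the head.
 */
#include <linux/list.h>

#define DEMO_MAX_CHANNELS	8	/* stand-in for SSI_MAX_CHANNELS */

struct demo_msg {
        unsigned int channel;
        struct list_head link;          /* linkage into one rxqueue[] list */
};

struct demo_port {
        struct list_head rxqueue[DEMO_MAX_CHANNELS];
};

static void demo_port_init(struct demo_port *port)
{
        int i;

        /* Every channel starts with its own empty RX list. */
        for (i = 0; i < DEMO_MAX_CHANNELS; i++)
                INIT_LIST_HEAD(&port->rxqueue[i]);
}

static void demo_queue_rx(struct demo_port *port, struct demo_msg *msg)
{
        /* Tail insertion keeps completion in submission order. */
        list_add_tail(&msg->link, &port->rxqueue[msg->channel]);
}

static struct demo_msg *demo_dequeue_rx(struct demo_port *port, unsigned int ch)
{
        struct demo_msg *msg;

        if (list_empty(&port->rxqueue[ch]))
                return NULL;

        /* Oldest pending message first, as list_first_entry() in the hits above. */
        msg = list_first_entry(&port->rxqueue[ch], struct demo_msg, link);
        list_del(&msg->link);
        return msg;
}

Keeping one list head per channel lets the flush and error paths in the hits above walk a single channel's backlog without disturbing the others.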
/kernel/linux/linux-6.6/drivers/hsi/controllers/ |
omap_ssi_port.c
    391  queue = &omap_port->rxqueue[msg->channel];  in ssi_async()
    565  ssi_flush_queue(&omap_port->rxqueue[i], NULL);  in ssi_flush()
    692  if (list_empty(&omap_port->rxqueue[i]))  in ssi_cleanup_queues()
    694  msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,  in ssi_cleanup_queues()
    700  ssi_flush_queue(&omap_port->rxqueue[i], cl);  in ssi_cleanup_queues()
    702  if (!list_empty(&omap_port->rxqueue[i]))  in ssi_cleanup_queues()
    842  if (list_empty(&omap_port->rxqueue[i]))  in ssi_error()
    844  msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,  in ssi_error()
    851  ssi_transfer(omap_port, &omap_port->rxqueue[i]);  in ssi_error()
    971  ssi_pio_complete(port, &omap_port->rxqueue[c  in ssi_pio_thread()
    [all...]
omap_ssi.h
    61  * @rxqueue: RX message queues
    86  struct list_head rxqueue[SSI_MAX_CHANNELS];  member
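The 6.6 hits are the same cleanup and error paths as in 5.10: ssi_cleanup_queues() and ssi_error() walk every channel's rxqueue[i] and drain whatever is still pending. A hedged sketch of that drain pattern follows, reusing the hypothetical demo_port/demo_msg types from the previous sketch; list_for_each_entry_safe() and list_del() are the real kernel helpers.

static void demo_flush_rxqueues(struct demo_port *port)
{
        struct demo_msg *msg, *tmp;
        int i;

        for (i = 0; i < DEMO_MAX_CHANNELS; i++) {
                if (list_empty(&port->rxqueue[i]))
                        continue;       /* nothing pending on this channel */

                /* The _safe variant allows unlinking entries while iterating. */
                list_for_each_entry_safe(msg, tmp, &port->rxqueue[i], link) {
                        list_del(&msg->link);
                        /*
                         * A real driver would now invoke the message's
                         * completion callback with an error/aborted status,
                         * roughly what ssi_flush_queue() does in the hits above.
                         */
                }
        }
}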
/kernel/linux/linux-6.6/net/bpf/ |
test_run.c
    1083  struct netdev_rx_queue *rxqueue;  in xdp_convert_md_to_buff() local
    1107  rxqueue = __netif_get_rx_queue(device, rx_queue_index);  in xdp_convert_md_to_buff()
    1109  if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))  in xdp_convert_md_to_buff()
    1112  xdp->rxq = &rxqueue->xdp_rxq;  in xdp_convert_md_to_buff()
    1148  struct netdev_rx_queue *rxqueue;  in bpf_prog_test_run_xdp() local
    1205  rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);  in bpf_prog_test_run_xdp()
    1206  rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;  in bpf_prog_test_run_xdp()
    1207  xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);  in bpf_prog_test_run_xdp()
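What these hits share is the binding step: before an XDP program runs on a synthetic buffer, the buffer's rxq pointer must reference a registered xdp_rxq_info taken from some netdev_rx_queue (test_run borrows the loopback device's queue 0). Below is a hedged, kernel-style sketch of that step modelled on the calls above; demo_bind_xdp() and its parameters are hypothetical, while the xdp_* helpers and __netif_get_rx_queue() are the APIs used in test_run.c (the header providing the rx-queue helper varies between kernel versions).

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static int demo_bind_xdp(struct net_device *dev, unsigned int rxq_index,
                         void *hard_start, u32 headroom, u32 data_len,
                         u32 frame_sz, struct xdp_buff *xdp)
{
        struct netdev_rx_queue *rxqueue;

        rxqueue = __netif_get_rx_queue(dev, rxq_index);

        /* Only a registered xdp_rxq_info may back an xdp_buff. */
        if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
                return -EINVAL;

        /* Tie the buffer to this queue, then lay out headroom and data. */
        xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
        xdp_prepare_buff(xdp, hard_start, headroom, data_len, true);

        return 0;
}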
/kernel/linux/linux-5.10/net/bpf/ |
test_run.c
    632  struct netdev_rx_queue *rxqueue;  in bpf_prog_test_run_xdp() local
    658  rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);  in bpf_prog_test_run_xdp()
    659  xdp.rxq = &rxqueue->xdp_rxq;  in bpf_prog_test_run_xdp()
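This 5.10 path is the kernel side of the BPF_PROG_TEST_RUN command: the test harness picks loopback's rx queue 0 as the rxq behind the constructed xdp_buff. From userspace it is typically exercised through libbpf, roughly as in the sketch below; prog_fd is assumed to refer to an already loaded XDP program, and the packet bytes are placeholders.

#include <stdio.h>
#include <bpf/bpf.h>

static int demo_test_run_xdp(int prog_fd)
{
        unsigned char pkt_in[64] = { 0 };       /* dummy Ethernet-sized frame */
        unsigned char pkt_out[256];
        int err;

        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in       = pkt_in,
                .data_size_in  = sizeof(pkt_in),
                .data_out      = pkt_out,
                .data_size_out = sizeof(pkt_out),
                .repeat        = 1,
        );

        /* This syscall lands in the kernel's bpf_prog_test_run_xdp() above. */
        err = bpf_prog_test_run_opts(prog_fd, &opts);
        if (err)
                return err;

        printf("XDP verdict %u, %u bytes out, %u ns/run\n",
               opts.retval, opts.data_size_out, opts.duration);
        return 0;
}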
/kernel/linux/linux-5.10/net/core/ |
dev.c
    4310  struct netdev_rx_queue *rxqueue;  in set_rps_cpu() local
    4325  rxqueue = dev->_rx + rxq_index;  in set_rps_cpu()
    4326  flow_table = rcu_dereference(rxqueue->rps_flow_table);  in set_rps_cpu()
    4358  struct netdev_rx_queue *rxqueue = dev->_rx;  in get_rps_cpu() local
    4375  rxqueue += index;  in get_rps_cpu()
    4378  /* Avoid computing hash if RFS/RPS is not active for this rxqueue */  in get_rps_cpu()
    4380  flow_table = rcu_dereference(rxqueue->rps_flow_table);  in get_rps_cpu()
    4381  map = rcu_dereference(rxqueue->rps_map);  in get_rps_cpu()
    4467  struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;  in rps_may_expire_flow() local
    4474  flow_table = rcu_dereference(rxqueue  in rps_may_expire_flow()
    4616  struct netdev_rx_queue *rxqueue;  in netif_get_rxqueue() local
    4640  struct netdev_rx_queue *rxqueue;  in netif_receive_generic_xdp() local
    [all...]
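In the RPS/RFS hits, rxqueue->rps_map is the per-queue list of CPUs configured via /sys/class/net/<dev>/queues/rx-<n>/rps_cpus, and get_rps_cpu() steers a flow by scaling its hash into that list. A small userspace model of that selection arithmetic follows; the demo_* names are made up, but the multiply-shift is the same arithmetic as the kernel's reciprocal_scale().

#include <stdint.h>
#include <stdio.h>

/* (hash * len) >> 32 maps a 32-bit hash uniformly onto [0, len). */
static uint32_t demo_reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

static int demo_rps_pick_cpu(const uint16_t *rps_map, uint32_t map_len,
                             uint32_t flow_hash)
{
        if (!map_len)
                return -1;      /* RPS not configured on this rxqueue */
        return rps_map[demo_reciprocal_scale(flow_hash, map_len)];
}

int main(void)
{
        const uint16_t map[] = { 0, 2, 4, 6 };  /* CPUs enabled for this queue */
        uint32_t hash = 0x9e3779b9;             /* stand-in for skb->hash */

        printf("flow steered to CPU %d\n", demo_rps_pick_cpu(map, 4, hash));
        return 0;
}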
/kernel/linux/linux-6.6/net/core/ |
dev.c
    4516  struct netdev_rx_queue *rxqueue;  in set_rps_cpu() local
    4531  rxqueue = dev->_rx + rxq_index;  in set_rps_cpu()
    4532  flow_table = rcu_dereference(rxqueue->rps_flow_table);  in set_rps_cpu()
    4564  struct netdev_rx_queue *rxqueue = dev->_rx;  in get_rps_cpu() local
    4581  rxqueue += index;  in get_rps_cpu()
    4584  /* Avoid computing hash if RFS/RPS is not active for this rxqueue */  in get_rps_cpu()
    4586  flow_table = rcu_dereference(rxqueue->rps_flow_table);  in get_rps_cpu()
    4587  map = rcu_dereference(rxqueue->rps_map);  in get_rps_cpu()
    4673  struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;  in rps_may_expire_flow() local
    4680  flow_table = rcu_dereference(rxqueue  in rps_may_expire_flow()
    4836  struct netdev_rx_queue *rxqueue;  in netif_get_rxqueue() local
    4860  struct netdev_rx_queue *rxqueue;  in bpf_prog_run_generic_xdp() local
    [all...]
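The last two hits are the generic-XDP path: netif_get_rxqueue() resolves which netdev_rx_queue an skb arrived on so bpf_prog_run_generic_xdp() can expose that queue's xdp_rxq to the program. A hedged, kernel-style sketch of that resolution and clamping logic; demo_get_rxqueue() is a rename for illustration, while the skb helpers and net_device fields are the real ones.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct netdev_rx_queue *demo_get_rxqueue(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct netdev_rx_queue *rxqueue = dev->_rx;     /* queue 0 by default */

        if (skb_rx_queue_recorded(skb)) {
                u16 index = skb_get_rx_queue(skb);

                /* A stale or bogus recorded index must not run off the array. */
                if (unlikely(index >= dev->real_num_rx_queues))
                        index = 0;

                rxqueue += index;
        }

        return rxqueue;
}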
Completed in 47 milliseconds