/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/
hinic_hw_eqs.c
    33  HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
    34  HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
    37  HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
    38  HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
    41  HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    42  HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
    45  HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    46  HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
    80  container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
    83  container_of((eq) - (eq)->q_id, struc
   722  init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry)  init_eq() argument
   865  int err, i, q_id;  hinic_aeqs_init() local
   899  int q_id;  hinic_aeqs_free() local
   923  int i, q_id, err;  hinic_ceqs_init() local
   953  int q_id;  hinic_ceqs_free() local
   963  int q_id;  hinic_dump_ceq_info() local
   983  int q_id;  hinic_dump_aeq_info() local
[all...]

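The container_of() calls on lines 80 and 83 recover the parent aeqs/ceqs structure from a pointer to one element of its embedded event-queue array: subtracting the element's own q_id steps the pointer back to element 0, and container_of() then rewinds from the member to the start of the containing struct. A minimal userspace sketch of the same trick, with an invented layout standing in for the driver's structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct eq { int q_id; };

struct aeqs {
        int num_aeqs;
        struct eq aeq[4];               /* invariant: aeq[i].q_id == i */
};

/* (eq) - (eq)->q_id lands on aeq[0] no matter which element we hold;
 * container_of() then rewinds from the member to the struct start. */
static struct aeqs *eq_to_aeqs(struct eq *eq)
{
        return container_of(eq - eq->q_id, struct aeqs, aeq[0]);
}

int main(void)
{
        struct aeqs a = { .num_aeqs = 4 };

        for (int i = 0; i < 4; i++)
                a.aeq[i].q_id = i;
        printf("%d\n", eq_to_aeqs(&a.aeq[2]) == &a);    /* prints 1 */
        return 0;
}
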
hinic_hw_csr.h
    88  #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
    89  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
    92  #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
    93  (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
    96  #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
    97  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
   100  #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
   101  (HINIC_CSR_CEQ_MTT_OFF(q_id) + \

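These macros compute a per-queue, per-page register address as the queue's MTT base offset plus a page-indexed term; the continuation lines holding the actual stride are cut off in the results above. A sketch of the shape of that computation, with made-up offsets and strides (0x200 base, 0x400 queue stride, 8-byte page stride), not the header's real values:

#include <stdio.h>

#define AEQ_MTT_OFF(q_id)              (0x200 + (q_id) * 0x400)
#define AEQ_HI_PHYS_ADDR_REG(q_id, pg) (AEQ_MTT_OFF(q_id) + (pg) * 8)
#define AEQ_LO_PHYS_ADDR_REG(q_id, pg) (AEQ_MTT_OFF(q_id) + (pg) * 8 + 4)

int main(void)
{
        /* register pair holding the DMA address of page 3 of queue 1 */
        printf("hi 0x%x lo 0x%x\n",
               AEQ_HI_PHYS_ADDR_REG(1, 3), AEQ_LO_PHYS_ADDR_REG(1, 3));
        return 0;
}
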
hinic_hw_io.c
    30  #define CI_ADDR(base_addr, q_id) ((base_addr) + \
    31  (q_id) * CI_Q_ADDR_SIZE)
   132  base_qpn + qp->q_id);  in write_sq_ctxts()
   176  base_qpn + qp->q_id);  in write_rq_ctxts()
   265  * @q_id: the id of the qp
   272  struct hinic_qp *qp, int q_id,  in init_qp()
   281  qp->q_id = q_id;  in init_qp()
   283  err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],  in init_qp()
   291  err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],  in init_qp()
   271  init_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp, int q_id, struct msix_entry *sq_msix_entry, struct msix_entry *rq_msix_entry)  init_qp() argument
   350  int q_id = qp->q_id;  destroy_qp() local
[all...]

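CI_ADDR() on lines 30-31 carves one base address into fixed-size per-queue completion-index slots. A small sketch of that addressing, assuming a placeholder CI_Q_ADDR_SIZE (the driver's real value is not shown in the snippet):

#include <stdint.h>
#include <stdio.h>

#define CI_Q_ADDR_SIZE 64
#define CI_ADDR(base_addr, q_id) ((base_addr) + (q_id) * CI_Q_ADDR_SIZE)

int main(void)
{
        uint64_t ci_dma_base = 0x1000;          /* stand-in DMA address */

        for (int q_id = 0; q_id < 4; q_id++)
                printf("qp %d ci at 0x%llx\n", q_id,
                       (unsigned long long)CI_ADDR(ci_dma_base, q_id));
        return 0;
}
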
hinic_tx.c
   492  u16 prod_idx, q_id = skb->queue_mapping;  in hinic_lb_xmit_frame() local
   500  txq = &nic_dev->txqs[q_id];  in hinic_lb_xmit_frame()
   512  netif_stop_subqueue(netdev, qp->q_id);  in hinic_lb_xmit_frame()
   516  netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_lb_xmit_frame()
   535  netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_lb_xmit_frame()
   553  u16 prod_idx, q_id = skb->queue_mapping;  in hinic_xmit_frame() local
   561  txq = &nic_dev->txqs[q_id];  in hinic_xmit_frame()
   593  netif_stop_subqueue(netdev, qp->q_id);  in hinic_xmit_frame()
   600  netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_xmit_frame()
   624  netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_xmit_frame()
[all...]

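The stop/wake pairs at lines 512/516 and 593/600 are the usual TX flow-control dance: stop the subqueue when descriptors run out, re-read the free count, and wake the queue again if completions raced in between the check and the stop. A single-threaded userspace model of that logic; names are invented, and the concurrent completion path is modeled by an explicit parameter:

#include <stdbool.h>
#include <stdio.h>

struct txq_model {
        int free_wqebbs;        /* free work-queue elements */
        bool stopped;
};

/* racing_completions stands in for descriptors freed on another CPU
 * between the stop and the re-check. */
static bool try_reserve(struct txq_model *q, int needed,
                        int racing_completions)
{
        if (q->free_wqebbs < needed) {
                q->stopped = true;                    /* netif_stop_subqueue() */
                q->free_wqebbs += racing_completions;
                if (q->free_wqebbs < needed)
                        return false;                 /* stack requeues the skb */
                q->stopped = false;                   /* netif_wake_subqueue() */
        }
        q->free_wqebbs -= needed;
        return true;
}

int main(void)
{
        struct txq_model q = { .free_wqebbs = 1, .stopped = false };

        printf("%d\n", try_reserve(&q, 4, 5));  /* stopped then woken: 1 */
        printf("%d\n", try_reserve(&q, 4, 0));  /* stays stopped: 0 */
        return 0;
}
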
hinic_hw_cmdq.h
   118  u32 q_id;  member
   181  enum hinic_set_arm_qtype q_type, u32 q_id);

hinic_main.c
   825  u16 num_sqs, q_id;  in hinic_tx_timeout() local
   831  for (q_id = 0; q_id < num_sqs; q_id++) {  in hinic_tx_timeout()
   832  if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))  in hinic_tx_timeout()
   835  sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);  in hinic_tx_timeout()
   840  q_id, sw_pi, hw_ci, sw_ci,  in hinic_tx_timeout()
   841  nic_dev->txqs[q_id].napi.state);  in hinic_tx_timeout()

/kernel/linux/linux-6.6/drivers/net/ethernet/huawei/hinic/
hinic_hw_eqs.c
    33  HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
    34  HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
    37  HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
    38  HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
    41  HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    42  HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
    45  HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    46  HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
    80  container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
    83  container_of((eq) - (eq)->q_id, struc
   722  init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry)  init_eq() argument
   865  int err, i, q_id;  hinic_aeqs_init() local
   899  int q_id;  hinic_aeqs_free() local
   923  int i, q_id, err;  hinic_ceqs_init() local
   953  int q_id;  hinic_ceqs_free() local
   963  int q_id;  hinic_dump_ceq_info() local
   983  int q_id;  hinic_dump_aeq_info() local
[all...]

hinic_hw_csr.h
    87  #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
    88  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
    91  #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
    92  (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
    95  #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
    96  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
    99  #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
   100  (HINIC_CSR_CEQ_MTT_OFF(q_id) + \

hinic_hw_io.c
    30  #define CI_ADDR(base_addr, q_id) ((base_addr) + \
    31  (q_id) * CI_Q_ADDR_SIZE)
   132  base_qpn + qp->q_id);  in write_sq_ctxts()
   176  base_qpn + qp->q_id);  in write_rq_ctxts()
   265  * @q_id: the id of the qp
   272  struct hinic_qp *qp, int q_id,  in init_qp()
   281  qp->q_id = q_id;  in init_qp()
   283  err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],  in init_qp()
   291  err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],  in init_qp()
   271  init_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp, int q_id, struct msix_entry *sq_msix_entry, struct msix_entry *rq_msix_entry)  init_qp() argument
   350  int q_id = qp->q_id;  destroy_qp() local
[all...]

hinic_tx.c
   494  u16 prod_idx, q_id = skb->queue_mapping;  in hinic_lb_xmit_frame() local
   502  txq = &nic_dev->txqs[q_id];  in hinic_lb_xmit_frame()
   514  netif_stop_subqueue(netdev, qp->q_id);  in hinic_lb_xmit_frame()
   518  netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_lb_xmit_frame()
   537  netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_lb_xmit_frame()
   555  u16 prod_idx, q_id = skb->queue_mapping;  in hinic_xmit_frame() local
   563  txq = &nic_dev->txqs[q_id];  in hinic_xmit_frame()
   595  netif_stop_subqueue(netdev, qp->q_id);  in hinic_xmit_frame()
   602  netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_xmit_frame()
   626  netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_xmit_frame()
[all...]

/kernel/linux/linux-5.10/tools/cgroup/
iocost_monitor.py
    67  def walk(self, blkcg, q_id, parent_path):
    75  address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
    83  self.walk(c, q_id, path)
    85  def __init__(self, root_blkcg, q_id, include_dying=False):
    88  self.walk(root_blkcg, q_id, '')
   217  q_id = None  variable
   225  q_id = blkg.q.id.value_()
   251  for path, blkg in BlkgIterator(blkcg_root, q_id):

/kernel/linux/linux-6.6/tools/cgroup/
iocost_monitor.py
    64  def __init__(self, root_blkcg, q_id, include_dying=False):
    67  self.walk(root_blkcg, q_id, '')
    72  def walk(self, blkcg, q_id, parent_path):
    80  address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
    88  self.walk(c, q_id, path)
   224  q_id = None  variable
   232  q_id = blkg.q.id.value_()
   258  for path, blkg in BlkgIterator(blkcg_root, q_id):

/kernel/linux/linux-6.6/drivers/block/
ublk_drv.c
   131  int q_id;  member
   681  static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)  in ublk_queue_cmd_buf() argument
   683  return ublk_get_queue(ub, q_id)->io_cmd_buf;  in ublk_queue_cmd_buf()
   686  static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)  in ublk_queue_cmd_buf_size() argument
   688  struct ublk_queue *ubq = ublk_get_queue(ub, q_id);  in ublk_queue_cmd_buf_size()
  1138  __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,  in __ublk_rq_task_work()
  1164  __func__, io->cmd->cmd_op, ubq->q_id,  in __ublk_rq_task_work()
  1178  __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,  in __ublk_rq_task_work()
  1362  int q_id, ret = 0;  in ublk_ch_mmap() local
  1381  q_id  in ublk_ch_mmap()
  1637  ublk_handle_need_get_data(struct ublk_device *ub, int q_id, int tag)  ublk_handle_need_get_data() argument
  1862  u16 tag, q_id;  ublk_check_and_get_req() local
  1951  ublk_deinit_queue(struct ublk_device *ub, int q_id)  ublk_deinit_queue() argument
  1962  ublk_init_queue(struct ublk_device *ub, int q_id)  ublk_init_queue() argument
[all...]

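Lines 681-688 show ublk's pattern of reaching everything through q_id: the device holds an array of queues, and both the per-queue command buffer and its size are looked up by index. A simplified stand-in model; the field names and the 24-byte per-io descriptor size are assumptions, not the driver's real layout:

#include <stddef.h>
#include <stdio.h>

struct ubq_model {
        int q_id;
        int q_depth;
        char *io_cmd_buf;
};

struct ub_model {
        int nr_queues;
        struct ubq_model *queues;
};

static struct ubq_model *get_queue(struct ub_model *ub, int q_id)
{
        return &ub->queues[q_id];
}

/* one fixed-size command descriptor per io; 24 bytes is a stand-in */
static size_t queue_cmd_buf_size(struct ub_model *ub, int q_id)
{
        return get_queue(ub, q_id)->q_depth * 24;
}

int main(void)
{
        struct ubq_model qs[2] = { { 0, 128, NULL }, { 1, 128, NULL } };
        struct ub_model ub = { 2, qs };

        printf("%zu\n", queue_cmd_buf_size(&ub, 1));    /* 3072 */
        return 0;
}
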
/kernel/linux/linux-5.10/fs/xfs/scrub/
quota.c
    94  offset = dq->q_id / qi->qi_dqperchunk;  in xchk_quota_item()
    95  if (dq->q_id && dq->q_id <= sqi->last_id)  in xchk_quota_item()
    98  sqi->last_id = dq->q_id;  in xchk_quota_item()
   148  if (dq->q_id == 0)  in xchk_quota_item()

/kernel/linux/linux-6.6/fs/xfs/scrub/
quota.c
   100  offset = dq->q_id / qi->qi_dqperchunk;  in xchk_quota_item()
   101  if (dq->q_id && dq->q_id <= sqi->last_id)  in xchk_quota_item()
   104  sqi->last_id = dq->q_id;  in xchk_quota_item()
   154  if (dq->q_id == 0)  in xchk_quota_item()

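In both trees, xchk_quota_item() checks that dquot IDs strictly increase as the quota file is walked, with ID 0 (the default-limits record) exempt from the ordering test. A reduced model of that check, with the corruption flagging collapsed to a return code:

#include <stdio.h>

struct scrub_state { unsigned int last_id; };

/* Returns -1 if the walk sees an ID at or below the previous one;
 * ID 0 is the default-limits record and is exempt. */
static int check_quota_item(struct scrub_state *s, unsigned int q_id)
{
        if (q_id && q_id <= s->last_id)
                return -1;
        s->last_id = q_id;
        return 0;
}

int main(void)
{
        struct scrub_state s = { 0 };
        unsigned int ids[] = { 0, 5, 9, 7 };

        for (int i = 0; i < 4; i++)
                printf("id %u -> %d\n", ids[i], check_quota_item(&s, ids[i]));
        return 0;
}
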
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/
ice_base.c
   579  u16 q_id, q_base;  in ice_vsi_map_rings_to_vectors() local
   589  for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
   590  struct ice_ring *tx_ring = vsi->tx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
   606  for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
   607  struct ice_ring *rx_ring = vsi->rx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
   830  &txq_meta->q_id,  in ice_vsi_stop_tx_ring()
[all...]

/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/
ice_base.c
   758  u16 q_id, q_base;  in ice_vsi_map_rings_to_vectors() local
   768  for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
   769  struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
   785  for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
   786  struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
  1016  &txq_meta->q_id,  in ice_vsi_stop_tx_ring()
[all...]

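In both kernel versions, ice_vsi_map_rings_to_vectors() deals TX and RX rings out to interrupt vectors in contiguous [q_base, q_base + rings_per_v) chunks, advancing q_base per vector. A sketch of that distribution; the exact rounding policy here (ceil-divide the remainder over the vectors left) is an assumption about the driver's split, not lifted from it:

#include <stdio.h>

int main(void)
{
        int num_rings = 10, num_vectors = 4, q_base = 0;

        for (int v = 0; v < num_vectors; v++) {
                /* ceil-divide the remaining rings over remaining vectors */
                int rings_per_v = (num_rings - q_base + num_vectors - v - 1)
                                  / (num_vectors - v);

                for (int q_id = q_base; q_id < q_base + rings_per_v; q_id++)
                        printf("ring %d -> vector %d\n", q_id, v);
                q_base += rings_per_v;
        }
        return 0;
}
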
ice_eswitch.c
    40  rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +  in ice_eswitch_add_vf_sp_rule()
   148  int q_id;  in ice_eswitch_remap_rings_to_vectors() local
   150  ice_for_each_txq(vsi, q_id) {  in ice_eswitch_remap_rings_to_vectors()
   157  vf = ice_get_vf_by_id(pf, q_id);  in ice_eswitch_remap_rings_to_vectors()
   163  tx_ring = vsi->tx_rings[q_id];  in ice_eswitch_remap_rings_to_vectors()
   164  rx_ring = vsi->rx_rings[q_id];  in ice_eswitch_remap_rings_to_vectors()

ice_idc.c
   119  u16 q_id;  in ice_del_rdma_qset() local
   130  q_id = qset->qs_handle;  in ice_del_rdma_qset()
   135  return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);  in ice_del_rdma_qset()

/kernel/linux/linux-5.10/fs/xfs/
xfs_dquot.c
    77  ASSERT(dq->q_id);  in xfs_qm_adjust_dqlimits()
   167  ASSERT(dq->q_id);  in xfs_qm_adjust_dqtimers()
   345  xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);  in xfs_dquot_disk_alloc()
   458  dqp->q_id = id;  in xfs_dquot_alloc()
   515  if (be32_to_cpu(ddqp->d_id) != dqp->q_id)  in xfs_dquot_check_type()
   524  dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)  in xfs_dquot_check_type()
   554  __this_address, dqp->q_id);  in xfs_dquot_from_disk()
   602  ddqp->d_id = cpu_to_be32(dqp->q_id);  in xfs_dquot_to_disk()
  1204  if (dqp->q_id == 0)  in xfs_qm_dqflush_check()
  1223  if (dqp->q_id  in xfs_qm_dqflush_check()
[all...]

/kernel/linux/linux-6.6/fs/xfs/
xfs_dquot.c
    77  ASSERT(dq->q_id);  in xfs_qm_adjust_dqlimits()
   164  ASSERT(dq->q_id);  in xfs_qm_adjust_dqtimers()
   356  xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);  in xfs_dquot_disk_alloc()
   478  dqp->q_id = id;  in xfs_dquot_alloc()
   535  if (be32_to_cpu(ddqp->d_id) != dqp->q_id)  in xfs_dquot_check_type()
   544  dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)  in xfs_dquot_check_type()
   575  __this_address, dqp->q_id);  in xfs_dquot_from_disk()
   619  ddqp->d_id = cpu_to_be32(dqp->q_id);  in xfs_dquot_to_disk()
  1178  if (dqp->q_id == 0)  in xfs_qm_dqflush_check()
  1197  if (dqp->q_id  in xfs_qm_dqflush_check()
[all...]

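xfs_dquot_to_disk() stores the in-core q_id big-endian in the on-disk d_id field (line 602 in 5.10, 619 in 6.6), and xfs_dquot_check_type() verifies the pair still match after a read. A userspace sketch of that round trip, using glibc's <endian.h> helpers in place of the kernel's cpu_to_be32()/be32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_dquot { uint32_t d_id; };    /* big-endian on disk */
struct incore_dquot { uint32_t q_id; };  /* CPU-endian in memory */

int main(void)
{
        struct incore_dquot dqp = { .q_id = 1000 };
        struct disk_dquot ddqp;

        ddqp.d_id = htobe32(dqp.q_id);                  /* cpu_to_be32() */
        printf("match: %d\n", be32toh(ddqp.d_id) == dqp.q_id);
        return 0;
}
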
/kernel/linux/linux-5.10/drivers/net/wireless/intel/iwlwifi/pcie/
tx.c
   144  txq_id != trans->txqs.cmd.q_id &&  in iwl_pcie_txq_inc_wr_ptr()
   312  if (txq_id != trans->txqs.cmd.q_id) {  in iwl_pcie_txq_unmap()
   325  if (txq_id == trans->txqs.cmd.q_id)  in iwl_pcie_txq_unmap()
   363  if (txq_id == trans->txqs.cmd.q_id)  in iwl_pcie_txq_free()
   426  iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,  in iwl_pcie_tx_start()
   635  bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);  in iwl_pcie_tx_alloc()
   689  bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);  in iwl_pcie_tx_init()
   761  if (WARN_ON(txq_id == trans->txqs.cmd.q_id))  in iwl_trans_pcie_reclaim()
  1018  if (txq_id == trans->txqs.cmd.q_id &&  in iwl_trans_pcie_txq_enable()
  1026  if (txq_id != trans->txqs.cmd.q_id)  in iwl_trans_pcie_txq_enable()
[all...]

tx-gen2.c
    78  struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];  in iwl_pcie_gen2_enqueue_hcmd()
   190  cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |  in iwl_pcie_gen2_enqueue_hcmd()
   238  cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);  in iwl_pcie_gen2_enqueue_hcmd()
   315  struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];  in iwl_pcie_gen2_send_hcmd_sync()

/kernel/linux/linux-6.6/drivers/net/wireless/intel/iwlwifi/pcie/
tx.c
    87  txq_id != trans->txqs.cmd.q_id &&  in iwl_pcie_txq_inc_wr_ptr()
   201  if (txq_id != trans->txqs.cmd.q_id) {  in iwl_pcie_txq_unmap()
   213  txq_id == trans->txqs.cmd.q_id)  in iwl_pcie_txq_unmap()
   249  if (txq_id == trans->txqs.cmd.q_id)  in iwl_pcie_txq_free()
   312  iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,  in iwl_pcie_tx_start()
   520  bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);  in iwl_pcie_tx_alloc()
   574  bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);  in iwl_pcie_tx_init()
   736  if (txq_id == trans->txqs.cmd.q_id &&  in iwl_trans_pcie_txq_enable()
   744  if (txq_id != trans->txqs.cmd.q_id)  in iwl_trans_pcie_txq_enable()
   814  if (txq_id == trans->txqs.cmd.q_id  in iwl_trans_pcie_txq_enable()
[all...]

tx-gen2.c
    31  struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];  in iwl_pcie_gen2_enqueue_hcmd()
   143  cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |  in iwl_pcie_gen2_enqueue_hcmd()
   191  cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);  in iwl_pcie_gen2_enqueue_hcmd()

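iwl_pcie_gen2_enqueue_hcmd() packs the command queue's q_id into the upper bits of the host command's little-endian sequence field via QUEUE_TO_SEQ(), next to the ring write index. A sketch of that bit-packing; the field widths here are assumptions for illustration, not lifted from the driver's headers:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_TO_SEQ(q)  (((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)  ((i) & 0xff)
#define SEQ_TO_QUEUE(s)  (((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)  ((s) & 0xff)

int main(void)
{
        /* cmd queue q_id 9, ring write index 42 packed into one u16 */
        uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(42);

        printf("queue %d index %d\n", SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
        return 0;
}
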