/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
   156  writel_relaxed(q->llq.cons, q->cons_reg);    (in queue_sync_cons_out())
   167  struct arm_smmu_ll_queue *llq = &q->llq;    (local in queue_sync_cons_ovf())
   169  if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))    (in queue_sync_cons_ovf())
   172  llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |    (in queue_sync_cons_ovf())
   173  Q_IDX(llq, ll    (in queue_sync_cons_ovf(), snippet truncated)
   531  struct arm_smmu_ll_queue llq = {    (local in __arm_smmu_cmdq_poll_set_valid_map())
   588  arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   623  __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   647  __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   697  arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   710  struct arm_smmu_ll_queue llq = {    (local in arm_smmu_cmdq_write_entries())
   747  struct arm_smmu_ll_queue llq = {    (local in arm_smmu_cmdq_issue_cmdlist())
  1371  struct arm_smmu_ll_queue *llq = &q->llq;    (local in arm_smmu_evtq_thread())
  1441  struct arm_smmu_ll_queue *llq = &q->llq;    (local in arm_smmu_priq_thread())
  [all...]

arm-smmu-v3.h
   169  #define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
   170  #define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
   174  Q_IDX(&((q)->llq), p) * \
   489  struct arm_smmu_ll_queue llq;    (member)
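
The Q_IDX/Q_WRP macros in the arm-smmu-v3.h hits above encode a queue pointer as the low max_n_shift bits of slot index plus one extra wrap bit that flips on every pass over the ring. A minimal standalone sketch of that encoding (an illustration, not the driver code; struct ll_queue, queue_empty() and queue_full() below are stand-ins for the kernel's arm_smmu_ll_queue helpers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ll_queue {                 /* stand-in for struct arm_smmu_ll_queue */
        uint32_t max_n_shift;         /* log2 of the number of queue slots */
        uint32_t prod;                /* producer pointer: wrap bit | index */
        uint32_t cons;                /* consumer pointer: wrap bit | index */
    };

    /* Same shape as the macros in the arm-smmu-v3.h hits above. */
    #define Q_IDX(llq, p)  ((p) & ((1 << (llq)->max_n_shift) - 1))
    #define Q_WRP(llq, p)  ((p) & (1 << (llq)->max_n_shift))

    /* Empty: index and wrap bit both match; full: same index, wrap bits differ. */
    static bool queue_empty(const struct ll_queue *q)
    {
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
    }

    static bool queue_full(const struct ll_queue *q)
    {
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
    }

    int main(void)
    {
        struct ll_queue q = { .max_n_shift = 3, .prod = 0, .cons = 0 };

        for (int i = 0; i < 8; i++)   /* push 8 entries into an 8-slot ring */
            q.prod = Q_WRP(&q, q.prod + 1) | Q_IDX(&q, q.prod + 1);

        printf("full=%d empty=%d\n", queue_full(&q), queue_empty(&q));  /* full=1 empty=0 */
        return 0;
    }

With max_n_shift = 3 the ring has 8 slots; after eight pushes the index bits match again but the wrap bits differ, which is exactly the full condition the extra bit exists to distinguish from empty.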

/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
   146  writel_relaxed(q->llq.cons, q->cons_reg);    (in queue_sync_cons_out())
   157  struct arm_smmu_ll_queue *llq = &q->llq;    (local in queue_sync_cons_ovf())
   159  if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))    (in queue_sync_cons_ovf())
   162  llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |    (in queue_sync_cons_ovf())
   163  Q_IDX(llq, ll    (in queue_sync_cons_ovf(), snippet truncated)
   536  struct arm_smmu_ll_queue llq = {    (local in __arm_smmu_cmdq_poll_set_valid_map())
   593  arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   628  __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   652  __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   702  arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, struct arm_smmu_ll_queue *llq)    (argument)
   715  struct arm_smmu_ll_queue llq = {    (local in arm_smmu_cmdq_write_entries())
   752  struct arm_smmu_ll_queue llq, head;    (local in arm_smmu_cmdq_issue_cmdlist())
  1568  struct arm_smmu_ll_queue *llq = &q->llq;    (local in arm_smmu_evtq_thread())
  1644  struct arm_smmu_ll_queue *llq = &q->llq;    (local in arm_smmu_priq_thread())
  [all...]

arm-smmu-v3.h
   175  #define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
   176  #define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
   180  Q_IDX(&((q)->llq), p) * \
   528  struct arm_smmu_ll_queue llq;    (member)
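
Both kernel versions also hit queue_sync_cons_ovf(): once the producer side has flagged an overflow (Q_OVF(llq->prod) != Q_OVF(llq->cons)), the consumer acknowledges it by copying the producer's overflow bit into its own pointer while keeping its wrap and index bits. A hedged reconstruction of that update (Q_OVF() is assumed here to mask a single high flag bit, since its definition is not among the hits above, and the truncated argument of the last Q_IDX() is filled in as llq->cons purely for illustration):

    #include <stdio.h>

    /* Illustrative reconstruction only; the real macros live in arm-smmu-v3.h. */
    #define Q_OVF(p)        ((p) & (1U << 31))   /* assumed overflow flag bit */
    #define Q_IDX(llq, p)   ((p) & ((1U << (llq)->max_n_shift) - 1))
    #define Q_WRP(llq, p)   ((p) & (1U << (llq)->max_n_shift))

    struct ll_queue { unsigned int max_n_shift, prod, cons; };

    /* Mirrors the update at arm-smmu-v3.c:172 (5.10) / :162 (6.6): keep the
     * consumer's own wrap and index bits, but copy the producer's overflow
     * flag so the overflow condition is acknowledged exactly once. */
    static void sync_cons_ovf(struct ll_queue *llq)
    {
        if (Q_OVF(llq->prod) == Q_OVF(llq->cons))
            return;                              /* nothing to acknowledge */

        llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
                    Q_IDX(llq, llq->cons);
    }

    int main(void)
    {
        struct ll_queue q = { .max_n_shift = 3, .prod = (1U << 31) | 5, .cons = 2 };

        sync_cons_ovf(&q);
        printf("cons=0x%x\n", q.cons);           /* 0x80000002: flag copied, index kept */
        return 0;
    }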

/kernel/linux/linux-5.10/drivers/net/ethernet/amazon/ena/

ena_com.c
   605  cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;    (in ena_com_set_llq())
   606  cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;    (in ena_com_set_llq())
   607  cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;    (in ena_com_set_llq())
   608  cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;    (in ena_com_set_llq())
   610  cmd.u.llq.accel_mode.u.set.enabled_flags =    (in ena_com_set_llq())
   663  pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
   691  pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
   727  pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
  1982  memcpy(&get_feat_ctx->llq, &get_resp.u.llq,    (in ena_com_get_dev_attr_feat())
  [all...]

ena_admin_defs.h
   583  * support those requirements in order to use accelerated llq
   952  struct ena_admin_feature_llq_desc llq;    (member)
  1005  struct ena_admin_feature_llq_desc llq;    (member)
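
The three "performing fallback" messages from ena_com_config_llq_info() all follow the same negotiation pattern: each LLQ knob (stride control, ring entry size, descriptors before header) is advertised by the device as a bitmask of supported modes, and the driver keeps its preferred default only if the matching bit is set, otherwise it falls back to a mode the device does support and logs default/supported/used. A rough sketch of that pattern (a reconstruction, not ena_com.c; llq_pick_field() and its parameters are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Keep the preferred mode bit if the device advertises it; otherwise pick
     * some supported mode (here the lowest set bit) and warn, mirroring the
     * default/supported/used values printed by the messages above. */
    static uint16_t llq_pick_field(const char *name, uint16_t supported,
                                   uint16_t preferred)
    {
        if (supported & preferred)
            return preferred;                 /* device supports our default */

        uint16_t used = supported & -supported;   /* lowest supported mode bit */

        fprintf(stderr,
                "Default llq %s is not supported, performing fallback, "
                "default: 0x%x, supported: 0x%x, used: 0x%x\n",
                name, preferred, supported, used);
        return used;
    }

    int main(void)
    {
        /* e.g. device supports only 128B entries (bit 0), driver prefers 256B (bit 1) */
        uint16_t entry_size = llq_pick_field("ring entry size", 0x1, 0x2);

        printf("entry size ctrl in use: 0x%x\n", entry_size);
        return 0;
    }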

ena_netdev.c
   141  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",    (in ena_xmit_common())
   245  /* llq push buffer */    (in ena_xdp_tx_map_buff())
  3286  ena_set_queues_placement_policy(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations)    (argument)
  3288  struct ena_admin_feature_llq_desc *llq,    (in ena_set_queues_placement_policy())
  3302  rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);    (in ena_set_queues_placement_policy())
  3427  rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,    (in ena_device_init())
  3921  /* In case of LLQ use the llq fields for the tx SQ/CQ */    (in ena_calc_max_io_queue_num())
  3923  io_tx_sq_num = get_feat_ctx->llq.max_llq_num;    (in ena_calc_max_io_queue_num())
  4053  struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;    (local in ena_calc_io_queue_size())
  4069  llq    (in ena_calc_io_queue_size(), snippet truncated)
  [all...]

ena_com.h
   150  /* This struct is to keep tracking the current location of the next llq entry */
   342  struct ena_admin_feature_llq_desc llq;    (member)
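
The ena_netdev.c hits at lines 3921-3923 show the other side of LLQ placement: when TX descriptors are pushed to device memory, the number of usable TX submission queues is taken from the device's llq.max_llq_num rather than from the regular host-memory SQ limit. A small sketch of that decision (illustrative only; the enum names and the min() against the CQ budget are assumptions about the surrounding code):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative names, standing in for the driver's placement-policy constants. */
    enum placement_policy { PLACEMENT_HOST, PLACEMENT_DEV };

    static uint32_t calc_max_io_tx_queues(enum placement_policy policy,
                                          uint32_t max_host_tx_sq_num,
                                          uint32_t max_llq_num,
                                          uint32_t max_tx_cq_num)
    {
        /* "In case of LLQ use the llq fields for the tx SQ/CQ" */
        uint32_t io_tx_sq_num = (policy == PLACEMENT_DEV) ? max_llq_num
                                                          : max_host_tx_sq_num;

        /* a queue needs both an SQ and a CQ, so take the smaller budget */
        return io_tx_sq_num < max_tx_cq_num ? io_tx_sq_num : max_tx_cq_num;
    }

    int main(void)
    {
        /* e.g. 128 host SQs/CQs available, but only 32 LLQs in device memory */
        printf("max io tx queues: %u\n",
               calc_max_io_tx_queues(PLACEMENT_DEV, 128, 32, 128));   /* 32 */
        return 0;
    }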

/kernel/linux/linux-6.6/drivers/net/ethernet/amazon/ena/

ena_com.c
   621  cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;    (in ena_com_set_llq())
   622  cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;    (in ena_com_set_llq())
   623  cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;    (in ena_com_set_llq())
   624  cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;    (in ena_com_set_llq())
   626  cmd.u.llq.accel_mode.u.set.enabled_flags =    (in ena_com_set_llq())
   683  "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
   713  "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
   752  "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",    (in ena_com_config_llq_info())
  2038  memcpy(&get_feat_ctx->llq, &get_resp.u.llq,    (in ena_com_get_dev_attr_feat())
  [all...]

ena_admin_defs.h
   591  * support those requirements in order to use accelerated llq
   962  struct ena_admin_feature_llq_desc llq;    (member)
  1015  struct ena_admin_feature_llq_desc llq;    (member)
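
As in 5.10, ena_com_set_llq() (lines 621-626 above) programs the negotiated configuration back into the device by filling the llq section of an admin SET_FEATURE command. A stripped-down sketch of that step (the two structs below are illustrative stand-ins; only the four fields visible in the hits are modeled, and the actual admin-queue submission is omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the negotiated LLQ configuration and the
     * admin command payload. */
    struct llq_info {
        uint16_t header_location_ctrl;
        uint16_t desc_list_entry_size_ctrl;
        uint16_t descs_num_before_header;
        uint16_t desc_stride_ctrl;
    };

    struct set_llq_cmd {
        uint16_t header_location_ctrl_enabled;
        uint16_t entry_size_ctrl_enabled;
        uint16_t desc_num_before_header_enabled;
        uint16_t descriptors_stride_ctrl_enabled;
    };

    /* Mirrors the assignments in the ena_com_set_llq() hits: copy each
     * negotiated value into the matching "*_enabled" field of the command,
     * which would then be posted to the device's admin queue. */
    static void fill_set_llq_cmd(struct set_llq_cmd *cmd, const struct llq_info *info)
    {
        memset(cmd, 0, sizeof(*cmd));
        cmd->header_location_ctrl_enabled = info->header_location_ctrl;
        cmd->entry_size_ctrl_enabled = info->desc_list_entry_size_ctrl;
        cmd->desc_num_before_header_enabled = info->descs_num_before_header;
        cmd->descriptors_stride_ctrl_enabled = info->desc_stride_ctrl;
    }

    int main(void)
    {
        struct llq_info info = { 1, 2, 2, 1 };   /* example values only */
        struct set_llq_cmd cmd;

        fill_set_llq_cmd(&cmd, &info);
        printf("entry_size_ctrl_enabled=%u\n", (unsigned)cmd.entry_size_ctrl_enabled);
        return 0;
    }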

ena_netdev.c
   151  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",    (in ena_xmit_common())
  3423  struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;    (local in ena_calc_io_queue_size())
  3448  llq->max_llq_depth);    (in ena_calc_io_queue_size())
  3466  llq->max_llq_depth);    (in ena_calc_io_queue_size())
  3484  if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&    (in ena_calc_io_queue_size())
  3535  set_default_llq_configurations(struct ena_adapter *adapter, struct ena_llq_configurations *llq_config, struct ena_admin_feature_llq_desc *llq)    (argument)
  3537  struct ena_admin_feature_llq_desc *llq)    (in set_default_llq_configurations())
  3548  !!(llq->entry_size_ctrl_supported &    (in set_default_llq_configurations())
  3551  if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&    (in set_default_llq_configurations())
  3561  ena_set_queues_placement_policy(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations)    (argument)
  3563  struct ena_admin_feature_llq_desc *llq,    (in ena_set_queues_placement_policy())
  [all...]

ena_com.h
   150  /* This struct is to keep tracking the current location of the next llq entry */
   344  struct ena_admin_feature_llq_desc llq;    (member)
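
In the 6.6 tree, set_default_llq_configurations() (hits at ena_netdev.c:3537-3551) additionally chooses the LLQ ring entry size from the device's entry_size_ctrl_supported bitmask: 256B entries are used only when the device advertises ENA_ADMIN_LIST_ENTRY_SIZE_256B and large LLQ headers were requested, otherwise the 128B default stays. A hedged sketch of that choice (the flag values and struct llq_config fields here are assumptions, not the real ena_admin_defs.h definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed single-bit flags; the real values live in ena_admin_defs.h. */
    #define LIST_ENTRY_SIZE_128B  (1U << 0)
    #define LIST_ENTRY_SIZE_256B  (1U << 1)

    struct llq_config {               /* illustrative subset of the LLQ config */
        uint16_t ring_entry_size;
        uint16_t ring_entry_size_value;
    };

    /* Mirrors the shape of the check at ena_netdev.c:3551: use 256B entries
     * only when the device advertises them and large LLQ headers were
     * requested; otherwise stay on the 128B default. */
    static void pick_llq_entry_size(struct llq_config *cfg,
                                    uint16_t entry_size_ctrl_supported,
                                    bool large_llq_header_enabled)
    {
        if ((entry_size_ctrl_supported & LIST_ENTRY_SIZE_256B) &&
            large_llq_header_enabled) {
            cfg->ring_entry_size = LIST_ENTRY_SIZE_256B;
            cfg->ring_entry_size_value = 256;
        } else {
            cfg->ring_entry_size = LIST_ENTRY_SIZE_128B;
            cfg->ring_entry_size_value = 128;
        }
    }

    int main(void)
    {
        struct llq_config cfg;

        pick_llq_entry_size(&cfg, LIST_ENTRY_SIZE_128B | LIST_ENTRY_SIZE_256B, true);
        printf("entry size: %uB\n", (unsigned)cfg.ring_entry_size_value);   /* 256B */
        return 0;
    }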