Lines matching references to `qm` in drivers/crypto/hisilicon/sec2/sec_main.c (HiSilicon SEC2 accelerator driver). The number at the start of each line is that line's position in the source file.

306 	struct hisi_qm *qm = s->private;
308 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
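The two fragments at 306/308 belong to the debugfs "diff regs" show callback. A minimal reconstruction, assuming the usual DEFINE_SHOW_ATTRIBUTE wiring used across this driver family:

	static int sec_diff_regs_show(struct seq_file *s, void *unused)
	{
		struct hisi_qm *qm = s->private;

		/* dump only the registers that changed since the snapshot */
		hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
					   ARRAY_SIZE(sec_diff_regs));

		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);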
394 u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
398 cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
399 cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;
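Lines 398/399 read the high and low 32-bit halves of the device's algorithm capability table; the elided return is a 64-bit combine. Reconstructed in full (the driver may spell the shift as a named macro rather than a literal 32):

	u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
	{
		u32 cap_val_h, cap_val_l;

		cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
		cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

		return ((u64)cap_val_h << 32) | (u64)cap_val_l;
	}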
424 static void sec_set_endian(struct hisi_qm *qm)
428 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
436 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
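sec_set_endian() is a read-modify-write of SEC_CONTROL_REG whose elided middle derives the endianness bits from the kernel config. A sketch, assuming BIT(0)/BIT(1) carry the endianness selection (the exact bit names are not shown in the fragments):

	static void sec_set_endian(struct hisi_qm *qm)
	{
		u32 reg;

		reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
		reg &= ~(BIT(1) | BIT(0));
		if (!IS_ENABLED(CONFIG_64BIT))
			reg |= BIT(1);
		if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
			reg |= BIT(0);

		writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
	}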
439 static void sec_engine_sva_config(struct hisi_qm *qm)
443 if (qm->ver > QM_HW_V2) {
444 reg = readl_relaxed(qm->io_base +
447 writel_relaxed(reg, qm->io_base +
450 reg = readl_relaxed(qm->io_base +
454 writel_relaxed(reg, qm->io_base +
457 reg = readl_relaxed(qm->io_base +
460 writel_relaxed(reg, qm->io_base +
462 reg = readl_relaxed(qm->io_base +
465 if (qm->use_sva)
469 writel_relaxed(reg, qm->io_base +
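The fragments at 439-469 apply the same read-modify-write pattern to the SMMU/SVA user-control registers, with one path for HW V3+ and one for earlier hardware; only the pre-V3 path consults qm->use_sva (line 465). A condensed sketch of that branch, where register and mask names such as SEC_INTERFACE_USER_CTRL1_REG and SEC_USER1_SMMU_SVA follow the driver's naming convention but are reconstructed, not quoted:

	reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
	reg &= SEC_USER1_SMMU_MASK;
	if (qm->use_sva)
		reg |= SEC_USER1_SMMU_SVA;	/* shared virtual addressing */
	else
		reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);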
474 static void sec_open_sva_prefetch(struct hisi_qm *qm)
479 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
483 val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
485 writel(val, qm->io_base + SEC_PREFETCH_CFG);
487 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
491 pci_err(qm->pdev, "failed to open sva prefetch\n");
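Opening SVA prefetch is gated on the QM_SUPPORT_SVA_PREFETCH capability bit and confirmed by polling the same register until the disable bit clears. Filled out, the body plausibly reads (SEC_PREFETCH_ENABLE/SEC_PREFETCH_DISABLE and the delay/timeout constants are reconstructed names):

	/* clear the disable bits to start prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");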
494 static void sec_close_sva_prefetch(struct hisi_qm *qm)
499 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
502 val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
504 writel(val, qm->io_base + SEC_PREFETCH_CFG);
506 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
510 pci_err(qm->pdev, "failed to close sva prefetch\n");
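The close path is the mirror image: set the disable bit in SEC_PREFETCH_CFG, then poll SEC_SVA_TRANS until outstanding translations drain. Roughly (SEC_SVA_DISABLE_READY is a reconstructed mask name):

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	/* wait for in-flight SVA translations to complete */
	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");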
513 static void sec_enable_clock_gate(struct hisi_qm *qm)
517 if (qm->ver < QM_HW_V3)
520 val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
522 writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
524 val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
526 writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);
528 val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
530 writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
533 static void sec_disable_clock_gate(struct hisi_qm *qm)
538 val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
540 writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
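sec_enable_clock_gate() runs only on HW V3 and later and sets a gating-enable bit in each of the three registers read above, while sec_disable_clock_gate() clears the SEC_CONTROL_REG bit unconditionally before engine memory init. The elided modify steps are presumably one read-modify-write per register; sketched for SEC_CONTROL_REG with reconstructed mask names:

	/* enable (HW >= V3) */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	/* disable (done before sec_engine_init() touches engine memory) */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);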
543 static int sec_engine_init(struct hisi_qm *qm)
549 sec_disable_clock_gate(qm);
551 writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
553 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
557 pci_err(qm->pdev, "fail to init sec mem\n");
561 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
563 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
565 sec_engine_sva_config(qm);
568 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
570 reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
571 writel(reg, qm->io_base + SEC_SAA_EN_REG);
573 if (qm->ver < QM_HW_V3) {
576 qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
580 qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
582 qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
586 sec_set_endian(qm);
588 sec_enable_clock_gate(qm);
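The core of sec_engine_init() (lines 551-557) is a start/done handshake with the engine's internal memory initializer: write 1 to the start register, then poll bit 0 of the done register. Expanded, the fragment plausibly reads:

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}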
593 static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
595 /* qm user domain */
596 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
597 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
598 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
599 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
600 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
602 /* qm cache */
603 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
604 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
607 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
608 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
613 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
615 return sec_engine_init(qm);
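Line 613 is the tail of a single writel() that enables SQC/CQC cache write-back with a threshold of 1; reassembled it likely looks like:

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);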
619 static void sec_debug_regs_clear(struct hisi_qm *qm)
624 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
626 readl(qm->io_base + sec_dfx_regs[i].offset);
629 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
631 hisi_qm_debug_regs_clear(qm);
634 static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
638 val1 = readl(qm->io_base + SEC_CONTROL_REG);
641 val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
642 SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
648 if (qm->ver > QM_HW_V2)
649 writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
651 writel(val1, qm->io_base + SEC_CONTROL_REG);
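sec_master_ooo_ctrl() toggles the AXI shutdown behaviour taken on master out-of-order errors: on enable it sets the shutdown bit and programs the capability-derived mask, on disable it clears both. A full-function sketch consistent with the fragments (SEC_AXI_SHUTDOWN_ENABLE/DISABLE appear later in this listing at lines 1021-1022):

	static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
	{
		u32 val1, val2;

		val1 = readl(qm->io_base + SEC_CONTROL_REG);
		if (enable) {
			val1 |= SEC_AXI_SHUTDOWN_ENABLE;
			val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
						   SEC_OOO_SHUTDOWN_MASK_CAP,
						   qm->cap_ver);
		} else {
			val1 &= SEC_AXI_SHUTDOWN_DISABLE;
			val2 = 0x0;
		}

		if (qm->ver > QM_HW_V2)
			writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

		writel(val1, qm->io_base + SEC_CONTROL_REG);
	}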
654 static void sec_hw_error_enable(struct hisi_qm *qm)
658 if (qm->ver == QM_HW_V1) {
659 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
660 pci_info(qm->pdev, "V1 not support hw error handle\n");
664 ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
665 nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
668 writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
671 writel(ce, qm->io_base + SEC_RAS_CE_REG);
672 writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
673 writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
676 sec_master_ooo_ctrl(qm, true);
679 writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
682 static void sec_hw_error_disable(struct hisi_qm *qm)
685 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
688 sec_master_ooo_ctrl(qm, false);
691 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
692 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
693 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
696 static u32 sec_clear_enable_read(struct hisi_qm *qm)
698 return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
702 static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
709 tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
711 writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
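The write accessor accepts only 0 or 1 and folds the value into the low bit of SEC_CTRL_CNT_CLR_CE. Lines 702-711 filled out (SEC_CTRL_CNT_CLR_CE_BIT is a reconstructed mask name):

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;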
721 struct hisi_qm *qm = file->qm;
725 ret = hisi_qm_get_dfx_access(qm);
733 val = sec_clear_enable_read(qm);
741 hisi_qm_put_dfx_access(qm);
747 hisi_qm_put_dfx_access(qm);
756 struct hisi_qm *qm = file->qm;
775 ret = hisi_qm_get_dfx_access(qm);
783 ret = sec_clear_enable_write(qm, val);
796 hisi_qm_put_dfx_access(qm);
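Lines 721-796 come from the sec_debug_read/sec_debug_write file operations. Both follow the same envelope: take DFX access, perform the register access under the file lock, release access, and copy through a small stack buffer. A compressed sketch of the read side, with the buffer-size macro, index constant, and locking reconstructed from the usual pattern:

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		spin_unlock_irq(&file->lock);
		hisi_qm_put_dfx_access(qm);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);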
836 static int sec_core_debug_init(struct hisi_qm *qm)
838 struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
839 struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
840 struct device *dev = &qm->pdev->dev;
846 tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
854 regset->base = qm->io_base;
857 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
859 if (qm->fun_type == QM_HW_PF && sec_regs)
861 qm, &sec_diff_regs_fops);
873 static int sec_debug_init(struct hisi_qm *qm)
875 struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
878 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
882 sec->debug.files[i].qm = qm;
885 qm->debug.debug_root,
891 return sec_core_debug_init(qm);
894 static int sec_debugfs_init(struct hisi_qm *qm)
896 struct device *dev = &qm->pdev->dev;
899 qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
901 qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
902 qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
904 ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
910 hisi_qm_debug_init(qm);
912 ret = sec_debug_init(qm);
919 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
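sec_debugfs_init() creates the per-device debugfs root, registers the diff-regs files, then the generic QM entries and the SEC-specific ones; on failure it unwinds the diff-regs registration (line 919) and removes the directory. Reconstructed flow, with the goto labels and sec_debugfs_root assumed:

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

	failed_to_create:
		hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
	debugfs_remove:
		debugfs_remove_recursive(qm->debug.debug_root);
		return ret;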
925 static void sec_debugfs_exit(struct hisi_qm *qm)
927 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
929 debugfs_remove_recursive(qm->debug.debug_root);
932 static int sec_show_last_regs_init(struct hisi_qm *qm)
934 struct qm_debug *debug = &qm->debug;
943 debug->last_words[i] = readl_relaxed(qm->io_base +
949 static void sec_show_last_regs_uninit(struct hisi_qm *qm)
951 struct qm_debug *debug = &qm->debug;
953 if (qm->fun_type == QM_HW_VF || !debug->last_words)
960 static void sec_show_last_dfx_regs(struct hisi_qm *qm)
962 struct qm_debug *debug = &qm->debug;
963 struct pci_dev *pdev = qm->pdev;
967 if (qm->fun_type == QM_HW_VF || !debug->last_words)
972 val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
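sec_show_last_dfx_regs() compares each DFX register against the snapshot taken in sec_show_last_regs_init() and prints only the ones that changed. A sketch of the loop around line 972:

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name,
				 debug->last_words[i], val);
	}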
979 static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
982 struct device *dev = &qm->pdev->dev;
991 err_val = readl(qm->io_base +
1002 static u32 sec_get_hw_err_status(struct hisi_qm *qm)
1004 return readl(qm->io_base + SEC_CORE_INT_STATUS);
1007 static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
1011 writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
1012 nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
1013 writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
1016 static void sec_open_axi_master_ooo(struct hisi_qm *qm)
1020 val = readl(qm->io_base + SEC_CONTROL_REG);
1021 writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
1022 writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
1025 static void sec_err_info_init(struct hisi_qm *qm)
1027 struct hisi_qm_err_info *err_info = &qm->err_info;
1030 err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
1031 err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
1033 err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
1034 SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1035 err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
1036 SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1037 err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
1038 SEC_QM_RESET_MASK_CAP, qm->cap_ver);
1039 err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
1040 SEC_RESET_MASK_CAP, qm->cap_ver);
1061 struct hisi_qm *qm = &sec->qm;
1064 qm->err_ini = &sec_err_ini;
1065 qm->err_ini->err_info_init(qm);
1067 ret = sec_set_user_domain_and_cache(qm);
1071 sec_open_sva_prefetch(qm);
1072 hisi_qm_dev_err_init(qm);
1073 sec_debug_regs_clear(qm);
1074 ret = sec_show_last_regs_init(qm);
1076 pci_err(qm->pdev, "Failed to init last word regs!\n");
1081 static int sec_pre_store_cap_reg(struct hisi_qm *qm)
1084 struct pci_dev *pdev = qm->pdev;
1094 sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
1095 sec_pre_store_caps[i], qm->cap_ver);
1098 qm->cap_tables.dev_cap_table = sec_cap;
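sec_pre_store_cap_reg() caches the capability registers once at init so later lookups (such as sec_get_alg_bitmap() above) need not touch hardware; line 1098 publishes the table. Reconstructed with the allocation step the fragments omit:

	size = ARRAY_SIZE(sec_pre_store_caps);
	sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
	if (!sec_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		sec_cap[i].type = sec_pre_store_caps[i];
		sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
							 sec_pre_store_caps[i],
							 qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = sec_cap;

	return 0;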
1103 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
1108 qm->pdev = pdev;
1109 qm->ver = pdev->revision;
1110 qm->mode = uacce_mode;
1111 qm->sqe_size = SEC_SQE_SIZE;
1112 qm->dev_name = sec_name;
1114 qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
1116 if (qm->fun_type == QM_HW_PF) {
1117 qm->qp_base = SEC_PF_DEF_Q_BASE;
1118 qm->qp_num = pf_q_num;
1119 qm->debug.curr_qm_qp_num = pf_q_num;
1120 qm->qm_list = &sec_devices;
1122 set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
1123 } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
1125 * have no way to get qm configure in VM in v1 hardware,
1130 qm->qp_base = SEC_PF_DEF_Q_NUM;
1131 qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
1134 ret = hisi_qm_init(qm);
1136 pci_err(qm->pdev, "Failed to init sec qm configures!\n");
1141 ret = sec_pre_store_cap_reg(qm);
1143 pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
1144 hisi_qm_uninit(qm);
1148 alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
1149 ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
1151 pci_err(qm->pdev, "Failed to set sec algs!\n");
1152 hisi_qm_uninit(qm);
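The tail of sec_qm_init() (lines 1134-1152) is a three-step sequence in which each later failure must undo hisi_qm_init(); stitched together:

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init sec qm configures!\n");
		return ret;
	}

	/* fetch and save the capability registers */
	ret = sec_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX,
				     SEC_DEV_ALG_BITMAP_LOW_IDX);
	ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set sec algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;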
1158 static void sec_qm_uninit(struct hisi_qm *qm)
1160 hisi_qm_uninit(qm);
1166 struct hisi_qm *qm = &sec->qm;
1169 if (qm->fun_type == QM_HW_PF) {
1174 if (qm->ver >= QM_HW_V3) {
1176 qm->type_rate = type_rate;
1183 static void sec_probe_uninit(struct hisi_qm *qm)
1185 hisi_qm_dev_err_uninit(qm);
1191 struct device *dev = &sec->qm.pdev->dev;
1208 struct hisi_qm *qm;
1215 qm = &sec->qm;
1216 ret = sec_qm_init(qm, pdev);
1231 ret = hisi_qm_start(qm);
1233 pci_err(pdev, "Failed to start sec qm!\n");
1237 ret = sec_debugfs_init(qm);
1241 if (qm->qp_num >= ctx_q_num) {
1242 ret = hisi_qm_alg_register(qm, &sec_devices);
1248 pci_warn(qm->pdev,
1252 if (qm->uacce) {
1253 ret = uacce_register(qm->uacce);
1260 if (qm->fun_type == QM_HW_PF && vfs_num) {
1266 hisi_qm_pm_init(qm);
1271 if (qm->qp_num >= ctx_q_num)
1272 hisi_qm_alg_unregister(qm, &sec_devices);
1274 sec_debugfs_exit(qm);
1275 hisi_qm_stop(qm, QM_NORMAL);
1277 sec_show_last_regs_uninit(qm);
1278 sec_probe_uninit(qm);
1280 sec_qm_uninit(qm);
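The probe fragments from 1271 onwards are the error unwind, releasing resources in the reverse order of acquisition. With the goto labels (reconstructed names) restored:

	err_alg_unregister:
		if (qm->qp_num >= ctx_q_num)
			hisi_qm_alg_unregister(qm, &sec_devices);
	err_qm_stop:
		sec_debugfs_exit(qm);
		hisi_qm_stop(qm, QM_NORMAL);
	err_probe_uninit:
		sec_show_last_regs_uninit(qm);
		sec_probe_uninit(qm);
	err_qm_uninit:
		sec_qm_uninit(qm);
		return ret;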
1286 struct hisi_qm *qm = pci_get_drvdata(pdev);
1288 hisi_qm_pm_uninit(qm);
1289 hisi_qm_wait_task_finish(qm, &sec_devices);
1290 if (qm->qp_num >= ctx_q_num)
1291 hisi_qm_alg_unregister(qm, &sec_devices);
1293 if (qm->fun_type == QM_HW_PF && qm->vfs_num)
1296 sec_debugfs_exit(qm);
1298 (void)hisi_qm_stop(qm, QM_NORMAL);
1300 if (qm->fun_type == QM_HW_PF)
1301 sec_debug_regs_clear(qm);
1302 sec_show_last_regs_uninit(qm);
1304 sec_probe_uninit(qm);
1306 sec_qm_uninit(qm);