Lines matching references to qm (hisi_qm usage in the HiSilicon HPRE accelerator driver, hpre_main.c)
360 bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
364 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
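The pattern behind hpre_check_alg_support() is a single bit-test against the bitmap stored during capability pre-reading. A minimal sketch, assuming alg is passed in as a bitmap value; only the index macro comes from the listing, the rest is illustrative:

        /* Sketch only: true if the requested algorithm bit is present in the
         * driver-algorithm capability bitmap read at probe time.
         */
        static bool example_alg_supported(struct hisi_qm *qm, u32 alg)
        {
                u32 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;

                return !!(cap_val & alg);
        }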
373 struct hisi_qm *qm = s->private;
375 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
460 static void hpre_config_pasid(struct hisi_qm *qm)
464 if (qm->ver >= QM_HW_V3)
467 val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
468 val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
469 if (qm->use_sva) {
476 writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
477 writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
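hpre_config_pasid() is a read-modify-write of the data RUSER/WUSER user-config registers keyed on qm->use_sva (the listing additionally gates it on qm->ver at line 464, which this sketch leaves out). A hedged sketch of the pattern; EXAMPLE_PASID_EN stands in for the driver's real PASID field masks, which are not shown here:

        #include <linux/io.h>

        #define EXAMPLE_PASID_EN        BIT(9)  /* placeholder, not the real field */

        static void example_config_pasid(struct hisi_qm *qm)
        {
                u32 val1, val2;

                val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
                val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
                if (qm->use_sva) {
                        val1 |= EXAMPLE_PASID_EN;
                        val2 |= EXAMPLE_PASID_EN;
                } else {
                        val1 &= ~EXAMPLE_PASID_EN;
                        val2 &= ~EXAMPLE_PASID_EN;
                }
                writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
                writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
        }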
480 static int hpre_cfg_by_dsm(struct hisi_qm *qm)
482 struct device *dev = &qm->pdev->dev;
504 static int hpre_set_cluster(struct hisi_qm *qm)
506 struct device *dev = &qm->pdev->dev;
513 cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
514 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
520 qm->io_base + offset + HPRE_CORE_ENB);
521 writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
522 ret = readl_relaxed_poll_timeout(qm->io_base + offset +
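hpre_set_cluster() walks each cluster, enables its cores, kicks initialisation, and polls until the hardware reports the cores ready. A sketch of the per-cluster step using readl_relaxed_poll_timeout(); the status-register offset and the poll intervals are placeholders, not the driver's real values:

        #include <linux/iopoll.h>

        #define EXAMPLE_CORE_INI_STATUS 0x80    /* made-up placeholder offset */

        static int example_init_one_cluster(struct hisi_qm *qm, unsigned long offset,
                                            u32 core_mask)
        {
                u32 val;

                writel(core_mask, qm->io_base + offset + HPRE_CORE_ENB);
                writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);

                /* Wait for the enabled cores to report initialisation complete;
                 * poll every 10us, give up after 1ms (illustrative values). */
                return readl_relaxed_poll_timeout(qm->io_base + offset +
                                                  EXAMPLE_CORE_INI_STATUS, val,
                                                  (val & core_mask) == core_mask,
                                                  10, 1000);
        }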
543 static void disable_flr_of_bme(struct hisi_qm *qm)
547 val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
550 writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
551 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
554 static void hpre_open_sva_prefetch(struct hisi_qm *qm)
559 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
563 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
565 writel(val, qm->io_base + HPRE_PREFETCH_CFG);
567 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
572 pci_err(qm->pdev, "failed to open sva prefetch\n");
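Opening SVA prefetch is conditional on the QM_SUPPORT_SVA_PREFETCH capability bit, then toggles HPRE_PREFETCH_CFG and polls until the change takes effect. A sketch of that pattern; EXAMPLE_PREFETCH_DISABLE and the poll parameters are assumptions:

        #define EXAMPLE_PREFETCH_DISABLE        BIT(0)  /* placeholder bit */

        static void example_open_sva_prefetch(struct hisi_qm *qm)
        {
                u32 val;
                int ret;

                if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
                        return;

                /* Clear the prefetch-disable bit... */
                val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
                val &= ~EXAMPLE_PREFETCH_DISABLE;
                writel(val, qm->io_base + HPRE_PREFETCH_CFG);

                /* ...and wait until the register reads back with it cleared
                 * (10us poll, 1ms timeout, illustrative values). */
                ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, val,
                                                 !(val & EXAMPLE_PREFETCH_DISABLE),
                                                 10, 1000);
                if (ret)
                        pci_err(qm->pdev, "failed to open sva prefetch\n");
        }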
575 static void hpre_close_sva_prefetch(struct hisi_qm *qm)
580 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
583 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
585 writel(val, qm->io_base + HPRE_PREFETCH_CFG);
587 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
592 pci_err(qm->pdev, "failed to close sva prefetch\n");
595 static void hpre_enable_clock_gate(struct hisi_qm *qm)
599 if (qm->ver < QM_HW_V3)
602 val = readl(qm->io_base + HPRE_CLKGATE_CTL);
604 writel(val, qm->io_base + HPRE_CLKGATE_CTL);
606 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
608 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
610 val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
612 writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
614 val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
616 writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
619 static void hpre_disable_clock_gate(struct hisi_qm *qm)
623 if (qm->ver < QM_HW_V3)
626 val = readl(qm->io_base + HPRE_CLKGATE_CTL);
628 writel(val, qm->io_base + HPRE_CLKGATE_CTL);
630 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
632 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
634 val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
636 writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
638 val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
640 writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
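hpre_enable_clock_gate() and hpre_disable_clock_gate() are mirror images: the same four registers are read, a gating bit is set or cleared, and the value is written back, with both paths skipped before HW V3. A combined sketch of that symmetry; EXAMPLE_GATE_EN is a placeholder for the real per-register enable bits, and the uniform readl/writel accessors are a simplification:

        #define EXAMPLE_GATE_EN BIT(0)  /* placeholder for the real gate bits */

        static void example_set_clock_gate(struct hisi_qm *qm, bool enable)
        {
                static const u32 regs[] = {
                        HPRE_CLKGATE_CTL,
                        HPRE_PEH_CFG_AUTO_GATE,
                        HPRE_CLUSTER_DYN_CTL,
                        HPRE_CORE_SHB_CFG,
                };
                u32 val;
                int i;

                if (qm->ver < QM_HW_V3)
                        return;

                for (i = 0; i < ARRAY_SIZE(regs); i++) {
                        val = readl(qm->io_base + regs[i]);
                        if (enable)
                                val |= EXAMPLE_GATE_EN;
                        else
                                val &= ~EXAMPLE_GATE_EN;
                        writel(val, qm->io_base + regs[i]);
                }
        }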
643 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
645 struct device *dev = &qm->pdev->dev;
650 hpre_disable_clock_gate(qm);
652 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
653 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
654 writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
657 val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
659 writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
661 if (qm->ver >= QM_HW_V3)
663 qm->io_base + HPRE_TYPES_ENB);
665 writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);
667 writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
668 writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
669 writel(0x0, qm->io_base + HPRE_INT_MASK);
670 writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
671 writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
672 writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
674 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
675 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
676 writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
677 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
686 ret = hpre_set_cluster(qm);
691 if (qm->ver == QM_HW_V2) {
692 ret = hpre_cfg_by_dsm(qm);
696 disable_flr_of_bme(qm);
700 hpre_config_pasid(qm);
702 hpre_enable_clock_gate(qm);
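hpre_set_user_domain_and_cache() is the one-time hardware setup: clock gating is dropped first, user/AXI attributes and the read channel are programmed, clusters are brought up, a HW V2-only ACPI DSM quirk is applied, PASID is configured, and clock gating is re-enabled last. A condensed sketch of that ordering and its error handling, built only from the calls visible in this listing (the elided middle and the exact error paths are assumptions):

        static int example_user_domain_and_cache(struct hisi_qm *qm)
        {
                int ret;

                hpre_disable_clock_gate(qm);

                /* QM/BD user attributes, AXI cache and read-channel init elided;
                 * see lines 652-677 in the listing above. */

                ret = hpre_set_cluster(qm);
                if (ret)
                        return ret;

                /* HW V2 needs the DSM quirk and FLR-on-BME disabled. */
                if (qm->ver == QM_HW_V2) {
                        ret = hpre_cfg_by_dsm(qm);
                        if (ret)
                                return ret;

                        disable_flr_of_bme(qm);
                }

                hpre_config_pasid(qm);
                hpre_enable_clock_gate(qm);

                return 0;
        }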
707 static void hpre_cnt_regs_clear(struct hisi_qm *qm)
714 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
717 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
721 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
723 hisi_qm_debug_regs_clear(qm);
726 static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
730 val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
733 val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
734 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
740 if (qm->ver > QM_HW_V2)
741 writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
743 writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
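hpre_master_ooo_ctrl() drives the AXI-master out-of-order shutdown path from one bool: the shutdown-enable bit is set or cleared, and on HW V3+ the capability-derived mask is mirrored into the shutdown-select register. A sketch close to the listing; the enable-bit name is an assumption:

        #define EXAMPLE_AM_OOO_SHUTDOWN_ENABLE  BIT(0)  /* assumed enable bit */

        static void example_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
        {
                u32 val1, val2;

                val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
                if (enable) {
                        val1 |= EXAMPLE_AM_OOO_SHUTDOWN_ENABLE;
                        val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
                                                   HPRE_OOO_SHUTDOWN_MASK_CAP,
                                                   qm->cap_ver);
                } else {
                        val1 &= ~EXAMPLE_AM_OOO_SHUTDOWN_ENABLE;
                        val2 = 0x0;
                }

                if (qm->ver > QM_HW_V2)
                        writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

                writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
        }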
746 static void hpre_hw_error_disable(struct hisi_qm *qm)
750 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
751 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
754 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
756 hpre_master_ooo_ctrl(qm, false);
759 static void hpre_hw_error_enable(struct hisi_qm *qm)
763 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
764 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
767 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
770 writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
771 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
772 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
775 hpre_master_ooo_ctrl(qm, true);
778 writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
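The error-handling pair is symmetric: hpre_hw_error_disable() masks every RAS source and drops the OOO shutdown path, while hpre_hw_error_enable() first clears latched sources, programs the CE/NFE/FE routing, re-arms OOO shutdown, and finally unmasks the interrupts. A sketch of the disable side, which is the shorter of the two and follows the listing directly:

        static void example_hw_error_disable(struct hisi_qm *qm)
        {
                u32 ce, nfe;

                ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
                nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

                /* Mask all reported error sources ... */
                writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
                /* ... and stop them from triggering the AXI-master shutdown. */
                hpre_master_ooo_ctrl(qm, false);
        }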
785 return &hpre->qm;
790 struct hisi_qm *qm = hpre_file_to_qm(file);
792 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
798 struct hisi_qm *qm = hpre_file_to_qm(file);
804 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
806 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
813 struct hisi_qm *qm = hpre_file_to_qm(file);
818 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
823 struct hisi_qm *qm = hpre_file_to_qm(file);
828 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
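The cluster-inquiry debugfs attribute is a thin window onto two registers: writes land in HPRE_CLUSTER_INQURY and the answer is read back from HPRE_CLSTR_ADDR_INQRY_RSLT, both at the owning cluster's offset. A sketch of the two accessors, assuming the caller has already derived the cluster offset from the file's private data:

        /* Hypothetical helpers: 'offset' is the base of the cluster this
         * debugfs file belongs to.
         */
        static u32 example_cluster_inqry_read(struct hisi_qm *qm, unsigned long offset)
        {
                return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
        }

        static void example_cluster_inqry_write(struct hisi_qm *qm, unsigned long offset,
                                                u32 val)
        {
                writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
        }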
835 struct hisi_qm *qm = hpre_file_to_qm(file);
840 ret = hisi_qm_get_dfx_access(qm);
857 hisi_qm_put_dfx_access(qm);
863 hisi_qm_put_dfx_access(qm);
871 struct hisi_qm *qm = hpre_file_to_qm(file);
891 ret = hisi_qm_get_dfx_access(qm);
914 hisi_qm_put_dfx_access(qm);
954 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
957 struct hpre *hpre = container_of(qm, struct hpre, qm);
964 file_dir = qm->debug.debug_root;
979 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
981 struct device *dev = &qm->pdev->dev;
990 regset->base = qm->io_base;
993 debugfs_create_file("regs", 0444, qm->debug.debug_root,
999 static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
1001 struct device *dev = &qm->pdev->dev;
1008 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
1013 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
1021 regset->base = qm->io_base + hpre_cluster_offsets[i];
1026 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
1035 static int hpre_ctrl_debug_init(struct hisi_qm *qm)
1039 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
1044 ret = hpre_pf_comm_regs_debugfs_init(qm);
1048 return hpre_cluster_debugfs_init(qm);
1051 static void hpre_dfx_debug_init(struct hisi_qm *qm)
1053 struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
1054 struct hpre *hpre = container_of(qm, struct hpre, qm);
1059 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
1066 if (qm->fun_type == QM_HW_PF && hpre_regs)
1068 qm, &hpre_diff_regs_fops);
1071 static int hpre_debugfs_init(struct hisi_qm *qm)
1073 struct device *dev = &qm->pdev->dev;
1076 qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
1079 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
1080 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
1081 ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
1087 hisi_qm_debug_init(qm);
1089 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
1090 ret = hpre_ctrl_debug_init(qm);
1095 hpre_dfx_debug_init(qm);
1100 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
1102 debugfs_remove_recursive(qm->debug.debug_root);
1106 static void hpre_debugfs_exit(struct hisi_qm *qm)
1108 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
1110 debugfs_remove_recursive(qm->debug.debug_root);
1113 static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
1116 struct device *dev = &qm->pdev->dev;
1126 hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
1127 hpre_pre_store_caps[i], qm->cap_ver);
1136 qm->cap_tables.dev_cap_table = hpre_cap;
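hpre_pre_store_cap_reg() builds the capability table that the rest of the driver indexes with the *_CAP_IDX macros: allocate a record array, fill each entry from the hardware via hisi_qm_get_hw_info(), then publish it in qm->cap_tables.dev_cap_table. A sketch under those assumptions; the record type, its .type field, and the devm allocation are taken from how the hisi_qm code is usually structured, not from this listing:

        static int example_pre_store_caps(struct hisi_qm *qm)
        {
                struct device *dev = &qm->pdev->dev;
                struct hisi_qm_cap_record *hpre_cap;    /* assumed record type */
                size_t i, size = ARRAY_SIZE(hpre_pre_store_caps);

                hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
                if (!hpre_cap)
                        return -ENOMEM;

                for (i = 0; i < size; i++) {
                        hpre_cap[i].type = hpre_pre_store_caps[i];
                        hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
                                                                  hpre_pre_store_caps[i],
                                                                  qm->cap_ver);
                }

                qm->cap_tables.dev_cap_table = hpre_cap;
                return 0;
        }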
1141 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
1151 qm->mode = uacce_mode;
1152 qm->pdev = pdev;
1153 qm->ver = pdev->revision;
1154 qm->sqe_size = HPRE_SQE_SIZE;
1155 qm->dev_name = hpre_name;
1157 qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
1159 if (qm->fun_type == QM_HW_PF) {
1160 qm->qp_base = HPRE_PF_DEF_Q_BASE;
1161 qm->qp_num = pf_q_num;
1162 qm->debug.curr_qm_qp_num = pf_q_num;
1163 qm->qm_list = &hpre_devices;
1165 set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
1168 ret = hisi_qm_init(qm);
1170 pci_err(pdev, "Failed to init hpre qm configures!\n");
1175 ret = hpre_pre_store_cap_reg(qm);
1178 hisi_qm_uninit(qm);
1182 alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
1183 ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
1186 hisi_qm_uninit(qm);
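hpre_qm_init() fills in the static qm fields, calls hisi_qm_init(), and only then reads the capability table and registers the supported algorithms; every failure after hisi_qm_init() must unwind through hisi_qm_uninit(). A condensed sketch of that unwind structure (PF-only queue setup is elided; see lines 1157-1165 above):

        static int example_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
        {
                u64 alg_msk;
                int ret;

                qm->pdev = pdev;
                qm->ver = pdev->revision;
                qm->sqe_size = HPRE_SQE_SIZE;
                qm->dev_name = hpre_name;

                ret = hisi_qm_init(qm);
                if (ret) {
                        pci_err(pdev, "Failed to init hpre qm configures!\n");
                        return ret;
                }

                /* Everything below must undo hisi_qm_init() on failure. */
                ret = hpre_pre_store_cap_reg(qm);
                if (ret)
                        goto err_qm_uninit;

                alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
                ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
                if (ret)
                        goto err_qm_uninit;

                return 0;

        err_qm_uninit:
                hisi_qm_uninit(qm);
                return ret;
        }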
1192 static int hpre_show_last_regs_init(struct hisi_qm *qm)
1196 struct qm_debug *debug = &qm->debug;
1201 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
1208 debug->last_words[i] = readl_relaxed(qm->io_base +
1212 io_base = qm->io_base + hpre_cluster_offsets[i];
1223 static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
1225 struct qm_debug *debug = &qm->debug;
1227 if (qm->fun_type == QM_HW_VF || !debug->last_words)
1234 static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
1238 struct qm_debug *debug = &qm->debug;
1239 struct pci_dev *pdev = qm->pdev;
1245 if (qm->fun_type == QM_HW_VF || !debug->last_words)
1250 val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
1256 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
1258 io_base = qm->io_base + hpre_cluster_offsets[i];
1270 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
1273 struct device *dev = &qm->pdev->dev;
1283 static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
1285 return readl(qm->io_base + HPRE_INT_STATUS);
1288 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
1292 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
1293 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
1294 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
1297 static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
1301 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1303 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1305 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1308 static void hpre_err_info_init(struct hisi_qm *qm)
1310 struct hisi_qm_err_info *err_info = &qm->err_info;
1313 err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
1314 err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
1316 err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1317 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1318 err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1319 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1320 err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1321 HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
1322 err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1323 HPRE_RESET_MASK_CAP, qm->cap_ver);
1344 struct hisi_qm *qm = &hpre->qm;
1347 ret = hpre_set_user_domain_and_cache(qm);
1351 hpre_open_sva_prefetch(qm);
1353 qm->err_ini = &hpre_err_ini;
1354 qm->err_ini->err_info_init(qm);
1355 hisi_qm_dev_err_init(qm);
1356 ret = hpre_show_last_regs_init(qm);
1358 pci_err(qm->pdev, "Failed to init last word regs!\n");
1366 struct hisi_qm *qm = &hpre->qm;
1369 if (qm->fun_type == QM_HW_PF) {
1374 if (qm->ver >= QM_HW_V3) {
1376 qm->type_rate = type_rate;
1385 struct hisi_qm *qm;
1393 qm = &hpre->qm;
1394 ret = hpre_qm_init(qm, pdev);
1406 ret = hisi_qm_start(qm);
1410 ret = hpre_debugfs_init(qm);
1414 ret = hisi_qm_alg_register(qm, &hpre_devices);
1420 if (qm->uacce) {
1421 ret = uacce_register(qm->uacce);
1428 if (qm->fun_type == QM_HW_PF && vfs_num) {
1434 hisi_qm_pm_init(qm);
1439 hisi_qm_alg_unregister(qm, &hpre_devices);
1442 hpre_debugfs_exit(qm);
1443 hisi_qm_stop(qm, QM_NORMAL);
1446 hpre_show_last_regs_uninit(qm);
1447 hisi_qm_dev_err_uninit(qm);
1450 hisi_qm_uninit(qm);
1457 struct hisi_qm *qm = pci_get_drvdata(pdev);
1459 hisi_qm_pm_uninit(qm);
1460 hisi_qm_wait_task_finish(qm, &hpre_devices);
1461 hisi_qm_alg_unregister(qm, &hpre_devices);
1462 if (qm->fun_type == QM_HW_PF && qm->vfs_num)
1465 hpre_debugfs_exit(qm);
1466 hisi_qm_stop(qm, QM_NORMAL);
1468 if (qm->fun_type == QM_HW_PF) {
1469 hpre_cnt_regs_clear(qm);
1470 qm->debug.curr_qm_qp_num = 0;
1471 hpre_show_last_regs_uninit(qm);
1472 hisi_qm_dev_err_uninit(qm);
1475 hisi_qm_uninit(qm);
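The remove path mirrors probe in reverse, with extra PF-only cleanup: stop PM handling, wait for in-flight tasks, unregister the crypto algorithms, tear down VFs if any, remove debugfs, stop the QM, clear PF counters and error reporting, and finally uninit the QM. A sketch stitched together from the fragments above; the SR-IOV teardown call is an assumption about what sits on the unlisted line between 1462 and 1465:

        static void example_remove(struct pci_dev *pdev)
        {
                struct hisi_qm *qm = pci_get_drvdata(pdev);

                hisi_qm_pm_uninit(qm);
                hisi_qm_wait_task_finish(qm, &hpre_devices);
                hisi_qm_alg_unregister(qm, &hpre_devices);
                if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                        hisi_qm_sriov_disable(pdev, true);      /* assumed VF teardown helper */

                hpre_debugfs_exit(qm);
                hisi_qm_stop(qm, QM_NORMAL);

                if (qm->fun_type == QM_HW_PF) {
                        hpre_cnt_regs_clear(qm);
                        qm->debug.curr_qm_qp_num = 0;
                        hpre_show_last_regs_uninit(qm);
                        hisi_qm_dev_err_uninit(qm);
                }

                hisi_qm_uninit(qm);
        }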