Lines Matching refs:qm — cross-reference hits for the identifier qm; judging by the function and macro names, the file is the HiSilicon HPRE accelerator driver (drivers/crypto/hisilicon/hpre/hpre_main.c). The number leading each hit is the line number in that file.
71 #define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
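A note on the macro at line 71: it folds a register offset onto the QM's ioremap()ed BAR base (qm->io_base), so the MMIO accesses below can name registers symbolically instead of repeating pointer arithmetic at every call site. A minimal usage sketch; the helper name is illustrative, not from the driver:

	static u32 hpre_reg_read(struct hisi_qm *qm, unsigned long offset)
	{
		return readl(HPRE_ADDR(qm, offset));
	}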
217 static int hpre_cfg_by_dsm(struct hisi_qm *qm)
219 struct device *dev = &qm->pdev->dev;
246 static void disable_flr_of_bme(struct hisi_qm *qm)
250 val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
253 writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
254 writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
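Lines 250-253 are a read-modify-write on the PEH AXUSER configuration; the bit twiddling between the readl() and the writel() simply does not contain "qm" and so is missing from this match set. A hedged reconstruction, with the FLR bit macros as assumptions in the driver's naming style:

	static void disable_flr_of_bme_sketch(struct hisi_qm *qm)
	{
		u32 val;

		val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
		val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);	/* assumed bits */
		val |= HPRE_QM_PM_FLR;				/* assumed bit */
		writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));

		/* line 254: latch the new AXUSER configuration */
		writel(PEH_AXUSER_CFG_ENABLE,
		       HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
	}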
257 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
259 struct device *dev = &qm->pdev->dev;
264 writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
265 writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
266 writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
269 val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
271 writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
273 writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
274 writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
275 writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
276 writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
277 writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
278 writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
279 writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
280 writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
282 writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
283 writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
284 writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
285 ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
299 HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
300 writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
301 ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
314 ret = hpre_cfg_by_dsm(qm);
318 disable_flr_of_bme(qm);
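The init handshakes at lines 285 and 301 use the <linux/iopoll.h> idiom: re-read a status register until a ready condition holds or a time budget expires, returning 0 on success or -ETIMEDOUT. A minimal sketch, assuming bit 0 is the ready flag and a 10 us poll interval inside a 1000 us budget:

	#include <linux/iopoll.h>

	static int hpre_wait_init_done(struct hisi_qm *qm, unsigned long offset)
	{
		u32 val;

		return readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset), val,
						  val & BIT(0), 10, 1000);
	}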
323 static void hpre_cnt_regs_clear(struct hisi_qm *qm)
329 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
330 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
335 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
339 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
341 hisi_qm_debug_regs_clear(qm);
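Line 335 is the body of a per-cluster loop: each HPRE cluster exposes an identical register window at a fixed stride from a common base, so clearing the inquiry register is the same store repeated once per window. A minimal sketch; the base and stride macros are assumptions modeled on the driver's naming:

	static void hpre_clusters_clear_sketch(struct hisi_qm *qm)
	{
		unsigned long offset;
		int i;

		for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
			offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
			writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
		}
	}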
344 static void hpre_hw_error_disable(struct hisi_qm *qm)
349 writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
352 val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
354 writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
357 static void hpre_hw_error_enable(struct hisi_qm *qm)
362 writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
365 writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
366 writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
367 writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
368 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
371 val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
373 writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
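Lines 357-373 do two related things on the enable side: program the RAS severity routing (corrected, non-fatal, fatal) and OR the out-of-order-shutdown bit into its enable register so that uncorrectable errors also fence the AXI master. Note that lines 352-354 and 371-373 both use read-modify-write rather than a blind store, since other bits in that register must be preserved. A hedged sketch of the enable path; HPRE_AM_OOO_SHUTDOWN_ENABLE's bit position is an assumption:

	static void hpre_hw_error_enable_sketch(struct hisi_qm *qm)
	{
		u32 val;

		/* line 362: clear stale error sources before unmasking */
		writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

		writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
		writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
		writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
		writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

		val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
		val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;	/* assumed bit */
		writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	}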
380 return &hpre->qm;
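The helper ending at line 380 resolves the owning device from a debugfs file node. A minimal sketch of the container_of() walk; the hpre_debugfs_file type and its debug member are assumptions modeled on the surrounding code:

	static struct hisi_qm *hpre_file_to_qm_sketch(struct hpre_debugfs_file *file)
	{
		struct hpre *hpre = container_of(file->debug, struct hpre, debug);

		return &hpre->qm;
	}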
385 struct hisi_qm *qm = hpre_file_to_qm(file);
387 return readl(qm->io_base + QM_DFX_MB_CNT_VF);
392 struct hisi_qm *qm = hpre_file_to_qm(file);
393 u32 num_vfs = qm->vfs_num;
401 qm->debug.curr_qm_qp_num = qm->qp_num;
403 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
405 qm->debug.curr_qm_qp_num =
406 qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
408 qm->debug.curr_qm_qp_num = vfq_num;
412 writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
413 writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
416 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
417 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
420 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
421 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
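Lines 401-408 decide how many queue pairs the selected function owns: function 0 is the PF and keeps qm->qp_num, while the VFs split the remaining ctrl_qp_num - qp_num evenly, with the last VF absorbing the division remainder. Worked example: with 1024 total queue pairs, 64 kept by the PF and 7 VFs, VFs 1..6 each get (1024 - 64) / 7 = 137 and VF 7 gets 960 - 6 * 137 = 138. A hedged sketch of just that arithmetic (function name illustrative):

	static u32 hpre_fun_qp_num(struct hisi_qm *qm, u32 fun_idx, u32 num_vfs)
	{
		u32 vfq_num;

		if (fun_idx == 0)		/* PF keeps its own allocation */
			return qm->qp_num;

		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
		if (fun_idx == num_vfs)		/* last VF absorbs the remainder */
			return qm->ctrl_qp_num - qm->qp_num -
			       (num_vfs - 1) * vfq_num;

		return vfq_num;
	}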
428 struct hisi_qm *qm = hpre_file_to_qm(file);
430 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
436 struct hisi_qm *qm = hpre_file_to_qm(file);
442 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
444 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
451 struct hisi_qm *qm = hpre_file_to_qm(file);
456 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
461 struct hisi_qm *qm = hpre_file_to_qm(file);
466 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
588 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
591 struct hpre *hpre = container_of(qm, struct hpre, qm);
598 file_dir = qm->debug.debug_root;
613 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
615 struct device *dev = &qm->pdev->dev;
624 regset->base = qm->io_base;
626 debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
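Lines 615-626 show the standard debugfs regset pattern: allocate a struct debugfs_regset32, point it at a static table of register names/offsets and at the MMIO base, then expose the whole set read-only as a single "regs" file. A hedged reconstruction; the hpre_com_dfx_regs table name is assumed from the driver's style:

	static int hpre_pf_comm_regs_sketch(struct hisi_qm *qm)
	{
		struct device *dev = &qm->pdev->dev;
		struct debugfs_regset32 *regset;

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_com_dfx_regs;	/* assumed table */
		regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
		regset->base = qm->io_base;

		debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
		return 0;
	}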
630 static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
632 struct device *dev = &qm->pdev->dev;
642 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
650 regset->base = qm->io_base + hpre_cluster_offsets[i];
653 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
662 static int hpre_ctrl_debug_init(struct hisi_qm *qm)
666 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
671 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
676 ret = hpre_pf_comm_regs_debugfs_init(qm);
680 return hpre_cluster_debugfs_init(qm);
683 static void hpre_dfx_debug_init(struct hisi_qm *qm)
685 struct hpre *hpre = container_of(qm, struct hpre, qm);
690 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
698 static int hpre_debugfs_init(struct hisi_qm *qm)
700 struct device *dev = &qm->pdev->dev;
703 qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
706 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
707 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
708 ret = hisi_qm_debug_init(qm);
712 if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
713 ret = hpre_ctrl_debug_init(qm);
718 hpre_dfx_debug_init(qm);
723 debugfs_remove_recursive(qm->debug.debug_root);
727 static void hpre_debugfs_exit(struct hisi_qm *qm)
729 debugfs_remove_recursive(qm->debug.debug_root);
732 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
739 qm->pdev = pdev;
740 qm->ver = pdev->revision;
741 qm->sqe_size = HPRE_SQE_SIZE;
742 qm->dev_name = hpre_name;
744 qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
746 if (qm->fun_type == QM_HW_PF) {
747 qm->qp_base = HPRE_PF_DEF_Q_BASE;
748 qm->qp_num = pf_q_num;
749 qm->debug.curr_qm_qp_num = pf_q_num;
750 qm->qm_list = &hpre_devices;
753 return hisi_qm_init(qm);
756 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
759 struct device *dev = &qm->pdev->dev;
769 static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
771 return readl(qm->io_base + HPRE_HAC_INT_STATUS);
774 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
776 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
779 static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
783 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
785 HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
787 HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
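Lines 783-787 re-open the AXI master after an error has fenced it: the saved register value is written back with the shutdown-enable bit pulsed low, then high. A minimal sketch (bit macro assumed):

	static void hpre_open_axi_master_ooo_sketch(struct hisi_qm *qm)
	{
		u32 value;

		value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
		writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
		       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
		writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
		       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
	}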
811 struct hisi_qm *qm = &hpre->qm;
814 qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
816 ret = hpre_set_user_domain_and_cache(qm);
820 qm->err_ini = &hpre_err_ini;
821 hisi_qm_dev_err_init(qm);
828 struct hisi_qm *qm = &hpre->qm;
831 if (qm->fun_type == QM_HW_PF) {
842 struct hisi_qm *qm;
850 qm = &hpre->qm;
851 ret = hpre_qm_init(qm, pdev);
863 ret = hisi_qm_start(qm);
867 ret = hpre_debugfs_init(qm);
871 ret = hisi_qm_alg_register(qm, &hpre_devices);
877 if (qm->fun_type == QM_HW_PF && vfs_num) {
886 hisi_qm_alg_unregister(qm, &hpre_devices);
889 hpre_debugfs_exit(qm);
890 hisi_qm_stop(qm, QM_NORMAL);
893 hisi_qm_dev_err_uninit(qm);
896 hisi_qm_uninit(qm);
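Lines 842-896 are the probe path plus its error ladder. Stitching the hits together, the order is: allocate, init the QM, do PF-only hardware setup, start the QM, create debugfs (non-fatal on failure), register the crypto algorithms, then optionally enable SR-IOV; each error label unwinds exactly the steps that succeeded, in reverse. A hedged sketch with illustrative label names; the PF setup details are elided:

	static int hpre_probe_sketch(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		struct hpre *hpre;
		struct hisi_qm *qm;
		int ret;

		hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
		if (!hpre)
			return -ENOMEM;

		qm = &hpre->qm;
		ret = hpre_qm_init(qm, pdev);			/* line 851 */
		if (ret)
			return ret;

		/* PF-only setup (user domain, cache, error handlers)
		 * elided; see lines 811-831 */

		ret = hisi_qm_start(qm);			/* line 863 */
		if (ret)
			goto err_with_err_init;

		ret = hpre_debugfs_init(qm);			/* line 867 */
		if (ret)
			pci_warn(pdev, "init debugfs failed (%d)!\n", ret);

		ret = hisi_qm_alg_register(qm, &hpre_devices);	/* line 871 */
		if (ret)
			goto err_with_qm_start;

		if (qm->fun_type == QM_HW_PF && vfs_num) {	/* line 877 */
			ret = hisi_qm_sriov_enable(pdev, vfs_num);
			if (ret < 0)
				goto err_with_alg_register;
		}

		return 0;

	err_with_alg_register:
		hisi_qm_alg_unregister(qm, &hpre_devices);	/* line 886 */
	err_with_qm_start:
		hpre_debugfs_exit(qm);				/* line 889 */
		hisi_qm_stop(qm, QM_NORMAL);			/* line 890 */
	err_with_err_init:
		hisi_qm_dev_err_uninit(qm);			/* line 893 */
		hisi_qm_uninit(qm);				/* line 896 */
		return ret;
	}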
903 struct hisi_qm *qm = pci_get_drvdata(pdev);
906 hisi_qm_wait_task_finish(qm, &hpre_devices);
907 hisi_qm_alg_unregister(qm, &hpre_devices);
908 if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
909 ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
915 if (qm->fun_type == QM_HW_PF) {
916 hpre_cnt_regs_clear(qm);
917 qm->debug.curr_qm_qp_num = 0;
920 hpre_debugfs_exit(qm);
921 hisi_qm_stop(qm, QM_NORMAL);
922 hisi_qm_dev_err_uninit(qm);
923 hisi_qm_uninit(qm);
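The remove path (lines 903-923) is worth reading as one unit, since the ordering is the point: wait for in-flight work, drop the crypto registration, tear down VFs while the PF is still alive, clear PF-only DFX counters, and only then remove debugfs and stop/uninit the QM. A sketch assembled from exactly the hits above (error handling on the SR-IOV disable omitted):

	static void hpre_remove_sketch(struct pci_dev *pdev)
	{
		struct hisi_qm *qm = pci_get_drvdata(pdev);

		hisi_qm_wait_task_finish(qm, &hpre_devices);
		hisi_qm_alg_unregister(qm, &hpre_devices);

		if (qm->fun_type == QM_HW_PF && qm->vfs_num)
			hisi_qm_sriov_disable(pdev, qm->is_frozen);

		if (qm->fun_type == QM_HW_PF) {
			hpre_cnt_regs_clear(qm);
			qm->debug.curr_qm_qp_num = 0;
		}

		hpre_debugfs_exit(qm);
		hisi_qm_stop(qm, QM_NORMAL);
		hisi_qm_dev_err_uninit(qm);
		hisi_qm_uninit(qm);
	}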