Lines matching refs: qm

87 #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
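The macro body is cut at the backslash continuation (only lines matching qm are listed). In the upstream sec_main.c the continuation folds the engine's PF config window into the ioremapped base; reproduced here on that assumption:

    #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
                                  SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)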
244 static u8 sec_get_endian(struct hisi_qm *qm)
252 if (qm->pdev->is_virtfn) {
253 dev_err_ratelimited(&qm->pdev->dev,
257 reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
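Only the VF guard and the register read match here. A minimal sketch of the decode that follows, assuming the SEC_LE/SEC_32BE/SEC_64BE return constants from the driver: the two low bits of the control register select the BD endianness.

    /* sketch: decode BD endianness from the two low control bits */
    if (!(reg & BIT(0)))            /* bit 0 clear: little-endian BDs */
            return SEC_LE;
    else if (!(reg & BIT(1)))       /* bit 1 clear: 32-bit big-endian */
            return SEC_32BE;
    else                            /* both set: 64-bit big-endian */
            return SEC_64BE;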
273 static int sec_engine_init(struct hisi_qm *qm)
279 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
281 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
283 writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
285 ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
289 pci_err(qm->pdev, "fail to init sec mem\n");
293 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
295 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
297 reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
299 writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
301 reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
303 writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
306 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
308 writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
312 SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
315 SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
317 SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
320 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
321 reg |= sec_get_endian(qm);
322 writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
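The readl_relaxed_poll_timeout() at 285 is the standard <linux/iopoll.h> idiom: line 283 kicks the engine's memory init, then the driver spins on the done register until bit 0 sets or the deadline passes. A hedged sketch, with the poll interval and timeout constants assumed:

    /* sketch: start memory init, then poll for the done bit */
    writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
    ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
                                     reg, reg & 0x1,
                                     SEC_DELAY_10_US,      /* assumed interval */
                                     SEC_POLL_TIMEOUT_US); /* assumed deadline */
    if (ret) {
            pci_err(qm->pdev, "fail to init sec mem\n");
            return ret;
    }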
327 static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
329 /* qm user domain */
330 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
331 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
332 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
333 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
334 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
336 /* qm cache */
337 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
338 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
341 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
342 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
347 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
349 return sec_engine_init(qm);
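The FIELD_PREP() fragment at 347 is the tail of one composite write that turns on SQC/CQC caching with a write-back threshold of 1. Reconstructed under the assumption that the flag names match the qm framework:

    /* sketch: one write enables both caches and sets both WB thresholds */
    writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
           CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
           FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);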
353 static void sec_debug_regs_clear(struct hisi_qm *qm)
358 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
359 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
362 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
364 readl(qm->io_base + sec_dfx_regs[i].offset);
367 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
369 hisi_qm_debug_regs_clear(qm);
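The bare readl() at 364 sits in a loop: once SEC_CTRL_CNT_CLR_CE is set to 1 (362), the DFX counters become clear-on-read, so reading each one is the clearing action, after which 367 restores normal counting. A sketch of the loop (array name as listed):

    /* sketch: counters are clear-on-read while the clr_ce bit is set */
    writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
    for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
            readl(qm->io_base + sec_dfx_regs[i].offset);
    writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);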
372 static void sec_hw_error_enable(struct hisi_qm *qm)
376 if (qm->ver == QM_HW_V1) {
377 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
378 pci_info(qm->pdev, "V1 does not support hw error handling\n");
382 val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
385 writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
388 writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
391 writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
392 writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
393 writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
398 writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
401 static void sec_hw_error_disable(struct hisi_qm *qm)
405 val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
408 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
409 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
410 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
413 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
418 writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
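Enable and disable are mirror images: enable clears stale interrupt sources (385), unmasks the core interrupt (388) and routes errors into the CE/FE/NFE RAS classes (391-393); disable writes the disable masks back (408-413). Both bracket the sequence with a read-modify-write of SEC_CONTROL_REG to flip the AXI master shutdown-on-error bit; the enable side, sketched with the bit name taken from lines 714-715:

    /* sketch: also halt the AXI master on uncorrected (NFE) errors */
    val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
    val |= SEC_AXI_SHUTDOWN_ENABLE;
    writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));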
423 struct hisi_qm *qm = file->qm;
425 return readl(qm->io_base + QM_DFX_MB_CNT_VF);
430 struct hisi_qm *qm = file->qm;
434 if (val > qm->vfs_num)
439 qm->debug.curr_qm_qp_num = qm->qp_num;
441 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
443 if (val == qm->vfs_num)
444 qm->debug.curr_qm_qp_num =
445 qm->ctrl_qp_num - qm->qp_num -
446 (qm->vfs_num - 1) * vfq_num;
448 qm->debug.curr_qm_qp_num = vfq_num;
451 writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
452 writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
455 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
456 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
459 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
460 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
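The arithmetic at 441-446 splits the queues left after the PF evenly across the VFs, with the last VF absorbing the integer-division remainder. For example, with ctrl_qp_num = 1024, a PF qp_num of 256 and vfs_num = 7: vfq_num = 768 / 7 = 109, VFs 1-6 get 109 queues each, and VF 7 gets 768 - 6 * 109 = 114. In code:

    /* sketch: the last VF takes the remainder of the integer split */
    vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
    if (val == qm->vfs_num)
            qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - qm->qp_num -
                                       (qm->vfs_num - 1) * vfq_num;
    else
            qm->debug.curr_qm_qp_num = vfq_num;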
467 struct hisi_qm *qm = file->qm;
469 return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
475 struct hisi_qm *qm = file->qm;
481 tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
483 writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
593 static int sec_core_debug_init(struct hisi_qm *qm)
595 struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
596 struct device *dev = &qm->pdev->dev;
602 tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
610 regset->base = qm->io_base;
612 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
625 static int sec_debug_init(struct hisi_qm *qm)
627 struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
630 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
634 sec->debug.files[i].qm = qm;
637 qm->debug.debug_root,
643 return sec_core_debug_init(qm);
646 static int sec_debugfs_init(struct hisi_qm *qm)
648 struct device *dev = &qm->pdev->dev;
651 qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
653 qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
654 qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
655 ret = hisi_qm_debug_init(qm);
659 ret = sec_debug_init(qm);
672 static void sec_debugfs_exit(struct hisi_qm *qm)
674 debugfs_remove_recursive(qm->debug.debug_root);
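The debugfs setup at 651-659 is the usual kernel pattern: make a per-device directory under a module-level root, let the generic qm helper populate it, then add the SEC-specific files, and tear the whole tree down recursively on exit (674). A condensed sketch, with sec_debugfs_root as the assumed module-level root:

    /* sketch: per-device dir under the module's debugfs root */
    qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                              sec_debugfs_root);
    qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
    qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
    ret = hisi_qm_debug_init(qm);          /* generic qm files */
    if (!ret)
            ret = sec_debug_init(qm);      /* SEC DFX and ctrl files */
    if (ret)
            debugfs_remove_recursive(qm->debug.debug_root);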
677 static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
680 struct device *dev = &qm->pdev->dev;
689 err_val = readl(qm->io_base +
699 static u32 sec_get_hw_err_status(struct hisi_qm *qm)
701 return readl(qm->io_base + SEC_CORE_INT_STATUS);
704 static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
706 writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
709 static void sec_open_axi_master_ooo(struct hisi_qm *qm)
713 val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
714 writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
715 writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
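These helpers are the device-specific half of the qm framework's error handling; line 747 installs them through qm->err_ini. A sketch of that hookup, with the ops-table field names assumed from the framework:

    /* sketch: the qm core calls back into these on a device error */
    static const struct hisi_qm_err_ini sec_err_ini = {
            .hw_err_enable           = sec_hw_error_enable,
            .hw_err_disable          = sec_hw_error_disable,
            .get_dev_hw_err_status   = sec_get_hw_err_status,
            .clear_dev_hw_err_status = sec_clear_hw_err_status,
            .log_dev_hw_err          = sec_log_hw_error,
            .open_axi_master_ooo     = sec_open_axi_master_ooo,
    };

Note the pulse at 714-715: the shutdown bit is written low and then high again, which re-opens the AXI master's out-of-order path once the error has been serviced while leaving shutdown-on-error armed.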
739 struct hisi_qm *qm = &sec->qm;
742 if (qm->ver == QM_HW_V1)
743 qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
745 qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
747 qm->err_ini = &sec_err_ini;
749 ret = sec_set_user_domain_and_cache(qm);
753 hisi_qm_dev_err_init(qm);
754 sec_debug_regs_clear(qm);
759 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
763 qm->pdev = pdev;
764 qm->ver = pdev->revision;
765 qm->sqe_size = SEC_SQE_SIZE;
766 qm->dev_name = sec_name;
768 qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
770 if (qm->fun_type == QM_HW_PF) {
771 qm->qp_base = SEC_PF_DEF_Q_BASE;
772 qm->qp_num = pf_q_num;
773 qm->debug.curr_qm_qp_num = pf_q_num;
774 qm->qm_list = &sec_devices;
775 } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
777 * there is no way to get the qm configuration in a VM on v1 hardware,
782 qm->qp_base = SEC_PF_DEF_Q_NUM;
783 qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
792 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
794 pci_name(qm->pdev));
795 if (!qm->wq) {
796 pci_err(qm->pdev, "fail to alloc workqueue\n");
800 ret = hisi_qm_init(qm);
802 destroy_workqueue(qm->wq);
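The ordering at 792-802 matters: the workqueue is allocated before hisi_qm_init(), so a failed init must destroy it on the way out. Sketched with WQ_UNBOUND assumed for the flag elided between lines 792 and 794:

    /* sketch: allocate the driver workqueue, then init the qm */
    qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
                             WQ_UNBOUND, num_online_cpus(),
                             pci_name(qm->pdev));
    if (!qm->wq) {
            pci_err(qm->pdev, "fail to alloc workqueue\n");
            return -ENOMEM;
    }

    ret = hisi_qm_init(qm);
    if (ret)
            destroy_workqueue(qm->wq);     /* unwind on failure */
    return ret;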
807 static void sec_qm_uninit(struct hisi_qm *qm)
809 hisi_qm_uninit(qm);
814 struct hisi_qm *qm = &sec->qm;
817 if (qm->fun_type == QM_HW_PF) {
826 static void sec_probe_uninit(struct hisi_qm *qm)
828 hisi_qm_dev_err_uninit(qm);
830 destroy_workqueue(qm->wq);
836 struct device *dev = &sec->qm.pdev->dev;
853 struct hisi_qm *qm;
860 qm = &sec->qm;
861 ret = sec_qm_init(qm, pdev);
876 ret = hisi_qm_start(qm);
878 pci_err(pdev, "Failed to start sec qm!\n");
882 ret = sec_debugfs_init(qm);
886 ret = hisi_qm_alg_register(qm, &sec_devices);
892 if (qm->fun_type == QM_HW_PF && vfs_num) {
901 hisi_qm_alg_unregister(qm, &sec_devices);
904 sec_debugfs_exit(qm);
905 hisi_qm_stop(qm, QM_NORMAL);
908 sec_probe_uninit(qm);
911 sec_qm_uninit(qm);
918 struct hisi_qm *qm = pci_get_drvdata(pdev);
920 hisi_qm_wait_task_finish(qm, &sec_devices);
921 hisi_qm_alg_unregister(qm, &sec_devices);
922 if (qm->fun_type == QM_HW_PF && qm->vfs_num)
923 hisi_qm_sriov_disable(pdev, qm->is_frozen);
925 sec_debugfs_exit(qm);
927 (void)hisi_qm_stop(qm, QM_NORMAL);
929 if (qm->fun_type == QM_HW_PF)
930 sec_debug_regs_clear(qm);
932 sec_probe_uninit(qm);
934 sec_qm_uninit(qm);
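Probe's error path (901-911) and the remove path (918-934) unwind in the same reverse order: unregister the algorithms first so no new requests arrive, then tear down debugfs, stop the qm, and finally release the error handling and the qm itself. A sketch of probe's tail, with the label names assumed:

    /* sketch: probe unwind mirrors remove's ordering */
    err_alg_unregister:
            hisi_qm_alg_unregister(qm, &sec_devices);
    err_qm_stop:
            sec_debugfs_exit(qm);
            hisi_qm_stop(qm, QM_NORMAL);
    err_probe_uninit:
            sec_probe_uninit(qm);
    err_qm_uninit:
            sec_qm_uninit(qm);
            return ret;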