Lines Matching refs:qm

370 	struct hisi_qm *qm;
386 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
387 void (*qm_db)(struct hisi_qm *qm, u16 qn,
389 int (*debug_init)(struct hisi_qm *qm);
390 void (*hw_error_init)(struct hisi_qm *qm);
391 void (*hw_error_uninit)(struct hisi_qm *qm);
392 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
393 int (*set_msi)(struct hisi_qm *qm, bool set);
457 static void qm_irqs_unregister(struct hisi_qm *qm);
459 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
461 enum qm_state curr = atomic_read(&qm->status.flags);
481 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
485 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
491 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
494 enum qm_state qm_curr = atomic_read(&qm->status.flags);
527 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
531 dev_warn(&qm->pdev->dev,
538 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
540 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
543 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
545 return qm->err_ini->get_dev_hw_err_status(qm);
549 static bool qm_check_dev_error(struct hisi_qm *qm)
553 if (qm->fun_type == QM_HW_VF)
556 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
557 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
562 static int qm_wait_reset_finish(struct hisi_qm *qm)
567 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
576 static int qm_reset_prepare_ready(struct hisi_qm *qm)
578 struct pci_dev *pdev = qm->pdev;
585 if (qm->ver < QM_HW_V3)
588 return qm_wait_reset_finish(qm);
591 static void qm_reset_bit_clear(struct hisi_qm *qm)
593 struct pci_dev *pdev = qm->pdev;
596 if (qm->ver < QM_HW_V3)
599 clear_bit(QM_RESETTING, &qm->misc_ctl);
615 int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
619 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
626 static void qm_mb_write(struct hisi_qm *qm, const void *src)
628 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
652 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
657 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
658 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
663 qm_mb_write(qm, mailbox);
665 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
666 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
671 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
673 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
681 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
685 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
691 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
696 mutex_lock(&qm->mailbox_lock);
697 ret = qm_mb_nolock(qm, &mailbox);
698 mutex_unlock(&qm->mailbox_lock);
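Note: hisi_qm_mb() (line 685) is the mailbox entry point used throughout this listing, e.g. by the SQC/CQC dump helpers at lines 1380 and 1385 and by the context-configuration calls at lines 2012 and 2053. A minimal sketch of such a caller follows; the wrapper name is hypothetical, while the command macro and argument order are taken from the call sites above:

    /* Sketch only: mirrors qm_dump_sqc_raw() at line 1380. The final
     * argument appears to select read-back (1, as in the dump helpers)
     * rather than configuration (0, as at line 2012).
     */
    static int example_dump_sqc(struct hisi_qm *qm, dma_addr_t sqc_dma, u16 qp_id)
    {
            return hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
    }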
704 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
712 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
715 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
717 void __iomem *io_base = qm->io_base;
722 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
735 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
737 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
740 qm->ops->qm_db(qm, qn, cmd, index, priority);
743 static void qm_disable_clock_gate(struct hisi_qm *qm)
747 /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
748 if (qm->ver < QM_HW_V3)
751 val = readl(qm->io_base + QM_PM_CTRL);
753 writel(val, qm->io_base + QM_PM_CTRL);
756 static int qm_dev_mem_reset(struct hisi_qm *qm)
760 writel(0x1, qm->io_base + QM_MEM_START_INIT);
761 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
768 	 * @qm: The qm which wants to get information.
775 u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
781 switch (qm->ver) {
790 val = readl(qm->io_base + info_table[index].offset);
796 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
801 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
806 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
809 struct device *dev = &qm->pdev->dev;
813 if (!qm->uacce)
833 qm->uacce->algs = algs;
840 static u32 qm_get_irq_num(struct hisi_qm *qm)
842 if (qm->fun_type == QM_HW_PF)
843 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
845 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
848 static int qm_pm_get_sync(struct hisi_qm *qm)
850 struct device *dev = &qm->pdev->dev;
853 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
865 static void qm_pm_put_sync(struct hisi_qm *qm)
867 struct device *dev = &qm->pdev->dev;
869 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
889 struct hisi_qm *qm = qp->qm;
893 qp->req_cb(qp, qp->sqe + qm->sqe_size *
897 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
905 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
912 struct hisi_qm *qm = poll_data->qm;
918 qp = &qm->qp_array[poll_data->qp_finish_id[i]];
932 static void qm_get_complete_eqe_num(struct hisi_qm *qm)
934 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
936 u16 eq_depth = qm->eq_depth;
939 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
940 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
941 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
946 if (unlikely(cqn >= qm->qp_num))
948 poll_data = &qm->poll_data[cqn];
950 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
955 if (qm->status.eq_head == eq_depth - 1) {
956 qm->status.eqc_phase = !qm->status.eqc_phase;
957 eqe = qm->eqe;
958 qm->status.eq_head = 0;
961 qm->status.eq_head++;
969 queue_work(qm->wq, &poll_data->work);
970 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
975 struct hisi_qm *qm = data;
978 qm_get_complete_eqe_num(qm);
985 struct hisi_qm *qm = data;
988 val = readl(qm->io_base + QM_IFC_INT_STATUS);
993 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
994 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
998 schedule_work(&qm->cmd_process);
1017 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
1019 struct hisi_qp *qp = &qm->qp_array[qp_id];
1026 static void qm_reset_function(struct hisi_qm *qm)
1028 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
1029 struct device *dev = &qm->pdev->dev;
1035 ret = qm_reset_prepare_ready(qm);
1041 ret = hisi_qm_stop(qm, QM_DOWN);
1043 dev_err(dev, "failed to stop qm when reset function\n");
1047 ret = hisi_qm_start(qm);
1049 dev_err(dev, "failed to start qm when reset function\n");
1052 qm_reset_bit_clear(qm);
1057 struct hisi_qm *qm = data;
1058 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1059 u16 aeq_depth = qm->aeq_depth;
1062 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1064 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
1070 dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1071 qm_reset_function(qm);
1074 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1078 qm_disable_qp(qm, qp_id);
1081 dev_err(&qm->pdev->dev, "unknown error type %u\n",
1086 if (qm->status.aeq_head == aeq_depth - 1) {
1087 qm->status.aeqc_phase = !qm->status.aeqc_phase;
1088 aeqe = qm->aeqe;
1089 qm->status.aeq_head = 0;
1092 qm->status.aeq_head++;
1096 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1111 static void qm_init_prefetch(struct hisi_qm *qm)
1113 struct device *dev = &qm->pdev->dev;
1116 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
1134 writel(page_type, qm->io_base + QM_PAGE_SIZE);
1204 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1212 if (qm->ver == QM_HW_V1) {
1225 if (qm->ver == QM_HW_V1) {
1246 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1247 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1250 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1257 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1258 factor = &qm->factor[fun_num];
1260 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1266 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1267 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1271 writel(fun_num, qm->io_base + QM_VFT_CFG);
1273 qm_vft_data_cfg(qm, type, base, number, factor);
1275 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1276 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1278 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1283 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1285 u32 qos = qm->factor[fun_num].func_qos;
1288 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1290 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1293 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1296 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1305 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1311 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1317 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
1318 ret = qm_shaper_init_vft(qm, fun_num);
1326 qm_set_vft_common(qm, i, fun_num, 0, 0);
1331 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1336 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1340 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1341 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1349 void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1352 struct device *dev = &qm->pdev->dev;
1369 void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1372 struct device *dev = &qm->pdev->dev;
1378 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1380 return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1383 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1385 return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1388 static void qm_hw_error_init_v1(struct hisi_qm *qm)
1390 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1393 static void qm_hw_error_cfg(struct hisi_qm *qm)
1395 struct hisi_qm_err_info *err_info = &qm->err_info;
1397 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
1399 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1402 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
1403 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1404 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1405 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
1408 static void qm_hw_error_init_v2(struct hisi_qm *qm)
1412 qm_hw_error_cfg(qm);
1414 irq_unmask = ~qm->error_mask;
1415 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1416 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1419 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1421 u32 irq_mask = qm->error_mask;
1423 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1424 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1427 static void qm_hw_error_init_v3(struct hisi_qm *qm)
1431 qm_hw_error_cfg(qm);
1434 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1436 irq_unmask = ~qm->error_mask;
1437 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1438 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1441 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
1443 u32 irq_mask = qm->error_mask;
1445 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1446 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1449 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1452 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1455 struct device *dev = &qm->pdev->dev;
1468 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1472 dev_err(dev, "qm %s doorbell timeout in function %u\n",
1475 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1481 dev_err(dev, "qm %s fifo overflow in function %u\n",
1489 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1494 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1495 error_status = qm->error_mask & tmp;
1499 qm->err_status.is_qm_ecc_mbit = true;
1501 qm_log_hw_error(qm, error_status);
1502 if (error_status & qm->err_info.qm_reset_mask)
1505 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1506 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1512 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
1518 mutex_lock(&qm->mailbox_lock);
1519 ret = qm_mb_nolock(qm, &mailbox);
1523 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1524 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1527 mutex_unlock(&qm->mailbox_lock);
1531 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
1535 if (qm->fun_type == QM_HW_PF)
1536 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
1538 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
1540 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
1543 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
1545 struct device *dev = &qm->pdev->dev;
1550 ret = qm_get_mb_cmd(qm, &msg, vf_id);
1573 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
1575 struct device *dev = &qm->pdev->dev;
1576 u32 vfs_num = qm->vfs_num;
1582 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
1586 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
1602 qm_handle_vf_msg(qm, i);
1608 qm_clear_cmd_interrupt(qm, val);
1613 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
1617 val = readl(qm->io_base + QM_IFC_INT_CFG);
1620 writel(val, qm->io_base + QM_IFC_INT_CFG);
1622 val = readl(qm->io_base + QM_IFC_INT_SET_P);
1624 writel(val, qm->io_base + QM_IFC_INT_SET_P);
1627 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
1631 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1633 writel(val, qm->io_base + QM_IFC_INT_SET_V);
1636 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
1638 struct device *dev = &qm->pdev->dev;
1645 mutex_lock(&qm->mailbox_lock);
1646 ret = qm_mb_nolock(qm, &mailbox);
1652 qm_trigger_vf_interrupt(qm, fun_num);
1655 val = readq(qm->io_base + QM_IFC_READY_STATUS);
1668 mutex_unlock(&qm->mailbox_lock);
1672 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
1674 struct device *dev = &qm->pdev->dev;
1675 u32 vfs_num = qm->vfs_num;
1683 mutex_lock(&qm->mailbox_lock);
1685 ret = qm_mb_nolock(qm, &mailbox);
1688 mutex_unlock(&qm->mailbox_lock);
1692 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
1695 val = readq(qm->io_base + QM_IFC_READY_STATUS);
1698 mutex_unlock(&qm->mailbox_lock);
1706 mutex_unlock(&qm->mailbox_lock);
1717 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
1725 mutex_lock(&qm->mailbox_lock);
1726 ret = qm_mb_nolock(qm, &mailbox);
1728 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
1732 qm_trigger_pf_interrupt(qm);
1736 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1747 mutex_unlock(&qm->mailbox_lock);
1753 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
1756 static int qm_set_msi(struct hisi_qm *qm, bool set)
1758 struct pci_dev *pdev = qm->pdev;
1766 if (qm->err_status.is_qm_ecc_mbit ||
1767 qm->err_status.is_dev_ecc_mbit)
1771 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
1778 static void qm_wait_msi_finish(struct hisi_qm *qm)
1780 struct pci_dev *pdev = qm->pdev;
1800 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
1806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
1813 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
1815 struct pci_dev *pdev = qm->pdev;
1836 qm_wait_msi_finish(qm);
1875 return qp->sqe + sq_tail * qp->qm->sqe_size;
1887 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1889 struct device *dev = &qm->pdev->dev;
1893 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1896 if (qm->qp_in_used == qm->qp_num) {
1898 qm->qp_num);
1899 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1903 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1906 qm->qp_num);
1907 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1911 qp = &qm->qp_array[qp_id];
1920 qm->qp_in_used++;
1927 * hisi_qm_create_qp() - Create a queue pair from qm.
1928 * @qm: The qm we create a qp from.
1933 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1938 ret = qm_pm_get_sync(qm);
1942 down_write(&qm->qps_lock);
1943 qp = qm_create_qp_nolock(qm, alg_type);
1944 up_write(&qm->qps_lock);
1947 qm_pm_put_sync(qm);
1953 * hisi_qm_release_qp() - Release a qp back to its qm.
1960 struct hisi_qm *qm = qp->qm;
1962 down_write(&qm->qps_lock);
1964 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1965 up_write(&qm->qps_lock);
1969 qm->qp_in_used--;
1970 idr_remove(&qm->qp_idr, qp->qp_id);
1972 up_write(&qm->qps_lock);
1974 qm_pm_put_sync(qm);
1979 struct hisi_qm *qm = qp->qm;
1980 struct device *dev = &qm->pdev->dev;
1981 enum qm_hw_ver ver = qm->ver;
1992 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1995 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
2001 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2012 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
2021 struct hisi_qm *qm = qp->qm;
2022 struct device *dev = &qm->pdev->dev;
2023 enum qm_hw_ver ver = qm->ver;
2043 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2053 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2075 struct hisi_qm *qm = qp->qm;
2076 struct device *dev = &qm->pdev->dev;
2081 if (!qm_qp_avail_state(qm, qp, QP_START))
2104 struct hisi_qm *qm = qp->qm;
2107 down_write(&qm->qps_lock);
2109 up_write(&qm->qps_lock);
2127 struct hisi_qm *qm = qp->qm;
2133 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2148 struct hisi_qm *qm = qp->qm;
2149 struct device *dev = &qm->pdev->dev;
2157 if (qm_check_dev_error(qm))
2161 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
2168 addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
2175 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
2182 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
2203 hisi_qm_ctx_free(qm, size, addr, &dma_addr);
2210 struct device *dev = &qp->qm->pdev->dev;
2224 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
2234 flush_workqueue(qp->qm->wq);
2244 * hisi_qm_stop_qp() - Stop a qp in qm.
2253 down_write(&qp->qm->qps_lock);
2255 up_write(&qp->qm->qps_lock);
2267 * if qp related qm is resetting.
2284 atomic_read(&qp->qm->status.flags) == QM_STOP ||
2286 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2293 memcpy(sqe, msg, qp->qm->sqe_size);
2295 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2303 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2307 if (qm->ver == QM_HW_V1)
2310 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2311 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2314 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2322 	/* This function returns the number of free qps in qm. */
2325 struct hisi_qm *qm = uacce->priv;
2328 down_read(&qm->qps_lock);
2329 ret = qm->qp_num - qm->qp_in_used;
2330 up_read(&qm->qps_lock);
2335 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2339 for (i = 0; i < qm->qp_num; i++)
2340 qm_set_qp_disable(&qm->qp_array[i], offset);
2347 struct hisi_qm *qm = uacce->priv;
2351 qp = hisi_qm_create_qp(qm, alg_type);
2378 struct hisi_qm *qm = qp->qm;
2379 resource_size_t phys_base = qm->db_phys_base +
2380 qp->qp_id * qm->db_interval;
2382 struct pci_dev *pdev = qm->pdev;
2389 if (qm->ver == QM_HW_V1) {
2392 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
2397 if (sz > qm->db_interval)
2457 struct hisi_qm *qm = q->uacce->priv;
2460 down_write(&qm->qps_lock);
2462 up_write(&qm->qps_lock);
2493 qp_info.sqe_size = qp->qm->sqe_size;
2510 * @qm: the uacce device
2512 static int qm_hw_err_isolate(struct hisi_qm *qm)
2518 isolate = &qm->isolate_data;
2523 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
2556 static void qm_hw_err_destroy(struct hisi_qm *qm)
2560 mutex_lock(&qm->isolate_data.isolate_lock);
2561 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
2565 mutex_unlock(&qm->isolate_data.isolate_lock);
2570 struct hisi_qm *qm = uacce->priv;
2574 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2576 pf_qm = qm;
2584 struct hisi_qm *qm = uacce->priv;
2590 if (qm->isolate_data.is_isolate)
2593 qm->isolate_data.err_threshold = num;
2596 qm_hw_err_destroy(qm);
2603 struct hisi_qm *qm = uacce->priv;
2607 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2611 return qm->isolate_data.err_threshold;
2628 static void qm_remove_uacce(struct hisi_qm *qm)
2630 struct uacce_device *uacce = qm->uacce;
2632 if (qm->use_sva) {
2633 qm_hw_err_destroy(qm);
2635 qm->uacce = NULL;
2639 static int qm_alloc_uacce(struct hisi_qm *qm)
2641 struct pci_dev *pdev = qm->pdev;
2662 qm->use_sva = true;
2665 qm_remove_uacce(qm);
2670 uacce->priv = qm;
2672 if (qm->ver == QM_HW_V1)
2674 else if (qm->ver == QM_HW_V2)
2679 if (qm->ver == QM_HW_V1)
2681 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2685 mmio_page_nr = qm->db_interval / PAGE_SIZE;
2687 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2690 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2697 qm->uacce = uacce;
2698 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2699 mutex_init(&qm->isolate_data.isolate_lock);
2707 	 * @qm: The qm needed to be frozen.
2711 static int qm_frozen(struct hisi_qm *qm)
2713 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2716 down_write(&qm->qps_lock);
2718 if (!qm->qp_in_used) {
2719 qm->qp_in_used = qm->qp_num;
2720 up_write(&qm->qps_lock);
2721 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2725 up_write(&qm->qps_lock);
2733 struct hisi_qm *qm, *vf_qm;
2742 list_for_each_entry(qm, &qm_list->list, list) {
2743 dev = qm->pdev;
2763 * @qm: The qm needed to wait for the task to finish.
2766 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2768 while (qm_frozen(qm) ||
2769 ((qm->fun_type == QM_HW_PF) &&
2770 qm_try_frozen_vfs(qm->pdev, qm_list))) {
2774 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2775 test_bit(QM_RESETTING, &qm->misc_ctl))
2778 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2779 flush_work(&qm->cmd_process);
2785 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2787 struct device *dev = &qm->pdev->dev;
2792 qdma = &qm->qp_array[i].qdma;
2794 kfree(qm->poll_data[i].qp_finish_id);
2797 kfree(qm->poll_data);
2798 kfree(qm->qp_array);
2801 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
2804 struct device *dev = &qm->pdev->dev;
2805 size_t off = qm->sqe_size * sq_depth;
2809 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
2811 if (!qm->poll_data[id].qp_finish_id)
2814 qp = &qm->qp_array[id];
2827 qp->qm = qm;
2833 kfree(qm->poll_data[id].qp_finish_id);
2837 static void hisi_qm_pre_init(struct hisi_qm *qm)
2839 struct pci_dev *pdev = qm->pdev;
2841 if (qm->ver == QM_HW_V1)
2842 qm->ops = &qm_hw_ops_v1;
2843 else if (qm->ver == QM_HW_V2)
2844 qm->ops = &qm_hw_ops_v2;
2846 qm->ops = &qm_hw_ops_v3;
2848 pci_set_drvdata(pdev, qm);
2849 mutex_init(&qm->mailbox_lock);
2850 init_rwsem(&qm->qps_lock);
2851 qm->qp_in_used = 0;
2852 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
2858 static void qm_cmd_uninit(struct hisi_qm *qm)
2862 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2865 val = readl(qm->io_base + QM_IFC_INT_MASK);
2867 writel(val, qm->io_base + QM_IFC_INT_MASK);
2870 static void qm_cmd_init(struct hisi_qm *qm)
2874 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2878 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
2881 val = readl(qm->io_base + QM_IFC_INT_MASK);
2883 writel(val, qm->io_base + QM_IFC_INT_MASK);
2886 static void qm_put_pci_res(struct hisi_qm *qm)
2888 struct pci_dev *pdev = qm->pdev;
2890 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2891 iounmap(qm->db_io_base);
2893 iounmap(qm->io_base);
2897 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
2899 struct pci_dev *pdev = qm->pdev;
2902 qm_put_pci_res(qm);
2906 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
2908 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
2909 writel(state, qm->io_base + QM_VF_STATE);
2912 static void hisi_qm_unint_work(struct hisi_qm *qm)
2914 destroy_workqueue(qm->wq);
2917 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
2919 struct device *dev = &qm->pdev->dev;
2921 hisi_qp_memory_uninit(qm, qm->qp_num);
2922 if (qm->qdma.va) {
2923 hisi_qm_cache_wb(qm);
2924 dma_free_coherent(dev, qm->qdma.size,
2925 qm->qdma.va, qm->qdma.dma);
2928 idr_destroy(&qm->qp_idr);
2930 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
2931 kfree(qm->factor);
2935 * hisi_qm_uninit() - Uninitialize qm.
2936 	 * @qm: The qm needing uninit.
2938 * This function uninits qm related device resources.
2940 void hisi_qm_uninit(struct hisi_qm *qm)
2942 qm_cmd_uninit(qm);
2943 hisi_qm_unint_work(qm);
2944 down_write(&qm->qps_lock);
2946 if (!qm_avail_state(qm, QM_CLOSE)) {
2947 up_write(&qm->qps_lock);
2951 hisi_qm_memory_uninit(qm);
2952 hisi_qm_set_state(qm, QM_NOT_READY);
2953 up_write(&qm->qps_lock);
2955 qm_irqs_unregister(qm);
2956 hisi_qm_pci_uninit(qm);
2957 if (qm->use_sva) {
2958 uacce_remove(qm->uacce);
2959 qm->uacce = NULL;
2965 * hisi_qm_get_vft() - Get vft from a qm.
2966 	 * @qm: The qm whose vft we want to get.
2970 * We can allocate multiple queues to a qm by configuring virtual function
2974 * qm hw v1 does not support this interface.
2976 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2981 if (!qm->ops->get_vft) {
2982 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2986 return qm->ops->get_vft(qm, base, number);
2990 * hisi_qm_set_vft() - Set vft to a qm.
2991 	 * @qm: The qm whose vft we want to set.
2999 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3000 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3003 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3006 u32 max_q_num = qm->ctrl_qp_num;
3012 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
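Note: the kernel-doc above (lines 2999-3000) gives the calling convention for hisi_qm_set_vft() directly, and the listing shows the PF configuring its own range at line 3124 and the SR-IOV path handing ranges to VFs at line 3533. A hedged sketch combining both, with purely illustrative queue counts:

    /* Sketch only: function number 0 is the PF and a non-zero number
     * selects a VF, per the kernel-doc above. Queue ranges are made up.
     */
    static int example_split_queues(struct hisi_qm *qm)
    {
            int ret;

            /* queues 0..63 to the PF: A = 0, B = 63, B - A + 1 = 64 */
            ret = hisi_qm_set_vft(qm, 0, 0, 64);
            if (ret)
                    return ret;

            /* queues 64..79 to VF 2: A = 64, B = 79, B - A + 1 = 16 */
            return hisi_qm_set_vft(qm, 2, 64, 16);
    }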
3015 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3017 struct hisi_qm_status *status = &qm->status;
3025 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3028 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3029 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3031 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3032 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3035 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3037 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3038 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3041 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3043 struct device *dev = &qm->pdev->dev;
3052 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3053 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3054 if (qm->ver == QM_HW_V1)
3056 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3065 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3072 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3074 struct device *dev = &qm->pdev->dev;
3083 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3084 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3085 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3094 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3101 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3103 struct device *dev = &qm->pdev->dev;
3106 qm_init_eq_aeq_status(qm);
3108 ret = qm_eq_ctx_cfg(qm);
3114 return qm_aeq_ctx_cfg(qm);
3117 static int __hisi_qm_start(struct hisi_qm *qm)
3121 WARN_ON(!qm->qdma.va);
3123 if (qm->fun_type == QM_HW_PF) {
3124 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3129 ret = qm_eq_aeq_ctx_cfg(qm);
3133 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3137 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3141 qm_init_prefetch(qm);
3142 qm_enable_eq_aeq_interrupts(qm);
3148 * hisi_qm_start() - start qm
3149 * @qm: The qm to be started.
3151 * This function starts a qm, then we can allocate qp from this qm.
3153 int hisi_qm_start(struct hisi_qm *qm)
3155 struct device *dev = &qm->pdev->dev;
3158 down_write(&qm->qps_lock);
3160 if (!qm_avail_state(qm, QM_START)) {
3161 up_write(&qm->qps_lock);
3165 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3167 if (!qm->qp_num) {
3173 ret = __hisi_qm_start(qm);
3175 atomic_set(&qm->status.flags, QM_START);
3177 hisi_qm_set_state(qm, QM_READY);
3179 up_write(&qm->qps_lock);
3184 static int qm_restart(struct hisi_qm *qm)
3186 struct device *dev = &qm->pdev->dev;
3190 ret = hisi_qm_start(qm);
3194 down_write(&qm->qps_lock);
3195 for (i = 0; i < qm->qp_num; i++) {
3196 qp = &qm->qp_array[i];
3203 up_write(&qm->qps_lock);
3209 up_write(&qm->qps_lock);
3215 static int qm_stop_started_qp(struct hisi_qm *qm)
3217 struct device *dev = &qm->pdev->dev;
3221 for (i = 0; i < qm->qp_num; i++) {
3222 qp = &qm->qp_array[i];
3237 * qm_clear_queues() - Clear all queues memory in a qm.
3238 * @qm: The qm in which the queues will be cleared.
3240 * This function clears all queues memory in a qm. Reset of accelerator can
3243 static void qm_clear_queues(struct hisi_qm *qm)
3248 for (i = 0; i < qm->qp_num; i++) {
3249 qp = &qm->qp_array[i];
3254 memset(qm->qdma.va, 0, qm->qdma.size);
3258 * hisi_qm_stop() - Stop a qm.
3259 * @qm: The qm which will be stopped.
3260 * @r: The reason to stop qm.
3262 	 * This function stops qm and its qps, then qm cannot accept requests.
3264 * to let qm start again.
3266 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3268 struct device *dev = &qm->pdev->dev;
3271 down_write(&qm->qps_lock);
3273 qm->status.stop_reason = r;
3274 if (!qm_avail_state(qm, QM_STOP)) {
3279 if (qm->status.stop_reason == QM_SOFT_RESET ||
3280 qm->status.stop_reason == QM_DOWN) {
3281 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3282 ret = qm_stop_started_qp(qm);
3287 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3290 qm_disable_eq_aeq_interrupts(qm);
3291 if (qm->fun_type == QM_HW_PF) {
3292 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3300 qm_clear_queues(qm);
3301 atomic_set(&qm->status.flags, QM_STOP);
3304 up_write(&qm->qps_lock);
3309 static void qm_hw_error_init(struct hisi_qm *qm)
3311 if (!qm->ops->hw_error_init) {
3312 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3316 qm->ops->hw_error_init(qm);
3319 static void qm_hw_error_uninit(struct hisi_qm *qm)
3321 if (!qm->ops->hw_error_uninit) {
3322 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3326 qm->ops->hw_error_uninit(qm);
3329 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3331 if (!qm->ops->hw_error_handle) {
3332 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3336 return qm->ops->hw_error_handle(qm);
3341 * @qm: The qm for which we want to do error initialization.
3345 void hisi_qm_dev_err_init(struct hisi_qm *qm)
3347 if (qm->fun_type == QM_HW_VF)
3350 qm_hw_error_init(qm);
3352 if (!qm->err_ini->hw_err_enable) {
3353 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3356 qm->err_ini->hw_err_enable(qm);
3362 * @qm: The qm for which we want to do error uninitialization.
3366 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3368 if (qm->fun_type == QM_HW_VF)
3371 qm_hw_error_uninit(qm);
3373 if (!qm->err_ini->hw_err_disable) {
3374 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3377 qm->err_ini->hw_err_disable(qm);
3412 struct hisi_qm *qm;
3417 list_for_each_entry(qm, &qm_list->list, list) {
3418 dev = &qm->pdev->dev;
3428 res->qm = qm;
3474 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3498 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3501 u32 max_qp_num = qm->max_qp_num;
3502 u32 q_base = qm->qp_num;
3508 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3533 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3536 hisi_qm_set_vft(qm, j, 0, 0);
3545 static int qm_clear_vft_config(struct hisi_qm *qm)
3550 for (i = 1; i <= qm->vfs_num; i++) {
3551 ret = hisi_qm_set_vft(qm, i, 0, 0);
3555 qm->vfs_num = 0;
3560 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
3562 struct device *dev = &qm->pdev->dev;
3566 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3570 qm->factor[fun_index].func_qos = qos;
3572 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3580 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3590 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
3598 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3604 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
3605 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
3606 writel(fun_index, qm->io_base + QM_VFT_CFG);
3608 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
3609 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
3611 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3617 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
3618 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
3629 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
3633 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
3640 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
3642 struct device *dev = &qm->pdev->dev;
3647 qos = qm_get_shaper_vft_qos(qm, fun_num);
3654 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
3659 static int qm_vf_read_qos(struct hisi_qm *qm)
3665 qm->mb_qos = 0;
3668 ret = qm_ping_pf(qm, QM_VF_GET_QOS);
3670 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
3676 if (qm->mb_qos)
3680 pci_err(qm->pdev, "PF ping VF timeout!\n");
3691 struct hisi_qm *qm = filp->private_data;
3696 ret = hisi_qm_get_dfx_access(qm);
3701 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3702 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
3707 if (qm->fun_type == QM_HW_PF) {
3708 ir = qm_get_shaper_vft_qos(qm, 0);
3710 ret = qm_vf_read_qos(qm);
3713 ir = qm->mb_qos;
3722 clear_bit(QM_RESETTING, &qm->misc_ctl);
3724 hisi_qm_put_dfx_access(qm);
3728 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
3732 const struct bus_type *bus_type = qm->pdev->dev.bus;
3745 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
3751 pci_err(qm->pdev, "input pci bdf number is error!\n");
3765 struct hisi_qm *qm = filp->private_data;
3782 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
3787 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3788 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
3792 ret = qm_pm_get_sync(qm);
3798 ret = qm_func_shaper_enable(qm, fun_index, val);
3800 pci_err(qm->pdev, "failed to enable function shaper!\n");
3805 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
3810 qm_pm_put_sync(qm);
3812 clear_bit(QM_RESETTING, &qm->misc_ctl);
3825 * @qm: The qm for which we want to add debugfs files.
3829 void hisi_qm_set_algqos_init(struct hisi_qm *qm)
3831 if (qm->fun_type == QM_HW_PF)
3832 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
3833 qm, &qm_algqos_fops);
3834 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3835 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
3836 qm, &qm_algqos_fops);
3839 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
3844 qm->factor[i].func_qos = QM_QOS_MAX_VAL;
3858 struct hisi_qm *qm = pci_get_drvdata(pdev);
3861 ret = qm_pm_get_sync(qm);
3881 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3882 hisi_qm_init_vf_qos(qm, num_vfs);
3884 ret = qm_vf_q_assign(qm, num_vfs);
3890 qm->vfs_num = num_vfs;
3895 qm_clear_vft_config(qm);
3904 qm_pm_put_sync(qm);
3918 struct hisi_qm *qm = pci_get_drvdata(pdev);
3927 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3934 ret = qm_clear_vft_config(qm);
3938 qm_pm_put_sync(qm);
3960 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3964 if (!qm->err_ini->get_dev_hw_err_status) {
3965 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
3970 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3972 if (err_sts & qm->err_info.ecc_2bits_mask)
3973 qm->err_status.is_dev_ecc_mbit = true;
3975 if (qm->err_ini->log_dev_hw_err)
3976 qm->err_ini->log_dev_hw_err(qm, err_sts);
3978 if (err_sts & qm->err_info.dev_reset_mask)
3981 if (qm->err_ini->clear_dev_hw_err_status)
3982 qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
3988 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3992 /* log qm error */
3993 qm_ret = qm_hw_error_handle(qm);
3996 dev_ret = qm_dev_err_handle(qm);
4004 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4009 	 * qm hardware error status when errors occur.
4014 struct hisi_qm *qm = pci_get_drvdata(pdev);
4024 ret = qm_process_dev_error(qm);
4032 static int qm_check_req_recv(struct hisi_qm *qm)
4034 struct pci_dev *pdev = qm->pdev;
4038 if (qm->ver >= QM_HW_V3)
4041 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4042 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4050 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4051 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4060 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4062 struct pci_dev *pdev = qm->pdev;
4084 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
4086 struct pci_dev *pdev = qm->pdev;
4111 static int qm_vf_reset_prepare(struct hisi_qm *qm,
4114 struct hisi_qm_list *qm_list = qm->qm_list;
4115 struct pci_dev *pdev = qm->pdev;
4141 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
4144 struct pci_dev *pdev = qm->pdev;
4147 if (!qm->vfs_num)
4151 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4152 ret = qm_ping_all_vfs(qm, cmd);
4156 ret = qm_vf_reset_prepare(qm, stop_reason);
4164 static int qm_controller_reset_prepare(struct hisi_qm *qm)
4166 struct pci_dev *pdev = qm->pdev;
4169 ret = qm_reset_prepare_ready(qm);
4176 qm_cmd_uninit(qm);
4179 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4183 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4186 qm_reset_bit_clear(qm);
4190 if (qm->use_sva) {
4191 ret = qm_hw_err_isolate(qm);
4196 ret = qm_wait_vf_prepare_finish(qm);
4200 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4205 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
4210 if (qm->ver >= QM_HW_V3)
4213 if (!qm->err_status.is_dev_ecc_mbit &&
4214 qm->err_status.is_qm_ecc_mbit &&
4215 qm->err_ini->close_axi_master_ooo) {
4216 qm->err_ini->close_axi_master_ooo(qm);
4217 } else if (qm->err_status.is_dev_ecc_mbit &&
4218 !qm->err_status.is_qm_ecc_mbit &&
4219 !qm->err_ini->close_axi_master_ooo) {
4220 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4222 qm->io_base + QM_RAS_NFE_ENABLE);
4223 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
4227 static int qm_soft_reset(struct hisi_qm *qm)
4229 struct pci_dev *pdev = qm->pdev;
4234 ret = qm_check_req_recv(qm);
4238 if (qm->vfs_num) {
4239 ret = qm_set_vf_mse(qm, false);
4246 ret = qm->ops->set_msi(qm, false);
4252 qm_dev_ecc_mbit_handle(qm);
4256 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
4259 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
4268 if (qm->err_ini->close_sva_prefetch)
4269 qm->err_ini->close_sva_prefetch(qm);
4271 ret = qm_set_pf_mse(qm, false);
4283 qm->err_info.acpi_rst,
4302 static int qm_vf_reset_done(struct hisi_qm *qm)
4304 struct hisi_qm_list *qm_list = qm->qm_list;
4305 struct pci_dev *pdev = qm->pdev;
4331 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
4333 struct pci_dev *pdev = qm->pdev;
4336 if (!qm->vfs_num)
4339 ret = qm_vf_q_assign(qm, qm->vfs_num);
4346 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4347 ret = qm_ping_all_vfs(qm, cmd);
4351 ret = qm_vf_reset_done(qm);
4359 static int qm_dev_hw_init(struct hisi_qm *qm)
4361 return qm->err_ini->hw_init(qm);
4364 static void qm_restart_prepare(struct hisi_qm *qm)
4368 if (qm->err_ini->open_sva_prefetch)
4369 qm->err_ini->open_sva_prefetch(qm);
4371 if (qm->ver >= QM_HW_V3)
4374 if (!qm->err_status.is_qm_ecc_mbit &&
4375 !qm->err_status.is_dev_ecc_mbit)
4379 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4380 writel(value & ~qm->err_info.msi_wr_port,
4381 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4384 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
4385 if (value && qm->err_ini->clear_dev_hw_err_status)
4386 qm->err_ini->clear_dev_hw_err_status(qm, value);
4389 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4392 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
4395 static void qm_restart_done(struct hisi_qm *qm)
4399 if (qm->ver >= QM_HW_V3)
4402 if (!qm->err_status.is_qm_ecc_mbit &&
4403 !qm->err_status.is_dev_ecc_mbit)
4407 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4408 value |= qm->err_info.msi_wr_port;
4409 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4412 qm->err_status.is_qm_ecc_mbit = false;
4413 qm->err_status.is_dev_ecc_mbit = false;
4416 static int qm_controller_reset_done(struct hisi_qm *qm)
4418 struct pci_dev *pdev = qm->pdev;
4421 ret = qm->ops->set_msi(qm, true);
4427 ret = qm_set_pf_mse(qm, true);
4433 if (qm->vfs_num) {
4434 ret = qm_set_vf_mse(qm, true);
4441 ret = qm_dev_hw_init(qm);
4447 qm_restart_prepare(qm);
4448 hisi_qm_dev_err_init(qm);
4449 if (qm->err_ini->open_axi_master_ooo)
4450 qm->err_ini->open_axi_master_ooo(qm);
4452 ret = qm_dev_mem_reset(qm);
4458 ret = qm_restart(qm);
4464 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4468 ret = qm_wait_vf_prepare_finish(qm);
4472 qm_cmd_init(qm);
4473 qm_restart_done(qm);
4475 qm_reset_bit_clear(qm);
4480 static int qm_controller_reset(struct hisi_qm *qm)
4482 struct pci_dev *pdev = qm->pdev;
4487 ret = qm_controller_reset_prepare(qm);
4489 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4490 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4491 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4495 hisi_qm_show_last_dfx_regs(qm);
4496 if (qm->err_ini->show_last_dfx_regs)
4497 qm->err_ini->show_last_dfx_regs(qm);
4499 ret = qm_soft_reset(qm);
4503 ret = qm_controller_reset_done(qm);
4513 qm_reset_bit_clear(qm);
4516 if (qm->use_sva)
4517 qm->isolate_data.is_isolate = true;
4530 struct hisi_qm *qm = pci_get_drvdata(pdev);
4537 ret = qm_controller_reset(qm);
4550 struct hisi_qm *qm = pci_get_drvdata(pdev);
4566 ret = qm_reset_prepare_ready(qm);
4573 if (qm->fun_type == QM_HW_PF)
4574 qm_cmd_uninit(qm);
4576 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
4580 ret = hisi_qm_stop(qm, QM_DOWN);
4583 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4584 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4588 ret = qm_wait_vf_prepare_finish(qm);
4599 struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
4602 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
4614 struct hisi_qm *qm = pci_get_drvdata(pdev);
4617 if (qm->fun_type == QM_HW_PF) {
4618 ret = qm_dev_hw_init(qm);
4627 ret = qm_restart(qm);
4633 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4637 ret = qm_wait_vf_prepare_finish(qm);
4642 if (qm->fun_type == QM_HW_PF)
4643 qm_cmd_init(qm);
4648 qm_reset_bit_clear(qm);
4654 struct hisi_qm *qm = data;
4657 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
4658 ret = qm_process_dev_error(qm);
4660 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
4661 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
4662 schedule_work(&qm->rst_work);
4671 	 * This function will stop qm when the OS is shutting down or rebooting.
4675 struct hisi_qm *qm = pci_get_drvdata(pdev);
4678 ret = hisi_qm_stop(qm, QM_DOWN);
4680 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
4682 hisi_qm_cache_wb(qm);
4688 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
4691 ret = qm_pm_get_sync(qm);
4693 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4698 ret = qm_controller_reset(qm);
4700 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
4702 qm_pm_put_sync(qm);
4705 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
4709 struct pci_dev *pdev = qm->pdev;
4712 ret = qm_reset_prepare_ready(qm);
4715 atomic_set(&qm->status.flags, QM_STOP);
4720 ret = hisi_qm_stop(qm, stop_reason);
4723 atomic_set(&qm->status.flags, QM_STOP);
4731 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4732 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4735 ret = qm_ping_pf(qm, cmd);
4740 static void qm_pf_reset_vf_done(struct hisi_qm *qm)
4743 struct pci_dev *pdev = qm->pdev;
4747 ret = hisi_qm_start(qm);
4753 qm_cmd_init(qm);
4754 ret = qm_ping_pf(qm, cmd);
4758 qm_reset_bit_clear(qm);
4761 static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
4763 struct device *dev = &qm->pdev->dev;
4769 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
4782 ret = qm_get_mb_cmd(qm, &msg, 0);
4783 qm_clear_cmd_interrupt(qm, 0);
4798 static void qm_pf_reset_vf_process(struct hisi_qm *qm,
4801 struct device *dev = &qm->pdev->dev;
4807 qm_cmd_uninit(qm);
4808 qm_pf_reset_vf_prepare(qm, stop_reason);
4810 ret = qm_wait_pf_reset_finish(qm);
4814 qm_pf_reset_vf_done(qm);
4821 qm_cmd_init(qm);
4822 qm_reset_bit_clear(qm);
4825 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
4827 struct device *dev = &qm->pdev->dev;
4836 ret = qm_get_mb_cmd(qm, &msg, fun_num);
4837 qm_clear_cmd_interrupt(qm, BIT(fun_num));
4846 qm_pf_reset_vf_process(qm, QM_DOWN);
4849 qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
4852 qm_vf_get_qos(qm, fun_num);
4855 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
4865 struct hisi_qm *qm = container_of(cmd_process,
4867 u32 vfs_num = qm->vfs_num;
4871 if (qm->fun_type == QM_HW_PF) {
4872 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
4878 qm_handle_cmd_msg(qm, i);
4884 qm_handle_cmd_msg(qm, 0);
4888 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
4889 * @qm: The qm needs add.
4890 * @qm_list: The qm list.
4892 * This function adds qm to qm list, and will register algorithm to
4893 * crypto when the qm list is empty.
4895 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
4897 struct device *dev = &qm->pdev->dev;
4904 list_add_tail(&qm->list, &qm_list->list);
4907 if (qm->ver <= QM_HW_V2 && qm->use_sva) {
4913 ret = qm_list->register_to_crypto(qm);
4916 list_del(&qm->list);
4926 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
4927 * qm list.
4928 * @qm: The qm needs delete.
4929 * @qm_list: The qm list.
4931 * This function deletes qm from qm list, and will unregister algorithm
4932 * from crypto when the qm list is empty.
4934 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
4937 list_del(&qm->list);
4940 if (qm->ver <= QM_HW_V2 && qm->use_sva)
4944 qm_list->unregister_from_crypto(qm);
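Note: the register/unregister pair above is typically called from an accelerator driver's probe and remove paths. A hedged sketch, in which my_qm_list and the wrapper names are hypothetical:

    /* Sketch only: the driver owns one hisi_qm_list shared by all of its
     * devices; per the kernel-doc above, the algorithms are registered to
     * crypto on the first add and unregistered on the last removal.
     */
    static struct hisi_qm_list my_qm_list;

    static int my_driver_add_qm(struct hisi_qm *qm)
    {
            return hisi_qm_alg_register(qm, &my_qm_list);
    }

    static void my_driver_del_qm(struct hisi_qm *qm)
    {
            hisi_qm_alg_unregister(qm, &my_qm_list);
    }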
4948 static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
4950 struct pci_dev *pdev = qm->pdev;
4953 if (qm->fun_type == QM_HW_VF)
4956 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4961 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4964 static int qm_register_abnormal_irq(struct hisi_qm *qm)
4966 struct pci_dev *pdev = qm->pdev;
4970 if (qm->fun_type == QM_HW_VF)
4973 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4978 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
4980 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
4985 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
4987 struct pci_dev *pdev = qm->pdev;
4990 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
4995 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4998 static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
5000 struct pci_dev *pdev = qm->pdev;
5004 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
5009 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
5016 static void qm_unregister_aeq_irq(struct hisi_qm *qm)
5018 struct pci_dev *pdev = qm->pdev;
5021 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
5026 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5029 static int qm_register_aeq_irq(struct hisi_qm *qm)
5031 struct pci_dev *pdev = qm->pdev;
5035 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
5041 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
5048 static void qm_unregister_eq_irq(struct hisi_qm *qm)
5050 struct pci_dev *pdev = qm->pdev;
5053 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
5058 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5061 static int qm_register_eq_irq(struct hisi_qm *qm)
5063 struct pci_dev *pdev = qm->pdev;
5067 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
5072 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
5079 static void qm_irqs_unregister(struct hisi_qm *qm)
5081 qm_unregister_mb_cmd_irq(qm);
5082 qm_unregister_abnormal_irq(qm);
5083 qm_unregister_aeq_irq(qm);
5084 qm_unregister_eq_irq(qm);
5087 static int qm_irqs_register(struct hisi_qm *qm)
5091 ret = qm_register_eq_irq(qm);
5095 ret = qm_register_aeq_irq(qm);
5099 ret = qm_register_abnormal_irq(qm);
5103 ret = qm_register_mb_cmd_irq(qm);
5110 qm_unregister_abnormal_irq(qm);
5112 qm_unregister_aeq_irq(qm);
5114 qm_unregister_eq_irq(qm);
5118 static int qm_get_qp_num(struct hisi_qm *qm)
5120 struct device *dev = &qm->pdev->dev;
5124 if (qm->fun_type == QM_HW_VF) {
5125 if (qm->ver != QM_HW_V1)
5127 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
5132 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5133 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
5134 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
5137 if (qm->qp_num <= qm->max_qp_num)
5140 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
5143 qm->qp_num, qm->max_qp_num);
5148 qm->qp_num, qm->max_qp_num);
5149 qm->qp_num = qm->max_qp_num;
5150 qm->debug.curr_qm_qp_num = qm->qp_num;
5155 static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
5158 struct pci_dev *pdev = qm->pdev;
5168 qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
5169 qm_pre_store_caps[i], qm->cap_ver);
5172 qm->cap_tables.qm_cap_table = qm_cap;
5177 static int qm_get_hw_caps(struct hisi_qm *qm)
5179 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
5181 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
5186 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
5188 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5190 if (qm->ver >= QM_HW_V3) {
5191 val = readl(qm->io_base + QM_FUNC_CAPS_REG);
5192 qm->cap_ver = val & QM_CAPBILITY_VERSION;
5197 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
5199 set_bit(qm_cap_info_comm[i].type, &qm->caps);
5204 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
5206 set_bit(cap_info[i].type, &qm->caps);
5210 return qm_pre_store_irq_type_caps(qm);
5213 static int qm_get_pci_res(struct hisi_qm *qm)
5215 struct pci_dev *pdev = qm->pdev;
5219 ret = pci_request_mem_regions(pdev, qm->dev_name);
5225 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
5226 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
5227 if (!qm->io_base) {
5232 ret = qm_get_hw_caps(qm);
5236 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
5237 qm->db_interval = QM_QP_DB_INTERVAL;
5238 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
5239 qm->db_io_base = ioremap(qm->db_phys_base,
5241 if (!qm->db_io_base) {
5246 qm->db_phys_base = qm->phys_base;
5247 qm->db_io_base = qm->io_base;
5248 qm->db_interval = 0;
5251 ret = qm_get_qp_num(qm);
5258 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
5259 iounmap(qm->db_io_base);
5261 iounmap(qm->io_base);
5267 static int hisi_qm_pci_init(struct hisi_qm *qm)
5269 struct pci_dev *pdev = qm->pdev;
5280 ret = qm_get_pci_res(qm);
5289 num_vec = qm_get_irq_num(qm);
5299 qm_put_pci_res(qm);
5305 static int hisi_qm_init_work(struct hisi_qm *qm)
5309 for (i = 0; i < qm->qp_num; i++)
5310 INIT_WORK(&qm->poll_data[i].work, qm_work_process);
5312 if (qm->fun_type == QM_HW_PF)
5313 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
5315 if (qm->ver > QM_HW_V2)
5316 INIT_WORK(&qm->cmd_process, qm_cmd_process);
5318 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
5320 pci_name(qm->pdev));
5321 if (!qm->wq) {
5322 pci_err(qm->pdev, "failed to alloc workqueue!\n");
5329 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
5331 struct device *dev = &qm->pdev->dev;
5336 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
5337 if (!qm->qp_array)
5340 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
5341 if (!qm->poll_data) {
5342 kfree(qm->qp_array);
5346 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
5349 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
5351 for (i = 0; i < qm->qp_num; i++) {
5352 qm->poll_data[i].qm = qm;
5353 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
5362 hisi_qp_memory_uninit(qm, i);
5367 static int hisi_qm_memory_init(struct hisi_qm *qm)
5369 struct device *dev = &qm->pdev->dev;
5373 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
5374 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
5375 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
5376 if (!qm->factor)
5380 qm->factor[0].func_qos = QM_QOS_MAX_VAL;
5383 #define QM_INIT_BUF(qm, type, num) do { \
5384 (qm)->type = ((qm)->qdma.va + (off)); \
5385 (qm)->type##_dma = (qm)->qdma.dma + (off); \
5389 idr_init(&qm->qp_idr);
5390 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
5391 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
5392 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
5393 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
5394 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
5395 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
5397 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
5398 if (!qm->qdma.va) {
5403 QM_INIT_BUF(qm, eqe, qm->eq_depth);
5404 QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
5405 QM_INIT_BUF(qm, sqc, qm->qp_num);
5406 QM_INIT_BUF(qm, cqc, qm->qp_num);
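Note: QM_INIT_BUF (lines 5383-5385) carves the single coherent qdma allocation (sized at lines 5391-5394) into the eqe, aeqe, sqc and cqc regions; the part of the macro not captured by this listing presumably advances off by the aligned size of each region, which is why the four QM_INIT_BUF calls above fill the buffer exactly. A standalone user-space sketch of that offset arithmetic, with QMC_ALIGN approximated as 32-byte alignment and all sizes purely illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define QMC_ALIGN(sz)   (((sz) + 31) & ~31UL)

    int main(void)
    {
            /* illustrative element sizes and counts, not the hardware's */
            size_t eqe = 16, aeqe = 16, sqc = 32, cqc = 32;
            size_t eq_depth = 1024, aeq_depth = 1024, qp_num = 256;
            size_t off = 0;

            printf("eqe  at offset %zu\n", off); off += QMC_ALIGN(eqe * eq_depth);
            printf("aeqe at offset %zu\n", off); off += QMC_ALIGN(aeqe * aeq_depth);
            printf("sqc  at offset %zu\n", off); off += QMC_ALIGN(sqc * qp_num);
            printf("cqc  at offset %zu\n", off); off += QMC_ALIGN(cqc * qp_num);
            printf("total qdma.size = %zu\n", off);
            return 0;
    }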
5408 ret = hisi_qp_alloc_memory(qm);
5415 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
5417 idr_destroy(&qm->qp_idr);
5418 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
5419 kfree(qm->factor);
5425 * hisi_qm_init() - Initialize configures about qm.
5426 * @qm: The qm needing init.
5428 	 * This function inits qm, then we can call hisi_qm_start to put qm into work.
5430 int hisi_qm_init(struct hisi_qm *qm)
5432 struct pci_dev *pdev = qm->pdev;
5436 hisi_qm_pre_init(qm);
5438 ret = hisi_qm_pci_init(qm);
5442 ret = qm_irqs_register(qm);
5446 if (qm->fun_type == QM_HW_PF) {
5448 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5449 qm_disable_clock_gate(qm);
5450 ret = qm_dev_mem_reset(qm);
5457 if (qm->mode == UACCE_MODE_SVA) {
5458 ret = qm_alloc_uacce(qm);
5463 ret = hisi_qm_memory_init(qm);
5467 ret = hisi_qm_init_work(qm);
5471 qm_cmd_init(qm);
5472 atomic_set(&qm->status.flags, QM_INIT);
5477 hisi_qm_memory_uninit(qm);
5479 qm_remove_uacce(qm);
5481 qm_irqs_unregister(qm);
5483 hisi_qm_pci_uninit(qm);
5490 * @qm: pointer to accelerator device.
5497 int hisi_qm_get_dfx_access(struct hisi_qm *qm)
5499 struct device *dev = &qm->pdev->dev;
5506 return qm_pm_get_sync(qm);
5512 * @qm: pointer to accelerator device.
5516 void hisi_qm_put_dfx_access(struct hisi_qm *qm)
5518 qm_pm_put_sync(qm);
5523 * hisi_qm_pm_init() - Initialize qm runtime PM.
5524 * @qm: pointer to accelerator device.
5526 	 * Function that initializes qm runtime PM.
5528 void hisi_qm_pm_init(struct hisi_qm *qm)
5530 struct device *dev = &qm->pdev->dev;
5532 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
5542 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5543 * @qm: pointer to accelerator device.
5545 	 * Function that uninitializes qm runtime PM.
5547 void hisi_qm_pm_uninit(struct hisi_qm *qm)
5549 struct device *dev = &qm->pdev->dev;
5551 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
5559 static int qm_prepare_for_suspend(struct hisi_qm *qm)
5561 struct pci_dev *pdev = qm->pdev;
5565 ret = qm->ops->set_msi(qm, false);
5573 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5575 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
5584 ret = qm_set_pf_mse(qm, false);
5591 static int qm_rebuild_for_resume(struct hisi_qm *qm)
5593 struct pci_dev *pdev = qm->pdev;
5596 ret = qm_set_pf_mse(qm, true);
5602 ret = qm->ops->set_msi(qm, true);
5608 ret = qm_dev_hw_init(qm);
5614 qm_cmd_init(qm);
5615 hisi_qm_dev_err_init(qm);
5617 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5618 qm_disable_clock_gate(qm);
5619 ret = qm_dev_mem_reset(qm);
5635 struct hisi_qm *qm = pci_get_drvdata(pdev);
5640 ret = hisi_qm_stop(qm, QM_NORMAL);
5642 pci_err(pdev, "failed to stop qm(%d)\n", ret);
5646 ret = qm_prepare_for_suspend(qm);
5663 struct hisi_qm *qm = pci_get_drvdata(pdev);
5668 ret = qm_rebuild_for_resume(qm);
5674 ret = hisi_qm_start(qm);
5676 if (qm_check_dev_error(qm)) {
5677 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
5681 pci_err(pdev, "failed to start qm(%d)!\n", ret);