Lines matching defs:cpt (ThunderX CPT PF crypto driver, DRV_NAME "thunder-cpt")
16 #define DRV_NAME "thunder-cpt"
26 static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
32 struct device *dev = &cpt->pdev->dev;
35 coremask = (coremask << cpt->max_se_cores);
38 grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
39 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
42 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
45 grp = cpt_read_csr64(cpt->reg_base,
54 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
55 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
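Read together, the cpt_disable_cores() fragments above describe a disengage-then-disable sequence: drop the cores from the group-enable register, poll CPTX_PF_EXEC_BUSY until they go idle, then clear their bits in CPTX_PF_EXE_CTL. A minimal sketch of that flow follows; the (type, grp) parameter names are inferred from the callers further down the listing, and the poll budget, udelay() pacing and CSR_DELAY constant are assumptions, not taken from the listing.

static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
                              u8 type, u8 grp)
{
        struct device *dev = &cpt->pdev->dev;
        u64 grpmask, pf_exe_ctl, busy;
        u32 timeout = 100;                      /* assumed poll budget */

        /* AE core bits sit above the SE core bits in these registers */
        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        /* Disengage the cores from engine group 'grp' */
        grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        grpmask & ~coremask);

        /* Wait for any in-flight work on those cores to drain */
        busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while ((busy & coremask) && timeout--) {
                udelay(CSR_DELAY);              /* CSR_DELAY is assumed */
                busy = cpt_read_csr64(cpt->reg_base,
                                      CPTX_PF_EXEC_BUSY(0));
        }
        if (busy & coremask)
                dev_err(dev, "cores still busy: 0x%llx\n", busy & coremask);

        /* Finally take the cores out of CPTX_PF_EXE_CTL */
        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        pf_exe_ctl & ~coremask);
}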
63 static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
69 coremask = (coremask << cpt->max_se_cores);
71 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
72 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
77 static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
83 coremask = (coremask << cpt->max_se_cores);
85 pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
86 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
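cpt_enable_cores() and cpt_configure_group() are the mirror of the disable path: the same optional shift for AE cores, then a read-modify-write that ORs the mask into CPTX_PF_EXE_CTL or CPTX_PF_GX_EN. A sketch of the group-configuration side, with the parameter order inferred from the cpt_configure_group(cpt, mcode->group, mcode->core_mask, ...) callers below:

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
                                u64 coremask, u8 type)
{
        u64 pf_gx_en;

        /* AE cores live in the upper bits, after the SE cores */
        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        /* Add the cores to engine group 'grp' */
        pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        pf_gx_en | coremask);
}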
91 static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
94 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
97 static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
100 cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
103 static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
106 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
109 static void cpt_disable_all_interrupts(struct cpt_device *cpt)
111 cpt_disable_mbox_interrupts(cpt);
112 cpt_disable_ecc_interrupts(cpt);
113 cpt_disable_exec_interrupts(cpt);
116 static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
119 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
122 static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
126 struct device *dev = &cpt->pdev->dev;
152 cpt_write_csr64(cpt->reg_base,
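Only one register write from cpt_load_microcode() matched, but it is the heart of the function: every core selected in mcode->core_mask gets pointed at the DMA address of the microcode image. The sketch below assumes a CPTX_PF_ENGX_UCODE_BASE(0, core) register macro and microcode fields (is_ae, phys_base) that do not appear in the listing, plus an engine layout with SE cores at the low indices and AE cores after them.

static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
        struct device *dev = &cpt->pdev->dev;
        int core, shift = 0;
        u32 total_cores;

        if (!mcode || !mcode->code || !mcode->code_size) {
                dev_err(dev, "no microcode to load\n");
                return -EINVAL;
        }

        /* Assumed layout: SE engines first, AE engines after them */
        if (mcode->is_ae) {
                core = cpt->max_se_cores;
                total_cores = cpt->max_se_cores + cpt->max_ae_cores;
        } else {
                core = 0;
                total_cores = cpt->max_se_cores;
        }

        /* Point each selected engine at the DMA address of the image */
        for (; core < total_cores; core++, shift++)
                if (mcode->core_mask & (1ULL << shift))
                        cpt_write_csr64(cpt->reg_base,
                                        CPTX_PF_ENGX_UCODE_BASE(0, core),
                                        (u64)mcode->phys_base);
        return 0;
}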
160 static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
163 struct device *dev = &cpt->pdev->dev;
166 cpt->flags &= ~CPT_FLAG_DEVICE_READY;
168 cpt_disable_all_interrupts(cpt);
171 if (mcode->num_cores > cpt->max_ae_cores) {
177 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
182 mcode->group = cpt->next_group;
185 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
188 ret = cpt_load_microcode(cpt, mcode);
194 cpt->next_group++;
196 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
199 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
201 if (mcode->num_cores > cpt->max_se_cores) {
206 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
211 mcode->group = cpt->next_group;
214 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
217 ret = cpt_load_microcode(cpt, mcode);
223 cpt->next_group++;
225 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
228 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
232 cpt_enable_mbox_interrupts(cpt);
233 cpt->flags |= CPT_FLAG_DEVICE_READY;
239 cpt_enable_mbox_interrupts(cpt);
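do_cpt_init() runs the same group-assignment sequence twice, once for AE and once for SE microcode, and re-enables mailbox interrupts on both the success and the failure path (note the second cpt_enable_mbox_interrupts() above). Condensed to a single hypothetical helper, the flow suggested by the fragments is roughly the sketch below; do_cpt_init_one() is an invented name, and the GENMASK() core-mask construction and -EINVAL error code are assumptions.

static int do_cpt_init_one(struct cpt_device *cpt, struct microcode *mcode,
                           u8 type, u8 max_cores)
{
        int ret;

        cpt->flags &= ~CPT_FLAG_DEVICE_READY;
        cpt_disable_all_interrupts(cpt);

        /* Refuse more cores than exist, or more groups than the HW has */
        if (mcode->num_cores > max_cores ||
            cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                ret = -EINVAL;
                goto out;
        }

        mcode->group = cpt->next_group;
        mcode->core_mask = GENMASK(mcode->num_cores, 0);        /* assumed */

        /* Quiesce the cores, load the image, then attach and enable them */
        cpt_disable_cores(cpt, mcode->core_mask, type, mcode->group);
        ret = cpt_load_microcode(cpt, mcode);
        if (ret)
                goto out;

        cpt->next_group++;
        cpt_configure_group(cpt, mcode->group, mcode->core_mask, type);
        cpt_enable_cores(cpt, mcode->core_mask, type);

        cpt->flags |= CPT_FLAG_DEVICE_READY;
out:
        /* Mailbox interrupts come back up on both paths */
        cpt_enable_mbox_interrupts(cpt);
        return ret;
}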
251 static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
254 struct device *dev = &cpt->pdev->dev;
265 mcode = &cpt->mcode[cpt->next_mc_idx];
279 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
303 ret = do_cpt_init(cpt, mcode);
311 cpt->next_mc_idx++;
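cpt_ucode_load_fw() is the bridge from a firmware blob to a live engine group: fetch the image with request_firmware(), stage it in a DMA-coherent buffer, then hand the struct microcode slot to do_cpt_init() and advance next_mc_idx only on success. A simplified sketch; the is_ae and phys_base fields are assumptions, error unwinding is abbreviated, and the real driver presumably derives code_size from a microcode header rather than the raw blob size used here.

static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
{
        struct device *dev = &cpt->pdev->dev;
        const struct firmware *fw_entry;
        struct microcode *mcode;
        int ret;

        ret = request_firmware(&fw_entry, (const char *)fw, dev);
        if (ret)
                return ret;

        mcode = &cpt->mcode[cpt->next_mc_idx];
        mcode->is_ae = is_ae;                   /* assumed field */
        mcode->code_size = fw_entry->size;      /* simplified sizing */

        /* Engines DMA the microcode, so it must live in coherent memory */
        mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
                                         &mcode->phys_base, GFP_KERNEL);
        if (!mcode->code) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(mcode->code, fw_entry->data, mcode->code_size);

        /* Assign a group and bring the cores up on this image */
        ret = do_cpt_init(cpt, mcode);
        if (!ret)
                cpt->next_mc_idx++;
out:
        release_firmware(fw_entry);
        return ret;
}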
319 static int cpt_ucode_load(struct cpt_device *cpt)
322 struct device *dev = &cpt->pdev->dev;
324 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
329 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
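cpt_ucode_load() is just the pair of calls above with error reporting, loading one AE image and one SE image in sequence; roughly:

static int cpt_ucode_load(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        int ret;

        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
        if (ret) {
                dev_err(dev, "ae microcode load failed: %d\n", ret);
                return ret;
        }

        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
        if (ret)
                dev_err(dev, "se microcode load failed: %d\n", ret);

        return ret;
}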
340 struct cpt_device *cpt = (struct cpt_device *)cpt_irq;
342 cpt_mbox_intr_handler(cpt, 0);
347 static void cpt_reset(struct cpt_device *cpt)
349 cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
352 static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
356 pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
357 cpt->max_se_cores = pf_cnsts.s.se;
358 cpt->max_ae_cores = pf_cnsts.s.ae;
361 static u32 cpt_check_bist_status(struct cpt_device *cpt)
365 bist_sts.u = cpt_read_csr64(cpt->reg_base,
371 static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
375 bist_sts.u = cpt_read_csr64(cpt->reg_base,
381 static void cpt_disable_all_cores(struct cpt_device *cpt)
384 struct device *dev = &cpt->pdev->dev;
388 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
392 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
395 grp = cpt_read_csr64(cpt->reg_base,
403 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
411 static void cpt_unload_microcode(struct cpt_device *cpt)
417 struct microcode *mcode = &cpt->mcode[grp];
419 if (cpt->mcode[grp].code)
420 dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
426 cpt_write_csr64(cpt->reg_base,
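cpt_unload_microcode() is the inverse of the load path: free each group's coherent buffer, then detach every engine from its image. The CPTX_PF_ENGX_UCODE_BASE macro, the phys_base field and the total-core bound are assumptions kept consistent with the load sketch above.

static void cpt_unload_microcode(struct cpt_device *cpt)
{
        u32 total_cores = cpt->max_se_cores + cpt->max_ae_cores;
        int grp, core;

        /* Release the per-group microcode images */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                struct microcode *mcode = &cpt->mcode[grp];

                if (cpt->mcode[grp].code)
                        dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                                          mcode->code, mcode->phys_base);
                mcode->code = NULL;
        }

        /* Detach all engines from any microcode image */
        for (core = 0; core < total_cores; core++)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}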
430 static int cpt_device_init(struct cpt_device *cpt)
433 struct device *dev = &cpt->pdev->dev;
436 cpt_reset(cpt);
440 bist = (u64)cpt_check_bist_status(cpt);
446 bist = cpt_check_exe_bist_status(cpt);
454 cpt_find_max_enabled_cores(cpt);
456 cpt_disable_all_cores(cpt);
458 cpt->next_mc_idx = 0;
459 cpt->next_group = 0;
461 cpt->flags |= CPT_FLAG_DEVICE_READY;
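Taken in order, the cpt_device_init() fragments give the bring-up recipe: reset the block, check both BIST results, discover how many SE/AE cores are present, park all cores, and reset the bookkeeping before declaring the PF ready. A sketch, with the post-reset delay and the -ENODEV error code assumed:

static int cpt_device_init(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        u64 bist;

        cpt_reset(cpt);
        msleep(100);                            /* assumed settle time */

        bist = (u64)cpt_check_bist_status(cpt);
        if (bist) {
                dev_err(dev, "RAM BIST failed: 0x%llx\n", bist);
                return -ENODEV;
        }

        bist = cpt_check_exe_bist_status(cpt);
        if (bist) {
                dev_err(dev, "Engine BIST failed: 0x%llx\n", bist);
                return -ENODEV;
        }

        cpt_find_max_enabled_cores(cpt);        /* fills max_se/ae_cores */
        cpt_disable_all_cores(cpt);

        /* Fresh bookkeeping: no microcode slots or groups in use yet */
        cpt->next_mc_idx = 0;
        cpt->next_group = 0;
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return 0;
}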
466 static int cpt_register_interrupts(struct cpt_device *cpt)
469 struct device *dev = &cpt->pdev->dev;
472 ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
475 dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n",
481 ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
482 cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
487 cpt_enable_mbox_interrupts(cpt);
492 pci_disable_msix(cpt->pdev);
496 static void cpt_unregister_interrupts(struct cpt_device *cpt)
498 free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
499 pci_disable_msix(cpt->pdev);
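Interrupt setup claims the PF's full MSI-X block and only then wires up mailbox vector 0; teardown mirrors it with free_irq() plus pci_disable_msix(). A sketch of the registration side is below. The error handling is condensed, and the pci_disable_msix() call is kept as the fragments show it, although pci_free_irq_vectors() is the usual partner to pci_alloc_irq_vectors().

static int cpt_register_interrupts(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        int ret;

        /* All-or-nothing request for the PF's MSI-X vectors */
        ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
                                    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(dev, "Request for #%d msix vectors failed\n",
                        CPT_PF_MSIX_VECTORS);
                return ret;
        }

        /* Mailbox 0: VF-to-PF requests land here */
        ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
                          cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret) {
                dev_err(dev, "Request irq failed\n");
                pci_disable_msix(cpt->pdev);
                return ret;
        }

        cpt_enable_mbox_interrupts(cpt);
        return 0;
}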
502 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
507 struct pci_dev *pdev = cpt->pdev;
515 cpt->num_vf_en = num_vfs; /* User requested VFs */
517 if (total_vf_cnt < cpt->num_vf_en)
518 cpt->num_vf_en = total_vf_cnt;
524 err = pci_enable_sriov(pdev, cpt->num_vf_en);
527 cpt->num_vf_en);
528 cpt->num_vf_en = 0;
535 cpt->num_vf_en);
537 cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
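cpt_sriov_init() clamps the requested VF count to what the SR-IOV capability advertises, enables SR-IOV, and records the result in num_vf_en and the flags. In the sketch below, the clamp, the pci_enable_sriov() call and the flag come from the fragments; reading total_vf_cnt via pci_find_ext_capability()/pci_read_config_word() is an assumption about code the listing does not show.

static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
        struct pci_dev *pdev = cpt->pdev;
        u16 total_vf_cnt;
        int pos, err;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return -ENODEV;

        cpt->num_vf_en = num_vfs;       /* user requested VFs */
        pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf_cnt);
        if (total_vf_cnt < cpt->num_vf_en)
                cpt->num_vf_en = total_vf_cnt;
        if (!total_vf_cnt)
                return 0;

        err = pci_enable_sriov(pdev, cpt->num_vf_en);
        if (err) {
                dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
                        cpt->num_vf_en);
                cpt->num_vf_en = 0;
                return err;
        }

        dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
                 cpt->num_vf_en);
        cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
        return 0;
}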
545 struct cpt_device *cpt;
554 cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
555 if (!cpt)
558 pci_set_drvdata(pdev, cpt);
559 cpt->pdev = pdev;
580 cpt->reg_base = pcim_iomap(pdev, 0, 0);
581 if (!cpt->reg_base) {
588 cpt_device_init(cpt);
591 err = cpt_register_interrupts(cpt);
595 err = cpt_ucode_load(cpt);
600 err = cpt_sriov_init(cpt, num_vfs);
607 cpt_unregister_interrupts(cpt);
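The probe fragments trace the bring-up order: allocate and attach the cpt_device, map BAR 0, initialise the hardware, register interrupts, load microcode, then enable SR-IOV, unregistering the interrupts if a later step fails. A condensed sketch, with the function sketched here as cpt_probe() and the PCI enable/region/DMA-mask handling simplified to assumptions:

static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_device *cpt;
        int err;

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;

        err = pcim_enable_device(pdev);         /* assumed managed enable */
        if (err)
                return err;

        /* BAR 0 holds the PF CSRs used throughout this file */
        cpt->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cpt->reg_base)
                return -ENOMEM;

        cpt_device_init(cpt);

        err = cpt_register_interrupts(cpt);
        if (err)
                return err;

        err = cpt_ucode_load(cpt);
        if (err)
                goto err_unregister;

        err = cpt_sriov_init(cpt, num_vfs);
        if (err)
                goto err_unregister;

        return 0;

err_unregister:
        cpt_unregister_interrupts(cpt);
        return err;
}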
618 struct cpt_device *cpt = pci_get_drvdata(pdev);
621 cpt_disable_all_cores(cpt);
623 cpt_unload_microcode(cpt);
624 cpt_unregister_interrupts(cpt);
633 struct cpt_device *cpt = pci_get_drvdata(pdev);
635 if (!cpt)
641 cpt_unregister_interrupts(cpt);
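The last fragments belong to the driver's remove and shutdown paths: remove undoes probe in reverse (park the cores, free the microcode, drop the interrupts), while shutdown only quiesces interrupts after checking that probe attached anything. The function names and the pci_disable_sriov() call are inferred, not shown in the listing:

static void cpt_remove(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        cpt_disable_all_cores(cpt);     /* disengage SE and AE cores */
        cpt_unload_microcode(cpt);
        cpt_unregister_interrupts(cpt);
        pci_disable_sriov(pdev);        /* assumed: undo cpt_sriov_init() */
}

static void cpt_shutdown(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        if (!cpt)
                return;

        cpt_unregister_interrupts(cpt);
}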