Lines Matching defs:cpt (occurrences of the cpt device handle in the thunder-cpt physical-function driver, DRV_NAME "thunder-cpt"; each entry is the source line number followed by the matching line)
17 #define DRV_NAME "thunder-cpt"
27 static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
33 struct device *dev = &cpt->pdev->dev;
36 coremask = (coremask << cpt->max_se_cores);
39 grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
40 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
43 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
46 grp = cpt_read_csr64(cpt->reg_base,
55 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
56 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
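The matches at lines 27-56 are the core-disable path: when AE cores are targeted the mask is shifted past the SE cores, the cores are detached from their engine group in CPTX_PF_GX_EN, CPTX_PF_EXEC_BUSY is polled until they go idle, and only then are the bits cleared from CPTX_PF_EXE_CTL. A minimal sketch of that pattern follows; it assumes struct cpt_device, the cpt_read_csr64()/cpt_write_csr64() accessors and the CPTX_PF_* macros from the driver's own headers, and the poll bound is illustrative rather than taken from the source.

/* Sketch only: AE_TYPES, struct cpt_device and the register helpers are
 * assumed to come from the driver's local headers; <linux/delay.h> for udelay().
 */
static void sketch_disable_cores(struct cpt_device *cpt, u64 coremask,
                                 u8 type, u8 grp)
{
        u64 en, busy;
        int timeout = 100;                      /* illustrative poll bound */

        if (type == AE_TYPES)                   /* AE bits sit above the SE bits */
                coremask <<= cpt->max_se_cores;

        /* Detach the cores from engine group 'grp'. */
        en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), en & ~coremask);

        /* Let in-flight work drain before the cores are switched off. */
        do {
                busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
                udelay(100);
        } while ((busy & coremask) && --timeout);

        /* Clear the enable bits for the now-idle cores. */
        en = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), en & ~coremask);
}

Polling EXEC_BUSY before touching EXE_CTL matters: clearing the enable bits while an engine still has work in flight could leave a request half-processed.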
64 static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
70 coremask = (coremask << cpt->max_se_cores);
72 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
73 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
78 static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
84 coremask = (coremask << cpt->max_se_cores);
86 pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
87 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
92 static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
95 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
98 static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
101 cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
104 static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
107 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
110 static void cpt_disable_all_interrupts(struct cpt_device *cpt)
112 cpt_disable_mbox_interrupts(cpt);
113 cpt_disable_ecc_interrupts(cpt);
114 cpt_disable_exec_interrupts(cpt);
117 static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
120 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
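The small interrupt helpers at lines 92-120 rely on the hardware's paired write-1-to-clear / write-1-to-set enable registers: writing ~0ull to an ENA_W1C register masks every source and writing ~0ull to the matching ENA_W1S register unmasks them, so no read-modify-write is needed. A short sketch under the same header assumptions as above:

/* Mask or unmask the PF mailbox interrupt sources (sketch). */
static void sketch_mbox_intr_mask(struct cpt_device *cpt, bool enable)
{
        if (enable)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
        else
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

The W1C/W1S split lets the driver change one mask bit (or all of them) without racing against concurrent updates to the same register.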
123 static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
127 struct device *dev = &cpt->pdev->dev;
153 cpt_write_csr64(cpt->reg_base,
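Lines 123-153 belong to the microcode load helper, but only its final CSR write shows up in the listing, so the outline below is assumption-heavy: it programs the DMA address of an already-allocated microcode buffer into a per-engine ucode base register, walking every engine for simplicity. The CPTX_PF_ENGX_UCODE_BASE macro and the phys_base field are assumptions, not visible in the matches.

/* Sketch: point each engine at the microcode image.  The register macro
 * name and mcode->phys_base are assumptions here.
 */
static int sketch_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
        int core;

        if (!mcode || !mcode->code)
                return -EINVAL;

        for (core = 0; core < cpt->max_se_cores + cpt->max_ae_cores; core++)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_ENGX_UCODE_BASE(0, core),
                                (u64)mcode->phys_base);
        return 0;
}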
161 static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
164 struct device *dev = &cpt->pdev->dev;
167 cpt->flags &= ~CPT_FLAG_DEVICE_READY;
169 cpt_disable_all_interrupts(cpt);
172 if (mcode->num_cores > cpt->max_ae_cores) {
178 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
183 mcode->group = cpt->next_group;
186 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
189 ret = cpt_load_microcode(cpt, mcode);
195 cpt->next_group++;
197 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
200 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
202 if (mcode->num_cores > cpt->max_se_cores) {
207 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
212 mcode->group = cpt->next_group;
215 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
218 ret = cpt_load_microcode(cpt, mcode);
224 cpt->next_group++;
226 cpt_configure_group(cpt, mcode->group, mcode->core_mask,
229 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
233 cpt_enable_mbox_interrupts(cpt);
234 cpt->flags |= CPT_FLAG_DEVICE_READY;
240 cpt_enable_mbox_interrupts(cpt);
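do_cpt_init() (lines 161-240) ties the helpers above together. Judging from the matches, each image goes through the same sequence: clear CPT_FLAG_DEVICE_READY, mask all interrupts, check the requested core count against max_ae_cores/max_se_cores, claim the next free engine group, take the cores offline, load the microcode, attach the cores to the group and re-enable them, then unmask the mailbox interrupt and set the ready flag again. A compressed sketch of the SE-side branch, with error handling trimmed and the same header assumptions:

static int sketch_init_se_group(struct cpt_device *cpt, struct microcode *mcode)
{
        int ret;

        cpt->flags &= ~CPT_FLAG_DEVICE_READY;
        cpt_disable_all_interrupts(cpt);

        if (mcode->num_cores > cpt->max_se_cores)
                return -EINVAL;         /* more cores requested than exist */
        if (cpt->next_group >= CPT_MAX_CORE_GROUPS)
                return -ENOSPC;         /* all engine groups already claimed */

        mcode->group = cpt->next_group;
        /* Take the cores offline, point them at the new image, bring them back. */
        cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, mcode->group);
        ret = cpt_load_microcode(cpt, mcode);
        if (ret)
                return ret;
        cpt->next_group++;
        cpt_configure_group(cpt, mcode->group, mcode->core_mask, SE_TYPES);
        cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);

        cpt_enable_mbox_interrupts(cpt);
        cpt->flags |= CPT_FLAG_DEVICE_READY;
        return 0;
}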
252 static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
255 struct device *dev = &cpt->pdev->dev;
266 mcode = &cpt->mcode[cpt->next_mc_idx];
280 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
304 ret = do_cpt_init(cpt, mcode);
312 cpt->next_mc_idx++;
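cpt_ucode_load_fw() (lines 252-312) takes a firmware file name and an AE/SE flag, fills in the next free slot of cpt->mcode[], allocates a coherent DMA buffer for the code, and hands the result to do_cpt_init(), bumping next_mc_idx on success. The trimmed sketch below assumes request_firmware() is used to fetch the image and skips the header parsing; treating the whole file as code is a simplification, and the is_ae/phys_base fields are assumed.

#include <linux/firmware.h>
#include <linux/dma-mapping.h>

static int sketch_ucode_load_fw(struct cpt_device *cpt, const char *name, bool is_ae)
{
        struct device *dev = &cpt->pdev->dev;
        struct microcode *mcode = &cpt->mcode[cpt->next_mc_idx];
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, name, dev);
        if (ret)
                return ret;

        mcode->is_ae = is_ae;                   /* field assumed */
        mcode->code_size = fw->size;            /* simplification: real image has a header */
        mcode->code = dma_alloc_coherent(dev, mcode->code_size,
                                         &mcode->phys_base, GFP_KERNEL);
        if (!mcode->code) {
                ret = -ENOMEM;
                goto out;
        }
        /* Copy the payload into the DMA buffer, then bring the cores up on it. */
        memcpy(mcode->code, fw->data, fw->size);
        ret = do_cpt_init(cpt, mcode);
        if (!ret)
                cpt->next_mc_idx++;
out:
        release_firmware(fw);
        return ret;
}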
320 static int cpt_ucode_load(struct cpt_device *cpt)
323 struct device *dev = &cpt->pdev->dev;
325 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
330 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
341 struct cpt_device *cpt = (struct cpt_device *)cpt_irq;
343 cpt_mbox_intr_handler(cpt, 0);
348 static void cpt_reset(struct cpt_device *cpt)
350 cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
353 static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
357 pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
358 cpt->max_se_cores = pf_cnsts.s.se;
359 cpt->max_ae_cores = pf_cnsts.s.ae;
362 static u32 cpt_check_bist_status(struct cpt_device *cpt)
366 bist_sts.u = cpt_read_csr64(cpt->reg_base,
372 static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
376 bist_sts.u = cpt_read_csr64(cpt->reg_base,
382 static void cpt_disable_all_cores(struct cpt_device *cpt)
385 struct device *dev = &cpt->pdev->dev;
389 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
393 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
396 grp = cpt_read_csr64(cpt->reg_base,
404 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
412 static void cpt_unload_microcode(struct cpt_device *cpt)
418 struct microcode *mcode = &cpt->mcode[grp];
420 if (cpt->mcode[grp].code)
421 dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
427 cpt_write_csr64(cpt->reg_base,
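Teardown at lines 412-427 mirrors the load path: each group's coherent buffer is freed with dma_free_coherent() and the per-engine ucode base registers are cleared. A sketch with the same assumed register macro as before:

static void sketch_unload_microcode(struct cpt_device *cpt)
{
        int grp, core;

        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                struct microcode *mcode = &cpt->mcode[grp];

                if (mcode->code)
                        dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                                          mcode->code, mcode->phys_base);
                mcode->code = NULL;
        }
        /* Drop the ucode base for every engine (macro name assumed). */
        for (core = 0; core < cpt->max_se_cores + cpt->max_ae_cores; core++)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}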
431 static int cpt_device_init(struct cpt_device *cpt)
434 struct device *dev = &cpt->pdev->dev;
437 cpt_reset(cpt);
441 bist = (u64)cpt_check_bist_status(cpt);
447 bist = cpt_check_exe_bist_status(cpt);
455 cpt_find_max_enabled_cores(cpt);
457 cpt_disable_all_cores(cpt);
459 cpt->next_mc_idx = 0;
460 cpt->next_group = 0;
462 cpt->flags |= CPT_FLAG_DEVICE_READY;
467 static int cpt_register_interrupts(struct cpt_device *cpt)
470 struct device *dev = &cpt->pdev->dev;
473 ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
476 dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n",
482 ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
483 cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
488 cpt_enable_mbox_interrupts(cpt);
493 pci_disable_msix(cpt->pdev);
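Lines 467-493 cover PF interrupt setup: allocate the MSI-X vectors with pci_alloc_irq_vectors(), attach the mailbox handler to vector CPT_PF_INT_VEC_E_MBOXX(0) via request_irq(), and unmask the mailbox interrupt; the error path at line 493 tears the vectors down again. A condensed sketch, with the min/max vector counts and the PCI_IRQ_MSIX flag filled in as assumptions since the listing truncates that call:

static int sketch_register_interrupts(struct cpt_device *cpt)
{
        int ret;

        ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
                                    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (ret < 0)
                return ret;

        ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
                          cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret)
                goto fail;

        cpt_enable_mbox_interrupts(cpt);
        return 0;

fail:
        /* As in the listing; pci_free_irq_vectors() is the usual pair for
         * pci_alloc_irq_vectors().
         */
        pci_disable_msix(cpt->pdev);
        return ret;
}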
497 static void cpt_unregister_interrupts(struct cpt_device *cpt)
499 free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
500 pci_disable_msix(cpt->pdev);
503 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
508 struct pci_dev *pdev = cpt->pdev;
516 cpt->num_vf_en = num_vfs; /* User requested VFs */
518 if (total_vf_cnt < cpt->num_vf_en)
519 cpt->num_vf_en = total_vf_cnt;
525 err = pci_enable_sriov(pdev, cpt->num_vf_en);
528 cpt->num_vf_en);
529 cpt->num_vf_en = 0;
536 cpt->num_vf_en);
538 cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
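cpt_sriov_init() (lines 503-538) caps the requested VF count at what the SR-IOV capability advertises before calling pci_enable_sriov(), records it in cpt->num_vf_en, and sets CPT_FLAG_SRIOV_ENABLED on success. The listing only shows the comparison, so the sketch below uses pci_sriov_get_totalvfs() to obtain the limit; how the driver itself reads total_vf_cnt is not visible here.

static int sketch_sriov_init(struct cpt_device *cpt, int num_vfs)
{
        struct pci_dev *pdev = cpt->pdev;
        int total_vf_cnt = pci_sriov_get_totalvfs(pdev);
        int err;

        if (!total_vf_cnt)
                return -EINVAL;         /* device advertises no VFs */

        /* Never ask for more VFs than the capability allows. */
        cpt->num_vf_en = min(num_vfs, total_vf_cnt);

        err = pci_enable_sriov(pdev, cpt->num_vf_en);
        if (err) {
                cpt->num_vf_en = 0;
                return err;
        }

        cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
        return 0;
}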
546 struct cpt_device *cpt;
555 cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
556 if (!cpt)
559 pci_set_drvdata(pdev, cpt);
560 cpt->pdev = pdev;
587 cpt->reg_base = pcim_iomap(pdev, 0, 0);
588 if (!cpt->reg_base) {
595 cpt_device_init(cpt);
598 err = cpt_register_interrupts(cpt);
602 err = cpt_ucode_load(cpt);
607 err = cpt_sriov_init(cpt, num_vfs);
614 cpt_unregister_interrupts(cpt);
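The probe path (lines 546-614) strings everything together: allocate the per-device state, map BAR 0, initialize the hardware, register the mailbox interrupt, load the two microcode images, then enable SR-IOV, unwinding through cpt_unregister_interrupts() (line 614) if a later step fails. A condensed sketch; the pcim_enable_device() call and the num_vfs module parameter are assumptions that the listed calls only imply:

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_device *cpt;
        int err;

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;

        err = pcim_enable_device(pdev);         /* assumed; not visible in the listing */
        if (err)
                return err;

        cpt->reg_base = pcim_iomap(pdev, 0, 0); /* BAR 0 holds the PF CSRs */
        if (!cpt->reg_base)
                return -ENOMEM;

        cpt_device_init(cpt);

        err = cpt_register_interrupts(cpt);
        if (err)
                return err;

        err = cpt_ucode_load(cpt);
        if (err)
                goto unregister;

        err = cpt_sriov_init(cpt, num_vfs);     /* num_vfs: presumably a module parameter */
        if (err)
                goto unregister;

        return 0;

unregister:
        cpt_unregister_interrupts(cpt);
        return err;
}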
625 struct cpt_device *cpt = pci_get_drvdata(pdev);
628 cpt_disable_all_cores(cpt);
630 cpt_unload_microcode(cpt);
631 cpt_unregister_interrupts(cpt);
640 struct cpt_device *cpt = pci_get_drvdata(pdev);
642 if (!cpt)
648 cpt_unregister_interrupts(cpt);