Lines Matching defs:lfs
16 done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
19 otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
27 done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
30 otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
34 static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
39 for (slot = 0; slot < lfs->lfs_num; slot++)
40 cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
43 static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
47 for (slot = 0; slot < lfs->lfs_num; slot++)
48 cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
53 struct otx2_cptlfs_info *lfs = lf->lfs;
57 ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
59 &lf_ctrl.u, lfs->blkaddr);
65 ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
67 lf_ctrl.u, lfs->blkaddr);
74 struct otx2_cptlfs_info *lfs = lf->lfs;
78 ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
80 &lf_ctrl.u, lfs->blkaddr);
86 ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
88 lf_ctrl.u, lfs->blkaddr);
92 static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
97 for (slot = 0; slot < lfs->lfs_num; slot++) {
98 ret = cptlf_set_pri(&lfs->lf[slot], pri);
102 ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
109 static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
112 otx2_cptlf_disable_iqueues(lfs);
115 otx2_cptlf_set_iqueues_base_addr(lfs);
118 otx2_cptlf_set_iqueues_size(lfs);
121 cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
124 cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
127 otx2_cptlf_enable_iqueues(lfs);
130 static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
133 otx2_cptlf_disable_iqueues(lfs);
136 static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
149 for (slot = 0; slot < lfs->lfs_num; slot++)
150 otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
154 static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
159 for (slot = 0; slot < lfs->lfs_num; slot++)
160 otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
163 cptlf_set_misc_intrs(lfs, true);
166 static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
170 for (slot = 0; slot < lfs->lfs_num; slot++)
171 otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
173 cptlf_set_misc_intrs(lfs, false);
180 irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
191 dev = &lf->lfs->pdev->dev;
192 irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
225 otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
240 done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
243 otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
246 otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
249 dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
260 void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
264 for (i = 0; i < lfs->lfs_num; i++) {
266 if (!lfs->lf[i].is_irq_reg[offs])
269 vector = pci_irq_vector(lfs->pdev,
270 lfs->lf[i].msix_offset + offs);
271 free_irq(vector, &lfs->lf[i]);
272 lfs->lf[i].is_irq_reg[offs] = false;
275 cptlf_disable_intrs(lfs);
280 static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
286 vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
289 lfs->lf[lf_num].irq_name[irq_offset],
290 &lfs->lf[lf_num]);
294 lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
299 int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
303 for (i = 0; i < lfs->lfs_num; i++) {
305 snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
306 ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
312 snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
314 ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
319 cptlf_enable_intrs(lfs);
323 otx2_cptlf_unregister_interrupts(lfs);
328 void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
332 for (slot = 0; slot < lfs->lfs_num; slot++) {
334 irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
335 lfs->lf[slot].msix_offset +
337 free_cpumask_var(lfs->lf[slot].affinity_mask);
342 int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
344 struct otx2_cptlf_info *lf = lfs->lf;
347 for (slot = 0; slot < lfs->lfs_num; slot++) {
349 dev_err(&lfs->pdev->dev,
356 dev_to_node(&lfs->pdev->dev)),
360 ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
370 otx2_cptlf_free_irqs_affinity(lfs);
375 int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
380 if (!lfs->pdev || !lfs->reg_base)
383 lfs->lfs_num = lfs_num;
384 for (slot = 0; slot < lfs->lfs_num; slot++) {
385 lfs->lf[slot].lfs = lfs;
386 lfs->lf[slot].slot = slot;
387 if (lfs->lmt_base)
388 lfs->lf[slot].lmtline = lfs->lmt_base +
391 lfs->lf[slot].lmtline = lfs->reg_base +
395 lfs->lf[slot].ioreg = lfs->reg_base +
396 OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
400 ret = otx2_cpt_attach_rscrs_msg(lfs);
404 ret = otx2_cpt_alloc_instruction_queues(lfs);
406 dev_err(&lfs->pdev->dev,
410 cptlf_hw_init(lfs);
415 ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
422 cptlf_hw_cleanup(lfs);
423 otx2_cpt_free_instruction_queues(lfs);
425 otx2_cpt_detach_rsrcs_msg(lfs);
427 lfs->lfs_num = 0;
432 void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
435 cptlf_hw_cleanup(lfs);
437 otx2_cpt_free_instruction_queues(lfs);
439 otx2_cpt_detach_rsrcs_msg(lfs);
440 lfs->lfs_num = 0;
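
The init/teardown matches above (around otx2_cptlf_init and otx2_cptlf_shutdown) follow an attach -> allocate instruction queues -> hw init -> set group/priority order, with the error path unwinding those steps in reverse and clearing lfs_num. Below is a minimal, self-contained sketch of that goto-based unwind pattern; every type and helper in it (demo_lfs, demo_attach, demo_alloc_queues, and so on) is a stand-in invented for illustration, not the driver's actual API.

/*
 * Illustrative sketch only: mirrors the setup order and reverse-order
 * unwinding visible in the otx2_cptlf_init matches above.  All names here
 * are hypothetical stand-ins, not driver symbols.
 */
#include <stdio.h>

struct demo_lfs {
	int lfs_num;	/* number of LF slots in use */
};

/* Stub steps: return 0 on success, negative on failure. */
static int demo_attach(struct demo_lfs *lfs)       { (void)lfs; return 0; }
static int demo_alloc_queues(struct demo_lfs *lfs) { (void)lfs; return 0; }
static void demo_hw_init(struct demo_lfs *lfs)     { (void)lfs; }
static int demo_set_grp_pri(struct demo_lfs *lfs)  { (void)lfs; return -1; } /* force the unwind path */
static void demo_hw_cleanup(struct demo_lfs *lfs)  { (void)lfs; }
static void demo_free_queues(struct demo_lfs *lfs) { (void)lfs; }
static void demo_detach(struct demo_lfs *lfs)      { (void)lfs; }

static int demo_lf_init(struct demo_lfs *lfs, int lfs_num)
{
	int slot, ret;

	lfs->lfs_num = lfs_num;
	/* Per-slot setup loop, mirroring "for (slot = 0; slot < lfs->lfs_num; slot++)". */
	for (slot = 0; slot < lfs->lfs_num; slot++)
		printf("configure LF slot %d\n", slot);

	ret = demo_attach(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = demo_alloc_queues(lfs);
	if (ret)
		goto detach;

	demo_hw_init(lfs);

	ret = demo_set_grp_pri(lfs);
	if (ret)
		goto free_iq;

	return 0;

free_iq:
	/* Unwind in the reverse order of setup. */
	demo_hw_cleanup(lfs);
	demo_free_queues(lfs);
detach:
	demo_detach(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}

int main(void)
{
	struct demo_lfs lfs = { 0 };

	if (demo_lf_init(&lfs, 2))
		fprintf(stderr, "init failed, resources unwound\n");
	return 0;
}

The goto-based unwind is the idiomatic kernel style for multi-step setup: each failure label releases only what was acquired before it, which is the same structure the shutdown path (hw cleanup, free instruction queues, detach resources, lfs_num = 0) reuses in full.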