Lines Matching defs:cptvf
(Each entry below is the line number in the thunder-cptvf driver source, followed by the matching source line.)

9 #include "cptvf.h"
11 #define DRV_NAME "thunder-cptvf"
16 void *cptvf;
29 vq_post_process(cwqe->cptvf, cwqe->qno);
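The fragments at source lines 16 and 29 belong to the per-queue work bookkeeping: each virtual queue gets a cptvf_wqe whose tasklet drains completions via vq_post_process(). A minimal sketch of the surrounding definitions, reconstructed from these fragments (the field order and the vq_work_handler name are assumptions):

struct cptvf_wqe {
        struct tasklet_struct twork;
        void *cptvf;    /* back-pointer to the owning struct cpt_vf */
        u32 qno;        /* queue this work item drains */
};

struct cptvf_wqe_info {
        struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

/* Tasklet body: post-process completions for the recorded queue. */
static void vq_work_handler(unsigned long data)
{
        struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
        struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

        vq_post_process(cwqe->cptvf, cwqe->qno);
}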
32 static int init_worker_threads(struct cpt_vf *cptvf)
34 struct pci_dev *pdev = cptvf->pdev;
42 if (cptvf->nr_queues) {
44 cptvf->nr_queues);
47 for (i = 0; i < cptvf->nr_queues; i++) {
51 cwqe_info->vq_wqe[i].cptvf = cptvf;
54 cptvf->wqe_info = cwqe_info;
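Filling the gaps between source lines 32 and 54, init_worker_threads() plausibly allocates one cptvf_wqe_info and wires a tasklet per queue back to the VF; a hedged sketch (the log text and the exact tasklet_init arguments are assumptions):

static int init_worker_threads(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct cptvf_wqe_info *cwqe_info;
        int i;

        cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
        if (!cwqe_info)
                return -ENOMEM;

        if (cptvf->nr_queues)
                dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
                         cptvf->nr_queues);

        for (i = 0; i < cptvf->nr_queues; i++) {
                tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
                             (unsigned long)cwqe_info);
                cwqe_info->vq_wqe[i].qno = i;
                cwqe_info->vq_wqe[i].cptvf = cptvf;
        }

        cptvf->wqe_info = cwqe_info;

        return 0;
}

cleanup_worker_threads() (source lines 59-78) undoes this: presumably a tasklet_kill() per entry, then the info structure is freed and cptvf->wqe_info cleared.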
59 static void cleanup_worker_threads(struct cpt_vf *cptvf)
62 struct pci_dev *pdev = cptvf->pdev;
65 cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
69 if (cptvf->nr_queues) {
71 cptvf->nr_queues);
74 for (i = 0; i < cptvf->nr_queues; i++)
78 cptvf->wqe_info = NULL;
136 static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
138 struct pci_dev *pdev = cptvf->pdev;
144 ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
154 static void cleanup_pending_queues(struct cpt_vf *cptvf)
156 struct pci_dev *pdev = cptvf->pdev;
158 if (!cptvf->nr_queues)
162 cptvf->nr_queues);
163 free_pending_queues(&cptvf->pqinfo);
166 static void free_command_queues(struct cpt_vf *cptvf,
172 struct pci_dev *pdev = cptvf->pdev;
176 for (i = 0; i < cptvf->nr_queues; i++) {
200 static int alloc_command_queues(struct cpt_vf *cptvf,
207 struct pci_dev *pdev = cptvf->pdev;
212 cptvf->qsize = min(qlen, cqinfo->qchunksize) *
218 for (i = 0; i < cptvf->nr_queues; i++) {
272 free_command_queues(cptvf, cqinfo);
276 static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
278 struct pci_dev *pdev = cptvf->pdev;
282 ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
286 cptvf->nr_queues);
293 static void cleanup_command_queues(struct cpt_vf *cptvf)
295 struct pci_dev *pdev = cptvf->pdev;
297 if (!cptvf->nr_queues)
301 cptvf->nr_queues);
302 free_command_queues(cptvf, &cptvf->cqinfo);
305 static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
307 cleanup_worker_threads(cptvf);
308 cleanup_pending_queues(cptvf);
309 cleanup_command_queues(cptvf);
312 static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
314 struct pci_dev *pdev = cptvf->pdev;
321 cptvf->nr_queues = nr_queues;
323 ret = init_command_queues(cptvf, qlen);
330 ret = init_pending_queues(cptvf, qlen, nr_queues);
338 ret = init_worker_threads(cptvf);
347 cleanup_worker_threads(cptvf);
348 cleanup_pending_queues(cptvf);
351 cleanup_command_queues(cptvf);
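Source lines 305-351 show cptvf_sw_cleanup() and cptvf_sw_init(): command queues, then pending queues, then worker threads, torn down in reverse order when a later step fails. A sketch of that skeleton (the goto label names are assumptions):

static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
        struct pci_dev *pdev = cptvf->pdev;
        int ret;

        cptvf->nr_queues = nr_queues;

        ret = init_command_queues(cptvf, qlen);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup command queues (%u)\n", qlen);
                return ret;
        }

        ret = init_pending_queues(cptvf, qlen, nr_queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n", qlen);
                goto setup_pqfail;
        }

        ret = init_worker_threads(cptvf);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup worker threads\n");
                goto init_work_fail;
        }

        return 0;

init_work_fail:
        cleanup_worker_threads(cptvf);
        cleanup_pending_queues(cptvf);

setup_pqfail:
        cleanup_command_queues(cptvf);

        return ret;
}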
356 static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
358 irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
359 free_cpumask_var(cptvf->affinity_mask[vec]);
362 static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
366 vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
368 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
371 void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
375 vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
378 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
382 static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
386 vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
388 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
391 static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
395 vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
398 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
402 static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
406 vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
409 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
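Every cptvf_write_vq_* helper above (source lines 362-409) is the same read-modify-write against a 64-bit VF CSR through cpt_read_csr64()/cpt_write_csr64(). The doorbell variant as an example of the pattern (the union type, the dbell_cnt field name and the eight-words-per-instruction scaling are assumptions):

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
        union cptx_vqx_doorbell vqx_dbell;

        vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
                                     CPTX_VQX_DOORBELL(0, 0));
        vqx_dbell.s.dbell_cnt = val * 8; /* each instruction is 8 x 64-bit words */
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
                        vqx_dbell.u);
}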
413 static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
417 vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
421 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
425 static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
429 vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
433 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
437 static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
441 vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
445 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
449 static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
453 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
457 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
461 static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
465 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
469 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
473 static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
477 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
481 cpt_write_csr64(cptvf->reg_base,
485 static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
489 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
493 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
497 static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
501 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
505 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
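The enable helpers (source lines 413-445) set bits in write-one-to-set (W1S) enable registers, and the clear helpers (449-505) write the matching bits back to CPTX_VQX_MISC_INT to acknowledge them. Mailbox enable as an example of the pattern (the union and field names are assumptions):

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
        union cptx_vqx_misc_ena_w1s vqx_misc_ena;

        vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
                                        CPTX_VQX_MISC_ENA_W1S(0, 0));
        vqx_misc_ena.s.mbox = 1;        /* unmask the PF->VF mailbox interrupt */
        cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
                        vqx_misc_ena.u);
}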
509 static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
511 return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
516 struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
517 struct pci_dev *pdev = cptvf->pdev;
520 intr = cptvf_read_vf_misc_intr_status(cptvf);
524 intr, cptvf->vfid);
525 cptvf_handle_mbox_intr(cptvf);
526 cptvf_clear_mbox_intr(cptvf);
528 cptvf_clear_dovf_intr(cptvf);
530 cptvf_write_vq_doorbell(cptvf, 0);
532 intr, cptvf->vfid);
534 cptvf_clear_irde_intr(cptvf);
536 intr, cptvf->vfid);
538 cptvf_clear_nwrp_intr(cptvf);
540 intr, cptvf->vfid);
542 cptvf_clear_swerr_intr(cptvf);
544 intr, cptvf->vfid);
547 cptvf->vfid);
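Source lines 516-547 come from the MISC interrupt handler: it reads the status (line 520) and dispatches on the pending bit, mailbox first, then the error conditions, each acknowledged through the clear helpers above. An abridged sketch (the handler name, the CPT_VF_INTR_*_MASK macro names and the message text are assumptions):

static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        u64 intr = cptvf_read_vf_misc_intr_status(cptvf);

        if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
                cptvf_handle_mbox_intr(cptvf);          /* PF -> VF message */
                cptvf_clear_mbox_intr(cptvf);
        } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
                cptvf_clear_dovf_intr(cptvf);
                cptvf_write_vq_doorbell(cptvf, 0);      /* reset the overflowed count */
                dev_err(&pdev->dev, "Doorbell overflow 0x%llx on CPT VF %d\n",
                        intr, cptvf->vfid);
        } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
                cptvf_clear_irde_intr(cptvf);           /* instruction read error */
        } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
                cptvf_clear_nwrp_intr(cptvf);           /* write response error */
        } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
                cptvf_clear_swerr_intr(cptvf);          /* software error */
        } else {
                dev_err(&pdev->dev, "Unhandled interrupt on CPT VF %d\n",
                        cptvf->vfid);
        }

        return IRQ_HANDLED;
}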
553 static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
558 if (unlikely(qno >= cptvf->nr_queues))
560 nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
565 static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
569 vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
573 static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
578 vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
581 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
587 struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
588 struct pci_dev *pdev = cptvf->pdev;
590 u32 intr = cptvf_read_vq_done_count(cptvf);
598 cptvf_write_vq_done_ack(cptvf, intr);
599 wqe = get_cptvf_vq_wqe(cptvf, 0);
602 cptvf->vfid);
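Source lines 553-602 cover the completion path: read the DONE count, acknowledge it, and kick the queue's tasklet so vq_post_process() runs outside hard-IRQ context. A sketch (the handler name is an assumption):

static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
        struct pci_dev *pdev = cptvf->pdev;
        u32 intr = cptvf_read_vq_done_count(cptvf);     /* completions since last ack */

        if (intr) {
                struct cptvf_wqe *wqe;

                cptvf_write_vq_done_ack(cptvf, intr);   /* let the counter advance */
                wqe = get_cptvf_vq_wqe(cptvf, 0);
                if (unlikely(!wqe)) {
                        dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
                                cptvf->vfid);
                        return IRQ_NONE;
                }
                tasklet_hi_schedule(&wqe->twork);
        }

        return IRQ_HANDLED;
}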
611 static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
613 struct pci_dev *pdev = cptvf->pdev;
616 if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
619 cptvf->vfid);
623 cpu = cptvf->vfid % num_online_cpus();
624 cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
625 cptvf->affinity_mask[vec]);
627 cptvf->affinity_mask[vec]);
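The affinity helper at source lines 611-627 pins each vector to a CPU on the VF's NUMA node, spreading VFs round-robin; the matching free helper at line 358 drops the hint and the mask again. A sketch (the final irq_set_affinity_hint() call is inferred from that free helper):

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
        struct pci_dev *pdev = cptvf->pdev;
        int cpu;

        if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], GFP_KERNEL)) {
                dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
                        cptvf->vfid);
                return;
        }

        /* Spread VFs over the node-local CPUs */
        cpu = cptvf->vfid % num_online_cpus();
        cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
                        cptvf->affinity_mask[vec]);
        irq_set_affinity_hint(pci_irq_vector(pdev, vec),
                              cptvf->affinity_mask[vec]);
}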
630 static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
635 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
638 static void cptvf_device_init(struct cpt_vf *cptvf)
643 cptvf_write_vq_ctl(cptvf, 0);
645 cptvf_write_vq_doorbell(cptvf, 0);
647 cptvf_write_vq_inprog(cptvf, 0);
650 base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
651 cptvf_write_vq_saddr(cptvf, base_addr);
653 cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
654 cptvf_write_vq_done_numwait(cptvf, 1);
656 cptvf_write_vq_ctl(cptvf, 1);
658 cptvf->flags |= CPT_FLAG_DEVICE_READY;
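Source lines 638-658 give the hardware bring-up order: quiesce the VQ, zero the doorbell and in-progress counters, program the instruction queue's base address, set the completion-coalescing thresholds, then re-enable the VQ and mark the device ready. A sketch of that sequence:

static void cptvf_device_init(struct cpt_vf *cptvf)
{
        u64 base_addr;

        cptvf_write_vq_ctl(cptvf, 0);           /* disable the VQ while programming */
        cptvf_write_vq_doorbell(cptvf, 0);      /* reset pending doorbell count */
        cptvf_write_vq_inprog(cptvf, 0);        /* reset in-flight instruction count */

        /* Point the VQ at the first command-queue chunk */
        base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
        cptvf_write_vq_saddr(cptvf, base_addr);

        /* Completion coalescing: time threshold, then count threshold */
        cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
        cptvf_write_vq_done_numwait(cptvf, 1);

        cptvf_write_vq_ctl(cptvf, 1);           /* enable the VQ */

        cptvf->flags |= CPT_FLAG_DEVICE_READY;
}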
664 struct cpt_vf *cptvf;
667 cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
668 if (!cptvf)
671 pci_set_drvdata(pdev, cptvf);
672 cptvf->pdev = pdev;
686 cptvf->flags |= CPT_FLAG_VF_DRIVER;
694 cptvf->reg_base = pcim_iomap(pdev, 0, 0);
695 if (!cptvf->reg_base) {
701 cptvf->node = dev_to_node(&pdev->dev);
712 cptvf);
719 cptvf_enable_mbox_interrupts(cptvf);
720 cptvf_enable_swerr_interrupts(cptvf);
724 err = cptvf_check_pf_ready(cptvf);
731 cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
732 err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
738 err = cptvf_send_vq_size_msg(cptvf);
745 cptvf_device_init(cptvf);
747 cptvf->vfgrp = 1;
748 err = cptvf_send_vf_to_grp_msg(cptvf);
754 cptvf->priority = 1;
755 err = cptvf_send_vf_priority_msg(cptvf);
763 cptvf);
770 cptvf_enable_done_interrupts(cptvf);
773 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
774 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
776 err = cptvf_send_vf_up(cptvf);
781 err = cvm_crypto_init(cptvf);
789 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
790 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
792 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
794 pci_free_irq_vectors(cptvf->pdev);
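The probe fragments (source lines 664-794) read as one long bring-up sequence. A heavily abridged sketch of the ordering, with the PCI enable/region/DMA-mask, MSI-X allocation and request_irq steps elided and all unwinding collapsed into one label (the real code unwinds each step individually, see the fragments around lines 789-794):

static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_vf *cptvf;
        int err;

        cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
        if (!cptvf)
                return -ENOMEM;

        pci_set_drvdata(pdev, cptvf);
        cptvf->pdev = pdev;

        /* ... PCI enable, regions and DMA mask setup elided ... */
        cptvf->flags |= CPT_FLAG_VF_DRIVER;

        cptvf->reg_base = pcim_iomap(pdev, 0, 0);       /* BAR0: VF CSRs */
        if (!cptvf->reg_base)
                return -ENOMEM;
        cptvf->node = dev_to_node(&pdev->dev);

        /* ... MSI-X allocation and MISC vector request_irq elided ... */
        cptvf_enable_mbox_interrupts(cptvf);
        cptvf_enable_swerr_interrupts(cptvf);

        err = cptvf_check_pf_ready(cptvf);              /* wait for the PF */
        if (err)
                goto fail;

        cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
        err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
        if (err)
                goto fail;

        err = cptvf_send_vq_size_msg(cptvf);            /* tell the PF our VQ length */
        if (err)
                goto fail;

        cptvf_device_init(cptvf);                       /* program the VQ hardware */

        cptvf->vfgrp = 1;                               /* group, then priority, via mailbox */
        err = cptvf_send_vf_to_grp_msg(cptvf);
        if (err)
                goto fail;
        cptvf->priority = 1;
        err = cptvf_send_vf_priority_msg(cptvf);
        if (err)
                goto fail;

        /* ... DONE vector request_irq elided ... */
        cptvf_enable_done_interrupts(cptvf);
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
        cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

        err = cptvf_send_vf_up(cptvf);                  /* report the VF as up */
        if (err)
                goto fail;

        return cvm_crypto_init(cptvf);                  /* register the crypto algorithms */

fail:
        /* real code frees affinity masks, IRQs and vectors here (lines 789-794) */
        return err;
}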
806 struct cpt_vf *cptvf = pci_get_drvdata(pdev);
808 if (!cptvf) {
814 if (cptvf_send_vf_down(cptvf)) {
817 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
818 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
819 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
820 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
821 pci_free_irq_vectors(cptvf->pdev);
822 cptvf_sw_cleanup(cptvf);
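Teardown (source lines 806-822) mirrors probe: tell the PF the VF is going down, then release the affinity hints, the two IRQs, the MSI-X vectors and the software queue state. A sketch (the final cvm_crypto_exit() call and the message text are assumptions, they do not appear in this listing):

static void cptvf_remove(struct pci_dev *pdev)
{
        struct cpt_vf *cptvf = pci_get_drvdata(pdev);

        if (!cptvf) {
                dev_err(&pdev->dev, "Invalid CPT-VF device\n");
                return;
        }

        /* Convey DOWN to the PF before tearing anything down */
        if (cptvf_send_vf_down(cptvf)) {
                dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
        } else {
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
                cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
                free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
                pci_free_irq_vectors(cptvf->pdev);
                cptvf_sw_cleanup(cptvf);
                cvm_crypto_exit();
        }
}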