Lines Matching defs:cptvf

9 #include "cptvf.h"
11 #define DRV_NAME "thunder-cptvf"
16 void *cptvf;
29 vq_post_process(cwqe->cptvf, cwqe->qno);
32 static int init_worker_threads(struct cpt_vf *cptvf)
34 struct pci_dev *pdev = cptvf->pdev;
42 if (cptvf->nr_queues) {
44 cptvf->nr_queues);
47 for (i = 0; i < cptvf->nr_queues; i++) {
51 cwqe_info->vq_wqe[i].cptvf = cptvf;
54 cptvf->wqe_info = cwqe_info;
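
The fragments above show init_worker_threads() allocating one work entry per queue and storing a back-pointer to the VF context plus the queue number in each entry, which is what lets the completion path at line 29 call vq_post_process(cwqe->cptvf, cwqe->qno) with no extra lookup. Below is a minimal userspace model of that layout; vf_ctx and worker_entry are illustrative stand-ins for the driver's cpt_vf and cptvf_wqe structures.

#include <stdio.h>
#include <stdlib.h>

struct vf_ctx {                     /* stand-in for struct cpt_vf */
    unsigned int nr_queues;
};

struct worker_entry {               /* stand-in for struct cptvf_wqe */
    struct vf_ctx *cptvf;           /* back-pointer, as in cwqe->cptvf */
    unsigned int qno;               /* owning queue, as in cwqe->qno */
};

static void vq_post_process(struct vf_ctx *vf, unsigned int qno)
{
    (void)vf;
    printf("draining completions for queue %u\n", qno);
}

int main(void)
{
    struct vf_ctx vf = { .nr_queues = 2 };
    struct worker_entry *wqe = calloc(vf.nr_queues, sizeof(*wqe));

    if (!wqe)
        return 1;
    for (unsigned int i = 0; i < vf.nr_queues; i++) {
        wqe[i].cptvf = &vf;         /* mirrors cwqe_info->vq_wqe[i].cptvf = cptvf */
        wqe[i].qno = i;
    }
    vq_post_process(wqe[1].cptvf, wqe[1].qno);  /* completion on queue 1 */
    free(wqe);
    return 0;
}
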
59 static void cleanup_worker_threads(struct cpt_vf *cptvf)
62 struct pci_dev *pdev = cptvf->pdev;
65 cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
69 if (cptvf->nr_queues) {
71 cptvf->nr_queues);
74 for (i = 0; i < cptvf->nr_queues; i++)
78 cptvf->wqe_info = NULL;
139 static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
141 struct pci_dev *pdev = cptvf->pdev;
147 ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
157 static void cleanup_pending_queues(struct cpt_vf *cptvf)
159 struct pci_dev *pdev = cptvf->pdev;
161 if (!cptvf->nr_queues)
165 cptvf->nr_queues);
166 free_pending_queues(&cptvf->pqinfo);
169 static void free_command_queues(struct cpt_vf *cptvf,
175 struct pci_dev *pdev = cptvf->pdev;
179 for (i = 0; i < cptvf->nr_queues; i++) {
203 static int alloc_command_queues(struct cpt_vf *cptvf,
210 struct pci_dev *pdev = cptvf->pdev;
215 cptvf->qsize = min(qlen, cqinfo->qchunksize) *
221 for (i = 0; i < cptvf->nr_queues; i++) {
275 free_command_queues(cptvf, cqinfo);
279 static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
281 struct pci_dev *pdev = cptvf->pdev;
285 ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
289 cptvf->nr_queues);
296 static void cleanup_command_queues(struct cpt_vf *cptvf)
298 struct pci_dev *pdev = cptvf->pdev;
300 if (!cptvf->nr_queues)
304 cptvf->nr_queues);
305 free_command_queues(cptvf, &cptvf->cqinfo);
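
alloc_command_queues()/free_command_queues() above build each VF command queue out of fixed-size chunks, with per-chunk capacity capped at min(qlen, cqinfo->qchunksize) entries of CPT_INST_SIZE bytes. Below is a rough userspace model of that chunked layout; struct chunk and the malloc-based buffers are illustrative stand-ins for the driver's DMA-coherent chunk allocations, and qlen/qchunksize are assumed nonzero.

#include <stdlib.h>

#define CPT_INST_SIZE 64            /* one CPT instruction, per the listing */

struct chunk {                      /* stand-in for one command-queue chunk */
    unsigned char *head;            /* buffer holding the instructions */
    struct chunk *next;             /* next chunk in the chain */
};

static struct chunk *alloc_chunked_queue(unsigned int qlen,
                                         unsigned int qchunksize)
{
    unsigned int per_chunk = qlen < qchunksize ? qlen : qchunksize;
    struct chunk *first = NULL, **link = &first;
    unsigned int left = qlen;

    while (left) {
        unsigned int n = left < per_chunk ? left : per_chunk;
        struct chunk *c = calloc(1, sizeof(*c));

        if (!c)
            break;                  /* sketch only: real code unwinds fully */
        c->head = calloc(n, CPT_INST_SIZE);
        *link = c;
        link = &c->next;
        left -= n;
    }
    return first;
}

static void free_chunked_queue(struct chunk *c)
{
    while (c) {
        struct chunk *next = c->next;

        free(c->head);
        free(c);
        c = next;
    }
}

int main(void)
{
    struct chunk *q = alloc_chunked_queue(8192, 1023);   /* made-up sizes */

    free_chunked_queue(q);
    return 0;
}
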
308 static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
310 cleanup_worker_threads(cptvf);
311 cleanup_pending_queues(cptvf);
312 cleanup_command_queues(cptvf);
315 static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
317 struct pci_dev *pdev = cptvf->pdev;
324 cptvf->nr_queues = nr_queues;
326 ret = init_command_queues(cptvf, qlen);
333 ret = init_pending_queues(cptvf, qlen, nr_queues);
341 ret = init_worker_threads(cptvf);
350 cleanup_worker_threads(cptvf);
351 cleanup_pending_queues(cptvf);
354 cleanup_command_queues(cptvf);
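
cptvf_sw_init() brings the software state up in a fixed order (command queues, then pending queues, then worker threads) and unwinds the stages that succeeded when a later one fails, while cptvf_sw_cleanup() tears everything down in reverse. A compact sketch of that shape follows; the stub functions and label names are placeholders, not the driver's exact error labels.

#include <stdio.h>

static int init_command_queues(void)  { return 0; }
static int init_pending_queues(void)  { return 0; }
static int init_worker_threads(void)  { return -1; /* force the error path */ }
static void cleanup_command_queues(void) { puts("cleanup command queues"); }
static void cleanup_pending_queues(void) { puts("cleanup pending queues"); }

static int sw_init(void)
{
    int ret;

    ret = init_command_queues();
    if (ret)
        goto out;
    ret = init_pending_queues();
    if (ret)
        goto cleanup_cq;
    ret = init_worker_threads();
    if (ret)
        goto cleanup_pq;
    return 0;

cleanup_pq:
    cleanup_pending_queues();
cleanup_cq:
    cleanup_command_queues();
out:
    return ret;
}

int main(void)
{
    return sw_init() ? 1 : 0;
}
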
359 static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
361 irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
362 free_cpumask_var(cptvf->affinity_mask[vec]);
365 static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
369 vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
371 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
374 void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
378 vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
381 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
385 static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
389 vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
391 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
394 static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
398 vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
401 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
405 static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
409 vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
412 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
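
Every cptvf_write_vq_*() helper above follows the same read-modify-write pattern: read the 64-bit CSR through cpt_read_csr64(), update one field through a union view of the register, then write the whole value back with cpt_write_csr64(). Below is a userspace model of that pattern; the in-memory register block, the 0x100 offset and the single-bit vqx_ctl layout are simulated, not the CPTX_VQX_* hardware definitions.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_regs[64];      /* pretend MMIO window (cptvf->reg_base) */

static uint64_t csr_read64(uint64_t *base, unsigned long off)
{
    return base[off / 8];
}

static void csr_write64(uint64_t *base, unsigned long off, uint64_t val)
{
    base[off / 8] = val;
}

union vqx_ctl {                     /* simulated layout: bit 0 = enable */
    uint64_t u;
    struct { uint64_t ena:1, reserved:63; } s;
};

static void write_vq_ctl(uint64_t *reg_base, int enable)
{
    union vqx_ctl ctl;

    ctl.u = csr_read64(reg_base, 0x100);   /* read the current value */
    ctl.s.ena = enable ? 1 : 0;            /* touch only one field   */
    csr_write64(reg_base, 0x100, ctl.u);   /* write the whole CSR    */
}

int main(void)
{
    write_vq_ctl(fake_regs, 1);
    printf("VQ(0) ctl = %#llx\n", (unsigned long long)fake_regs[0x100 / 8]);
    return 0;
}
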
416 static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
420 vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
424 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
428 static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
432 vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
436 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
440 static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
444 vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
448 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
452 static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
456 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
460 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
464 static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
468 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
472 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
476 static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
480 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
484 cpt_write_csr64(cptvf->reg_base,
488 static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
492 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
496 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
500 static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
504 vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
508 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
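
The enable helpers above write to the *_ENA_W1S aliases (write-1-to-set), and the cptvf_clear_*_intr() helpers write the relevant bit back to CPTX_VQX_MISC_INT, which behaves as write-1-to-clear, so neither side has to preserve unrelated bits. A tiny model of those semantics follows; the bit assignments and the register struct are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define MISC_INT_MBOX   (1ull << 0)   /* illustrative bit positions */
#define MISC_INT_SWERR  (1ull << 1)

struct fake_vq {
    uint64_t misc_ena;                /* models the MISC_ENA_W1S target */
    uint64_t misc_int;                /* models CPTX_VQX_MISC_INT (W1C) */
};

static void misc_ena_w1s(struct fake_vq *vq, uint64_t bits)
{
    vq->misc_ena |= bits;             /* W1S: ones set, zeroes are ignored   */
}

static void misc_int_w1c(struct fake_vq *vq, uint64_t bits)
{
    vq->misc_int &= ~bits;            /* W1C: ones clear, zeroes are ignored */
}

int main(void)
{
    struct fake_vq vq = { .misc_int = MISC_INT_MBOX | MISC_INT_SWERR };

    misc_ena_w1s(&vq, MISC_INT_MBOX); /* enable only the mbox interrupt */
    misc_int_w1c(&vq, MISC_INT_MBOX); /* ack the pending mbox bit       */
    printf("ena=%#llx pending=%#llx\n",
           (unsigned long long)vq.misc_ena,
           (unsigned long long)vq.misc_int);
    return 0;
}
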
512 static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
514 return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
519 struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
520 struct pci_dev *pdev = cptvf->pdev;
523 intr = cptvf_read_vf_misc_intr_status(cptvf);
527 intr, cptvf->vfid);
528 cptvf_handle_mbox_intr(cptvf);
529 cptvf_clear_mbox_intr(cptvf);
531 cptvf_clear_dovf_intr(cptvf);
533 cptvf_write_vq_doorbell(cptvf, 0);
535 intr, cptvf->vfid);
537 cptvf_clear_irde_intr(cptvf);
539 intr, cptvf->vfid);
541 cptvf_clear_nwrp_intr(cptvf);
543 intr, cptvf->vfid);
545 cptvf_clear_swerr_intr(cptvf);
547 intr, cptvf->vfid);
550 cptvf->vfid);
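
The misc interrupt handler reads CPTX_VQX_MISC_INT once and then dispatches on the cause bits: a PF mailbox message, a doorbell overflow (which also zeroes the doorbell), an instruction read error, a write-response error, or a software error, clearing just the bit it handled. A sketch of that dispatch shape; the bit values and messages are placeholders, not the hardware encoding.

#include <stdint.h>
#include <stdio.h>

#define INTR_MBOX  (1u << 0)          /* illustrative cause bits */
#define INTR_DOVF  (1u << 1)
#define INTR_IRDE  (1u << 2)
#define INTR_NWRP  (1u << 3)
#define INTR_SWERR (1u << 4)

static void handle_misc_intr(uint32_t intr)
{
    if (intr & INTR_MBOX) {
        puts("mailbox message from PF: handle, then clear MBOX");
    } else if (intr & INTR_DOVF) {
        puts("doorbell overflow: zero the doorbell, clear DOVF");
    } else if (intr & INTR_IRDE) {
        puts("instruction read error: clear IRDE");
    } else if (intr & INTR_NWRP) {
        puts("write-response error: clear NWRP");
    } else if (intr & INTR_SWERR) {
        puts("software error: clear SWERR");
    } else {
        puts("unhandled misc interrupt");
    }
}

int main(void)
{
    handle_misc_intr(INTR_DOVF);
    return 0;
}
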
556 static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
561 if (unlikely(qno >= cptvf->nr_queues))
563 nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
568 static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
572 vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
576 static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
581 vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
584 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
590 struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
591 struct pci_dev *pdev = cptvf->pdev;
593 u32 intr = cptvf_read_vq_done_count(cptvf);
601 cptvf_write_vq_done_ack(cptvf, intr);
602 wqe = get_cptvf_vq_wqe(cptvf, 0);
605 cptvf->vfid);
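
The done-interrupt flow above reads how many instructions completed (CPTX_VQX_DONE), acknowledges exactly that many via the DONE_ACK register, then kicks queue 0's work entry so completions are processed outside the hard IRQ. Below is a simulated sketch of that flow; the counters and the schedule_queue_work() stand-in are not the driver's own helpers.

#include <stdint.h>
#include <stdio.h>

struct fake_vq {
    uint32_t done;                    /* models the CPTX_VQX_DONE count */
};

static uint32_t read_done_count(struct fake_vq *vq)
{
    return vq->done;
}

static void write_done_ack(struct fake_vq *vq, uint32_t n)
{
    vq->done -= n;                    /* hardware subtracts the acked count */
}

static void schedule_queue_work(unsigned int qno)
{
    printf("deferring completion processing for queue %u\n", qno);
}

static void done_intr_handler(struct fake_vq *vq)
{
    uint32_t intr = read_done_count(vq);

    if (!intr)                        /* spurious: nothing completed */
        return;
    write_done_ack(vq, intr);         /* ack so the line can deassert */
    schedule_queue_work(0);           /* mirrors get_cptvf_vq_wqe(cptvf, 0) */
}

int main(void)
{
    struct fake_vq vq = { .done = 3 };

    done_intr_handler(&vq);
    printf("pending done count after ack: %u\n", vq.done);
    return 0;
}
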
614 static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
616 struct pci_dev *pdev = cptvf->pdev;
619 if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
622 cptvf->vfid);
626 cpu = cptvf->vfid % num_online_cpus();
627 cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
628 cptvf->affinity_mask[vec]);
630 cptvf->affinity_mask[vec]);
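
cptvf_set_irq_affinity() picks a CPU as vfid modulo the number of online CPUs, maps it to a node-local CPU with cpumask_local_spread(), and installs that as the vector's affinity hint, so successive VFs land on different CPUs. The toy model below shows the spreading policy only; the CPU count is a pretend value and the node-local mapping step is omitted.

#include <stdio.h>

static unsigned int online_cpus(void) { return 8; }    /* pretend value */

static unsigned int pick_cpu_for_vf(unsigned int vfid)
{
    /* the driver additionally maps this index to a node-local CPU via
     * cpumask_local_spread(); that step is omitted in this model */
    return vfid % online_cpus();
}

int main(void)
{
    for (unsigned int vfid = 0; vfid < 10; vfid++)
        printf("VF%u -> CPU%u\n", vfid, pick_cpu_for_vf(vfid));
    return 0;
}
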
633 static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
638 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
641 static void cptvf_device_init(struct cpt_vf *cptvf)
646 cptvf_write_vq_ctl(cptvf, 0);
648 cptvf_write_vq_doorbell(cptvf, 0);
650 cptvf_write_vq_inprog(cptvf, 0);
653 base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
654 cptvf_write_vq_saddr(cptvf, base_addr);
656 cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
657 cptvf_write_vq_done_numwait(cptvf, 1);
659 cptvf_write_vq_ctl(cptvf, 1);
661 cptvf->flags |= CPT_FLAG_DEVICE_READY;
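
cptvf_device_init() quiesces the virtual queue before touching it (ctl, doorbell and inprog all written to zero), programs the DMA address of the first command-queue chunk into VQ(0)_SADDR, sets the done-interrupt coalescing thresholds (time and count), and only then re-enables the queue and marks the device ready. A stub-based sketch of that ordering follows; the printed values and the 100/1 thresholds are placeholders (CPT_TIMER_THOLD's real value is not shown in the listing).

#include <stdint.h>
#include <stdio.h>

static void write_vq_ctl(int ena)              { printf("ctl=%d\n", ena); }
static void write_vq_doorbell(uint32_t v)      { printf("doorbell=%u\n", v); }
static void write_vq_inprog(uint8_t v)         { printf("inprog=%u\n", v); }
static void write_vq_saddr(uint64_t addr)      { printf("saddr=%#llx\n",
                                                        (unsigned long long)addr); }
static void write_vq_done_timewait(uint16_t t) { printf("timewait=%u\n", t); }
static void write_vq_done_numwait(uint32_t n)  { printf("numwait=%u\n", n); }

static void device_init(uint64_t cq_base_dma)
{
    write_vq_ctl(0);                  /* disable the VQ while configuring */
    write_vq_doorbell(0);             /* no outstanding doorbell counts   */
    write_vq_inprog(0);               /* nothing in flight                */
    write_vq_saddr(cq_base_dma);      /* first command-queue chunk base   */
    write_vq_done_timewait(100);      /* placeholder time threshold       */
    write_vq_done_numwait(1);         /* interrupt after one completion   */
    write_vq_ctl(1);                  /* enable the VQ                    */
}

int main(void)
{
    device_init(0x10000000ull);       /* made-up DMA address */
    return 0;
}
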
667 struct cpt_vf *cptvf;
670 cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
671 if (!cptvf)
674 pci_set_drvdata(pdev, cptvf);
675 cptvf->pdev = pdev;
689 cptvf->flags |= CPT_FLAG_VF_DRIVER;
703 cptvf->reg_base = pcim_iomap(pdev, 0, 0);
704 if (!cptvf->reg_base) {
710 cptvf->node = dev_to_node(&pdev->dev);
721 cptvf);
728 cptvf_enable_mbox_interrupts(cptvf);
729 cptvf_enable_swerr_interrupts(cptvf);
733 err = cptvf_check_pf_ready(cptvf);
740 cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
741 err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
747 err = cptvf_send_vq_size_msg(cptvf);
754 cptvf_device_init(cptvf);
756 cptvf->vfgrp = 1;
757 err = cptvf_send_vf_to_grp_msg(cptvf);
763 cptvf->priority = 1;
764 err = cptvf_send_vf_priority_msg(cptvf);
772 cptvf);
779 cptvf_enable_done_interrupts(cptvf);
782 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
783 cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
785 err = cptvf_send_vf_up(cptvf);
790 err = cvm_crypto_init(cptvf);
798 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
799 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
801 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
803 pci_free_irq_vectors(cptvf->pdev);
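
The probe fragments above show the VF-to-PF mailbox handshake ordering: wait for the PF, report the VQ size, configure the local queues and CSRs, join engine group 1, set priority 1, and only after interrupts and affinity are in place send the "VF up" message and register with the crypto layer. The skeleton below sketches that sequence with stubbed mailbox calls; the plain error returns stand in for the driver's unwinding of IRQ affinity, IRQs and MSI-X vectors.

#include <stdio.h>

static int check_pf_ready(void)       { return 0; }
static int send_vq_size_msg(void)     { return 0; }
static int send_vf_to_grp_msg(void)   { return 0; }
static int send_vf_priority_msg(void) { return 0; }
static int send_vf_up(void)           { return 0; }

static int probe_handshake(void)
{
    if (check_pf_ready())        return -1;  /* PF not responding        */
    if (send_vq_size_msg())      return -1;  /* PF rejected queue length */
    /* local queue/CSR setup happens here (cptvf_device_init)            */
    if (send_vf_to_grp_msg())    return -1;  /* join an engine group     */
    if (send_vf_priority_msg())  return -1;  /* set queue priority       */
    if (send_vf_up())            return -1;  /* tell the PF we are live  */
    return 0;
}

int main(void)
{
    return probe_handshake() ? 1 : 0;
}
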
815 struct cpt_vf *cptvf = pci_get_drvdata(pdev);
817 if (!cptvf) {
823 if (cptvf_send_vf_down(cptvf)) {
826 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
827 cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
828 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
829 free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
830 pci_free_irq_vectors(cptvf->pdev);
831 cptvf_sw_cleanup(cptvf);