// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

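/*
 * Per-VQ bottom-half context: one tasklet per virtual queue, carrying the
 * owning VF and the queue number so the handler can run vq_post_process()
 * for completed instructions.
 */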
struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

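/*
 * Tasklet body: the tasklet data is the whole cptvf_wqe_info, and since a VF
 * currently owns a single VQ, queue 0 is always the one post-processed here.
 */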
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}

static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		queue->head = NULL;

		queue->front = 0;
		queue->rear = 0;
	}

	pqinfo->qlen = 0;
	pqinfo->nr_queues = 0;
}

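/*
 * A pending queue is a kcalloc()'d ring of qlen pending_entry slots:
 * front/rear index the ring, pending_count counts outstanding entries and
 * the per-queue spinlock protects concurrent access.
 */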
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	int ret;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			/* Free the full allocation: data plus next-chunk pointer */
			dma_free_coherent(&pdev->dev,
					  chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}

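/*
 * A command queue is built from one or more DMA-coherent chunks. Each chunk
 * holds up to qchunksize instructions plus CPT_NEXT_CHUNK_PTR_SIZE trailing
 * bytes in which the DMA address of the next chunk is stored; the last chunk
 * points back to the first, making the queue circular for the hardware.
 */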
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
							c_size + CPT_NEXT_CHUNK_PTR_SIZE,
							&curr->dma_addr,
							GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular: tie the last chunk back to the head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

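/*
 * Software-side setup for the VF: clamp the requested queue count to
 * CPT_NUM_QS_PER_VF, then bring up the command queues, pending queues and
 * the per-queue worker tasklets, unwinding in reverse order on failure.
 */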
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret = 0;
	u32 max_dev_queues = 0;

	max_dev_queues = CPT_NUM_QS_PER_VF;
	/* possible cpus */
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

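/*
 * Per-VQ CSR accessors. From the VF's point of view the (block, queue)
 * indices passed to the CPTX_VQX_* macros are always (0, 0); the helpers
 * below read or update individual fields of those registers through the
 * VF's reg_base mapping.
 */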
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable software error interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Enable DONE interrupts for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

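/*
 * The MISC_INT interrupt cause bits are write-one-to-clear (W1C), so each
 * helper below acknowledges exactly one interrupt source for this VF.
 */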
static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base,
			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

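/*
 * MISC interrupt handler: read the MISC_INT cause register and dispatch on
 * the first matching source (mailbox in the common case, otherwise one of
 * the error conditions), clearing the corresponding W1C bit.
 */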
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

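/*
 * DONE interrupt handler: read the number of completed instructions,
 * acknowledge them in DONE_ACK and defer the actual completion processing
 * to the queue's tasklet.
 */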
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the scheduled completions for processing */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

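/*
 * Hardware-side bring-up of the VF's virtual queue: quiesce the VQ, point
 * SADDR at the first command chunk, program the completion coalescing
 * thresholds and finally enable the queue.
 */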
static void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

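/*
 * Probe: map BAR0, set up the MSI-X vectors (MISC and DONE), perform the
 * READY/QLEN/group/priority/UP mailbox handshake with the PF, initialize
 * software and hardware queue state and finally register the crypto
 * algorithms.
 */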
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	/* Map PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed\n");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox and software error interrupts */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = 1;
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg\n");
		goto cptvf_free_misc_irq;
	}

	cptvf->priority = 1;
	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg\n");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable completion (DONE) interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg\n");
		goto cptvf_free_irq_affinity;
	}
	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}
	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	/* The done interrupt was requested above, release it too */
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

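/*
 * Remove: tell the PF the VF is going down, then release interrupts,
 * software queue state, PCI resources and the registered crypto algorithms.
 * If the PF does not acknowledge the DOWN message, the teardown is skipped
 * and only an error is logged.
 */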
static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);