// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"

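/*
 * Tasklet bottom half: post-process completed requests for the virtual
 * queue whose wqe_info was passed as the tasklet data.
 */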
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}

static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}

static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}

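/*
 * Allocate one pending queue per VQ. Each queue holds qlen software
 * entries used by the request manager to track commands that have been
 * submitted to the hardware but not yet completed.
 */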
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	size_t size;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;
	size = (qlen * sizeof(struct otx_cpt_pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc(size, GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}

static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}

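/*
 * Allocate the hardware command queues. Each VQ is built from one or
 * more DMA-coherent chunks linked into a circular list: the u64 slot
 * appended after each chunk's data holds the DMA address of the next
 * chunk, and the final chunk points back to the first.
 */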
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		c_size = 0;
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					   c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					   &curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
				"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}

static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* Limit the number of queues to what the device supports */
	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

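/*
 * The helpers below access this VF's per-VQ CSRs through its BAR
 * mapping: read the 64-bit register, update the relevant field and
 * write it back.
 */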
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}

void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}

static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}

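/*
 * Interrupt enables go through the *_ENA_W1S registers: writing 1 to a
 * bit enables the corresponding interrupt, writing 0 has no effect.
 */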
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}

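/*
 * The MISC interrupt status register is write-1-to-clear: each helper
 * below acknowledges exactly one cause (DOVF, IRDE, NWRP, MBOX or
 * SWERR) by writing that bit back.
 */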
static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
		"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
		"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
		"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}

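/*
 * DONE interrupt handler: read the completion count, acknowledge it via
 * DONE_ACK and hand the actual post-processing to the per-VQ tasklet.
 */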
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

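/*
 * Pin the given MSI-X vector to a CPU chosen from this VF's NUMA node,
 * spreading VFs across the online CPUs by vfid.
 */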
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}

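/*
 * Bring up the VF's virtual queue: quiesce it, point SADDR at the first
 * command-queue chunk, program the completion coalescing thresholds and
 * finally enable the queue.
 */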
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}

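/*
 * Per-device sysfs attributes: vf_type reports the engine type assigned
 * by the PF, vf_engine_group reports and selects the engine group, and
 * the vf_coalesc_* attributes expose the DONE interrupt coalescing
 * thresholds.
 */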
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
}

static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
}

static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group must be less than %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}

static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_numwait(cptvf));
}

static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}

static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};

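/*
 * Probe: map the VF's BAR, set up MSI-X (misc + done vectors), perform
 * the mailbox handshake with the PF, initialize the command/pending
 * queues and the VQ, register crypto algorithms and expose the sysfs
 * attributes.
 */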
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto release_regions;
	}

	/* Map VF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check cpt pf status, gets chip ID / device Id from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}

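/*
 * Remove: tell the PF this VF is going down, then tear everything down
 * in the reverse order of probe.
 */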
static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);