// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

#define CPT_UC_RID_CN9K_B0   1

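/*
 * VF-PF mailbox interrupts are spread across two 64-bit registers:
 * index 0 covers VFs 0-63 and index 1 covers VFs 64-127.  The enable
 * registers use write-1-to-set (W1S) semantics, so only the bits for
 * the VFs actually provisioned get turned on below.
 */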
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = (num_vfs > 64) ? 63 : num_vfs - 1;
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

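/*
 * Deferred FLR handling: tell the AF that the VF's function level
 * reset completed (MBOX_MSG_VF_FLR), then clear the VF's transaction
 * pending bit and re-enable its FLR interrupt, which the hard IRQ
 * handler masked when it queued this work.
 */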
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	mutex_lock(&pf->lock);
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req) {
		mutex_unlock(&pf->lock);
		return;
	}

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);
	if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* Clear transaction pending register */
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}
	mutex_unlock(&pf->lock);
}

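/*
 * VF FLR hard IRQ handler: for every VF with a pending FLR, queue the
 * per-VF work item, then acknowledge and mask the interrupt; the work
 * handler re-enables it once the AF has processed the FLR.
 */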
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

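/*
 * VF ME hard IRQ handler ("ME" following the RVU VFME register naming,
 * raised on a VF master-enable transition): clear the VF's transaction
 * pending bit and acknowledge the interrupt; nothing is deferred here.
 */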
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

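/*
 * A single ordered workqueue services FLR for all VFs; each VF gets
 * its own work item, and the handler derives the VF number from the
 * item's position in the flr_work array.
 */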
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

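/*
 * The VF-PF mailbox region is allocated by the AF and advertised via
 * RVU_PF_VF_MBOX_ADDR on CN10K or RVU_PF_VF_BAR4_ADDR on OcteonTX2;
 * each VF owns a MBOX_SIZE slice of it.
 */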
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq =
		alloc_ordered_workqueue("cpt_vfpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

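/*
 * The AF-PF channel uses two mailboxes over the same BAR4 window: a
 * "down" mailbox (MBOX_DIR_PFAF) for requests this PF initiates and
 * an "up" mailbox (MBOX_DIR_PFAF_UP) for messages the AF initiates.
 */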
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq =
		alloc_ordered_workqueue("cpt_afpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto mbox_cleanup;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
	mutex_init(&cptpf->lock);

	return 0;

mbox_cleanup:
	otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
	otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}

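/*
 * sso_pf_func_ovrd: SSO PF_FUNC override knob.  Writes are honoured
 * only on CN9K B0 silicon (note the revision check in the store
 * handler); on other revisions the value is silently ignored.
 */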
static ssize_t sso_pf_func_ovrd_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}

static ssize_t sso_pf_func_ovrd_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	u8 sso_pf_func_ovrd;

	if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
		return count;

	if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
		return -EINVAL;

	cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;

	return count;
}

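/*
 * kvf_limits ("kernel VF limits") caps the number of CPT LFs used for
 * kernel crypto requests; the store handler bounds it to the range
 * [1, num_online_cpus()].
 */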
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);

static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	&dev_attr_sso_pf_func_ovrd.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if the AF has set up the revision for the RVUM block;
	 * otherwise the driver probe should be deferred until the AF
	 * driver comes up.
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

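/*
 * Reset one CPT block through the AF mailbox: set CPT_AF_BLK_RST and
 * poll its busy bit (bit 63) until the reset completes, giving up
 * after roughly ten 10-20 ms polling intervals.
 */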
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

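/*
 * Bit 11 of RVU_PF_BLOCK_ADDRX_DISC is the block's "implemented" flag;
 * CPT1 exists only on some silicon, so probe for it before use.
 */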
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if the 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

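/*
 * SR-IOV enable path: bring up the VF-PF mailbox, the FLR machinery
 * and their interrupts before any VF exists, then discover engine
 * capabilities and build engine groups so the VFs find a usable
 * device as soon as pci_enable_sriov() exposes them.
 */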
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

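/*
 * Probe order matters: the AF must be up (cpt_is_pf_usable()) and the
 * AF-PF mailbox functional before the device can be reset and its
 * engine groups initialized, since all CPT AF register accesses go
 * through that mailbox.
 */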
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);