1// SPDX-License-Identifier: GPL-2.0
#include <linux/bitmap.h>
#include <linux/workqueue.h>
3
4#include "nitrox_csr.h"
5#include "nitrox_hal.h"
6#include "nitrox_dev.h"
7
8#define RING_TO_VFNO(_x, _y)	((_x) / (_y))
9
/**
 * enum mbx_msg_type - Mailbox message types
 * @MBX_MSG_TYPE_NOP: no operation, no response is sent
 * @MBX_MSG_TYPE_REQ: request from a VF, processed by the PF
 * @MBX_MSG_TYPE_ACK: acknowledgement sent by the PF on success
 * @MBX_MSG_TYPE_NACK: negative acknowledgement
 */
enum mbx_msg_type {
	MBX_MSG_TYPE_NOP,
	MBX_MSG_TYPE_REQ,
	MBX_MSG_TYPE_ACK,
	MBX_MSG_TYPE_NACK,
};
19
/**
 * enum mbx_msg_opcode - Mailbox message opcodes
 * @MSG_OP_VF_MODE: VF asks for the device operating mode
 * @MSG_OP_VF_UP: VF coming up; message data carries its queue count
 * @MSG_OP_VF_DOWN: VF going down
 * @MSG_OP_CHIPID_VFID: VF asks for the chip index and its VF number
 * @MSG_OP_MCODE_INFO: VF asks for microcode group information
 */
enum mbx_msg_opcode {
	MSG_OP_VF_MODE = 1,
	MSG_OP_VF_UP,
	MSG_OP_VF_DOWN,
	MSG_OP_CHIPID_VFID,
	MSG_OP_MCODE_INFO = 11,
};
30
/**
 * struct pf2vf_work - context for a deferred PF-to-VF mailbox response
 * @vfdev: VF whose mailbox message is being handled
 * @ndev: PF device that owns the mailbox registers
 * @pf2vf_resp: work item; the work handler frees this whole struct
 */
struct pf2vf_work {
	struct nitrox_vfdev *vfdev;
	struct nitrox_device *ndev;
	struct work_struct pf2vf_resp;
};
36
37static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
38{
39	u64 reg_addr;
40
41	reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
42	return nitrox_read_csr(ndev, reg_addr);
43}
44
45static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
46				    int ring)
47{
48	u64 reg_addr;
49
50	reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
51	nitrox_write_csr(ndev, reg_addr, value);
52}
53
54static void pf2vf_send_response(struct nitrox_device *ndev,
55				struct nitrox_vfdev *vfdev)
56{
57	union mbox_msg msg;
58
59	msg.value = vfdev->msg.value;
60
61	switch (vfdev->msg.opcode) {
62	case MSG_OP_VF_MODE:
63		msg.data = ndev->mode;
64		break;
65	case MSG_OP_VF_UP:
66		vfdev->nr_queues = vfdev->msg.data;
67		atomic_set(&vfdev->state, __NDEV_READY);
68		break;
69	case MSG_OP_CHIPID_VFID:
70		msg.id.chipid = ndev->idx;
71		msg.id.vfid = vfdev->vfno;
72		break;
73	case MSG_OP_VF_DOWN:
74		vfdev->nr_queues = 0;
75		atomic_set(&vfdev->state, __NDEV_NOT_READY);
76		break;
77	case MSG_OP_MCODE_INFO:
78		msg.data = 0;
79		msg.mcode_info.count = 2;
80		msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
81		msg.mcode_info.next_se_grp = 1;
82		msg.mcode_info.next_ae_grp = 1;
83		break;
84	default:
85		msg.type = MBX_MSG_TYPE_NOP;
86		break;
87	}
88
89	if (msg.type == MBX_MSG_TYPE_NOP)
90		return;
91
92	/* send ACK to VF */
93	msg.type = MBX_MSG_TYPE_ACK;
94	pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
95
96	vfdev->msg.value = 0;
97	atomic64_inc(&vfdev->mbx_resp);
98}
99
100static void pf2vf_resp_handler(struct work_struct *work)
101{
102	struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
103						     pf2vf_resp);
104	struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
105	struct nitrox_device *ndev = pf2vf_resp->ndev;
106
107	switch (vfdev->msg.type) {
108	case MBX_MSG_TYPE_REQ:
109		/* process the request from VF */
110		pf2vf_send_response(ndev, vfdev);
111		break;
112	case MBX_MSG_TYPE_ACK:
113	case MBX_MSG_TYPE_NACK:
114		break;
115	};
116
117	kfree(pf2vf_resp);
118}
119
120void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
121{
122	struct nitrox_vfdev *vfdev;
123	struct pf2vf_work *pfwork;
124	u64 value, reg_addr;
125	u32 i;
126	int vfno;
127
128	/* loop for VF(0..63) */
129	reg_addr = NPS_PKT_MBOX_INT_LO;
130	value = nitrox_read_csr(ndev, reg_addr);
131	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
132		/* get the vfno from ring */
133		vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
134		vfdev = ndev->iov.vfdev + vfno;
135		vfdev->ring = i;
136		/* fill the vf mailbox data */
137		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
138		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
139		if (!pfwork)
140			continue;
141
142		pfwork->vfdev = vfdev;
143		pfwork->ndev = ndev;
144		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
145		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
146		/* clear the corresponding vf bit */
147		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
148	}
149
150	/* loop for VF(64..127) */
151	reg_addr = NPS_PKT_MBOX_INT_HI;
152	value = nitrox_read_csr(ndev, reg_addr);
153	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
154		/* get the vfno from ring */
155		vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
156		vfdev = ndev->iov.vfdev + vfno;
157		vfdev->ring = (i + 64);
158		/* fill the vf mailbox data */
159		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
160
161		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
162		if (!pfwork)
163			continue;
164
165		pfwork->vfdev = vfdev;
166		pfwork->ndev = ndev;
167		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
168		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
169		/* clear the corresponding vf bit */
170		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
171	}
172}
173
174int nitrox_mbox_init(struct nitrox_device *ndev)
175{
176	struct nitrox_vfdev *vfdev;
177	int i;
178
179	ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
180				  sizeof(struct nitrox_vfdev), GFP_KERNEL);
181	if (!ndev->iov.vfdev)
182		return -ENOMEM;
183
184	for (i = 0; i < ndev->iov.num_vfs; i++) {
185		vfdev = ndev->iov.vfdev + i;
186		vfdev->vfno = i;
187	}
188
189	/* allocate pf2vf response workqueue */
190	ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
191	if (!ndev->iov.pf2vf_wq) {
192		kfree(ndev->iov.vfdev);
193		ndev->iov.vfdev = NULL;
194		return -ENOMEM;
195	}
196	/* enable pf2vf mailbox interrupts */
197	enable_pf2vf_mbox_interrupts(ndev);
198
199	return 0;
200}
201
/**
 * nitrox_mbox_cleanup - tear down PF->VF mailbox support.
 * @ndev: NITROX PF device
 *
 * Teardown order matters: interrupts are disabled first so no new work is
 * queued, then the workqueue is destroyed (draining any pending work that
 * still references the vfdev table), and only then is that table freed.
 */
void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
	/* disable pf2vf mailbox interrupts */
	disable_pf2vf_mbox_interrupts(ndev);
	/* destroy workqueue */
	if (ndev->iov.pf2vf_wq)
		destroy_workqueue(ndev->iov.pf2vf_wq);

	kfree(ndev->iov.vfdev);
	ndev->iov.pf2vf_wq = NULL;
	ndev->iov.vfdev = NULL;
}
214