// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M
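
/* MBOX_UP_CGX_MESSAGES (see mbox.h) instantiates the macro above for every
 * AF-to-PF "up" notification related to CGX. As a sketch only (the exact
 * entries and message IDs live in mbox.h), the CGX_LINK_EVENT entry generates
 * a helper of the form
 *
 *	static struct cgx_link_info_msg *
 *	otx2_mbox_alloc_msg_cgx_link_event(struct rvu *rvu, int devid);
 *
 * which allocates an up-mbox message towards PF 'devid' and pre-fills its
 * header; cgx_notify_pfs() below uses it to push link-change events.
 */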

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x)	((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

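/* Pack a CGX id and LMAC id into one byte of the pf2cgxlmac_map table:
 * CGX id in the upper nibble, LMAC id in the lower one (e.g. cgx_id 1,
 * lmac_id 2 is stored as 0x12). rvu_get_cgx_lmac_id() performs the
 * reverse unpacking wherever the map entry is consumed.
 */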
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
	if (!pfmap) {
		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
			event->cgx_id, event->lmac_id);
		return;
	}

	do {
		pfid = find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		mutex_lock(&rvu->mbox_lock);

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg) {
			mutex_unlock(&rvu->mbox_lock);
			continue;
		}

		msg->link_info = *linfo;

		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);

		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);

		mutex_unlock(&rvu->mbox_lock);
	} while (pfmap);
}

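/* Worker that drains rvu->cgx_evq_head. Events are queued either from the
 * CGX interrupt path via cgx_lmac_postevent() or from
 * rvu_cgx_send_link_info(), and each dequeued event is forwarded to the
 * mapped PFs through cgx_notify_pfs().
 */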
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

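/* Probe-time CGX setup: discover the CGX instances present, map their LMACs
 * to RVU PFs, register for link-change events and then kick off link-up on
 * every port.
 */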
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so reset it explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from PFs which are not mapped to CGX LMACs,
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	u64_to_ether_addr(cfg, rsp->mac_addr);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes;
	 * if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	/* Inform MCS about 8B RX header */
	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

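/* Configure 802.3x link-level flow control on the LMAC mapped to 'pcifunc'.
 * 802.3x pause and PFC are mutually exclusive, so this bails out if PFC
 * frames are already enabled; rvu_cgx_prio_flow_ctrl_cfg() performs the
 * mirror-image check before enabling PFC.
 */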
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds the cumulative status of NIX rx/tx counters from the LF of a PF and
 * those of its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LFs of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

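/* Reference-count NIXLF start/stop requests coming from a PF and its VFs so
 * that CGX LMAC Rx/Tx is enabled when the first NIXLF starts and disabled
 * only when the last one stops.
 */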
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));
	else
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));

	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}

void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	struct cgx *cgxd;
	u8 cgx, lmac;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
	cgxd = rvu_cgx_pdata(cgx, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
		dev_err(rvu->dev, "Failed to reset MAC\n");
}