1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_ulp.h"
62 #include "bnxt_sriov.h"
63 #include "bnxt_ethtool.h"
64 #include "bnxt_dcb.h"
65 #include "bnxt_xdp.h"
66 #include "bnxt_vfr.h"
67 #include "bnxt_tc.h"
68 #include "bnxt_devlink.h"
69 #include "bnxt_debugfs.h"
70
71 #define BNXT_TX_TIMEOUT (5 * HZ)
72 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
73 NETIF_MSG_TX_ERR)
74
75 MODULE_LICENSE("GPL");
76 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
77
78 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
80 #define BNXT_RX_COPY_THRESH 256
81
82 #define BNXT_TX_PUSH_THRESH 164
83
84 enum board_idx {
85 BCM57301,
86 BCM57302,
87 BCM57304,
88 BCM57417_NPAR,
89 BCM58700,
90 BCM57311,
91 BCM57312,
92 BCM57402,
93 BCM57404,
94 BCM57406,
95 BCM57402_NPAR,
96 BCM57407,
97 BCM57412,
98 BCM57414,
99 BCM57416,
100 BCM57417,
101 BCM57412_NPAR,
102 BCM57314,
103 BCM57417_SFP,
104 BCM57416_SFP,
105 BCM57404_NPAR,
106 BCM57406_NPAR,
107 BCM57407_SFP,
108 BCM57407_NPAR,
109 BCM57414_NPAR,
110 BCM57416_NPAR,
111 BCM57452,
112 BCM57454,
113 BCM5745x_NPAR,
114 BCM57508,
115 BCM57504,
116 BCM57502,
117 BCM57508_NPAR,
118 BCM57504_NPAR,
119 BCM57502_NPAR,
120 BCM58802,
121 BCM58804,
122 BCM58808,
123 NETXTREME_E_VF,
124 NETXTREME_C_VF,
125 NETXTREME_S_VF,
126 NETXTREME_C_VF_HV,
127 NETXTREME_E_VF_HV,
128 NETXTREME_E_P5_VF,
129 NETXTREME_E_P5_VF_HV,
130 };
131
132 /* indexed by enum above */
133 static const struct {
134 char *name;
135 } board_info[] = {
136 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
137 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
138 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
139 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
140 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
141 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
142 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
143 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
144 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
145 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
146 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
147 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
148 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
149 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
150 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
151 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
152 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
153 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
154 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
155 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
156 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
157 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
158 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
159 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
160 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
161 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
162 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
163 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
164 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
165 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
166 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
167 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
168 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
169 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
170 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
171 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
172 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
174 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
175 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
176 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
177 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
178 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
179 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
180 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
181 };
182
183 static const struct pci_device_id bnxt_pci_tbl[] = {
184 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
185 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
186 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
187 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
189 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
190 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
191 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
192 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
193 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
194 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
195 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
196 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
197 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
198 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
199 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
200 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
201 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
202 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
203 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
204 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
205 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
206 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
207 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
208 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
209 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
210 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
211 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
212 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
214 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
215 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
217 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
218 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
219 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
220 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
221 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
222 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
223 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
224 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
225 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
226 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
229 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
230 #ifdef CONFIG_BNXT_SRIOV
231 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
232 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
233 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
234 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
235 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
236 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
237 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
238 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
239 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
240 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
241 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
242 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
243 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
244 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
245 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
247 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
248 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
249 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
250 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
251 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
252 #endif
253 { 0 }
254 };
255
256 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
257
258 static const u16 bnxt_vf_req_snif[] = {
259 HWRM_FUNC_CFG,
260 HWRM_FUNC_VF_CFG,
261 HWRM_PORT_PHY_QCFG,
262 HWRM_CFA_L2_FILTER_ALLOC,
263 };
264
265 static const u16 bnxt_async_events_arr[] = {
266 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
267 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
268 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
269 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
270 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
271 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
272 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
273 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
274 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
275 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
276 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
277 };
278
279 static struct workqueue_struct *bnxt_pf_wq;
280
281 static bool bnxt_vf_pciid(enum board_idx idx)
282 {
283 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
284 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
285 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
286 idx == NETXTREME_E_P5_VF_HV);
287 }
288
289 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
290 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
291 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
292
293 #define BNXT_CP_DB_IRQ_DIS(db) \
294 writel(DB_CP_IRQ_DIS_FLAGS, db)
295
296 #define BNXT_DB_CQ(db, idx) \
297 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
298
299 #define BNXT_DB_NQ_P5(db, idx) \
300 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
301
302 #define BNXT_DB_CQ_ARM(db, idx) \
303 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
304
305 #define BNXT_DB_NQ_ARM_P5(db, idx) \
306 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
307
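/* Doorbell helpers: P5 (57500-series) chips use 64-bit NQ doorbells built
 * from db_key64, while older chips write 32-bit CQ doorbells.  The *_ARM
 * variants additionally re-arm the ring's interrupt.
 */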
308 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
309 {
310 if (bp->flags & BNXT_FLAG_CHIP_P5)
311 BNXT_DB_NQ_P5(db, idx);
312 else
313 BNXT_DB_CQ(db, idx);
314 }
315
316 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
317 {
318 if (bp->flags & BNXT_FLAG_CHIP_P5)
319 BNXT_DB_NQ_ARM_P5(db, idx);
320 else
321 BNXT_DB_CQ_ARM(db, idx);
322 }
323
324 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
325 {
326 if (bp->flags & BNXT_FLAG_CHIP_P5)
327 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
328 db->doorbell);
329 else
330 BNXT_DB_CQ(db, idx);
331 }
332
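/* TX BD length hints, indexed by the packet length in 512-byte units
 * (see "length >>= 9" in bnxt_start_xmit()); the hint gives the hardware
 * a coarse size class for the packet.
 */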
333 const u16 bnxt_lhint_arr[] = {
334 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
335 TX_BD_FLAGS_LHINT_512_TO_1023,
336 TX_BD_FLAGS_LHINT_1024_TO_2047,
337 TX_BD_FLAGS_LHINT_1024_TO_2047,
338 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
339 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
340 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
341 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
342 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
343 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
344 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
345 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
346 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
347 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 };
354
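/* Packets sent through a VF representor carry a METADATA_HW_PORT_MUX
 * metadata dst; its port_id selects the CFA action placed in the TX BD.
 * A return value of 0 means no CFA action.
 */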
355 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
356 {
357 struct metadata_dst *md_dst = skb_metadata_dst(skb);
358
359 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
360 return 0;
361
362 return md_dst->u.port_info.port_id;
363 }
364
365 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
366 u16 prod)
367 {
368 bnxt_db_write(bp, &txr->tx_db, prod);
369 txr->kick_pending = 0;
370 }
371
372 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
373 struct bnxt_tx_ring_info *txr,
374 struct netdev_queue *txq)
375 {
376 netif_tx_stop_queue(txq);
377
378 /* netif_tx_stop_queue() must be done before checking
379 * tx index in bnxt_tx_avail() below, because in
380 * bnxt_tx_int(), we update tx index before checking for
381 * netif_tx_queue_stopped().
382 */
383 smp_mb();
384 if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
385 netif_tx_wake_queue(txq);
386 return false;
387 }
388
389 return true;
390 }
391
392 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
393 {
394 struct bnxt *bp = netdev_priv(dev);
395 struct tx_bd *txbd;
396 struct tx_bd_ext *txbd1;
397 struct netdev_queue *txq;
398 int i;
399 dma_addr_t mapping;
400 unsigned int length, pad = 0;
401 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
402 u16 prod, last_frag;
403 struct pci_dev *pdev = bp->pdev;
404 struct bnxt_tx_ring_info *txr;
405 struct bnxt_sw_tx_bd *tx_buf;
406
407 i = skb_get_queue_mapping(skb);
408 if (unlikely(i >= bp->tx_nr_rings)) {
409 dev_kfree_skb_any(skb);
410 atomic_long_inc(&dev->tx_dropped);
411 return NETDEV_TX_OK;
412 }
413
414 txq = netdev_get_tx_queue(dev, i);
415 txr = &bp->tx_ring[bp->tx_ring_map[i]];
416 prod = txr->tx_prod;
417
418 free_size = bnxt_tx_avail(bp, txr);
419 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
420 /* We must have raced with NAPI cleanup */
421 if (net_ratelimit() && txr->kick_pending)
422 netif_warn(bp, tx_err, dev,
423 "bnxt: ring busy w/ flush pending!\n");
424 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
425 return NETDEV_TX_BUSY;
426 }
427
428 length = skb->len;
429 len = skb_headlen(skb);
430 last_frag = skb_shinfo(skb)->nr_frags;
431
432 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
433
434 txbd->tx_bd_opaque = prod;
435
436 tx_buf = &txr->tx_buf_ring[prod];
437 tx_buf->skb = skb;
438 tx_buf->nr_frags = last_frag;
439
440 vlan_tag_flags = 0;
441 cfa_action = bnxt_xmit_get_cfa_action(skb);
442 if (skb_vlan_tag_present(skb)) {
443 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
444 skb_vlan_tag_get(skb);
445 /* Currently supports 8021Q, 8021AD vlan offloads
446 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
447 */
448 if (skb->vlan_proto == htons(ETH_P_8021Q))
449 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
450 }
451
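/* "TX push" fast path: when the ring is empty and the packet fits under
 * tx_push_thresh, the BDs and packet data are written directly into the
 * doorbell BAR via __iowrite64_copy() below, so the NIC does not have to
 * DMA-read the descriptors and payload.
 */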
452 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
453 struct tx_push_buffer *tx_push_buf = txr->tx_push;
454 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
455 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
456 void __iomem *db = txr->tx_db.doorbell;
457 void *pdata = tx_push_buf->data;
458 u64 *end;
459 int j, push_len;
460
461 /* Set COAL_NOW to be ready quickly for the next push */
462 tx_push->tx_bd_len_flags_type =
463 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
464 TX_BD_TYPE_LONG_TX_BD |
465 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
466 TX_BD_FLAGS_COAL_NOW |
467 TX_BD_FLAGS_PACKET_END |
468 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
469
470 if (skb->ip_summed == CHECKSUM_PARTIAL)
471 tx_push1->tx_bd_hsize_lflags =
472 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
473 else
474 tx_push1->tx_bd_hsize_lflags = 0;
475
476 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
477 tx_push1->tx_bd_cfa_action =
478 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
479
480 end = pdata + length;
481 end = PTR_ALIGN(end, 8) - 1;
482 *end = 0;
483
484 skb_copy_from_linear_data(skb, pdata, len);
485 pdata += len;
486 for (j = 0; j < last_frag; j++) {
487 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
488 void *fptr;
489
490 fptr = skb_frag_address_safe(frag);
491 if (!fptr)
492 goto normal_tx;
493
494 memcpy(pdata, fptr, skb_frag_size(frag));
495 pdata += skb_frag_size(frag);
496 }
497
498 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
499 txbd->tx_bd_haddr = txr->data_mapping;
500 prod = NEXT_TX(prod);
501 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
502 memcpy(txbd, tx_push1, sizeof(*txbd));
503 prod = NEXT_TX(prod);
504 tx_push->doorbell =
505 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
506 txr->tx_prod = prod;
507
508 tx_buf->is_push = 1;
509 netdev_tx_sent_queue(txq, skb->len);
510 wmb(); /* Sync is_push and byte queue before pushing data */
511
512 push_len = (length + sizeof(*tx_push) + 7) / 8;
513 if (push_len > 16) {
514 __iowrite64_copy(db, tx_push_buf, 16);
515 __iowrite32_copy(db + 4, tx_push_buf + 1,
516 (push_len - 16) << 1);
517 } else {
518 __iowrite64_copy(db, tx_push_buf, push_len);
519 }
520
521 goto tx_done;
522 }
523
524 normal_tx:
525 if (length < BNXT_MIN_PKT_SIZE) {
526 pad = BNXT_MIN_PKT_SIZE - length;
527 if (skb_pad(skb, pad))
528 /* SKB already freed. */
529 goto tx_kick_pending;
530 length = BNXT_MIN_PKT_SIZE;
531 }
532
533 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
534
535 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
536 goto tx_free;
537
538 dma_unmap_addr_set(tx_buf, mapping, mapping);
539 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
540 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
541
542 txbd->tx_bd_haddr = cpu_to_le64(mapping);
543
544 prod = NEXT_TX(prod);
545 txbd1 = (struct tx_bd_ext *)
546 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
547
548 txbd1->tx_bd_hsize_lflags = 0;
549 if (skb_is_gso(skb)) {
550 u32 hdr_len;
551
552 if (skb->encapsulation)
553 hdr_len = skb_inner_network_offset(skb) +
554 skb_inner_network_header_len(skb) +
555 inner_tcp_hdrlen(skb);
556 else
557 hdr_len = skb_transport_offset(skb) +
558 tcp_hdrlen(skb);
559
560 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
561 TX_BD_FLAGS_T_IPID |
562 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
563 length = skb_shinfo(skb)->gso_size;
564 txbd1->tx_bd_mss = cpu_to_le32(length);
565 length += hdr_len;
566 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
567 txbd1->tx_bd_hsize_lflags =
568 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
569 txbd1->tx_bd_mss = 0;
570 }
571
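/* Convert the length to 512-byte units to index bnxt_lhint_arr[]. */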
572 length >>= 9;
573 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
574 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
575 skb->len);
576 i = 0;
577 goto tx_dma_error;
578 }
579 flags |= bnxt_lhint_arr[length];
580 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
581
582 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
583 txbd1->tx_bd_cfa_action =
584 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
585 for (i = 0; i < last_frag; i++) {
586 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
587
588 prod = NEXT_TX(prod);
589 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
590
591 len = skb_frag_size(frag);
592 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
593 DMA_TO_DEVICE);
594
595 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
596 goto tx_dma_error;
597
598 tx_buf = &txr->tx_buf_ring[prod];
599 dma_unmap_addr_set(tx_buf, mapping, mapping);
600
601 txbd->tx_bd_haddr = cpu_to_le64(mapping);
602
603 flags = len << TX_BD_LEN_SHIFT;
604 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
605 }
606
607 flags &= ~TX_BD_LEN;
608 txbd->tx_bd_len_flags_type =
609 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
610 TX_BD_FLAGS_PACKET_END);
611
612 netdev_tx_sent_queue(txq, skb->len);
613
614 /* Sync BD data before updating doorbell */
615 wmb();
616
617 prod = NEXT_TX(prod);
618 txr->tx_prod = prod;
619
620 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
621 bnxt_txr_db_kick(bp, txr, prod);
622 else
623 txr->kick_pending = 1;
624
625 tx_done:
626
627 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
628 if (netdev_xmit_more() && !tx_buf->is_push)
629 bnxt_txr_db_kick(bp, txr, prod);
630
631 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
632 }
633 return NETDEV_TX_OK;
634
635 tx_dma_error:
636 last_frag = i;
637
638 /* start back at beginning and unmap skb */
639 prod = txr->tx_prod;
640 tx_buf = &txr->tx_buf_ring[prod];
641 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
642 skb_headlen(skb), PCI_DMA_TODEVICE);
643 prod = NEXT_TX(prod);
644
645 /* unmap remaining mapped pages */
646 for (i = 0; i < last_frag; i++) {
647 prod = NEXT_TX(prod);
648 tx_buf = &txr->tx_buf_ring[prod];
649 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
650 skb_frag_size(&skb_shinfo(skb)->frags[i]),
651 PCI_DMA_TODEVICE);
652 }
653
654 tx_free:
655 dev_kfree_skb_any(skb);
656 tx_kick_pending:
657 if (txr->kick_pending)
658 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
659 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
660 atomic_long_inc(&dev->tx_dropped);
661 return NETDEV_TX_OK;
662 }
663
664 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
665 {
666 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
667 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
668 u16 cons = txr->tx_cons;
669 struct pci_dev *pdev = bp->pdev;
670 int i;
671 unsigned int tx_bytes = 0;
672
673 for (i = 0; i < nr_pkts; i++) {
674 struct bnxt_sw_tx_bd *tx_buf;
675 struct sk_buff *skb;
676 int j, last;
677
678 tx_buf = &txr->tx_buf_ring[cons];
679 cons = NEXT_TX(cons);
680 skb = tx_buf->skb;
681 tx_buf->skb = NULL;
682
683 if (tx_buf->is_push) {
684 tx_buf->is_push = 0;
685 goto next_tx_int;
686 }
687
688 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
689 skb_headlen(skb), PCI_DMA_TODEVICE);
690 last = tx_buf->nr_frags;
691
692 for (j = 0; j < last; j++) {
693 cons = NEXT_TX(cons);
694 tx_buf = &txr->tx_buf_ring[cons];
695 dma_unmap_page(
696 &pdev->dev,
697 dma_unmap_addr(tx_buf, mapping),
698 skb_frag_size(&skb_shinfo(skb)->frags[j]),
699 PCI_DMA_TODEVICE);
700 }
701
702 next_tx_int:
703 cons = NEXT_TX(cons);
704
705 tx_bytes += skb->len;
706 dev_kfree_skb_any(skb);
707 }
708
709 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
710 txr->tx_cons = cons;
711
712 /* Need to make the tx_cons update visible to bnxt_start_xmit()
713 * before checking for netif_tx_queue_stopped(). Without the
714 * memory barrier, there is a small possibility that bnxt_start_xmit()
715 * will miss it and cause the queue to be stopped forever.
716 */
717 smp_mb();
718
719 if (unlikely(netif_tx_queue_stopped(txq)) &&
720 bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
721 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
722 netif_tx_wake_queue(txq);
723 }
724
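/* Allocate an RX buffer page from the ring's page_pool and map it for DMA.
 * The returned mapping is advanced by rx_dma_offset so the chip starts
 * writing the packet after the reserved headroom.
 */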
725 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
726 struct bnxt_rx_ring_info *rxr,
727 gfp_t gfp)
728 {
729 struct device *dev = &bp->pdev->dev;
730 struct page *page;
731
732 page = page_pool_dev_alloc_pages(rxr->page_pool);
733 if (!page)
734 return NULL;
735
736 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
737 DMA_ATTR_WEAK_ORDERING);
738 if (dma_mapping_error(dev, *mapping)) {
739 page_pool_recycle_direct(rxr->page_pool, page);
740 return NULL;
741 }
742 *mapping += bp->rx_dma_offset;
743 return page;
744 }
745
746 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
747 gfp_t gfp)
748 {
749 u8 *data;
750 struct pci_dev *pdev = bp->pdev;
751
752 data = kmalloc(bp->rx_buf_size, gfp);
753 if (!data)
754 return NULL;
755
756 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
757 bp->rx_buf_use_size, bp->rx_dir,
758 DMA_ATTR_WEAK_ORDERING);
759
760 if (dma_mapping_error(&pdev->dev, *mapping)) {
761 kfree(data);
762 data = NULL;
763 }
764 return data;
765 }
766
767 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
768 u16 prod, gfp_t gfp)
769 {
770 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
771 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
772 dma_addr_t mapping;
773
774 if (BNXT_RX_PAGE_MODE(bp)) {
775 struct page *page =
776 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
777
778 if (!page)
779 return -ENOMEM;
780
781 rx_buf->data = page;
782 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
783 } else {
784 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
785
786 if (!data)
787 return -ENOMEM;
788
789 rx_buf->data = data;
790 rx_buf->data_ptr = data + bp->rx_offset;
791 }
792 rx_buf->mapping = mapping;
793
794 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
795 return 0;
796 }
797
798 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
799 {
800 u16 prod = rxr->rx_prod;
801 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
802 struct rx_bd *cons_bd, *prod_bd;
803
804 prod_rx_buf = &rxr->rx_buf_ring[prod];
805 cons_rx_buf = &rxr->rx_buf_ring[cons];
806
807 prod_rx_buf->data = data;
808 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
809
810 prod_rx_buf->mapping = cons_rx_buf->mapping;
811
812 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
813 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
814
815 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
816 }
817
818 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
819 {
820 u16 next, max = rxr->rx_agg_bmap_size;
821
822 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
823 if (next >= max)
824 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
825 return next;
826 }
827
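/* Allocate one aggregation ring buffer.  When PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE, a single page is carved into multiple
 * BNXT_RX_PAGE_SIZE chunks and reference-counted with get_page().
 */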
828 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
829 struct bnxt_rx_ring_info *rxr,
830 u16 prod, gfp_t gfp)
831 {
832 struct rx_bd *rxbd =
833 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
834 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
835 struct pci_dev *pdev = bp->pdev;
836 struct page *page;
837 dma_addr_t mapping;
838 u16 sw_prod = rxr->rx_sw_agg_prod;
839 unsigned int offset = 0;
840
841 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
842 page = rxr->rx_page;
843 if (!page) {
844 page = alloc_page(gfp);
845 if (!page)
846 return -ENOMEM;
847 rxr->rx_page = page;
848 rxr->rx_page_offset = 0;
849 }
850 offset = rxr->rx_page_offset;
851 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
852 if (rxr->rx_page_offset == PAGE_SIZE)
853 rxr->rx_page = NULL;
854 else
855 get_page(page);
856 } else {
857 page = alloc_page(gfp);
858 if (!page)
859 return -ENOMEM;
860 }
861
862 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
863 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
864 DMA_ATTR_WEAK_ORDERING);
865 if (dma_mapping_error(&pdev->dev, mapping)) {
866 __free_page(page);
867 return -EIO;
868 }
869
870 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
871 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
872
873 __set_bit(sw_prod, rxr->rx_agg_bmap);
874 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
875 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
876
877 rx_agg_buf->page = page;
878 rx_agg_buf->offset = offset;
879 rx_agg_buf->mapping = mapping;
880 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
881 rxbd->rx_bd_opaque = sw_prod;
882 return 0;
883 }
884
885 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
886 struct bnxt_cp_ring_info *cpr,
887 u16 cp_cons, u16 curr)
888 {
889 struct rx_agg_cmp *agg;
890
891 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
892 agg = (struct rx_agg_cmp *)
893 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
894 return agg;
895 }
896
897 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
898 struct bnxt_rx_ring_info *rxr,
899 u16 agg_id, u16 curr)
900 {
901 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
902
903 return &tpa_info->agg_arr[curr];
904 }
905
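/* Return a run of aggregation buffers to the aggregation ring, e.g. when a
 * packet is dropped or an skb cannot be built.  The buffers are re-posted at
 * the current producer index without being remapped.
 */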
906 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
907 u16 start, u32 agg_bufs, bool tpa)
908 {
909 struct bnxt_napi *bnapi = cpr->bnapi;
910 struct bnxt *bp = bnapi->bp;
911 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
912 u16 prod = rxr->rx_agg_prod;
913 u16 sw_prod = rxr->rx_sw_agg_prod;
914 bool p5_tpa = false;
915 u32 i;
916
917 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
918 p5_tpa = true;
919
920 for (i = 0; i < agg_bufs; i++) {
921 u16 cons;
922 struct rx_agg_cmp *agg;
923 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
924 struct rx_bd *prod_bd;
925 struct page *page;
926
927 if (p5_tpa)
928 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
929 else
930 agg = bnxt_get_agg(bp, cpr, idx, start + i);
931 cons = agg->rx_agg_cmp_opaque;
932 __clear_bit(cons, rxr->rx_agg_bmap);
933
934 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
935 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
936
937 __set_bit(sw_prod, rxr->rx_agg_bmap);
938 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
939 cons_rx_buf = &rxr->rx_agg_ring[cons];
940
941 /* It is possible for sw_prod to be equal to cons, so
942 * set cons_rx_buf->page to NULL first.
943 */
944 page = cons_rx_buf->page;
945 cons_rx_buf->page = NULL;
946 prod_rx_buf->page = page;
947 prod_rx_buf->offset = cons_rx_buf->offset;
948
949 prod_rx_buf->mapping = cons_rx_buf->mapping;
950
951 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
952
953 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
954 prod_bd->rx_bd_opaque = sw_prod;
955
956 prod = NEXT_RX_AGG(prod);
957 sw_prod = NEXT_RX_AGG(sw_prod);
958 }
959 rxr->rx_agg_prod = prod;
960 rxr->rx_sw_agg_prod = sw_prod;
961 }
962
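/* Page-mode (XDP) receive path: copy the header portion into the skb linear
 * area and leave the remaining data in the page attached as a frag.
 */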
963 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
964 struct bnxt_rx_ring_info *rxr,
965 u16 cons, void *data, u8 *data_ptr,
966 dma_addr_t dma_addr,
967 unsigned int offset_and_len)
968 {
969 unsigned int payload = offset_and_len >> 16;
970 unsigned int len = offset_and_len & 0xffff;
971 skb_frag_t *frag;
972 struct page *page = data;
973 u16 prod = rxr->rx_prod;
974 struct sk_buff *skb;
975 int off, err;
976
977 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
978 if (unlikely(err)) {
979 bnxt_reuse_rx_data(rxr, cons, data);
980 return NULL;
981 }
982 dma_addr -= bp->rx_dma_offset;
983 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
984 DMA_ATTR_WEAK_ORDERING);
985 page_pool_release_page(rxr->page_pool, page);
986
987 if (unlikely(!payload))
988 payload = eth_get_headlen(bp->dev, data_ptr, len);
989
990 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
991 if (!skb) {
992 __free_page(page);
993 return NULL;
994 }
995
996 off = (void *)data_ptr - page_address(page);
997 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
998 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
999 payload + NET_IP_ALIGN);
1000
1001 frag = &skb_shinfo(skb)->frags[0];
1002 skb_frag_size_sub(frag, payload);
1003 skb_frag_off_add(frag, payload);
1004 skb->data_len -= payload;
1005 skb->tail += payload;
1006
1007 return skb;
1008 }
1009
1010 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1011 struct bnxt_rx_ring_info *rxr, u16 cons,
1012 void *data, u8 *data_ptr,
1013 dma_addr_t dma_addr,
1014 unsigned int offset_and_len)
1015 {
1016 u16 prod = rxr->rx_prod;
1017 struct sk_buff *skb;
1018 int err;
1019
1020 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1021 if (unlikely(err)) {
1022 bnxt_reuse_rx_data(rxr, cons, data);
1023 return NULL;
1024 }
1025
1026 skb = build_skb(data, 0);
1027 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1028 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1029 if (!skb) {
1030 kfree(data);
1031 return NULL;
1032 }
1033
1034 skb_reserve(skb, bp->rx_offset);
1035 skb_put(skb, offset_and_len & 0xffff);
1036 return skb;
1037 }
1038
1039 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1040 struct bnxt_cp_ring_info *cpr,
1041 struct sk_buff *skb, u16 idx,
1042 u32 agg_bufs, bool tpa)
1043 {
1044 struct bnxt_napi *bnapi = cpr->bnapi;
1045 struct pci_dev *pdev = bp->pdev;
1046 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1047 u16 prod = rxr->rx_agg_prod;
1048 bool p5_tpa = false;
1049 u32 i;
1050
1051 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1052 p5_tpa = true;
1053
1054 for (i = 0; i < agg_bufs; i++) {
1055 u16 cons, frag_len;
1056 struct rx_agg_cmp *agg;
1057 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1058 struct page *page;
1059 dma_addr_t mapping;
1060
1061 if (p5_tpa)
1062 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1063 else
1064 agg = bnxt_get_agg(bp, cpr, idx, i);
1065 cons = agg->rx_agg_cmp_opaque;
1066 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1067 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1068
1069 cons_rx_buf = &rxr->rx_agg_ring[cons];
1070 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1071 cons_rx_buf->offset, frag_len);
1072 __clear_bit(cons, rxr->rx_agg_bmap);
1073
1074 /* It is possible for bnxt_alloc_rx_page() to allocate
1075 * a sw_prod index that equals the cons index, so we
1076 * need to clear the cons entry now.
1077 */
1078 mapping = cons_rx_buf->mapping;
1079 page = cons_rx_buf->page;
1080 cons_rx_buf->page = NULL;
1081
1082 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1083 struct skb_shared_info *shinfo;
1084 unsigned int nr_frags;
1085
1086 shinfo = skb_shinfo(skb);
1087 nr_frags = --shinfo->nr_frags;
1088 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1089
1090 dev_kfree_skb(skb);
1091
1092 cons_rx_buf->page = page;
1093
1094 /* Update prod since possibly some pages have been
1095 * allocated already.
1096 */
1097 rxr->rx_agg_prod = prod;
1098 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1099 return NULL;
1100 }
1101
1102 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1103 PCI_DMA_FROMDEVICE,
1104 DMA_ATTR_WEAK_ORDERING);
1105
1106 skb->data_len += frag_len;
1107 skb->len += frag_len;
1108 skb->truesize += PAGE_SIZE;
1109
1110 prod = NEXT_RX_AGG(prod);
1111 }
1112 rxr->rx_agg_prod = prod;
1113 return skb;
1114 }
1115
1116 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1117 u8 agg_bufs, u32 *raw_cons)
1118 {
1119 u16 last;
1120 struct rx_agg_cmp *agg;
1121
1122 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1123 last = RING_CMP(*raw_cons);
1124 agg = (struct rx_agg_cmp *)
1125 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1126 return RX_AGG_CMP_VALID(agg, *raw_cons);
1127 }
1128
1129 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1130 unsigned int len,
1131 dma_addr_t mapping)
1132 {
1133 struct bnxt *bp = bnapi->bp;
1134 struct pci_dev *pdev = bp->pdev;
1135 struct sk_buff *skb;
1136
1137 skb = napi_alloc_skb(&bnapi->napi, len);
1138 if (!skb)
1139 return NULL;
1140
1141 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1142 bp->rx_dir);
1143
1144 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1145 len + NET_IP_ALIGN);
1146
1147 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1148 bp->rx_dir);
1149
1150 skb_put(skb, len);
1151 return skb;
1152 }
1153
1154 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1155 u32 *raw_cons, void *cmp)
1156 {
1157 struct rx_cmp *rxcmp = cmp;
1158 u32 tmp_raw_cons = *raw_cons;
1159 u8 cmp_type, agg_bufs = 0;
1160
1161 cmp_type = RX_CMP_TYPE(rxcmp);
1162
1163 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1164 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1165 RX_CMP_AGG_BUFS) >>
1166 RX_CMP_AGG_BUFS_SHIFT;
1167 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1168 struct rx_tpa_end_cmp *tpa_end = cmp;
1169
1170 if (bp->flags & BNXT_FLAG_CHIP_P5)
1171 return 0;
1172
1173 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1174 }
1175
1176 if (agg_bufs) {
1177 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1178 return -EBUSY;
1179 }
1180 *raw_cons = tmp_raw_cons;
1181 return 0;
1182 }
1183
1184 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1185 {
1186 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1187 return;
1188
1189 if (BNXT_PF(bp))
1190 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1191 else
1192 schedule_delayed_work(&bp->fw_reset_task, delay);
1193 }
1194
1195 static void bnxt_queue_sp_work(struct bnxt *bp)
1196 {
1197 if (BNXT_PF(bp))
1198 queue_work(bnxt_pf_wq, &bp->sp_task);
1199 else
1200 schedule_work(&bp->sp_task);
1201 }
1202
1203 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1204 {
1205 if (!rxr->bnapi->in_reset) {
1206 rxr->bnapi->in_reset = true;
1207 if (bp->flags & BNXT_FLAG_CHIP_P5)
1208 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1209 else
1210 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1211 bnxt_queue_sp_work(bp);
1212 }
1213 rxr->rx_next_cons = 0xffff;
1214 }
1215
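/* On P5 chips the hardware-assigned TPA agg_id is remapped to a free software
 * index tracked in agg_idx_bmap; bnxt_lookup_agg_idx() translates it back
 * when the aggregation completes.
 */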
1216 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1217 {
1218 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1219 u16 idx = agg_id & MAX_TPA_P5_MASK;
1220
1221 if (test_bit(idx, map->agg_idx_bmap))
1222 idx = find_first_zero_bit(map->agg_idx_bmap,
1223 BNXT_AGG_IDX_BMAP_SIZE);
1224 __set_bit(idx, map->agg_idx_bmap);
1225 map->agg_id_tbl[agg_id] = idx;
1226 return idx;
1227 }
1228
1229 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1230 {
1231 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1232
1233 __clear_bit(idx, map->agg_idx_bmap);
1234 }
1235
1236 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1237 {
1238 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1239
1240 return map->agg_id_tbl[agg_id];
1241 }
1242
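/* TPA_START: take the current RX buffer out of the ring for aggregation and
 * recycle the buffer previously parked in tpa_info back into the ring so the
 * producer can keep advancing while the aggregation is in progress.
 */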
1243 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1244 struct rx_tpa_start_cmp *tpa_start,
1245 struct rx_tpa_start_cmp_ext *tpa_start1)
1246 {
1247 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1248 struct bnxt_tpa_info *tpa_info;
1249 u16 cons, prod, agg_id;
1250 struct rx_bd *prod_bd;
1251 dma_addr_t mapping;
1252
1253 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1254 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1255 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1256 } else {
1257 agg_id = TPA_START_AGG_ID(tpa_start);
1258 }
1259 cons = tpa_start->rx_tpa_start_cmp_opaque;
1260 prod = rxr->rx_prod;
1261 cons_rx_buf = &rxr->rx_buf_ring[cons];
1262 prod_rx_buf = &rxr->rx_buf_ring[prod];
1263 tpa_info = &rxr->rx_tpa[agg_id];
1264
1265 if (unlikely(cons != rxr->rx_next_cons ||
1266 TPA_START_ERROR(tpa_start))) {
1267 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1268 cons, rxr->rx_next_cons,
1269 TPA_START_ERROR_CODE(tpa_start1));
1270 bnxt_sched_reset(bp, rxr);
1271 return;
1272 }
1273 /* Store cfa_code in tpa_info to use in tpa_end
1274 * completion processing.
1275 */
1276 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1277 prod_rx_buf->data = tpa_info->data;
1278 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1279
1280 mapping = tpa_info->mapping;
1281 prod_rx_buf->mapping = mapping;
1282
1283 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1284
1285 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1286
1287 tpa_info->data = cons_rx_buf->data;
1288 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1289 cons_rx_buf->data = NULL;
1290 tpa_info->mapping = cons_rx_buf->mapping;
1291
1292 tpa_info->len =
1293 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1294 RX_TPA_START_CMP_LEN_SHIFT;
1295 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1296 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1297
1298 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1299 tpa_info->gso_type = SKB_GSO_TCPV4;
1300 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1301 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1302 tpa_info->gso_type = SKB_GSO_TCPV6;
1303 tpa_info->rss_hash =
1304 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1305 } else {
1306 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1307 tpa_info->gso_type = 0;
1308 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1309 }
1310 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1311 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1312 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1313 tpa_info->agg_count = 0;
1314
1315 rxr->rx_prod = NEXT_RX(prod);
1316 cons = NEXT_RX(cons);
1317 rxr->rx_next_cons = NEXT_RX(cons);
1318 cons_rx_buf = &rxr->rx_buf_ring[cons];
1319
1320 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1321 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1322 cons_rx_buf->data = NULL;
1323 }
1324
1325 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1326 {
1327 if (agg_bufs)
1328 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1329 }
1330
1331 #ifdef CONFIG_INET
1332 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1333 {
1334 struct udphdr *uh = NULL;
1335
1336 if (ip_proto == htons(ETH_P_IP)) {
1337 struct iphdr *iph = (struct iphdr *)skb->data;
1338
1339 if (iph->protocol == IPPROTO_UDP)
1340 uh = (struct udphdr *)(iph + 1);
1341 } else {
1342 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1343
1344 if (iph->nexthdr == IPPROTO_UDP)
1345 uh = (struct udphdr *)(iph + 1);
1346 }
1347 if (uh) {
1348 if (uh->check)
1349 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1350 else
1351 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1352 }
1353 }
1354 #endif
1355
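/* Per-chip GRO fix-up handlers used via bp->gro_func: they locate the inner
 * network/transport headers from the TPA header info so tcp_gro_complete()
 * can finish the aggregation; the 5730x/5731x variants also recompute the
 * TCP pseudo-header checksum.
 */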
1356 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1357 int payload_off, int tcp_ts,
1358 struct sk_buff *skb)
1359 {
1360 #ifdef CONFIG_INET
1361 struct tcphdr *th;
1362 int len, nw_off;
1363 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1364 u32 hdr_info = tpa_info->hdr_info;
1365 bool loopback = false;
1366
1367 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1368 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1369 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1370
1371 /* If the packet is an internal loopback packet, the offsets will
1372 * have an extra 4 bytes.
1373 */
1374 if (inner_mac_off == 4) {
1375 loopback = true;
1376 } else if (inner_mac_off > 4) {
1377 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1378 ETH_HLEN - 2));
1379
1380 /* We only support inner IPv4/IPv6. If we don't see the
1381 * correct protocol ID, it must be a loopback packet where
1382 * the offsets are off by 4.
1383 */
1384 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1385 loopback = true;
1386 }
1387 if (loopback) {
1388 /* internal loopback packet, subtract all offsets by 4 */
1389 inner_ip_off -= 4;
1390 inner_mac_off -= 4;
1391 outer_ip_off -= 4;
1392 }
1393
1394 nw_off = inner_ip_off - ETH_HLEN;
1395 skb_set_network_header(skb, nw_off);
1396 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1397 struct ipv6hdr *iph = ipv6_hdr(skb);
1398
1399 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1400 len = skb->len - skb_transport_offset(skb);
1401 th = tcp_hdr(skb);
1402 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1403 } else {
1404 struct iphdr *iph = ip_hdr(skb);
1405
1406 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1407 len = skb->len - skb_transport_offset(skb);
1408 th = tcp_hdr(skb);
1409 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1410 }
1411
1412 if (inner_mac_off) { /* tunnel */
1413 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1414 ETH_HLEN - 2));
1415
1416 bnxt_gro_tunnel(skb, proto);
1417 }
1418 #endif
1419 return skb;
1420 }
1421
1422 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1423 int payload_off, int tcp_ts,
1424 struct sk_buff *skb)
1425 {
1426 #ifdef CONFIG_INET
1427 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1428 u32 hdr_info = tpa_info->hdr_info;
1429 int iphdr_len, nw_off;
1430
1431 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1432 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1433 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1434
1435 nw_off = inner_ip_off - ETH_HLEN;
1436 skb_set_network_header(skb, nw_off);
1437 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1438 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1439 skb_set_transport_header(skb, nw_off + iphdr_len);
1440
1441 if (inner_mac_off) { /* tunnel */
1442 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1443 ETH_HLEN - 2));
1444
1445 bnxt_gro_tunnel(skb, proto);
1446 }
1447 #endif
1448 return skb;
1449 }
1450
1451 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1452 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1453
1454 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1455 int payload_off, int tcp_ts,
1456 struct sk_buff *skb)
1457 {
1458 #ifdef CONFIG_INET
1459 struct tcphdr *th;
1460 int len, nw_off, tcp_opt_len = 0;
1461
1462 if (tcp_ts)
1463 tcp_opt_len = 12;
1464
1465 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1466 struct iphdr *iph;
1467
1468 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1469 ETH_HLEN;
1470 skb_set_network_header(skb, nw_off);
1471 iph = ip_hdr(skb);
1472 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1473 len = skb->len - skb_transport_offset(skb);
1474 th = tcp_hdr(skb);
1475 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1476 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1477 struct ipv6hdr *iph;
1478
1479 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1480 ETH_HLEN;
1481 skb_set_network_header(skb, nw_off);
1482 iph = ipv6_hdr(skb);
1483 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1484 len = skb->len - skb_transport_offset(skb);
1485 th = tcp_hdr(skb);
1486 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1487 } else {
1488 dev_kfree_skb_any(skb);
1489 return NULL;
1490 }
1491
1492 if (nw_off) /* tunnel */
1493 bnxt_gro_tunnel(skb, skb->protocol);
1494 #endif
1495 return skb;
1496 }
1497
1498 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1499 struct bnxt_tpa_info *tpa_info,
1500 struct rx_tpa_end_cmp *tpa_end,
1501 struct rx_tpa_end_cmp_ext *tpa_end1,
1502 struct sk_buff *skb)
1503 {
1504 #ifdef CONFIG_INET
1505 int payload_off;
1506 u16 segs;
1507
1508 segs = TPA_END_TPA_SEGS(tpa_end);
1509 if (segs == 1)
1510 return skb;
1511
1512 NAPI_GRO_CB(skb)->count = segs;
1513 skb_shinfo(skb)->gso_size =
1514 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1515 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1516 if (bp->flags & BNXT_FLAG_CHIP_P5)
1517 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1518 else
1519 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1520 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1521 if (likely(skb))
1522 tcp_gro_complete(skb);
1523 #endif
1524 return skb;
1525 }
1526
1527 /* Given the cfa_code of a received packet determine which
1528 * netdev (vf-rep or PF) the packet is destined to.
1529 */
1530 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1531 {
1532 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1533
1534 /* if vf-rep dev is NULL, the packet must belong to the PF */
1535 return dev ? dev : bp->dev;
1536 }
1537
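/* TPA_END: the aggregated packet is complete.  Build an skb from the buffer
 * saved at TPA_START (copying small packets, otherwise handing the buffer to
 * build_skb() and replenishing tpa_info), attach any aggregation pages, then
 * optionally run the GRO fix-up.
 */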
1538 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1539 struct bnxt_cp_ring_info *cpr,
1540 u32 *raw_cons,
1541 struct rx_tpa_end_cmp *tpa_end,
1542 struct rx_tpa_end_cmp_ext *tpa_end1,
1543 u8 *event)
1544 {
1545 struct bnxt_napi *bnapi = cpr->bnapi;
1546 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1547 u8 *data_ptr, agg_bufs;
1548 unsigned int len;
1549 struct bnxt_tpa_info *tpa_info;
1550 dma_addr_t mapping;
1551 struct sk_buff *skb;
1552 u16 idx = 0, agg_id;
1553 void *data;
1554 bool gro;
1555
1556 if (unlikely(bnapi->in_reset)) {
1557 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1558
1559 if (rc < 0)
1560 return ERR_PTR(-EBUSY);
1561 return NULL;
1562 }
1563
1564 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1565 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1566 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1567 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1568 tpa_info = &rxr->rx_tpa[agg_id];
1569 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1570 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1571 agg_bufs, tpa_info->agg_count);
1572 agg_bufs = tpa_info->agg_count;
1573 }
1574 tpa_info->agg_count = 0;
1575 *event |= BNXT_AGG_EVENT;
1576 bnxt_free_agg_idx(rxr, agg_id);
1577 idx = agg_id;
1578 gro = !!(bp->flags & BNXT_FLAG_GRO);
1579 } else {
1580 agg_id = TPA_END_AGG_ID(tpa_end);
1581 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1582 tpa_info = &rxr->rx_tpa[agg_id];
1583 idx = RING_CMP(*raw_cons);
1584 if (agg_bufs) {
1585 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1586 return ERR_PTR(-EBUSY);
1587
1588 *event |= BNXT_AGG_EVENT;
1589 idx = NEXT_CMP(idx);
1590 }
1591 gro = !!TPA_END_GRO(tpa_end);
1592 }
1593 data = tpa_info->data;
1594 data_ptr = tpa_info->data_ptr;
1595 prefetch(data_ptr);
1596 len = tpa_info->len;
1597 mapping = tpa_info->mapping;
1598
1599 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1600 bnxt_abort_tpa(cpr, idx, agg_bufs);
1601 if (agg_bufs > MAX_SKB_FRAGS)
1602 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1603 agg_bufs, (int)MAX_SKB_FRAGS);
1604 return NULL;
1605 }
1606
1607 if (len <= bp->rx_copy_thresh) {
1608 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1609 if (!skb) {
1610 bnxt_abort_tpa(cpr, idx, agg_bufs);
1611 return NULL;
1612 }
1613 } else {
1614 u8 *new_data;
1615 dma_addr_t new_mapping;
1616
1617 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1618 if (!new_data) {
1619 bnxt_abort_tpa(cpr, idx, agg_bufs);
1620 return NULL;
1621 }
1622
1623 tpa_info->data = new_data;
1624 tpa_info->data_ptr = new_data + bp->rx_offset;
1625 tpa_info->mapping = new_mapping;
1626
1627 skb = build_skb(data, 0);
1628 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1629 bp->rx_buf_use_size, bp->rx_dir,
1630 DMA_ATTR_WEAK_ORDERING);
1631
1632 if (!skb) {
1633 kfree(data);
1634 bnxt_abort_tpa(cpr, idx, agg_bufs);
1635 return NULL;
1636 }
1637 skb_reserve(skb, bp->rx_offset);
1638 skb_put(skb, len);
1639 }
1640
1641 if (agg_bufs) {
1642 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1643 if (!skb) {
1644 /* Page reuse already handled by bnxt_rx_pages(). */
1645 return NULL;
1646 }
1647 }
1648
1649 skb->protocol =
1650 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1651
1652 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1653 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1654
1655 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1656 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1657 __be16 vlan_proto = htons(tpa_info->metadata >>
1658 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1659 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1660
1661 if (eth_type_vlan(vlan_proto)) {
1662 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1663 } else {
1664 dev_kfree_skb(skb);
1665 return NULL;
1666 }
1667 }
1668
1669 skb_checksum_none_assert(skb);
1670 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1671 skb->ip_summed = CHECKSUM_UNNECESSARY;
1672 skb->csum_level =
1673 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1674 }
1675
1676 if (gro)
1677 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1678
1679 return skb;
1680 }
1681
1682 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1683 struct rx_agg_cmp *rx_agg)
1684 {
1685 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1686 struct bnxt_tpa_info *tpa_info;
1687
1688 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1689 tpa_info = &rxr->rx_tpa[agg_id];
1690 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1691 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1692 }
1693
1694 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1695 struct sk_buff *skb)
1696 {
1697 if (skb->dev != bp->dev) {
1698 /* this packet belongs to a vf-rep */
1699 bnxt_vf_rep_rx(bp, skb);
1700 return;
1701 }
1702 skb_record_rx_queue(skb, bnapi->index);
1703 napi_gro_receive(&bnapi->napi, skb);
1704 }
1705
1706 /* returns the following:
1707 * 1 - 1 packet successfully received
1708 * 0 - successful TPA_START, packet not completed yet
1709 * -EBUSY - completion ring does not have all the agg buffers yet
1710 * -ENOMEM - packet aborted due to out of memory
1711 * -EIO - packet aborted due to hw error indicated in BD
1712 */
1713 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1714 u32 *raw_cons, u8 *event)
1715 {
1716 struct bnxt_napi *bnapi = cpr->bnapi;
1717 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1718 struct net_device *dev = bp->dev;
1719 struct rx_cmp *rxcmp;
1720 struct rx_cmp_ext *rxcmp1;
1721 u32 tmp_raw_cons = *raw_cons;
1722 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1723 struct bnxt_sw_rx_bd *rx_buf;
1724 unsigned int len;
1725 u8 *data_ptr, agg_bufs, cmp_type;
1726 dma_addr_t dma_addr;
1727 struct sk_buff *skb;
1728 void *data;
1729 int rc = 0;
1730 u32 misc;
1731
1732 rxcmp = (struct rx_cmp *)
1733 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1734
1735 cmp_type = RX_CMP_TYPE(rxcmp);
1736
1737 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1738 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1739 goto next_rx_no_prod_no_len;
1740 }
1741
1742 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1743 cp_cons = RING_CMP(tmp_raw_cons);
1744 rxcmp1 = (struct rx_cmp_ext *)
1745 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1746
1747 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1748 return -EBUSY;
1749
1750 /* The valid test of the entry must be done first before
1751 * reading any further.
1752 */
1753 dma_rmb();
1754 prod = rxr->rx_prod;
1755
1756 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1757 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1758 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1759
1760 *event |= BNXT_RX_EVENT;
1761 goto next_rx_no_prod_no_len;
1762
1763 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1764 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1765 (struct rx_tpa_end_cmp *)rxcmp,
1766 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1767
1768 if (IS_ERR(skb))
1769 return -EBUSY;
1770
1771 rc = -ENOMEM;
1772 if (likely(skb)) {
1773 bnxt_deliver_skb(bp, bnapi, skb);
1774 rc = 1;
1775 }
1776 *event |= BNXT_RX_EVENT;
1777 goto next_rx_no_prod_no_len;
1778 }
1779
1780 cons = rxcmp->rx_cmp_opaque;
1781 if (unlikely(cons != rxr->rx_next_cons)) {
1782 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1783
1784 /* 0xffff is forced error, don't print it */
1785 if (rxr->rx_next_cons != 0xffff)
1786 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1787 cons, rxr->rx_next_cons);
1788 bnxt_sched_reset(bp, rxr);
1789 if (rc1)
1790 return rc1;
1791 goto next_rx_no_prod_no_len;
1792 }
1793 rx_buf = &rxr->rx_buf_ring[cons];
1794 data = rx_buf->data;
1795 data_ptr = rx_buf->data_ptr;
1796 prefetch(data_ptr);
1797
1798 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1799 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1800
1801 if (agg_bufs) {
1802 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1803 return -EBUSY;
1804
1805 cp_cons = NEXT_CMP(cp_cons);
1806 *event |= BNXT_AGG_EVENT;
1807 }
1808 *event |= BNXT_RX_EVENT;
1809
1810 rx_buf->data = NULL;
1811 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1812 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1813
1814 bnxt_reuse_rx_data(rxr, cons, data);
1815 if (agg_bufs)
1816 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1817 false);
1818
1819 rc = -EIO;
1820 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1821 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1822 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1823 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1824 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1825 rx_err);
1826 bnxt_sched_reset(bp, rxr);
1827 }
1828 }
1829 goto next_rx_no_len;
1830 }
1831
1832 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1833 dma_addr = rx_buf->mapping;
1834
1835 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1836 rc = 1;
1837 goto next_rx;
1838 }
1839
1840 if (len <= bp->rx_copy_thresh) {
1841 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1842 bnxt_reuse_rx_data(rxr, cons, data);
1843 if (!skb) {
1844 if (agg_bufs)
1845 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1846 agg_bufs, false);
1847 rc = -ENOMEM;
1848 goto next_rx;
1849 }
1850 } else {
1851 u32 payload;
1852
1853 if (rx_buf->data_ptr == data_ptr)
1854 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1855 else
1856 payload = 0;
1857 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1858 payload | len);
1859 if (!skb) {
1860 rc = -ENOMEM;
1861 goto next_rx;
1862 }
1863 }
1864
1865 if (agg_bufs) {
1866 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1867 if (!skb) {
1868 rc = -ENOMEM;
1869 goto next_rx;
1870 }
1871 }
1872
1873 if (RX_CMP_HASH_VALID(rxcmp)) {
1874 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1875 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1876
1877 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1878 if (hash_type != 1 && hash_type != 3)
1879 type = PKT_HASH_TYPE_L3;
1880 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1881 }
1882
1883 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1884 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1885
1886 if ((rxcmp1->rx_cmp_flags2 &
1887 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1888 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1889 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1890 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1891 __be16 vlan_proto = htons(meta_data >>
1892 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1893
1894 if (eth_type_vlan(vlan_proto)) {
1895 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1896 } else {
1897 dev_kfree_skb(skb);
1898 goto next_rx;
1899 }
1900 }
1901
1902 skb_checksum_none_assert(skb);
1903 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1904 if (dev->features & NETIF_F_RXCSUM) {
1905 skb->ip_summed = CHECKSUM_UNNECESSARY;
1906 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1907 }
1908 } else {
1909 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1910 if (dev->features & NETIF_F_RXCSUM)
1911 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1912 }
1913 }
1914
1915 bnxt_deliver_skb(bp, bnapi, skb);
1916 rc = 1;
1917
1918 next_rx:
1919 cpr->rx_packets += 1;
1920 cpr->rx_bytes += len;
1921
1922 next_rx_no_len:
1923 rxr->rx_prod = NEXT_RX(prod);
1924 rxr->rx_next_cons = NEXT_RX(cons);
1925
1926 next_rx_no_prod_no_len:
1927 *raw_cons = tmp_raw_cons;
1928
1929 return rc;
1930 }
1931
1932 /* In netpoll mode, if we are using a combined completion ring, we need to
1933 * discard the rx packets and recycle the buffers.
1934 */
1935 static int bnxt_force_rx_discard(struct bnxt *bp,
1936 struct bnxt_cp_ring_info *cpr,
1937 u32 *raw_cons, u8 *event)
1938 {
1939 u32 tmp_raw_cons = *raw_cons;
1940 struct rx_cmp_ext *rxcmp1;
1941 struct rx_cmp *rxcmp;
1942 u16 cp_cons;
1943 u8 cmp_type;
1944
1945 cp_cons = RING_CMP(tmp_raw_cons);
1946 rxcmp = (struct rx_cmp *)
1947 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1948
1949 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1950 cp_cons = RING_CMP(tmp_raw_cons);
1951 rxcmp1 = (struct rx_cmp_ext *)
1952 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1953
1954 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1955 return -EBUSY;
1956
1957 /* The valid test of the entry must be done first before
1958 * reading any further.
1959 */
1960 dma_rmb();
1961 cmp_type = RX_CMP_TYPE(rxcmp);
1962 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1963 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1964 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1965 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1966 struct rx_tpa_end_cmp_ext *tpa_end1;
1967
1968 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1969 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1970 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1971 }
1972 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1973 }
1974
1975 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1976 {
1977 struct bnxt_fw_health *fw_health = bp->fw_health;
1978 u32 reg = fw_health->regs[reg_idx];
1979 u32 reg_type, reg_off, val = 0;
1980
1981 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1982 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1983 switch (reg_type) {
1984 case BNXT_FW_HEALTH_REG_TYPE_CFG:
1985 pci_read_config_dword(bp->pdev, reg_off, &val);
1986 break;
1987 case BNXT_FW_HEALTH_REG_TYPE_GRC:
1988 reg_off = fw_health->mapped_regs[reg_idx];
1989 fallthrough;
1990 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1991 val = readl(bp->bar0 + reg_off);
1992 break;
1993 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1994 val = readl(bp->bar1 + reg_off);
1995 break;
1996 }
1997 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1998 val &= fw_health->fw_reset_inprog_reg_mask;
1999 return val;
2000 }
2001
2002 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2003 {
2004 int i;
2005
2006 for (i = 0; i < bp->rx_nr_rings; i++) {
2007 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2008 struct bnxt_ring_grp_info *grp_info;
2009
2010 grp_info = &bp->grp_info[grp_idx];
2011 if (grp_info->agg_fw_ring_id == ring_id)
2012 return grp_idx;
2013 }
2014 return INVALID_HW_RING_ID;
2015 }
2016
2017 #define BNXT_GET_EVENT_PORT(data) \
2018 ((data) & \
2019 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2020
2021 #define BNXT_EVENT_RING_TYPE(data2) \
2022 ((data2) & \
2023 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2024
2025 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2026 (BNXT_EVENT_RING_TYPE(data2) == \
2027 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2028
2029 static int bnxt_async_event_process(struct bnxt *bp,
2030 struct hwrm_async_event_cmpl *cmpl)
2031 {
2032 u16 event_id = le16_to_cpu(cmpl->event_id);
2033 u32 data1 = le32_to_cpu(cmpl->event_data1);
2034 u32 data2 = le32_to_cpu(cmpl->event_data2);
2035
2036 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2037 switch (event_id) {
2038 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2039 struct bnxt_link_info *link_info = &bp->link_info;
2040
2041 if (BNXT_VF(bp))
2042 goto async_event_process_exit;
2043
2044 /* print unsupported speed warning in forced speed mode only */
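/* data1 bit 0x20000 indicates that the currently forced link speed is
* no longer supported; see the warning below.
*/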
2045 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2046 (data1 & 0x20000)) {
2047 u16 fw_speed = link_info->force_link_speed;
2048 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2049
2050 if (speed != SPEED_UNKNOWN)
2051 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2052 speed);
2053 }
2054 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2055 }
2056 fallthrough;
2057 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2058 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2059 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2060 fallthrough;
2061 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2062 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2063 break;
2064 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2065 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2066 break;
2067 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2068 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2069
2070 if (BNXT_VF(bp))
2071 break;
2072
2073 if (bp->pf.port_id != port_id)
2074 break;
2075
2076 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2077 break;
2078 }
2079 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2080 if (BNXT_PF(bp))
2081 goto async_event_process_exit;
2082 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2083 break;
2084 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2085 char *fatal_str = "non-fatal";
2086
2087 if (!bp->fw_health)
2088 goto async_event_process_exit;
2089
2090 bp->fw_reset_timestamp = jiffies;
2091 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2092 if (!bp->fw_reset_min_dsecs)
2093 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2094 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2095 if (!bp->fw_reset_max_dsecs)
2096 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2097 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2098 fatal_str = "fatal";
2099 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2100 }
2101 netif_warn(bp, hw, bp->dev,
2102 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2103 fatal_str, data1, data2,
2104 bp->fw_reset_min_dsecs * 100,
2105 bp->fw_reset_max_dsecs * 100);
2106 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2107 break;
2108 }
2109 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2110 struct bnxt_fw_health *fw_health = bp->fw_health;
2111
2112 if (!fw_health)
2113 goto async_event_process_exit;
2114
2115 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2116 fw_health->enabled = false;
2117 netif_info(bp, drv, bp->dev,
2118 "Error recovery info: error recovery[0]\n");
2119 break;
2120 }
2121 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
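/* Convert the firmware health polling interval (polling_dsecs, in
* 100 msec units) into a number of driver timer ticks.
*/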
2122 fw_health->tmr_multiplier =
2123 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2124 bp->current_interval * 10);
2125 fw_health->tmr_counter = fw_health->tmr_multiplier;
2126 if (!fw_health->enabled)
2127 fw_health->last_fw_heartbeat =
2128 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2129 fw_health->last_fw_reset_cnt =
2130 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2131 netif_info(bp, drv, bp->dev,
2132 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2133 fw_health->master, fw_health->last_fw_reset_cnt,
2134 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2135 if (!fw_health->enabled) {
2136 /* Make sure tmr_counter is set and visible to
2137 * bnxt_health_check() before setting enabled to true.
2138 */
2139 smp_wmb();
2140 fw_health->enabled = true;
2141 }
2142 goto async_event_process_exit;
2143 }
2144 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2145 netif_notice(bp, hw, bp->dev,
2146 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2147 data1, data2);
2148 goto async_event_process_exit;
2149 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2150 struct bnxt_rx_ring_info *rxr;
2151 u16 grp_idx;
2152
2153 if (bp->flags & BNXT_FLAG_CHIP_P5)
2154 goto async_event_process_exit;
2155
2156 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2157 BNXT_EVENT_RING_TYPE(data2), data1);
2158 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2159 goto async_event_process_exit;
2160
2161 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2162 if (grp_idx == INVALID_HW_RING_ID) {
2163 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2164 data1);
2165 goto async_event_process_exit;
2166 }
2167 rxr = bp->bnapi[grp_idx]->rx_ring;
2168 bnxt_sched_reset(bp, rxr);
2169 goto async_event_process_exit;
2170 }
2171 default:
2172 goto async_event_process_exit;
2173 }
2174 bnxt_queue_sp_work(bp);
2175 async_event_process_exit:
2176 bnxt_ulp_async_events(bp, cmpl);
2177 return 0;
2178 }
2179
2180 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2181 {
2182 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2183 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2184 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2185 (struct hwrm_fwd_req_cmpl *)txcmp;
2186
2187 switch (cmpl_type) {
2188 case CMPL_BASE_TYPE_HWRM_DONE:
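/* DONE completion for an HWRM command; inverting the stored sequence
* id below lets the waiting HWRM request path see that the command
* has completed.
*/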
2189 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2190 if (seq_id == bp->hwrm_intr_seq_id)
2191 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2192 else
2193 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2194 break;
2195
2196 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2197 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2198
2199 if ((vf_id < bp->pf.first_vf_id) ||
2200 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2201 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2202 vf_id);
2203 return -EINVAL;
2204 }
2205
2206 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2207 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2208 bnxt_queue_sp_work(bp);
2209 break;
2210
2211 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2212 bnxt_async_event_process(bp,
2213 (struct hwrm_async_event_cmpl *)txcmp);
2214
2215 default:
2216 break;
2217 }
2218
2219 return 0;
2220 }
2221
2222 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2223 {
2224 struct bnxt_napi *bnapi = dev_instance;
2225 struct bnxt *bp = bnapi->bp;
2226 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2227 u32 cons = RING_CMP(cpr->cp_raw_cons);
2228
2229 cpr->event_ctr++;
2230 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2231 napi_schedule(&bnapi->napi);
2232 return IRQ_HANDLED;
2233 }
2234
2235 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2236 {
2237 u32 raw_cons = cpr->cp_raw_cons;
2238 u16 cons = RING_CMP(raw_cons);
2239 struct tx_cmp *txcmp;
2240
2241 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2242
2243 return TX_CMP_VALID(txcmp, raw_cons);
2244 }
2245
2246 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2247 {
2248 struct bnxt_napi *bnapi = dev_instance;
2249 struct bnxt *bp = bnapi->bp;
2250 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2251 u32 cons = RING_CMP(cpr->cp_raw_cons);
2252 u32 int_status;
2253
2254 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2255
2256 if (!bnxt_has_work(bp, cpr)) {
2257 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2258 /* return if erroneous interrupt */
2259 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2260 return IRQ_NONE;
2261 }
2262
2263 /* disable ring IRQ */
2264 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2265
2266 /* Return here if interrupt is shared and is disabled. */
2267 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2268 return IRQ_HANDLED;
2269
2270 napi_schedule(&bnapi->napi);
2271 return IRQ_HANDLED;
2272 }
2273
2274 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2275 int budget)
2276 {
2277 struct bnxt_napi *bnapi = cpr->bnapi;
2278 u32 raw_cons = cpr->cp_raw_cons;
2279 u32 cons;
2280 int tx_pkts = 0;
2281 int rx_pkts = 0;
2282 u8 event = 0;
2283 struct tx_cmp *txcmp;
2284
2285 cpr->has_more_work = 0;
2286 cpr->had_work_done = 1;
2287 while (1) {
2288 int rc;
2289
2290 cons = RING_CMP(raw_cons);
2291 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2292
2293 if (!TX_CMP_VALID(txcmp, raw_cons))
2294 break;
2295
2296 /* The valid test of the entry must be done first before
2297 * reading any further.
2298 */
2299 dma_rmb();
2300 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2301 tx_pkts++;
2302 /* return full budget so NAPI will complete. */
2303 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2304 rx_pkts = budget;
2305 raw_cons = NEXT_RAW_CMP(raw_cons);
2306 if (budget)
2307 cpr->has_more_work = 1;
2308 break;
2309 }
2310 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
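/* Completion types 0x10 - 0x1f are RX completions (L2, TPA start/end
* and TPA agg); hand them to the RX path.
*/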
2311 if (likely(budget))
2312 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2313 else
2314 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2315 &event);
2316 if (likely(rc >= 0))
2317 rx_pkts += rc;
2318 /* Increment rx_pkts when rc is -ENOMEM to count towards
2319 * the NAPI budget. Otherwise, we may potentially loop
2320 * here forever if we consistently cannot allocate
2321 * buffers.
2322 */
2323 else if (rc == -ENOMEM && budget)
2324 rx_pkts++;
2325 else if (rc == -EBUSY) /* partial completion */
2326 break;
2327 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2328 CMPL_BASE_TYPE_HWRM_DONE) ||
2329 (TX_CMP_TYPE(txcmp) ==
2330 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2331 (TX_CMP_TYPE(txcmp) ==
2332 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2333 bnxt_hwrm_handler(bp, txcmp);
2334 }
2335 raw_cons = NEXT_RAW_CMP(raw_cons);
2336
2337 if (rx_pkts && rx_pkts == budget) {
2338 cpr->has_more_work = 1;
2339 break;
2340 }
2341 }
2342
2343 if (event & BNXT_REDIRECT_EVENT)
2344 xdp_do_flush_map();
2345
2346 if (event & BNXT_TX_EVENT) {
2347 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2348 u16 prod = txr->tx_prod;
2349
2350 /* Sync BD data before updating doorbell */
2351 wmb();
2352
2353 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2354 }
2355
2356 cpr->cp_raw_cons = raw_cons;
2357 bnapi->tx_pkts += tx_pkts;
2358 bnapi->events |= event;
2359 return rx_pkts;
2360 }
2361
2362 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2363 {
2364 if (bnapi->tx_pkts) {
2365 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2366 bnapi->tx_pkts = 0;
2367 }
2368
2369 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2370 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2371
2372 if (bnapi->events & BNXT_AGG_EVENT)
2373 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2374 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2375 }
2376 bnapi->events = 0;
2377 }
2378
2379 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2380 int budget)
2381 {
2382 struct bnxt_napi *bnapi = cpr->bnapi;
2383 int rx_pkts;
2384
2385 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2386
2387 /* ACK completion ring before freeing tx ring and producing new
2388 * buffers in rx/agg rings to prevent overflowing the completion
2389 * ring.
2390 */
2391 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2392
2393 __bnxt_poll_work_done(bp, bnapi);
2394 return rx_pkts;
2395 }
2396
2397 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2398 {
2399 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2400 struct bnxt *bp = bnapi->bp;
2401 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2402 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2403 struct tx_cmp *txcmp;
2404 struct rx_cmp_ext *rxcmp1;
2405 u32 cp_cons, tmp_raw_cons;
2406 u32 raw_cons = cpr->cp_raw_cons;
2407 bool flush_xdp = false;
2408 u32 rx_pkts = 0;
2409 u8 event = 0;
2410
2411 while (1) {
2412 int rc;
2413
2414 cp_cons = RING_CMP(raw_cons);
2415 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2416
2417 if (!TX_CMP_VALID(txcmp, raw_cons))
2418 break;
2419
2420 /* The valid test of the entry must be done first before
2421 * reading any further.
2422 */
2423 dma_rmb();
2424 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2425 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2426 cp_cons = RING_CMP(tmp_raw_cons);
2427 rxcmp1 = (struct rx_cmp_ext *)
2428 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2429
2430 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2431 break;
2432
2433 /* force an error to recycle the buffer */
2434 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2435 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2436
2437 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2438 if (likely(rc == -EIO) && budget)
2439 rx_pkts++;
2440 else if (rc == -EBUSY) /* partial completion */
2441 break;
2442 if (event & BNXT_REDIRECT_EVENT)
2443 flush_xdp = true;
2444 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2445 CMPL_BASE_TYPE_HWRM_DONE)) {
2446 bnxt_hwrm_handler(bp, txcmp);
2447 } else {
2448 netdev_err(bp->dev,
2449 "Invalid completion received on special ring\n");
2450 }
2451 raw_cons = NEXT_RAW_CMP(raw_cons);
2452
2453 if (rx_pkts == budget)
2454 break;
2455 }
2456
2457 cpr->cp_raw_cons = raw_cons;
2458 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2459 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2460
2461 if (event & BNXT_AGG_EVENT)
2462 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2463 if (flush_xdp)
2464 xdp_do_flush();
2465
2466 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2467 napi_complete_done(napi, rx_pkts);
2468 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2469 }
2470 return rx_pkts;
2471 }
2472
2473 static int bnxt_poll(struct napi_struct *napi, int budget)
2474 {
2475 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2476 struct bnxt *bp = bnapi->bp;
2477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2478 int work_done = 0;
2479
2480 while (1) {
2481 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2482
2483 if (work_done >= budget) {
2484 if (!budget)
2485 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2486 break;
2487 }
2488
2489 if (!bnxt_has_work(bp, cpr)) {
2490 if (napi_complete_done(napi, work_done))
2491 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2492 break;
2493 }
2494 }
2495 if (bp->flags & BNXT_FLAG_DIM) {
2496 struct dim_sample dim_sample = {};
2497
2498 dim_update_sample(cpr->event_ctr,
2499 cpr->rx_packets,
2500 cpr->rx_bytes,
2501 &dim_sample);
2502 net_dim(&cpr->dim, dim_sample);
2503 }
2504 return work_done;
2505 }
2506
2507 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2508 {
2509 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2510 int i, work_done = 0;
2511
2512 for (i = 0; i < 2; i++) {
2513 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2514
2515 if (cpr2) {
2516 work_done += __bnxt_poll_work(bp, cpr2,
2517 budget - work_done);
2518 cpr->has_more_work |= cpr2->has_more_work;
2519 }
2520 }
2521 return work_done;
2522 }
2523
2524 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2525 u64 dbr_type)
2526 {
2527 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2528 int i;
2529
2530 for (i = 0; i < 2; i++) {
2531 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2532 struct bnxt_db_info *db;
2533
2534 if (cpr2 && cpr2->had_work_done) {
2535 db = &cpr2->cp_db;
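/* Ack the CQ up to the current consumer index; DBR_TYPE_CQ_ARMALL
* additionally re-arms the CQ for interrupts.
*/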
2536 writeq(db->db_key64 | dbr_type |
2537 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2538 cpr2->had_work_done = 0;
2539 }
2540 }
2541 __bnxt_poll_work_done(bp, bnapi);
2542 }
2543
2544 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2545 {
2546 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2547 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2548 u32 raw_cons = cpr->cp_raw_cons;
2549 struct bnxt *bp = bnapi->bp;
2550 struct nqe_cn *nqcmp;
2551 int work_done = 0;
2552 u32 cons;
2553
2554 if (cpr->has_more_work) {
2555 cpr->has_more_work = 0;
2556 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2557 }
2558 while (1) {
2559 cons = RING_CMP(raw_cons);
2560 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2561
2562 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2563 if (cpr->has_more_work)
2564 break;
2565
2566 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2567 cpr->cp_raw_cons = raw_cons;
2568 if (napi_complete_done(napi, work_done))
2569 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2570 cpr->cp_raw_cons);
2571 return work_done;
2572 }
2573
2574 /* The valid test of the entry must be done first before
2575 * reading any further.
2576 */
2577 dma_rmb();
2578
2579 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2580 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2581 struct bnxt_cp_ring_info *cpr2;
2582
2583 /* No more budget for RX work */
2584 if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2585 break;
2586
2587 cpr2 = cpr->cp_ring_arr[idx];
2588 work_done += __bnxt_poll_work(bp, cpr2,
2589 budget - work_done);
2590 cpr->has_more_work |= cpr2->has_more_work;
2591 } else {
2592 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2593 }
2594 raw_cons = NEXT_RAW_CMP(raw_cons);
2595 }
2596 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2597 if (raw_cons != cpr->cp_raw_cons) {
2598 cpr->cp_raw_cons = raw_cons;
2599 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2600 }
2601 return work_done;
2602 }
2603
2604 static void bnxt_free_tx_skbs(struct bnxt *bp)
2605 {
2606 int i, max_idx;
2607 struct pci_dev *pdev = bp->pdev;
2608
2609 if (!bp->tx_ring)
2610 return;
2611
2612 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2613 for (i = 0; i < bp->tx_nr_rings; i++) {
2614 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2615 int j;
2616
2617 if (!txr->tx_buf_ring)
2618 continue;
2619
2620 for (j = 0; j < max_idx;) {
2621 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2622 struct sk_buff *skb;
2623 int k, last;
2624
2625 if (i < bp->tx_nr_rings_xdp &&
2626 tx_buf->action == XDP_REDIRECT) {
2627 dma_unmap_single(&pdev->dev,
2628 dma_unmap_addr(tx_buf, mapping),
2629 dma_unmap_len(tx_buf, len),
2630 PCI_DMA_TODEVICE);
2631 xdp_return_frame(tx_buf->xdpf);
2632 tx_buf->action = 0;
2633 tx_buf->xdpf = NULL;
2634 j++;
2635 continue;
2636 }
2637
2638 skb = tx_buf->skb;
2639 if (!skb) {
2640 j++;
2641 continue;
2642 }
2643
2644 tx_buf->skb = NULL;
2645
2646 if (tx_buf->is_push) {
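/* A pushed packet was copied into the push buffer, so there is no
* DMA mapping to unmap; it still occupies two BD slots.
*/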
2647 dev_kfree_skb(skb);
2648 j += 2;
2649 continue;
2650 }
2651
2652 dma_unmap_single(&pdev->dev,
2653 dma_unmap_addr(tx_buf, mapping),
2654 skb_headlen(skb),
2655 PCI_DMA_TODEVICE);
2656
2657 last = tx_buf->nr_frags;
2658 j += 2;
2659 for (k = 0; k < last; k++, j++) {
2660 int ring_idx = j & bp->tx_ring_mask;
2661 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2662
2663 tx_buf = &txr->tx_buf_ring[ring_idx];
2664 dma_unmap_page(
2665 &pdev->dev,
2666 dma_unmap_addr(tx_buf, mapping),
2667 skb_frag_size(frag), PCI_DMA_TODEVICE);
2668 }
2669 dev_kfree_skb(skb);
2670 }
2671 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2672 }
2673 }
2674
2675 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2676 {
2677 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2678 struct pci_dev *pdev = bp->pdev;
2679 struct bnxt_tpa_idx_map *map;
2680 int i, max_idx, max_agg_idx;
2681
2682 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2683 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2684 if (!rxr->rx_tpa)
2685 goto skip_rx_tpa_free;
2686
2687 for (i = 0; i < bp->max_tpa; i++) {
2688 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2689 u8 *data = tpa_info->data;
2690
2691 if (!data)
2692 continue;
2693
2694 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2695 bp->rx_buf_use_size, bp->rx_dir,
2696 DMA_ATTR_WEAK_ORDERING);
2697
2698 tpa_info->data = NULL;
2699
2700 kfree(data);
2701 }
2702
2703 skip_rx_tpa_free:
2704 if (!rxr->rx_buf_ring)
2705 goto skip_rx_buf_free;
2706
2707 for (i = 0; i < max_idx; i++) {
2708 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2709 dma_addr_t mapping = rx_buf->mapping;
2710 void *data = rx_buf->data;
2711
2712 if (!data)
2713 continue;
2714
2715 rx_buf->data = NULL;
2716 if (BNXT_RX_PAGE_MODE(bp)) {
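/* The stored mapping includes bp->rx_dma_offset (the XDP headroom in
* page mode); back it out to get the DMA address of the whole page.
*/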
2717 mapping -= bp->rx_dma_offset;
2718 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2719 bp->rx_dir,
2720 DMA_ATTR_WEAK_ORDERING);
2721 page_pool_recycle_direct(rxr->page_pool, data);
2722 } else {
2723 dma_unmap_single_attrs(&pdev->dev, mapping,
2724 bp->rx_buf_use_size, bp->rx_dir,
2725 DMA_ATTR_WEAK_ORDERING);
2726 kfree(data);
2727 }
2728 }
2729
2730 skip_rx_buf_free:
2731 if (!rxr->rx_agg_ring)
2732 goto skip_rx_agg_free;
2733
2734 for (i = 0; i < max_agg_idx; i++) {
2735 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2736 struct page *page = rx_agg_buf->page;
2737
2738 if (!page)
2739 continue;
2740
2741 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2742 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2743 DMA_ATTR_WEAK_ORDERING);
2744
2745 rx_agg_buf->page = NULL;
2746 __clear_bit(i, rxr->rx_agg_bmap);
2747
2748 __free_page(page);
2749 }
2750
2751 skip_rx_agg_free:
2752 if (rxr->rx_page) {
2753 __free_page(rxr->rx_page);
2754 rxr->rx_page = NULL;
2755 }
2756 map = rxr->rx_tpa_idx_map;
2757 if (map)
2758 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2759 }
2760
2761 static void bnxt_free_rx_skbs(struct bnxt *bp)
2762 {
2763 int i;
2764
2765 if (!bp->rx_ring)
2766 return;
2767
2768 for (i = 0; i < bp->rx_nr_rings; i++)
2769 bnxt_free_one_rx_ring_skbs(bp, i);
2770 }
2771
2772 static void bnxt_free_skbs(struct bnxt *bp)
2773 {
2774 bnxt_free_tx_skbs(bp);
2775 bnxt_free_rx_skbs(bp);
2776 }
2777
2778 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2779 {
2780 struct pci_dev *pdev = bp->pdev;
2781 int i;
2782
2783 for (i = 0; i < rmem->nr_pages; i++) {
2784 if (!rmem->pg_arr[i])
2785 continue;
2786
2787 dma_free_coherent(&pdev->dev, rmem->page_size,
2788 rmem->pg_arr[i], rmem->dma_arr[i]);
2789
2790 rmem->pg_arr[i] = NULL;
2791 }
2792 if (rmem->pg_tbl) {
2793 size_t pg_tbl_size = rmem->nr_pages * 8;
2794
2795 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2796 pg_tbl_size = rmem->page_size;
2797 dma_free_coherent(&pdev->dev, pg_tbl_size,
2798 rmem->pg_tbl, rmem->pg_tbl_map);
2799 rmem->pg_tbl = NULL;
2800 }
2801 if (rmem->vmem_size && *rmem->vmem) {
2802 vfree(*rmem->vmem);
2803 *rmem->vmem = NULL;
2804 }
2805 }
2806
2807 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2808 {
2809 struct pci_dev *pdev = bp->pdev;
2810 u64 valid_bit = 0;
2811 int i;
2812
2813 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2814 valid_bit = PTU_PTE_VALID;
2815 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2816 size_t pg_tbl_size = rmem->nr_pages * 8;
2817
2818 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2819 pg_tbl_size = rmem->page_size;
2820 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2821 &rmem->pg_tbl_map,
2822 GFP_KERNEL);
2823 if (!rmem->pg_tbl)
2824 return -ENOMEM;
2825 }
2826
2827 for (i = 0; i < rmem->nr_pages; i++) {
2828 u64 extra_bits = valid_bit;
2829
2830 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2831 rmem->page_size,
2832 &rmem->dma_arr[i],
2833 GFP_KERNEL);
2834 if (!rmem->pg_arr[i])
2835 return -ENOMEM;
2836
2837 if (rmem->init_val)
2838 memset(rmem->pg_arr[i], rmem->init_val,
2839 rmem->page_size);
2840 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2841 if (i == rmem->nr_pages - 2 &&
2842 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2843 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2844 else if (i == rmem->nr_pages - 1 &&
2845 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2846 extra_bits |= PTU_PTE_LAST;
2847 rmem->pg_tbl[i] =
2848 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2849 }
2850 }
2851
2852 if (rmem->vmem_size) {
2853 *rmem->vmem = vzalloc(rmem->vmem_size);
2854 if (!(*rmem->vmem))
2855 return -ENOMEM;
2856 }
2857 return 0;
2858 }
2859
2860 static void bnxt_free_tpa_info(struct bnxt *bp)
2861 {
2862 int i, j;
2863
2864 for (i = 0; i < bp->rx_nr_rings; i++) {
2865 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2866
2867 kfree(rxr->rx_tpa_idx_map);
2868 rxr->rx_tpa_idx_map = NULL;
2869 if (rxr->rx_tpa) {
2870 for (j = 0; j < bp->max_tpa; j++) {
2871 kfree(rxr->rx_tpa[j].agg_arr);
2872 rxr->rx_tpa[j].agg_arr = NULL;
2873 }
2874 }
2875 kfree(rxr->rx_tpa);
2876 rxr->rx_tpa = NULL;
2877 }
2878 }
2879
2880 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2881 {
2882 int i, j;
2883
2884 bp->max_tpa = MAX_TPA;
2885 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2886 if (!bp->max_tpa_v2)
2887 return 0;
2888 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2889 }
2890
2891 for (i = 0; i < bp->rx_nr_rings; i++) {
2892 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2893 struct rx_agg_cmp *agg;
2894
2895 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2896 GFP_KERNEL);
2897 if (!rxr->rx_tpa)
2898 return -ENOMEM;
2899
2900 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2901 continue;
2902 for (j = 0; j < bp->max_tpa; j++) {
2903 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
2904 if (!agg)
2905 return -ENOMEM;
2906 rxr->rx_tpa[j].agg_arr = agg;
2907 }
2908 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2909 GFP_KERNEL);
2910 if (!rxr->rx_tpa_idx_map)
2911 return -ENOMEM;
2912 }
2913 return 0;
2914 }
2915
2916 static void bnxt_free_rx_rings(struct bnxt *bp)
2917 {
2918 int i;
2919
2920 if (!bp->rx_ring)
2921 return;
2922
2923 bnxt_free_tpa_info(bp);
2924 for (i = 0; i < bp->rx_nr_rings; i++) {
2925 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2926 struct bnxt_ring_struct *ring;
2927
2928 if (rxr->xdp_prog)
2929 bpf_prog_put(rxr->xdp_prog);
2930
2931 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2932 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2933
2934 page_pool_destroy(rxr->page_pool);
2935 rxr->page_pool = NULL;
2936
2937 kfree(rxr->rx_agg_bmap);
2938 rxr->rx_agg_bmap = NULL;
2939
2940 ring = &rxr->rx_ring_struct;
2941 bnxt_free_ring(bp, &ring->ring_mem);
2942
2943 ring = &rxr->rx_agg_ring_struct;
2944 bnxt_free_ring(bp, &ring->ring_mem);
2945 }
2946 }
2947
2948 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2949 struct bnxt_rx_ring_info *rxr)
2950 {
2951 struct page_pool_params pp = { 0 };
2952
2953 pp.pool_size = bp->rx_ring_size;
2954 pp.nid = dev_to_node(&bp->pdev->dev);
2955 pp.dev = &bp->pdev->dev;
2956 pp.dma_dir = DMA_BIDIRECTIONAL;
2957
2958 rxr->page_pool = page_pool_create(&pp);
2959 if (IS_ERR(rxr->page_pool)) {
2960 int err = PTR_ERR(rxr->page_pool);
2961
2962 rxr->page_pool = NULL;
2963 return err;
2964 }
2965 return 0;
2966 }
2967
2968 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2969 {
2970 int i, rc = 0, agg_rings = 0;
2971
2972 if (!bp->rx_ring)
2973 return -ENOMEM;
2974
2975 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2976 agg_rings = 1;
2977
2978 for (i = 0; i < bp->rx_nr_rings; i++) {
2979 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2980 struct bnxt_ring_struct *ring;
2981
2982 ring = &rxr->rx_ring_struct;
2983
2984 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2985 if (rc)
2986 return rc;
2987
2988 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2989 if (rc < 0)
2990 return rc;
2991
2992 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2993 MEM_TYPE_PAGE_POOL,
2994 rxr->page_pool);
2995 if (rc) {
2996 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2997 return rc;
2998 }
2999
3000 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3001 if (rc)
3002 return rc;
3003
3004 ring->grp_idx = i;
3005 if (agg_rings) {
3006 u16 mem_size;
3007
3008 ring = &rxr->rx_agg_ring_struct;
3009 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3010 if (rc)
3011 return rc;
3012
3013 ring->grp_idx = i;
3014 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3015 mem_size = rxr->rx_agg_bmap_size / 8;
3016 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3017 if (!rxr->rx_agg_bmap)
3018 return -ENOMEM;
3019 }
3020 }
3021 if (bp->flags & BNXT_FLAG_TPA)
3022 rc = bnxt_alloc_tpa_info(bp);
3023 return rc;
3024 }
3025
3026 static void bnxt_free_tx_rings(struct bnxt *bp)
3027 {
3028 int i;
3029 struct pci_dev *pdev = bp->pdev;
3030
3031 if (!bp->tx_ring)
3032 return;
3033
3034 for (i = 0; i < bp->tx_nr_rings; i++) {
3035 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3036 struct bnxt_ring_struct *ring;
3037
3038 if (txr->tx_push) {
3039 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3040 txr->tx_push, txr->tx_push_mapping);
3041 txr->tx_push = NULL;
3042 }
3043
3044 ring = &txr->tx_ring_struct;
3045
3046 bnxt_free_ring(bp, &ring->ring_mem);
3047 }
3048 }
3049
3050 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3051 {
3052 int i, j, rc;
3053 struct pci_dev *pdev = bp->pdev;
3054
3055 bp->tx_push_size = 0;
3056 if (bp->tx_push_thresh) {
3057 int push_size;
3058
3059 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3060 bp->tx_push_thresh);
3061
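/* Cap the inline push buffer (BDs plus up to tx_push_thresh bytes of
* packet data) at 256 bytes; beyond that, disable TX push entirely.
*/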
3062 if (push_size > 256) {
3063 push_size = 0;
3064 bp->tx_push_thresh = 0;
3065 }
3066
3067 bp->tx_push_size = push_size;
3068 }
3069
3070 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3071 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3072 struct bnxt_ring_struct *ring;
3073 u8 qidx;
3074
3075 ring = &txr->tx_ring_struct;
3076
3077 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3078 if (rc)
3079 return rc;
3080
3081 ring->grp_idx = txr->bnapi->index;
3082 if (bp->tx_push_size) {
3083 dma_addr_t mapping;
3084
3085 /* One pre-allocated DMA buffer to back up the
3086 * TX push operation
3087 */
3088 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3089 bp->tx_push_size,
3090 &txr->tx_push_mapping,
3091 GFP_KERNEL);
3092
3093 if (!txr->tx_push)
3094 return -ENOMEM;
3095
3096 mapping = txr->tx_push_mapping +
3097 sizeof(struct tx_push_bd);
3098 txr->data_mapping = cpu_to_le64(mapping);
3099 }
3100 qidx = bp->tc_to_qidx[j];
3101 ring->queue_id = bp->q_info[qidx].queue_id;
3102 if (i < bp->tx_nr_rings_xdp)
3103 continue;
3104 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3105 j++;
3106 }
3107 return 0;
3108 }
3109
3110 static void bnxt_free_cp_rings(struct bnxt *bp)
3111 {
3112 int i;
3113
3114 if (!bp->bnapi)
3115 return;
3116
3117 for (i = 0; i < bp->cp_nr_rings; i++) {
3118 struct bnxt_napi *bnapi = bp->bnapi[i];
3119 struct bnxt_cp_ring_info *cpr;
3120 struct bnxt_ring_struct *ring;
3121 int j;
3122
3123 if (!bnapi)
3124 continue;
3125
3126 cpr = &bnapi->cp_ring;
3127 ring = &cpr->cp_ring_struct;
3128
3129 bnxt_free_ring(bp, &ring->ring_mem);
3130
3131 for (j = 0; j < 2; j++) {
3132 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3133
3134 if (cpr2) {
3135 ring = &cpr2->cp_ring_struct;
3136 bnxt_free_ring(bp, &ring->ring_mem);
3137 kfree(cpr2);
3138 cpr->cp_ring_arr[j] = NULL;
3139 }
3140 }
3141 }
3142 }
3143
3144 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3145 {
3146 struct bnxt_ring_mem_info *rmem;
3147 struct bnxt_ring_struct *ring;
3148 struct bnxt_cp_ring_info *cpr;
3149 int rc;
3150
3151 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3152 if (!cpr)
3153 return NULL;
3154
3155 ring = &cpr->cp_ring_struct;
3156 rmem = &ring->ring_mem;
3157 rmem->nr_pages = bp->cp_nr_pages;
3158 rmem->page_size = HW_CMPD_RING_SIZE;
3159 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3160 rmem->dma_arr = cpr->cp_desc_mapping;
3161 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3162 rc = bnxt_alloc_ring(bp, rmem);
3163 if (rc) {
3164 bnxt_free_ring(bp, rmem);
3165 kfree(cpr);
3166 cpr = NULL;
3167 }
3168 return cpr;
3169 }
3170
3171 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3172 {
3173 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3174 int i, rc, ulp_base_vec, ulp_msix;
3175
3176 ulp_msix = bnxt_get_ulp_msix_num(bp);
3177 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3178 for (i = 0; i < bp->cp_nr_rings; i++) {
3179 struct bnxt_napi *bnapi = bp->bnapi[i];
3180 struct bnxt_cp_ring_info *cpr;
3181 struct bnxt_ring_struct *ring;
3182
3183 if (!bnapi)
3184 continue;
3185
3186 cpr = &bnapi->cp_ring;
3187 cpr->bnapi = bnapi;
3188 ring = &cpr->cp_ring_struct;
3189
3190 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3191 if (rc)
3192 return rc;
3193
3194 if (ulp_msix && i >= ulp_base_vec)
3195 ring->map_idx = i + ulp_msix;
3196 else
3197 ring->map_idx = i;
3198
3199 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3200 continue;
3201
3202 if (i < bp->rx_nr_rings) {
3203 struct bnxt_cp_ring_info *cpr2 =
3204 bnxt_alloc_cp_sub_ring(bp);
3205
3206 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3207 if (!cpr2)
3208 return -ENOMEM;
3209 cpr2->bnapi = bnapi;
3210 }
3211 if ((sh && i < bp->tx_nr_rings) ||
3212 (!sh && i >= bp->rx_nr_rings)) {
3213 struct bnxt_cp_ring_info *cpr2 =
3214 bnxt_alloc_cp_sub_ring(bp);
3215
3216 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3217 if (!cpr2)
3218 return -ENOMEM;
3219 cpr2->bnapi = bnapi;
3220 }
3221 }
3222 return 0;
3223 }
3224
3225 static void bnxt_init_ring_struct(struct bnxt *bp)
3226 {
3227 int i;
3228
3229 for (i = 0; i < bp->cp_nr_rings; i++) {
3230 struct bnxt_napi *bnapi = bp->bnapi[i];
3231 struct bnxt_ring_mem_info *rmem;
3232 struct bnxt_cp_ring_info *cpr;
3233 struct bnxt_rx_ring_info *rxr;
3234 struct bnxt_tx_ring_info *txr;
3235 struct bnxt_ring_struct *ring;
3236
3237 if (!bnapi)
3238 continue;
3239
3240 cpr = &bnapi->cp_ring;
3241 ring = &cpr->cp_ring_struct;
3242 rmem = &ring->ring_mem;
3243 rmem->nr_pages = bp->cp_nr_pages;
3244 rmem->page_size = HW_CMPD_RING_SIZE;
3245 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3246 rmem->dma_arr = cpr->cp_desc_mapping;
3247 rmem->vmem_size = 0;
3248
3249 rxr = bnapi->rx_ring;
3250 if (!rxr)
3251 goto skip_rx;
3252
3253 ring = &rxr->rx_ring_struct;
3254 rmem = &ring->ring_mem;
3255 rmem->nr_pages = bp->rx_nr_pages;
3256 rmem->page_size = HW_RXBD_RING_SIZE;
3257 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3258 rmem->dma_arr = rxr->rx_desc_mapping;
3259 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3260 rmem->vmem = (void **)&rxr->rx_buf_ring;
3261
3262 ring = &rxr->rx_agg_ring_struct;
3263 rmem = &ring->ring_mem;
3264 rmem->nr_pages = bp->rx_agg_nr_pages;
3265 rmem->page_size = HW_RXBD_RING_SIZE;
3266 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3267 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3268 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3269 rmem->vmem = (void **)&rxr->rx_agg_ring;
3270
3271 skip_rx:
3272 txr = bnapi->tx_ring;
3273 if (!txr)
3274 continue;
3275
3276 ring = &txr->tx_ring_struct;
3277 rmem = &ring->ring_mem;
3278 rmem->nr_pages = bp->tx_nr_pages;
3279 rmem->page_size = HW_RXBD_RING_SIZE;
3280 rmem->pg_arr = (void **)txr->tx_desc_ring;
3281 rmem->dma_arr = txr->tx_desc_mapping;
3282 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3283 rmem->vmem = (void **)&txr->tx_buf_ring;
3284 }
3285 }
3286
3287 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3288 {
3289 int i;
3290 u32 prod;
3291 struct rx_bd **rx_buf_ring;
3292
3293 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3294 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3295 int j;
3296 struct rx_bd *rxbd;
3297
3298 rxbd = rx_buf_ring[i];
3299 if (!rxbd)
3300 continue;
3301
3302 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3303 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3304 rxbd->rx_bd_opaque = prod;
3305 }
3306 }
3307 }
3308
3309 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3310 {
3311 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3312 struct net_device *dev = bp->dev;
3313 u32 prod;
3314 int i;
3315
3316 prod = rxr->rx_prod;
3317 for (i = 0; i < bp->rx_ring_size; i++) {
3318 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3319 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3320 ring_nr, i, bp->rx_ring_size);
3321 break;
3322 }
3323 prod = NEXT_RX(prod);
3324 }
3325 rxr->rx_prod = prod;
3326
3327 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3328 return 0;
3329
3330 prod = rxr->rx_agg_prod;
3331 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3332 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3333 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3334 ring_nr, i, bp->rx_ring_size);
3335 break;
3336 }
3337 prod = NEXT_RX_AGG(prod);
3338 }
3339 rxr->rx_agg_prod = prod;
3340
3341 if (rxr->rx_tpa) {
3342 dma_addr_t mapping;
3343 u8 *data;
3344
3345 for (i = 0; i < bp->max_tpa; i++) {
3346 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3347 if (!data)
3348 return -ENOMEM;
3349
3350 rxr->rx_tpa[i].data = data;
3351 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3352 rxr->rx_tpa[i].mapping = mapping;
3353 }
3354 }
3355 return 0;
3356 }
3357
3358 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3359 {
3360 struct bnxt_rx_ring_info *rxr;
3361 struct bnxt_ring_struct *ring;
3362 u32 type;
3363
3364 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3365 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3366
3367 if (NET_IP_ALIGN == 2)
3368 type |= RX_BD_FLAGS_SOP;
3369
3370 rxr = &bp->rx_ring[ring_nr];
3371 ring = &rxr->rx_ring_struct;
3372 bnxt_init_rxbd_pages(ring, type);
3373
3374 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3375 bpf_prog_add(bp->xdp_prog, 1);
3376 rxr->xdp_prog = bp->xdp_prog;
3377 }
3378 ring->fw_ring_id = INVALID_HW_RING_ID;
3379
3380 ring = &rxr->rx_agg_ring_struct;
3381 ring->fw_ring_id = INVALID_HW_RING_ID;
3382
3383 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3384 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3385 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3386
3387 bnxt_init_rxbd_pages(ring, type);
3388 }
3389
3390 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3391 }
3392
3393 static void bnxt_init_cp_rings(struct bnxt *bp)
3394 {
3395 int i, j;
3396
3397 for (i = 0; i < bp->cp_nr_rings; i++) {
3398 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3399 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3400
3401 ring->fw_ring_id = INVALID_HW_RING_ID;
3402 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3403 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3404 for (j = 0; j < 2; j++) {
3405 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3406
3407 if (!cpr2)
3408 continue;
3409
3410 ring = &cpr2->cp_ring_struct;
3411 ring->fw_ring_id = INVALID_HW_RING_ID;
3412 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3413 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3414 }
3415 }
3416 }
3417
3418 static int bnxt_init_rx_rings(struct bnxt *bp)
3419 {
3420 int i, rc = 0;
3421
3422 if (BNXT_RX_PAGE_MODE(bp)) {
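/* In page mode, received data lands XDP_PACKET_HEADROOM bytes into
* the page so that an attached XDP program has headroom in front of
* the frame.
*/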
3423 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3424 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3425 } else {
3426 bp->rx_offset = BNXT_RX_OFFSET;
3427 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3428 }
3429
3430 for (i = 0; i < bp->rx_nr_rings; i++) {
3431 rc = bnxt_init_one_rx_ring(bp, i);
3432 if (rc)
3433 break;
3434 }
3435
3436 return rc;
3437 }
3438
3439 static int bnxt_init_tx_rings(struct bnxt *bp)
3440 {
3441 u16 i;
3442
3443 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3444 BNXT_MIN_TX_DESC_CNT);
3445
3446 for (i = 0; i < bp->tx_nr_rings; i++) {
3447 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3448 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3449
3450 ring->fw_ring_id = INVALID_HW_RING_ID;
3451 }
3452
3453 return 0;
3454 }
3455
3456 static void bnxt_free_ring_grps(struct bnxt *bp)
3457 {
3458 kfree(bp->grp_info);
3459 bp->grp_info = NULL;
3460 }
3461
3462 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3463 {
3464 int i;
3465
3466 if (irq_re_init) {
3467 bp->grp_info = kcalloc(bp->cp_nr_rings,
3468 sizeof(struct bnxt_ring_grp_info),
3469 GFP_KERNEL);
3470 if (!bp->grp_info)
3471 return -ENOMEM;
3472 }
3473 for (i = 0; i < bp->cp_nr_rings; i++) {
3474 if (irq_re_init)
3475 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3476 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3477 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3478 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3479 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3480 }
3481 return 0;
3482 }
3483
3484 static void bnxt_free_vnics(struct bnxt *bp)
3485 {
3486 kfree(bp->vnic_info);
3487 bp->vnic_info = NULL;
3488 bp->nr_vnics = 0;
3489 }
3490
3491 static int bnxt_alloc_vnics(struct bnxt *bp)
3492 {
3493 int num_vnics = 1;
3494
3495 #ifdef CONFIG_RFS_ACCEL
3496 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3497 num_vnics += bp->rx_nr_rings;
3498 #endif
3499
3500 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3501 num_vnics++;
3502
3503 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3504 GFP_KERNEL);
3505 if (!bp->vnic_info)
3506 return -ENOMEM;
3507
3508 bp->nr_vnics = num_vnics;
3509 return 0;
3510 }
3511
3512 static void bnxt_init_vnics(struct bnxt *bp)
3513 {
3514 int i;
3515
3516 for (i = 0; i < bp->nr_vnics; i++) {
3517 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3518 int j;
3519
3520 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3521 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3522 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3523
3524 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3525
3526 if (bp->vnic_info[i].rss_hash_key) {
3527 if (i == 0)
3528 prandom_bytes(vnic->rss_hash_key,
3529 HW_HASH_KEY_SIZE);
3530 else
3531 memcpy(vnic->rss_hash_key,
3532 bp->vnic_info[0].rss_hash_key,
3533 HW_HASH_KEY_SIZE);
3534 }
3535 }
3536 }
3537
3538 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3539 {
3540 int pages;
3541
3542 pages = ring_size / desc_per_pg;
3543
3544 if (!pages)
3545 return 1;
3546
3547 pages++;
3548
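/* Round the page count up to the next power of 2 so that the ring
* masks derived from it work.
*/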
3549 while (pages & (pages - 1))
3550 pages++;
3551
3552 return pages;
3553 }
3554
3555 void bnxt_set_tpa_flags(struct bnxt *bp)
3556 {
3557 bp->flags &= ~BNXT_FLAG_TPA;
3558 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3559 return;
3560 if (bp->dev->features & NETIF_F_LRO)
3561 bp->flags |= BNXT_FLAG_LRO;
3562 else if (bp->dev->features & NETIF_F_GRO_HW)
3563 bp->flags |= BNXT_FLAG_GRO;
3564 }
3565
3566 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3567 * be set on entry.
3568 */
3569 void bnxt_set_ring_params(struct bnxt *bp)
3570 {
3571 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3572 u32 agg_factor = 0, agg_ring_size = 0;
3573
3574 /* 8 for CRC and VLAN */
3575 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3576
3577 rx_space = rx_size + NET_SKB_PAD +
3578 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3579
3580 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3581 ring_size = bp->rx_ring_size;
3582 bp->rx_agg_ring_size = 0;
3583 bp->rx_agg_nr_pages = 0;
3584
3585 if (bp->flags & BNXT_FLAG_TPA)
3586 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3587
3588 bp->flags &= ~BNXT_FLAG_JUMBO;
3589 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3590 u32 jumbo_factor;
3591
3592 bp->flags |= BNXT_FLAG_JUMBO;
3593 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3594 if (jumbo_factor > agg_factor)
3595 agg_factor = jumbo_factor;
3596 }
3597 agg_ring_size = ring_size * agg_factor;
3598
3599 if (agg_ring_size) {
3600 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3601 RX_DESC_CNT);
3602 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3603 u32 tmp = agg_ring_size;
3604
3605 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3606 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3607 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3608 tmp, agg_ring_size);
3609 }
3610 bp->rx_agg_ring_size = agg_ring_size;
3611 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3612 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3613 rx_space = rx_size + NET_SKB_PAD +
3614 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3615 }
3616
3617 bp->rx_buf_use_size = rx_size;
3618 bp->rx_buf_size = rx_space;
3619
3620 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3621 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3622
3623 ring_size = bp->tx_ring_size;
3624 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3625 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3626
3627 max_rx_cmpl = bp->rx_ring_size;
3628 /* MAX TPA needs to be added because TPA_START completions are
3629 * immediately recycled, so the TPA completions are not bound by
3630 * the RX ring size.
3631 */
3632 if (bp->flags & BNXT_FLAG_TPA)
3633 max_rx_cmpl += bp->max_tpa;
3634 /* RX and TPA completions are 32-byte, all others are 16-byte */
3635 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3636 bp->cp_ring_size = ring_size;
3637
3638 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3639 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3640 bp->cp_nr_pages = MAX_CP_PAGES;
3641 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3642 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3643 ring_size, bp->cp_ring_size);
3644 }
3645 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3646 bp->cp_ring_mask = bp->cp_bit - 1;
3647 }
3648
3649 /* Changing allocation mode of RX rings.
3650 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3651 */
3652 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3653 {
3654 if (page_mode) {
3655 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3656 return -EOPNOTSUPP;
3657 bp->dev->max_mtu =
3658 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3659 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3660 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3661 bp->rx_dir = DMA_BIDIRECTIONAL;
3662 bp->rx_skb_func = bnxt_rx_page_skb;
3663 /* Disable LRO or GRO_HW */
3664 netdev_update_features(bp->dev);
3665 } else {
3666 bp->dev->max_mtu = bp->max_mtu;
3667 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3668 bp->rx_dir = DMA_FROM_DEVICE;
3669 bp->rx_skb_func = bnxt_rx_skb;
3670 }
3671 return 0;
3672 }
3673
3674 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3675 {
3676 int i;
3677 struct bnxt_vnic_info *vnic;
3678 struct pci_dev *pdev = bp->pdev;
3679
3680 if (!bp->vnic_info)
3681 return;
3682
3683 for (i = 0; i < bp->nr_vnics; i++) {
3684 vnic = &bp->vnic_info[i];
3685
3686 kfree(vnic->fw_grp_ids);
3687 vnic->fw_grp_ids = NULL;
3688
3689 kfree(vnic->uc_list);
3690 vnic->uc_list = NULL;
3691
3692 if (vnic->mc_list) {
3693 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3694 vnic->mc_list, vnic->mc_list_mapping);
3695 vnic->mc_list = NULL;
3696 }
3697
3698 if (vnic->rss_table) {
3699 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3700 vnic->rss_table,
3701 vnic->rss_table_dma_addr);
3702 vnic->rss_table = NULL;
3703 }
3704
3705 vnic->rss_hash_key = NULL;
3706 vnic->flags = 0;
3707 }
3708 }
3709
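/* Allocate the per-VNIC resources: the unicast address list, the
 * DMA-coherent multicast list, the firmware ring group ID array (not
 * needed on P5 chips), and a single DMA buffer that holds both the RSS
 * indirection table and the RSS hash key.
 */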
3710 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3711 {
3712 int i, rc = 0, size;
3713 struct bnxt_vnic_info *vnic;
3714 struct pci_dev *pdev = bp->pdev;
3715 int max_rings;
3716
3717 for (i = 0; i < bp->nr_vnics; i++) {
3718 vnic = &bp->vnic_info[i];
3719
3720 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3721 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3722
3723 if (mem_size > 0) {
3724 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3725 if (!vnic->uc_list) {
3726 rc = -ENOMEM;
3727 goto out;
3728 }
3729 }
3730 }
3731
3732 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3733 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3734 vnic->mc_list =
3735 dma_alloc_coherent(&pdev->dev,
3736 vnic->mc_list_size,
3737 &vnic->mc_list_mapping,
3738 GFP_KERNEL);
3739 if (!vnic->mc_list) {
3740 rc = -ENOMEM;
3741 goto out;
3742 }
3743 }
3744
3745 if (bp->flags & BNXT_FLAG_CHIP_P5)
3746 goto vnic_skip_grps;
3747
3748 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3749 max_rings = bp->rx_nr_rings;
3750 else
3751 max_rings = 1;
3752
3753 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3754 if (!vnic->fw_grp_ids) {
3755 rc = -ENOMEM;
3756 goto out;
3757 }
3758 vnic_skip_grps:
3759 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3760 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3761 continue;
3762
3763 /* Allocate rss table and hash key */
3764 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3765 if (bp->flags & BNXT_FLAG_CHIP_P5)
3766 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3767
3768 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3769 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3770 vnic->rss_table_size,
3771 &vnic->rss_table_dma_addr,
3772 GFP_KERNEL);
3773 if (!vnic->rss_table) {
3774 rc = -ENOMEM;
3775 goto out;
3776 }
3777
3778 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3779 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3780 }
3781 return 0;
3782
3783 out:
3784 return rc;
3785 }
3786
3787 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3788 {
3789 struct pci_dev *pdev = bp->pdev;
3790
3791 if (bp->hwrm_cmd_resp_addr) {
3792 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3793 bp->hwrm_cmd_resp_dma_addr);
3794 bp->hwrm_cmd_resp_addr = NULL;
3795 }
3796
3797 if (bp->hwrm_cmd_kong_resp_addr) {
3798 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3799 bp->hwrm_cmd_kong_resp_addr,
3800 bp->hwrm_cmd_kong_resp_dma_addr);
3801 bp->hwrm_cmd_kong_resp_addr = NULL;
3802 }
3803 }
3804
3805 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3806 {
3807 struct pci_dev *pdev = bp->pdev;
3808
3809 if (bp->hwrm_cmd_kong_resp_addr)
3810 return 0;
3811
3812 bp->hwrm_cmd_kong_resp_addr =
3813 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3814 &bp->hwrm_cmd_kong_resp_dma_addr,
3815 GFP_KERNEL);
3816 if (!bp->hwrm_cmd_kong_resp_addr)
3817 return -ENOMEM;
3818
3819 return 0;
3820 }
3821
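/* The main HWRM response buffer is one page of DMA-coherent memory shared
 * by all commands on the primary (ChiMP) channel; hwrm_cmd_lock serializes
 * access to it.
 */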
3822 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3823 {
3824 struct pci_dev *pdev = bp->pdev;
3825
3826 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3827 &bp->hwrm_cmd_resp_dma_addr,
3828 GFP_KERNEL);
3829 if (!bp->hwrm_cmd_resp_addr)
3830 return -ENOMEM;
3831
3832 return 0;
3833 }
3834
3835 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3836 {
3837 if (bp->hwrm_short_cmd_req_addr) {
3838 struct pci_dev *pdev = bp->pdev;
3839
3840 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3841 bp->hwrm_short_cmd_req_addr,
3842 bp->hwrm_short_cmd_req_dma_addr);
3843 bp->hwrm_short_cmd_req_addr = NULL;
3844 }
3845 }
3846
3847 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3848 {
3849 struct pci_dev *pdev = bp->pdev;
3850
3851 if (bp->hwrm_short_cmd_req_addr)
3852 return 0;
3853
3854 bp->hwrm_short_cmd_req_addr =
3855 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3856 &bp->hwrm_short_cmd_req_dma_addr,
3857 GFP_KERNEL);
3858 if (!bp->hwrm_short_cmd_req_addr)
3859 return -ENOMEM;
3860
3861 return 0;
3862 }
3863
3864 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3865 {
3866 kfree(stats->hw_masks);
3867 stats->hw_masks = NULL;
3868 kfree(stats->sw_stats);
3869 stats->sw_stats = NULL;
3870 if (stats->hw_stats) {
3871 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3872 stats->hw_stats_map);
3873 stats->hw_stats = NULL;
3874 }
3875 }
3876
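/* Allocate one statistics block: a DMA-coherent buffer written by the
 * firmware (hw_stats), a software shadow copy (sw_stats), and optionally a
 * buffer of counter rollover masks (hw_masks).
 */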
3877 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3878 bool alloc_masks)
3879 {
3880 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3881 &stats->hw_stats_map, GFP_KERNEL);
3882 if (!stats->hw_stats)
3883 return -ENOMEM;
3884
3885 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3886 if (!stats->sw_stats)
3887 goto stats_mem_err;
3888
3889 if (alloc_masks) {
3890 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3891 if (!stats->hw_masks)
3892 goto stats_mem_err;
3893 }
3894 return 0;
3895
3896 stats_mem_err:
3897 bnxt_free_stats_mem(bp, stats);
3898 return -ENOMEM;
3899 }
3900
3901 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3902 {
3903 int i;
3904
3905 for (i = 0; i < count; i++)
3906 mask_arr[i] = mask;
3907 }
3908
3909 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3910 {
3911 int i;
3912
3913 for (i = 0; i < count; i++)
3914 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3915 }
3916
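/* Query the counter rollover masks from the firmware.  This is only
 * supported on P5 chips with extended HW stats; the caller falls back to
 * fixed-width masks otherwise.
 */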
3917 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3918 struct bnxt_stats_mem *stats)
3919 {
3920 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3921 struct hwrm_func_qstats_ext_input req = {0};
3922 __le64 *hw_masks;
3923 int rc;
3924
3925 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3926 !(bp->flags & BNXT_FLAG_CHIP_P5))
3927 return -EOPNOTSUPP;
3928
3929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3930 req.fid = cpu_to_le16(0xffff);
3931 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3932 mutex_lock(&bp->hwrm_cmd_lock);
3933 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3934 if (rc)
3935 goto qstat_exit;
3936
3937 hw_masks = &resp->rx_ucast_pkts;
3938 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3939
3940 qstat_exit:
3941 mutex_unlock(&bp->hwrm_cmd_lock);
3942 return rc;
3943 }
3944
3945 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3946 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3947
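/* Initialize the counter rollover masks for the ring and port statistics.
 * The masks are queried from the firmware where supported; otherwise
 * 48-bit masks are assumed for ring counters on P5 chips (all-ones on
 * older chips) and 40-bit masks for port counters.
 */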
3948 static void bnxt_init_stats(struct bnxt *bp)
3949 {
3950 struct bnxt_napi *bnapi = bp->bnapi[0];
3951 struct bnxt_cp_ring_info *cpr;
3952 struct bnxt_stats_mem *stats;
3953 __le64 *rx_stats, *tx_stats;
3954 int rc, rx_count, tx_count;
3955 u64 *rx_masks, *tx_masks;
3956 u64 mask;
3957 u8 flags;
3958
3959 cpr = &bnapi->cp_ring;
3960 stats = &cpr->stats;
3961 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3962 if (rc) {
3963 if (bp->flags & BNXT_FLAG_CHIP_P5)
3964 mask = (1ULL << 48) - 1;
3965 else
3966 mask = -1ULL;
3967 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3968 }
3969 if (bp->flags & BNXT_FLAG_PORT_STATS) {
3970 stats = &bp->port_stats;
3971 rx_stats = stats->hw_stats;
3972 rx_masks = stats->hw_masks;
3973 rx_count = sizeof(struct rx_port_stats) / 8;
3974 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3975 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3976 tx_count = sizeof(struct tx_port_stats) / 8;
3977
3978 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3979 rc = bnxt_hwrm_port_qstats(bp, flags);
3980 if (rc) {
3981 mask = (1ULL << 40) - 1;
3982
3983 bnxt_fill_masks(rx_masks, mask, rx_count);
3984 bnxt_fill_masks(tx_masks, mask, tx_count);
3985 } else {
3986 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3987 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3988 bnxt_hwrm_port_qstats(bp, 0);
3989 }
3990 }
3991 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3992 stats = &bp->rx_port_stats_ext;
3993 rx_stats = stats->hw_stats;
3994 rx_masks = stats->hw_masks;
3995 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3996 stats = &bp->tx_port_stats_ext;
3997 tx_stats = stats->hw_stats;
3998 tx_masks = stats->hw_masks;
3999 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4000
4001 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4002 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4003 if (rc) {
4004 mask = (1ULL << 40) - 1;
4005
4006 bnxt_fill_masks(rx_masks, mask, rx_count);
4007 if (tx_stats)
4008 bnxt_fill_masks(tx_masks, mask, tx_count);
4009 } else {
4010 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4011 if (tx_stats)
4012 bnxt_copy_hw_masks(tx_masks, tx_stats,
4013 tx_count);
4014 bnxt_hwrm_port_qstats_ext(bp, 0);
4015 }
4016 }
4017 }
4018
4019 static void bnxt_free_port_stats(struct bnxt *bp)
4020 {
4021 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4022 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4023
4024 bnxt_free_stats_mem(bp, &bp->port_stats);
4025 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4026 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4027 }
4028
4029 static void bnxt_free_ring_stats(struct bnxt *bp)
4030 {
4031 int i;
4032
4033 if (!bp->bnapi)
4034 return;
4035
4036 for (i = 0; i < bp->cp_nr_rings; i++) {
4037 struct bnxt_napi *bnapi = bp->bnapi[i];
4038 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4039
4040 bnxt_free_stats_mem(bp, &cpr->stats);
4041 }
4042 }
4043
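/* Allocate the per-completion-ring statistics blocks, the PF-only port
 * statistics, and, when the firmware supports them, the extended RX/TX
 * port statistics.
 */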
4044 static int bnxt_alloc_stats(struct bnxt *bp)
4045 {
4046 u32 size, i;
4047 int rc;
4048
4049 size = bp->hw_ring_stats_size;
4050
4051 for (i = 0; i < bp->cp_nr_rings; i++) {
4052 struct bnxt_napi *bnapi = bp->bnapi[i];
4053 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4054
4055 cpr->stats.len = size;
4056 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4057 if (rc)
4058 return rc;
4059
4060 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4061 }
4062
4063 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4064 return 0;
4065
4066 if (bp->port_stats.hw_stats)
4067 goto alloc_ext_stats;
4068
4069 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4070 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4071 if (rc)
4072 return rc;
4073
4074 bp->flags |= BNXT_FLAG_PORT_STATS;
4075
4076 alloc_ext_stats:
4077 /* Display extended statistics only if FW supports it */
4078 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4079 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4080 return 0;
4081
4082 if (bp->rx_port_stats_ext.hw_stats)
4083 goto alloc_tx_ext_stats;
4084
4085 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4086 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4087 /* Extended stats are optional */
4088 if (rc)
4089 return 0;
4090
4091 alloc_tx_ext_stats:
4092 if (bp->tx_port_stats_ext.hw_stats)
4093 return 0;
4094
4095 if (bp->hwrm_spec_code >= 0x10902 ||
4096 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4097 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4098 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4099 /* Extended stats are optional */
4100 if (rc)
4101 return 0;
4102 }
4103 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4104 return 0;
4105 }
4106
4107 static void bnxt_clear_ring_indices(struct bnxt *bp)
4108 {
4109 int i;
4110
4111 if (!bp->bnapi)
4112 return;
4113
4114 for (i = 0; i < bp->cp_nr_rings; i++) {
4115 struct bnxt_napi *bnapi = bp->bnapi[i];
4116 struct bnxt_cp_ring_info *cpr;
4117 struct bnxt_rx_ring_info *rxr;
4118 struct bnxt_tx_ring_info *txr;
4119
4120 if (!bnapi)
4121 continue;
4122
4123 cpr = &bnapi->cp_ring;
4124 cpr->cp_raw_cons = 0;
4125
4126 txr = bnapi->tx_ring;
4127 if (txr) {
4128 txr->tx_prod = 0;
4129 txr->tx_cons = 0;
4130 }
4131
4132 rxr = bnapi->rx_ring;
4133 if (rxr) {
4134 rxr->rx_prod = 0;
4135 rxr->rx_agg_prod = 0;
4136 rxr->rx_sw_agg_prod = 0;
4137 rxr->rx_next_cons = 0;
4138 }
4139 }
4140 }
4141
4142 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4143 {
4144 #ifdef CONFIG_RFS_ACCEL
4145 int i;
4146
4147 /* We hold rtnl_lock and all our NAPIs have been disabled, so it
4148 * is safe to delete the hash table.
4149 */
4150 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4151 struct hlist_head *head;
4152 struct hlist_node *tmp;
4153 struct bnxt_ntuple_filter *fltr;
4154
4155 head = &bp->ntp_fltr_hash_tbl[i];
4156 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4157 hlist_del(&fltr->hash);
4158 kfree(fltr);
4159 }
4160 }
4161 if (irq_reinit) {
4162 kfree(bp->ntp_fltr_bmap);
4163 bp->ntp_fltr_bmap = NULL;
4164 }
4165 bp->ntp_fltr_count = 0;
4166 #endif
4167 }
4168
4169 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4170 {
4171 #ifdef CONFIG_RFS_ACCEL
4172 int i, rc = 0;
4173
4174 if (!(bp->flags & BNXT_FLAG_RFS))
4175 return 0;
4176
4177 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4178 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4179
4180 bp->ntp_fltr_count = 0;
4181 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4182 sizeof(long),
4183 GFP_KERNEL);
4184
4185 if (!bp->ntp_fltr_bmap)
4186 rc = -ENOMEM;
4187
4188 return rc;
4189 #else
4190 return 0;
4191 #endif
4192 }
4193
4194 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4195 {
4196 bnxt_free_vnic_attributes(bp);
4197 bnxt_free_tx_rings(bp);
4198 bnxt_free_rx_rings(bp);
4199 bnxt_free_cp_rings(bp);
4200 bnxt_free_ntp_fltrs(bp, irq_re_init);
4201 if (irq_re_init) {
4202 bnxt_free_ring_stats(bp);
4203 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
4204 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4205 bnxt_free_port_stats(bp);
4206 bnxt_free_ring_grps(bp);
4207 bnxt_free_vnics(bp);
4208 kfree(bp->tx_ring_map);
4209 bp->tx_ring_map = NULL;
4210 kfree(bp->tx_ring);
4211 bp->tx_ring = NULL;
4212 kfree(bp->rx_ring);
4213 bp->rx_ring = NULL;
4214 kfree(bp->bnapi);
4215 bp->bnapi = NULL;
4216 } else {
4217 bnxt_clear_ring_indices(bp);
4218 }
4219 }
4220
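/* Allocate all ring-related driver memory.  With irq_re_init, this also
 * allocates the bnxt_napi array, the RX/TX ring info arrays, the
 * statistics blocks, the ntuple filter bitmap and the VNIC array before
 * the ring descriptor memory and the VNIC attributes are set up.
 */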
4221 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4222 {
4223 int i, j, rc, size, arr_size;
4224 void *bnapi;
4225
4226 if (irq_re_init) {
4227 /* Allocate bnapi mem pointer array and mem block for
4228 * all queues
4229 */
4230 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4231 bp->cp_nr_rings);
4232 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4233 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4234 if (!bnapi)
4235 return -ENOMEM;
4236
4237 bp->bnapi = bnapi;
4238 bnapi += arr_size;
4239 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4240 bp->bnapi[i] = bnapi;
4241 bp->bnapi[i]->index = i;
4242 bp->bnapi[i]->bp = bp;
4243 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4244 struct bnxt_cp_ring_info *cpr =
4245 &bp->bnapi[i]->cp_ring;
4246
4247 cpr->cp_ring_struct.ring_mem.flags =
4248 BNXT_RMEM_RING_PTE_FLAG;
4249 }
4250 }
4251
4252 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4253 sizeof(struct bnxt_rx_ring_info),
4254 GFP_KERNEL);
4255 if (!bp->rx_ring)
4256 return -ENOMEM;
4257
4258 for (i = 0; i < bp->rx_nr_rings; i++) {
4259 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4260
4261 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4262 rxr->rx_ring_struct.ring_mem.flags =
4263 BNXT_RMEM_RING_PTE_FLAG;
4264 rxr->rx_agg_ring_struct.ring_mem.flags =
4265 BNXT_RMEM_RING_PTE_FLAG;
4266 }
4267 rxr->bnapi = bp->bnapi[i];
4268 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4269 }
4270
4271 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4272 sizeof(struct bnxt_tx_ring_info),
4273 GFP_KERNEL);
4274 if (!bp->tx_ring)
4275 return -ENOMEM;
4276
4277 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4278 GFP_KERNEL);
4279
4280 if (!bp->tx_ring_map)
4281 return -ENOMEM;
4282
4283 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4284 j = 0;
4285 else
4286 j = bp->rx_nr_rings;
4287
4288 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4289 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4290
4291 if (bp->flags & BNXT_FLAG_CHIP_P5)
4292 txr->tx_ring_struct.ring_mem.flags =
4293 BNXT_RMEM_RING_PTE_FLAG;
4294 txr->bnapi = bp->bnapi[j];
4295 bp->bnapi[j]->tx_ring = txr;
4296 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4297 if (i >= bp->tx_nr_rings_xdp) {
4298 txr->txq_index = i - bp->tx_nr_rings_xdp;
4299 bp->bnapi[j]->tx_int = bnxt_tx_int;
4300 } else {
4301 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4302 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4303 }
4304 }
4305
4306 rc = bnxt_alloc_stats(bp);
4307 if (rc)
4308 goto alloc_mem_err;
4309 bnxt_init_stats(bp);
4310
4311 rc = bnxt_alloc_ntp_fltrs(bp);
4312 if (rc)
4313 goto alloc_mem_err;
4314
4315 rc = bnxt_alloc_vnics(bp);
4316 if (rc)
4317 goto alloc_mem_err;
4318 }
4319
4320 bnxt_init_ring_struct(bp);
4321
4322 rc = bnxt_alloc_rx_rings(bp);
4323 if (rc)
4324 goto alloc_mem_err;
4325
4326 rc = bnxt_alloc_tx_rings(bp);
4327 if (rc)
4328 goto alloc_mem_err;
4329
4330 rc = bnxt_alloc_cp_rings(bp);
4331 if (rc)
4332 goto alloc_mem_err;
4333
4334 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4335 BNXT_VNIC_UCAST_FLAG;
4336 rc = bnxt_alloc_vnic_attributes(bp);
4337 if (rc)
4338 goto alloc_mem_err;
4339 return 0;
4340
4341 alloc_mem_err:
4342 bnxt_free_mem(bp, true);
4343 return rc;
4344 }
4345
4346 static void bnxt_disable_int(struct bnxt *bp)
4347 {
4348 int i;
4349
4350 if (!bp->bnapi)
4351 return;
4352
4353 for (i = 0; i < bp->cp_nr_rings; i++) {
4354 struct bnxt_napi *bnapi = bp->bnapi[i];
4355 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4356 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4357
4358 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4359 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4360 }
4361 }
4362
4363 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4364 {
4365 struct bnxt_napi *bnapi = bp->bnapi[n];
4366 struct bnxt_cp_ring_info *cpr;
4367
4368 cpr = &bnapi->cp_ring;
4369 return cpr->cp_ring_struct.map_idx;
4370 }
4371
4372 static void bnxt_disable_int_sync(struct bnxt *bp)
4373 {
4374 int i;
4375
4376 atomic_inc(&bp->intr_sem);
4377
4378 bnxt_disable_int(bp);
4379 for (i = 0; i < bp->cp_nr_rings; i++) {
4380 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4381
4382 synchronize_irq(bp->irq_tbl[map_idx].vector);
4383 }
4384 }
4385
4386 static void bnxt_enable_int(struct bnxt *bp)
4387 {
4388 int i;
4389
4390 atomic_set(&bp->intr_sem, 0);
4391 for (i = 0; i < bp->cp_nr_rings; i++) {
4392 struct bnxt_napi *bnapi = bp->bnapi[i];
4393 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4394
4395 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4396 }
4397 }
4398
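/* Fill in the common HWRM request header: request type, completion ring,
 * target ID and the DMA address of the response buffer (the Kong response
 * buffer is used for messages routed to the Kong channel).
 */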
4399 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4400 u16 cmpl_ring, u16 target_id)
4401 {
4402 struct input *req = request;
4403
4404 req->req_type = cpu_to_le16(req_type);
4405 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4406 req->target_id = cpu_to_le16(target_id);
4407 if (bnxt_kong_hwrm_message(bp, req))
4408 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4409 else
4410 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4411 }
4412
4413 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4414 {
4415 switch (hwrm_err) {
4416 case HWRM_ERR_CODE_SUCCESS:
4417 return 0;
4418 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4419 return -EROFS;
4420 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4421 return -EACCES;
4422 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4423 return -ENOSPC;
4424 case HWRM_ERR_CODE_INVALID_PARAMS:
4425 case HWRM_ERR_CODE_INVALID_FLAGS:
4426 case HWRM_ERR_CODE_INVALID_ENABLES:
4427 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4428 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4429 return -EINVAL;
4430 case HWRM_ERR_CODE_NO_BUFFER:
4431 return -ENOMEM;
4432 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4433 case HWRM_ERR_CODE_BUSY:
4434 return -EAGAIN;
4435 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4436 return -EOPNOTSUPP;
4437 default:
4438 return -EIO;
4439 }
4440 }
4441
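/* Send one HWRM command and wait for its response.  The request is copied
 * into the ChiMP (or Kong) communication area in BAR0 and the doorbell is
 * rung; oversized requests, or any request when the firmware advertises
 * the short command capability, are passed indirectly through a DMA buffer
 * using the short request format.  Completion is detected either via the
 * response completion interrupt or by polling resp_len and the valid byte,
 * and the HWRM error code is translated to a standard errno.
 */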
4442 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4443 int timeout, bool silent)
4444 {
4445 int i, intr_process, rc, tmo_count;
4446 struct input *req = msg;
4447 u32 *data = msg;
4448 u8 *valid;
4449 u16 cp_ring_id, len = 0;
4450 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4451 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4452 struct hwrm_short_input short_input = {0};
4453 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4454 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4455 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4456
4457 if (BNXT_NO_FW_ACCESS(bp) &&
4458 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4459 return -EBUSY;
4460
4461 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4462 if (msg_len > bp->hwrm_max_ext_req_len ||
4463 !bp->hwrm_short_cmd_req_addr)
4464 return -EINVAL;
4465 }
4466
4467 if (bnxt_hwrm_kong_chnl(bp, req)) {
4468 dst = BNXT_HWRM_CHNL_KONG;
4469 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4470 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4471 resp = bp->hwrm_cmd_kong_resp_addr;
4472 }
4473
4474 memset(resp, 0, PAGE_SIZE);
4475 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4476 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4477
4478 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4479 /* currently supports only one outstanding message */
4480 if (intr_process)
4481 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4482
4483 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4484 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4485 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4486 u16 max_msg_len;
4487
4488 /* Zero-pad the short cmd request buffer up to the maximum
4489 * extended request length supported by the device
4490 * (hwrm_max_ext_req_len).
4491 */
4492 max_msg_len = bp->hwrm_max_ext_req_len;
4493
4494 memcpy(short_cmd_req, req, msg_len);
4495 if (msg_len < max_msg_len)
4496 memset(short_cmd_req + msg_len, 0,
4497 max_msg_len - msg_len);
4498
4499 short_input.req_type = req->req_type;
4500 short_input.signature =
4501 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4502 short_input.size = cpu_to_le16(msg_len);
4503 short_input.req_addr =
4504 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4505
4506 data = (u32 *)&short_input;
4507 msg_len = sizeof(short_input);
4508
4509 /* Sync memory write before updating doorbell */
4510 wmb();
4511
4512 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4513 }
4514
4515 /* Write request msg to hwrm channel */
4516 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4517
4518 for (i = msg_len; i < max_req_len; i += 4)
4519 writel(0, bp->bar0 + bar_offset + i);
4520
4521 /* Ring channel doorbell */
4522 writel(1, bp->bar0 + doorbell_offset);
4523
4524 if (!pci_is_enabled(bp->pdev))
4525 return 0;
4526
4527 if (!timeout)
4528 timeout = DFLT_HWRM_CMD_TIMEOUT;
4529 /* convert timeout to usec */
4530 timeout *= 1000;
4531
4532 i = 0;
4533 /* Short timeout for the first few iterations:
4534 * number of loops = number of loops for short timeout +
4535 * number of loops for standard timeout.
4536 */
4537 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4538 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4539 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4540
4541 if (intr_process) {
4542 u16 seq_id = bp->hwrm_intr_seq_id;
4543
4544 /* Wait until hwrm response cmpl interrupt is processed */
4545 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4546 i++ < tmo_count) {
4547 /* Abort the wait for completion if the FW health
4548 * check has failed.
4549 */
4550 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4551 return -EBUSY;
4552 /* on first few passes, just barely sleep */
4553 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4554 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4555 HWRM_SHORT_MAX_TIMEOUT);
4556 else
4557 usleep_range(HWRM_MIN_TIMEOUT,
4558 HWRM_MAX_TIMEOUT);
4559 }
4560
4561 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4562 if (!silent)
4563 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4564 le16_to_cpu(req->req_type));
4565 return -EBUSY;
4566 }
4567 len = le16_to_cpu(resp->resp_len);
4568 valid = ((u8 *)resp) + len - 1;
4569 } else {
4570 int j;
4571
4572 /* Check if response len is updated */
4573 for (i = 0; i < tmo_count; i++) {
4574 /* Abort the wait for completion if the FW health
4575 * check has failed.
4576 */
4577 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4578 return -EBUSY;
4579 len = le16_to_cpu(resp->resp_len);
4580 if (len)
4581 break;
4582 /* on first few passes, just barely sleep */
4583 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4584 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4585 HWRM_SHORT_MAX_TIMEOUT);
4586 else
4587 usleep_range(HWRM_MIN_TIMEOUT,
4588 HWRM_MAX_TIMEOUT);
4589 }
4590
4591 if (i >= tmo_count) {
4592 if (!silent)
4593 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4594 HWRM_TOTAL_TIMEOUT(i),
4595 le16_to_cpu(req->req_type),
4596 le16_to_cpu(req->seq_id), len);
4597 return -EBUSY;
4598 }
4599
4600 /* Last byte of resp contains valid bit */
4601 valid = ((u8 *)resp) + len - 1;
4602 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4603 /* make sure we read from updated DMA memory */
4604 dma_rmb();
4605 if (*valid)
4606 break;
4607 usleep_range(1, 5);
4608 }
4609
4610 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4611 if (!silent)
4612 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4613 HWRM_TOTAL_TIMEOUT(i),
4614 le16_to_cpu(req->req_type),
4615 le16_to_cpu(req->seq_id), len,
4616 *valid);
4617 return -EBUSY;
4618 }
4619 }
4620
4621 /* Zero valid bit for compatibility. Valid bit in an older spec
4622 * may become a new field in a newer spec. We must make sure that
4623 * a new field not implemented by old spec will read zero.
4624 */
4625 *valid = 0;
4626 rc = le16_to_cpu(resp->error_code);
4627 if (rc && !silent)
4628 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4629 le16_to_cpu(resp->req_type),
4630 le16_to_cpu(resp->seq_id), rc);
4631 return bnxt_hwrm_to_stderr(rc);
4632 }
4633
4634 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4635 {
4636 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4637 }
4638
4639 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4640 int timeout)
4641 {
4642 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4643 }
4644
4645 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4646 {
4647 int rc;
4648
4649 mutex_lock(&bp->hwrm_cmd_lock);
4650 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4651 mutex_unlock(&bp->hwrm_cmd_lock);
4652 return rc;
4653 }
4654
4655 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4656 int timeout)
4657 {
4658 int rc;
4659
4660 mutex_lock(&bp->hwrm_cmd_lock);
4661 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4662 mutex_unlock(&bp->hwrm_cmd_lock);
4663 return rc;
4664 }
4665
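/* Register the driver with the firmware: advertise the driver version and
 * capability flags, the VF commands the PF wants forwarded to it, and the
 * bitmap of async events the firmware should forward to the driver.
 */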
4666 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4667 bool async_only)
4668 {
4669 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4670 struct hwrm_func_drv_rgtr_input req = {0};
4671 DECLARE_BITMAP(async_events_bmap, 256);
4672 u32 *events = (u32 *)async_events_bmap;
4673 u32 flags;
4674 int rc, i;
4675
4676 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4677
4678 req.enables =
4679 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4680 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4681 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4682
4683 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4684 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4685 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4686 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4687 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4688 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4689 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4690 req.flags = cpu_to_le32(flags);
4691 req.ver_maj_8b = DRV_VER_MAJ;
4692 req.ver_min_8b = DRV_VER_MIN;
4693 req.ver_upd_8b = DRV_VER_UPD;
4694 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4695 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4696 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4697
4698 if (BNXT_PF(bp)) {
4699 u32 data[8];
4700 int i;
4701
4702 memset(data, 0, sizeof(data));
4703 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4704 u16 cmd = bnxt_vf_req_snif[i];
4705 unsigned int bit, idx;
4706
4707 idx = cmd / 32;
4708 bit = cmd % 32;
4709 data[idx] |= 1 << bit;
4710 }
4711
4712 for (i = 0; i < 8; i++)
4713 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4714
4715 req.enables |=
4716 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4717 }
4718
4719 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4720 req.flags |= cpu_to_le32(
4721 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4722
4723 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4724 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4725 u16 event_id = bnxt_async_events_arr[i];
4726
4727 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4728 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4729 continue;
4730 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4731 }
4732 if (bmap && bmap_size) {
4733 for (i = 0; i < bmap_size; i++) {
4734 if (test_bit(i, bmap))
4735 __set_bit(i, async_events_bmap);
4736 }
4737 }
4738 for (i = 0; i < 8; i++)
4739 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4740
4741 if (async_only)
4742 req.enables =
4743 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4744
4745 mutex_lock(&bp->hwrm_cmd_lock);
4746 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4747 if (!rc) {
4748 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4749 if (resp->flags &
4750 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4751 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4752 }
4753 mutex_unlock(&bp->hwrm_cmd_lock);
4754 return rc;
4755 }
4756
4757 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4758 {
4759 struct hwrm_func_drv_unrgtr_input req = {0};
4760
4761 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4762 return 0;
4763
4764 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4765 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4766 }
4767
4768 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4769 {
4770 int rc = 0;
4771 struct hwrm_tunnel_dst_port_free_input req = {0};
4772
4773 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4774 req.tunnel_type = tunnel_type;
4775
4776 switch (tunnel_type) {
4777 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4778 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4779 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4780 break;
4781 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4782 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4783 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4784 break;
4785 default:
4786 break;
4787 }
4788
4789 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4790 if (rc)
4791 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4792 rc);
4793 return rc;
4794 }
4795
4796 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4797 u8 tunnel_type)
4798 {
4799 int rc = 0;
4800 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4801 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4802
4803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4804
4805 req.tunnel_type = tunnel_type;
4806 req.tunnel_dst_port_val = port;
4807
4808 mutex_lock(&bp->hwrm_cmd_lock);
4809 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4810 if (rc) {
4811 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4812 rc);
4813 goto err_out;
4814 }
4815
4816 switch (tunnel_type) {
4817 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4818 bp->vxlan_fw_dst_port_id =
4819 le16_to_cpu(resp->tunnel_dst_port_id);
4820 break;
4821 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4822 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4823 break;
4824 default:
4825 break;
4826 }
4827
4828 err_out:
4829 mutex_unlock(&bp->hwrm_cmd_lock);
4830 return rc;
4831 }
4832
4833 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4834 {
4835 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4836 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4837
4838 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4839 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4840
4841 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4842 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4843 req.mask = cpu_to_le32(vnic->rx_mask);
4844 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4845 }
4846
4847 #ifdef CONFIG_RFS_ACCEL
4848 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4849 struct bnxt_ntuple_filter *fltr)
4850 {
4851 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4852
4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4854 req.ntuple_filter_id = fltr->filter_id;
4855 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4856 }
4857
4858 #define BNXT_NTP_FLTR_FLAGS \
4859 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4860 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4861 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4862 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4863 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4864 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4865 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4866 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4867 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4868 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4869 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4870 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4871 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4872 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4873
4874 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4875 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4876
4877 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4878 struct bnxt_ntuple_filter *fltr)
4879 {
4880 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4881 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4882 struct flow_keys *keys = &fltr->fkeys;
4883 struct bnxt_vnic_info *vnic;
4884 u32 flags = 0;
4885 int rc = 0;
4886
4887 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4888 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4889
4890 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4891 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4892 req.dst_id = cpu_to_le16(fltr->rxq);
4893 } else {
4894 vnic = &bp->vnic_info[fltr->rxq + 1];
4895 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4896 }
4897 req.flags = cpu_to_le32(flags);
4898 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4899
4900 req.ethertype = htons(ETH_P_IP);
4901 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4902 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4903 req.ip_protocol = keys->basic.ip_proto;
4904
4905 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4906 int i;
4907
4908 req.ethertype = htons(ETH_P_IPV6);
4909 req.ip_addr_type =
4910 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4911 *(struct in6_addr *)&req.src_ipaddr[0] =
4912 keys->addrs.v6addrs.src;
4913 *(struct in6_addr *)&req.dst_ipaddr[0] =
4914 keys->addrs.v6addrs.dst;
4915 for (i = 0; i < 4; i++) {
4916 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4917 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4918 }
4919 } else {
4920 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4921 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4922 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4923 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4924 }
4925 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4926 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4927 req.tunnel_type =
4928 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4929 }
4930
4931 req.src_port = keys->ports.src;
4932 req.src_port_mask = cpu_to_be16(0xffff);
4933 req.dst_port = keys->ports.dst;
4934 req.dst_port_mask = cpu_to_be16(0xffff);
4935
4936 mutex_lock(&bp->hwrm_cmd_lock);
4937 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4938 if (!rc) {
4939 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4940 fltr->filter_id = resp->ntuple_filter_id;
4941 }
4942 mutex_unlock(&bp->hwrm_cmd_lock);
4943 return rc;
4944 }
4945 #endif
4946
4947 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4948 u8 *mac_addr)
4949 {
4950 int rc = 0;
4951 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4952 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4953
4954 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4955 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4956 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4957 req.flags |=
4958 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4959 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4960 req.enables =
4961 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4962 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4963 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4964 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4965 req.l2_addr_mask[0] = 0xff;
4966 req.l2_addr_mask[1] = 0xff;
4967 req.l2_addr_mask[2] = 0xff;
4968 req.l2_addr_mask[3] = 0xff;
4969 req.l2_addr_mask[4] = 0xff;
4970 req.l2_addr_mask[5] = 0xff;
4971
4972 mutex_lock(&bp->hwrm_cmd_lock);
4973 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4974 if (!rc)
4975 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4976 resp->l2_filter_id;
4977 mutex_unlock(&bp->hwrm_cmd_lock);
4978 return rc;
4979 }
4980
4981 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4982 {
4983 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4984 int rc = 0;
4985
4986 /* Any associated ntuple filters will also be cleared by firmware. */
4987 mutex_lock(&bp->hwrm_cmd_lock);
4988 for (i = 0; i < num_of_vnics; i++) {
4989 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4990
4991 for (j = 0; j < vnic->uc_filter_count; j++) {
4992 struct hwrm_cfa_l2_filter_free_input req = {0};
4993
4994 bnxt_hwrm_cmd_hdr_init(bp, &req,
4995 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4996
4997 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4998
4999 rc = _hwrm_send_message(bp, &req, sizeof(req),
5000 HWRM_CMD_TIMEOUT);
5001 }
5002 vnic->uc_filter_count = 0;
5003 }
5004 mutex_unlock(&bp->hwrm_cmd_lock);
5005
5006 return rc;
5007 }
5008
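/* Configure TPA (LRO / GRO_HW) on a VNIC.  When enabling, the maximum
 * number of aggregation segments is derived from the MTU and
 * BNXT_RX_PAGE_SIZE (expressed in log2 units on pre-P5 chips), and the
 * minimum aggregation length is set to 512 bytes.
 */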
5009 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5010 {
5011 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5012 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5013 struct hwrm_vnic_tpa_cfg_input req = {0};
5014
5015 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5016 return 0;
5017
5018 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5019
5020 if (tpa_flags) {
5021 u16 mss = bp->dev->mtu - 40;
5022 u32 nsegs, n, segs = 0, flags;
5023
5024 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5025 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5026 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5027 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5028 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5029 if (tpa_flags & BNXT_FLAG_GRO)
5030 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5031
5032 req.flags = cpu_to_le32(flags);
5033
5034 req.enables =
5035 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5036 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5037 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5038
5039 /* The number of aggregation segments is expressed in log2 units,
5040 * and the first packet is not counted as part of these units.
5041 */
5042 if (mss <= BNXT_RX_PAGE_SIZE) {
5043 n = BNXT_RX_PAGE_SIZE / mss;
5044 nsegs = (MAX_SKB_FRAGS - 1) * n;
5045 } else {
5046 n = mss / BNXT_RX_PAGE_SIZE;
5047 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5048 n++;
5049 nsegs = (MAX_SKB_FRAGS - n) / n;
5050 }
5051
5052 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5053 segs = MAX_TPA_SEGS_P5;
5054 max_aggs = bp->max_tpa;
5055 } else {
5056 segs = ilog2(nsegs);
5057 }
5058 req.max_agg_segs = cpu_to_le16(segs);
5059 req.max_aggs = cpu_to_le16(max_aggs);
5060
5061 req.min_agg_len = cpu_to_le32(512);
5062 }
5063 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5064
5065 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5066 }
5067
5068 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5069 {
5070 struct bnxt_ring_grp_info *grp_info;
5071
5072 grp_info = &bp->grp_info[ring->grp_idx];
5073 return grp_info->cp_fw_ring_id;
5074 }
5075
5076 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5077 {
5078 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5079 struct bnxt_napi *bnapi = rxr->bnapi;
5080 struct bnxt_cp_ring_info *cpr;
5081
5082 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5083 return cpr->cp_ring_struct.fw_ring_id;
5084 } else {
5085 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5086 }
5087 }
5088
5089 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5090 {
5091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5092 struct bnxt_napi *bnapi = txr->bnapi;
5093 struct bnxt_cp_ring_info *cpr;
5094
5095 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5096 return cpr->cp_ring_struct.fw_ring_id;
5097 } else {
5098 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5099 }
5100 }
5101
5102 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5103 {
5104 int entries;
5105
5106 if (bp->flags & BNXT_FLAG_CHIP_P5)
5107 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5108 else
5109 entries = HW_HASH_INDEX_SIZE;
5110
5111 bp->rss_indir_tbl_entries = entries;
5112 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5113 GFP_KERNEL);
5114 if (!bp->rss_indir_tbl)
5115 return -ENOMEM;
5116 return 0;
5117 }
5118
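/* Fill the RSS indirection table with the default round-robin spread
 * across the RX rings (ethtool_rxfh_indir_default()) and zero any padding
 * entries at the end of the table.
 */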
5119 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5120 {
5121 u16 max_rings, max_entries, pad, i;
5122
5123 if (!bp->rx_nr_rings)
5124 return;
5125
5126 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5127 max_rings = bp->rx_nr_rings - 1;
5128 else
5129 max_rings = bp->rx_nr_rings;
5130
5131 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5132
5133 for (i = 0; i < max_entries; i++)
5134 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5135
5136 pad = bp->rss_indir_tbl_entries - max_entries;
5137 if (pad)
5138 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5139 }
5140
5141 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5142 {
5143 u16 i, tbl_size, max_ring = 0;
5144
5145 if (!bp->rss_indir_tbl)
5146 return 0;
5147
5148 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5149 for (i = 0; i < tbl_size; i++)
5150 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5151 return max_ring;
5152 }
5153
5154 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5155 {
5156 if (bp->flags & BNXT_FLAG_CHIP_P5)
5157 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5158 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5159 return 2;
5160 return 1;
5161 }
5162
5163 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5164 {
5165 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5166 u16 i, j;
5167
5168 /* Fill the RSS indirection table with ring group ids */
5169 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5170 if (!no_rss)
5171 j = bp->rss_indir_tbl[i];
5172 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5173 }
5174 }
5175
5176 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5177 struct bnxt_vnic_info *vnic)
5178 {
5179 __le16 *ring_tbl = vnic->rss_table;
5180 struct bnxt_rx_ring_info *rxr;
5181 u16 tbl_size, i;
5182
5183 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5184
5185 for (i = 0; i < tbl_size; i++) {
5186 u16 ring_id, j;
5187
5188 j = bp->rss_indir_tbl[i];
5189 rxr = &bp->rx_ring[j];
5190
5191 ring_id = rxr->rx_ring_struct.fw_ring_id;
5192 *ring_tbl++ = cpu_to_le16(ring_id);
5193 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5194 *ring_tbl++ = cpu_to_le16(ring_id);
5195 }
5196 }
5197
5198 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5199 {
5200 if (bp->flags & BNXT_FLAG_CHIP_P5)
5201 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5202 else
5203 __bnxt_fill_hw_rss_tbl(bp, vnic);
5204 }
5205
5206 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5207 {
5208 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5209 struct hwrm_vnic_rss_cfg_input req = {0};
5210
5211 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5212 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5213 return 0;
5214
5215 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5216 if (set_rss) {
5217 bnxt_fill_hw_rss_tbl(bp, vnic);
5218 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5219 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5220 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5221 req.hash_key_tbl_addr =
5222 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5223 }
5224 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5225 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5226 }
5227
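/* P5 chips use (RX ring ID, completion ring ID) pairs in the RSS table and
 * may need several RSS contexts, each covering BNXT_RSS_TABLE_ENTRIES_P5
 * rings, so the table is programmed one context at a time.
 */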
5228 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5229 {
5230 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5231 struct hwrm_vnic_rss_cfg_input req = {0};
5232 dma_addr_t ring_tbl_map;
5233 u32 i, nr_ctxs;
5234
5235 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5236 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5237 if (!set_rss) {
5238 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5239 return 0;
5240 }
5241 bnxt_fill_hw_rss_tbl(bp, vnic);
5242 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5243 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5244 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5245 ring_tbl_map = vnic->rss_table_dma_addr;
5246 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5247 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5248 int rc;
5249
5250 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5251 req.ring_table_pair_index = i;
5252 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5253 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5254 if (rc)
5255 return rc;
5256 }
5257 return 0;
5258 }
5259
5260 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5261 {
5262 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5263 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5264
5265 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5266 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5267 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5268 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5269 req.enables =
5270 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5271 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5272 /* thresholds not implemented in firmware yet */
5273 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5274 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5275 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5276 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5277 }
5278
5279 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5280 u16 ctx_idx)
5281 {
5282 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5283
5284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5285 req.rss_cos_lb_ctx_id =
5286 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5287
5288 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5289 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5290 }
5291
5292 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5293 {
5294 int i, j;
5295
5296 for (i = 0; i < bp->nr_vnics; i++) {
5297 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5298
5299 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5300 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5301 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5302 }
5303 }
5304 bp->rsscos_nr_ctxs = 0;
5305 }
5306
5307 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5308 {
5309 int rc;
5310 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5311 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5312 bp->hwrm_cmd_resp_addr;
5313
5314 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5315 -1);
5316
5317 mutex_lock(&bp->hwrm_cmd_lock);
5318 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5319 if (!rc)
5320 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5321 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5322 mutex_unlock(&bp->hwrm_cmd_lock);
5323
5324 return rc;
5325 }
5326
5327 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5328 {
5329 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5330 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5331 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5332 }
5333
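/* Configure a VNIC.  On P5 chips only the default RX/completion ring IDs
 * are programmed; on older chips the default ring group and the RSS/COS/LB
 * rules are set instead.  The MRU, VLAN strip mode and RoCE VNIC mode are
 * configured in both cases.
 */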
5334 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5335 {
5336 unsigned int ring = 0, grp_idx;
5337 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5338 struct hwrm_vnic_cfg_input req = {0};
5339 u16 def_vlan = 0;
5340
5341 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5342
5343 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5344 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5345
5346 req.default_rx_ring_id =
5347 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5348 req.default_cmpl_ring_id =
5349 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5350 req.enables =
5351 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5352 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5353 goto vnic_mru;
5354 }
5355 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5356 /* Only RSS is supported for now; TBD: COS & LB */
5357 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5358 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5359 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5360 VNIC_CFG_REQ_ENABLES_MRU);
5361 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5362 req.rss_rule =
5363 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5364 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5365 VNIC_CFG_REQ_ENABLES_MRU);
5366 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5367 } else {
5368 req.rss_rule = cpu_to_le16(0xffff);
5369 }
5370
5371 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5372 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5373 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5374 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5375 } else {
5376 req.cos_rule = cpu_to_le16(0xffff);
5377 }
5378
5379 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5380 ring = 0;
5381 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5382 ring = vnic_id - 1;
5383 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5384 ring = bp->rx_nr_rings - 1;
5385
5386 grp_idx = bp->rx_ring[ring].bnapi->index;
5387 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5388 req.lb_rule = cpu_to_le16(0xffff);
5389 vnic_mru:
5390 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5391
5392 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5393 #ifdef CONFIG_BNXT_SRIOV
5394 if (BNXT_VF(bp))
5395 def_vlan = bp->vf.vlan;
5396 #endif
5397 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5398 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5399 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5400 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5401
5402 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5403 }
5404
5405 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5406 {
5407 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5408 struct hwrm_vnic_free_input req = {0};
5409
5410 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5411 req.vnic_id =
5412 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5413
5414 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5415 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5416 }
5417 }
5418
5419 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5420 {
5421 u16 i;
5422
5423 for (i = 0; i < bp->nr_vnics; i++)
5424 bnxt_hwrm_vnic_free_one(bp, i);
5425 }
5426
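/* Allocate a vnic in firmware.  On non-P5 chips the RX ring groups in
 * [start_rx_ring_idx, start_rx_ring_idx + nr_rings) are mapped to the
 * vnic first; vnic 0 is requested as the default vnic.
 */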
5427 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5428 unsigned int start_rx_ring_idx,
5429 unsigned int nr_rings)
5430 {
5431 int rc = 0;
5432 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5433 struct hwrm_vnic_alloc_input req = {0};
5434 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5435 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5436
5437 if (bp->flags & BNXT_FLAG_CHIP_P5)
5438 goto vnic_no_ring_grps;
5439
5440 /* map ring groups to this vnic */
5441 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5442 grp_idx = bp->rx_ring[i].bnapi->index;
5443 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5444 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5445 j, nr_rings);
5446 break;
5447 }
5448 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5449 }
5450
5451 vnic_no_ring_grps:
5452 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5453 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5454 if (vnic_id == 0)
5455 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5456
5457 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5458
5459 mutex_lock(&bp->hwrm_cmd_lock);
5460 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5461 if (!rc)
5462 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5463 mutex_unlock(&bp->hwrm_cmd_lock);
5464 return rc;
5465 }
5466
5467 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5468 {
5469 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5470 struct hwrm_vnic_qcaps_input req = {0};
5471 int rc;
5472
5473 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5474 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5475 if (bp->hwrm_spec_code < 0x10600)
5476 return 0;
5477
5478 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5479 mutex_lock(&bp->hwrm_cmd_lock);
5480 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5481 if (!rc) {
5482 u32 flags = le32_to_cpu(resp->flags);
5483
5484 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5485 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5486 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5487 if (flags &
5488 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5489 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5490
5491 /* Older P5 fw before EXT_HW_STATS support did not set
5492 * VLAN_STRIP_CAP properly.
5493 */
5494 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5495 (BNXT_CHIP_P5_THOR(bp) &&
5496 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5497 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5498 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5499 if (bp->max_tpa_v2) {
5500 if (BNXT_CHIP_P5_THOR(bp))
5501 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5502 else
5503 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5504 }
5505 }
5506 mutex_unlock(&bp->hwrm_cmd_lock);
5507 return rc;
5508 }
5509
5510 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5511 {
5512 u16 i;
5513 u32 rc = 0;
5514
5515 if (bp->flags & BNXT_FLAG_CHIP_P5)
5516 return 0;
5517
5518 mutex_lock(&bp->hwrm_cmd_lock);
5519 for (i = 0; i < bp->rx_nr_rings; i++) {
5520 struct hwrm_ring_grp_alloc_input req = {0};
5521 struct hwrm_ring_grp_alloc_output *resp =
5522 bp->hwrm_cmd_resp_addr;
5523 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5524
5525 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5526
5527 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5528 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5529 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5530 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5531
5532 rc = _hwrm_send_message(bp, &req, sizeof(req),
5533 HWRM_CMD_TIMEOUT);
5534 if (rc)
5535 break;
5536
5537 bp->grp_info[grp_idx].fw_grp_id =
5538 le32_to_cpu(resp->ring_group_id);
5539 }
5540 mutex_unlock(&bp->hwrm_cmd_lock);
5541 return rc;
5542 }
5543
5544 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5545 {
5546 u16 i;
5547 struct hwrm_ring_grp_free_input req = {0};
5548
5549 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5550 return;
5551
5552 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5553
5554 mutex_lock(&bp->hwrm_cmd_lock);
5555 for (i = 0; i < bp->cp_nr_rings; i++) {
5556 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5557 continue;
5558 req.ring_group_id =
5559 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5560
5561 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5562 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5563 }
5564 mutex_unlock(&bp->hwrm_cmd_lock);
5565 }
5566
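/* Send HWRM_RING_ALLOC for a single TX, RX, AGG, CMPL or NQ ring and
 * save the firmware ring id returned in the response.
 */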
5567 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5568 struct bnxt_ring_struct *ring,
5569 u32 ring_type, u32 map_index)
5570 {
5571 int rc = 0, err = 0;
5572 struct hwrm_ring_alloc_input req = {0};
5573 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5574 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5575 struct bnxt_ring_grp_info *grp_info;
5576 u16 ring_id;
5577
5578 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5579
5580 req.enables = 0;
5581 if (rmem->nr_pages > 1) {
5582 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5583 /* Page size is in log2 units */
5584 req.page_size = BNXT_PAGE_SHIFT;
5585 req.page_tbl_depth = 1;
5586 } else {
5587 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5588 }
5589 req.fbo = 0;
5590 /* Association of ring index with doorbell index and MSIX number */
5591 req.logical_id = cpu_to_le16(map_index);
5592
5593 switch (ring_type) {
5594 case HWRM_RING_ALLOC_TX: {
5595 struct bnxt_tx_ring_info *txr;
5596
5597 txr = container_of(ring, struct bnxt_tx_ring_info,
5598 tx_ring_struct);
5599 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5600 /* Association of transmit ring with completion ring */
5601 grp_info = &bp->grp_info[ring->grp_idx];
5602 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5603 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5604 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5605 req.queue_id = cpu_to_le16(ring->queue_id);
5606 break;
5607 }
5608 case HWRM_RING_ALLOC_RX:
5609 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5610 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5611 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5612 u16 flags = 0;
5613
5614 /* Association of rx ring with stats context */
5615 grp_info = &bp->grp_info[ring->grp_idx];
5616 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5617 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5618 req.enables |= cpu_to_le32(
5619 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5620 if (NET_IP_ALIGN == 2)
5621 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5622 req.flags = cpu_to_le16(flags);
5623 }
5624 break;
5625 case HWRM_RING_ALLOC_AGG:
5626 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5627 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5628 /* Association of agg ring with rx ring */
5629 grp_info = &bp->grp_info[ring->grp_idx];
5630 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5631 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5632 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5633 req.enables |= cpu_to_le32(
5634 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5635 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5636 } else {
5637 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5638 }
5639 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5640 break;
5641 case HWRM_RING_ALLOC_CMPL:
5642 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5643 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5644 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5645 /* Association of cp ring with nq */
5646 grp_info = &bp->grp_info[map_index];
5647 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5648 req.cq_handle = cpu_to_le64(ring->handle);
5649 req.enables |= cpu_to_le32(
5650 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5651 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5652 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5653 }
5654 break;
5655 case HWRM_RING_ALLOC_NQ:
5656 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5657 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5658 if (bp->flags & BNXT_FLAG_USING_MSIX)
5659 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5660 break;
5661 default:
5662 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5663 ring_type);
5664 return -1;
5665 }
5666
5667 mutex_lock(&bp->hwrm_cmd_lock);
5668 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5669 err = le16_to_cpu(resp->error_code);
5670 ring_id = le16_to_cpu(resp->ring_id);
5671 mutex_unlock(&bp->hwrm_cmd_lock);
5672
5673 if (rc || err) {
5674 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5675 ring_type, rc, err);
5676 return -EIO;
5677 }
5678 ring->fw_ring_id = ring_id;
5679 return rc;
5680 }
5681
5682 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5683 {
5684 int rc;
5685
5686 if (BNXT_PF(bp)) {
5687 struct hwrm_func_cfg_input req = {0};
5688
5689 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5690 req.fid = cpu_to_le16(0xffff);
5691 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5692 req.async_event_cr = cpu_to_le16(idx);
5693 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5694 } else {
5695 struct hwrm_func_vf_cfg_input req = {0};
5696
5697 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5698 req.enables =
5699 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5700 req.async_event_cr = cpu_to_le16(idx);
5701 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5702 }
5703 return rc;
5704 }
5705
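/* Set up the doorbell address and key for a newly allocated ring.  P5
 * chips use 64-bit doorbells at a fixed PF/VF offset with the ring id
 * encoded in the key; legacy chips use 32-bit doorbells at
 * bar1 + map_idx * 0x80.
 */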
5706 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5707 u32 map_idx, u32 xid)
5708 {
5709 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5710 if (BNXT_PF(bp))
5711 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5712 else
5713 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5714 switch (ring_type) {
5715 case HWRM_RING_ALLOC_TX:
5716 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5717 break;
5718 case HWRM_RING_ALLOC_RX:
5719 case HWRM_RING_ALLOC_AGG:
5720 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5721 break;
5722 case HWRM_RING_ALLOC_CMPL:
5723 db->db_key64 = DBR_PATH_L2;
5724 break;
5725 case HWRM_RING_ALLOC_NQ:
5726 db->db_key64 = DBR_PATH_L2;
5727 break;
5728 }
5729 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5730 } else {
5731 db->doorbell = bp->bar1 + map_idx * 0x80;
5732 switch (ring_type) {
5733 case HWRM_RING_ALLOC_TX:
5734 db->db_key32 = DB_KEY_TX;
5735 break;
5736 case HWRM_RING_ALLOC_RX:
5737 case HWRM_RING_ALLOC_AGG:
5738 db->db_key32 = DB_KEY_RX;
5739 break;
5740 case HWRM_RING_ALLOC_CMPL:
5741 db->db_key32 = DB_KEY_CP;
5742 break;
5743 }
5744 }
5745 }
5746
5747 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5748 {
5749 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5750 int i, rc = 0;
5751 u32 type;
5752
5753 if (bp->flags & BNXT_FLAG_CHIP_P5)
5754 type = HWRM_RING_ALLOC_NQ;
5755 else
5756 type = HWRM_RING_ALLOC_CMPL;
5757 for (i = 0; i < bp->cp_nr_rings; i++) {
5758 struct bnxt_napi *bnapi = bp->bnapi[i];
5759 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5760 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5761 u32 map_idx = ring->map_idx;
5762 unsigned int vector;
5763
5764 vector = bp->irq_tbl[map_idx].vector;
5765 disable_irq_nosync(vector);
5766 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5767 if (rc) {
5768 enable_irq(vector);
5769 goto err_out;
5770 }
5771 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5772 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5773 enable_irq(vector);
5774 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5775
5776 if (!i) {
5777 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5778 if (rc)
5779 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5780 }
5781 }
5782
5783 type = HWRM_RING_ALLOC_TX;
5784 for (i = 0; i < bp->tx_nr_rings; i++) {
5785 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5786 struct bnxt_ring_struct *ring;
5787 u32 map_idx;
5788
5789 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5790 struct bnxt_napi *bnapi = txr->bnapi;
5791 struct bnxt_cp_ring_info *cpr, *cpr2;
5792 u32 type2 = HWRM_RING_ALLOC_CMPL;
5793
5794 cpr = &bnapi->cp_ring;
5795 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5796 ring = &cpr2->cp_ring_struct;
5797 ring->handle = BNXT_TX_HDL;
5798 map_idx = bnapi->index;
5799 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5800 if (rc)
5801 goto err_out;
5802 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5803 ring->fw_ring_id);
5804 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5805 }
5806 ring = &txr->tx_ring_struct;
5807 map_idx = i;
5808 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5809 if (rc)
5810 goto err_out;
5811 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5812 }
5813
5814 type = HWRM_RING_ALLOC_RX;
5815 for (i = 0; i < bp->rx_nr_rings; i++) {
5816 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5817 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5818 struct bnxt_napi *bnapi = rxr->bnapi;
5819 u32 map_idx = bnapi->index;
5820
5821 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5822 if (rc)
5823 goto err_out;
5824 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5825 /* If we have agg rings, post agg buffers first. */
5826 if (!agg_rings)
5827 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5828 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5829 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5830 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5831 u32 type2 = HWRM_RING_ALLOC_CMPL;
5832 struct bnxt_cp_ring_info *cpr2;
5833
5834 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5835 ring = &cpr2->cp_ring_struct;
5836 ring->handle = BNXT_RX_HDL;
5837 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5838 if (rc)
5839 goto err_out;
5840 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5841 ring->fw_ring_id);
5842 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5843 }
5844 }
5845
5846 if (agg_rings) {
5847 type = HWRM_RING_ALLOC_AGG;
5848 for (i = 0; i < bp->rx_nr_rings; i++) {
5849 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5850 struct bnxt_ring_struct *ring =
5851 &rxr->rx_agg_ring_struct;
5852 u32 grp_idx = ring->grp_idx;
5853 u32 map_idx = grp_idx + bp->rx_nr_rings;
5854
5855 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5856 if (rc)
5857 goto err_out;
5858
5859 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5860 ring->fw_ring_id);
5861 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5862 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5863 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5864 }
5865 }
5866 err_out:
5867 return rc;
5868 }
5869
5870 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5871 struct bnxt_ring_struct *ring,
5872 u32 ring_type, int cmpl_ring_id)
5873 {
5874 int rc;
5875 struct hwrm_ring_free_input req = {0};
5876 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5877 u16 error_code;
5878
5879 if (BNXT_NO_FW_ACCESS(bp))
5880 return 0;
5881
5882 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5883 req.ring_type = ring_type;
5884 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5885
5886 mutex_lock(&bp->hwrm_cmd_lock);
5887 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5888 error_code = le16_to_cpu(resp->error_code);
5889 mutex_unlock(&bp->hwrm_cmd_lock);
5890
5891 if (rc || error_code) {
5892 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5893 ring_type, rc, error_code);
5894 return -EIO;
5895 }
5896 return 0;
5897 }
5898
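/* Free all firmware rings: TX, RX and AGG rings first, then the
 * completion/NQ rings after interrupts have been disabled, since the
 * IRQ doorbells stop working once the completion rings are freed.
 */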
5899 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5900 {
5901 u32 type;
5902 int i;
5903
5904 if (!bp->bnapi)
5905 return;
5906
5907 for (i = 0; i < bp->tx_nr_rings; i++) {
5908 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5909 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5910
5911 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5912 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5913
5914 hwrm_ring_free_send_msg(bp, ring,
5915 RING_FREE_REQ_RING_TYPE_TX,
5916 close_path ? cmpl_ring_id :
5917 INVALID_HW_RING_ID);
5918 ring->fw_ring_id = INVALID_HW_RING_ID;
5919 }
5920 }
5921
5922 for (i = 0; i < bp->rx_nr_rings; i++) {
5923 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5924 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5925 u32 grp_idx = rxr->bnapi->index;
5926
5927 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5928 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5929
5930 hwrm_ring_free_send_msg(bp, ring,
5931 RING_FREE_REQ_RING_TYPE_RX,
5932 close_path ? cmpl_ring_id :
5933 INVALID_HW_RING_ID);
5934 ring->fw_ring_id = INVALID_HW_RING_ID;
5935 bp->grp_info[grp_idx].rx_fw_ring_id =
5936 INVALID_HW_RING_ID;
5937 }
5938 }
5939
5940 if (bp->flags & BNXT_FLAG_CHIP_P5)
5941 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5942 else
5943 type = RING_FREE_REQ_RING_TYPE_RX;
5944 for (i = 0; i < bp->rx_nr_rings; i++) {
5945 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5946 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5947 u32 grp_idx = rxr->bnapi->index;
5948
5949 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5950 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5951
5952 hwrm_ring_free_send_msg(bp, ring, type,
5953 close_path ? cmpl_ring_id :
5954 INVALID_HW_RING_ID);
5955 ring->fw_ring_id = INVALID_HW_RING_ID;
5956 bp->grp_info[grp_idx].agg_fw_ring_id =
5957 INVALID_HW_RING_ID;
5958 }
5959 }
5960
5961 /* The completion rings are about to be freed. After that the
5962 * IRQ doorbell will not work anymore. So we need to disable
5963 * IRQ here.
5964 */
5965 bnxt_disable_int_sync(bp);
5966
5967 if (bp->flags & BNXT_FLAG_CHIP_P5)
5968 type = RING_FREE_REQ_RING_TYPE_NQ;
5969 else
5970 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5971 for (i = 0; i < bp->cp_nr_rings; i++) {
5972 struct bnxt_napi *bnapi = bp->bnapi[i];
5973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5974 struct bnxt_ring_struct *ring;
5975 int j;
5976
5977 for (j = 0; j < 2; j++) {
5978 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5979
5980 if (cpr2) {
5981 ring = &cpr2->cp_ring_struct;
5982 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5983 continue;
5984 hwrm_ring_free_send_msg(bp, ring,
5985 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5986 INVALID_HW_RING_ID);
5987 ring->fw_ring_id = INVALID_HW_RING_ID;
5988 }
5989 }
5990 ring = &cpr->cp_ring_struct;
5991 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5992 hwrm_ring_free_send_msg(bp, ring, type,
5993 INVALID_HW_RING_ID);
5994 ring->fw_ring_id = INVALID_HW_RING_ID;
5995 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5996 }
5997 }
5998 }
5999
6000 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6001 bool shared);
6002
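/* Read back the resources currently reserved for this function via
 * HWRM_FUNC_QCFG and cache them in bp->hw_resc.  On P5 chips the
 * reserved RX/TX counts are trimmed to fit the allocated completion
 * rings.
 */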
6003 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6004 {
6005 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6006 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6007 struct hwrm_func_qcfg_input req = {0};
6008 int rc;
6009
6010 if (bp->hwrm_spec_code < 0x10601)
6011 return 0;
6012
6013 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6014 req.fid = cpu_to_le16(0xffff);
6015 mutex_lock(&bp->hwrm_cmd_lock);
6016 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6017 if (rc) {
6018 mutex_unlock(&bp->hwrm_cmd_lock);
6019 return rc;
6020 }
6021
6022 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6023 if (BNXT_NEW_RM(bp)) {
6024 u16 cp, stats;
6025
6026 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6027 hw_resc->resv_hw_ring_grps =
6028 le32_to_cpu(resp->alloc_hw_ring_grps);
6029 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6030 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6031 stats = le16_to_cpu(resp->alloc_stat_ctx);
6032 hw_resc->resv_irqs = cp;
6033 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6034 int rx = hw_resc->resv_rx_rings;
6035 int tx = hw_resc->resv_tx_rings;
6036
6037 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6038 rx >>= 1;
6039 if (cp < (rx + tx)) {
6040 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6041 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6042 rx <<= 1;
6043 hw_resc->resv_rx_rings = rx;
6044 hw_resc->resv_tx_rings = tx;
6045 }
6046 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6047 hw_resc->resv_hw_ring_grps = rx;
6048 }
6049 hw_resc->resv_cp_rings = cp;
6050 hw_resc->resv_stat_ctxs = stats;
6051 }
6052 mutex_unlock(&bp->hwrm_cmd_lock);
6053 return 0;
6054 }
6055
6056 /* Caller must hold bp->hwrm_cmd_lock */
6057 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6058 {
6059 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6060 struct hwrm_func_qcfg_input req = {0};
6061 int rc;
6062
6063 if (bp->hwrm_spec_code < 0x10601)
6064 return 0;
6065
6066 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6067 req.fid = cpu_to_le16(fid);
6068 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6069 if (!rc)
6070 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6071
6072 return rc;
6073 }
6074
6075 static bool bnxt_rfs_supported(struct bnxt *bp);
6076
6077 static void
6078 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6079 int tx_rings, int rx_rings, int ring_grps,
6080 int cp_rings, int stats, int vnics)
6081 {
6082 u32 enables = 0;
6083
6084 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6085 req->fid = cpu_to_le16(0xffff);
6086 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6087 req->num_tx_rings = cpu_to_le16(tx_rings);
6088 if (BNXT_NEW_RM(bp)) {
6089 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6090 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6092 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6093 enables |= tx_rings + ring_grps ?
6094 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6095 enables |= rx_rings ?
6096 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6097 } else {
6098 enables |= cp_rings ?
6099 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6100 enables |= ring_grps ?
6101 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6102 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6103 }
6104 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6105
6106 req->num_rx_rings = cpu_to_le16(rx_rings);
6107 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6108 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6109 req->num_msix = cpu_to_le16(cp_rings);
6110 req->num_rsscos_ctxs =
6111 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6112 } else {
6113 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6114 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6115 req->num_rsscos_ctxs = cpu_to_le16(1);
6116 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6117 bnxt_rfs_supported(bp))
6118 req->num_rsscos_ctxs =
6119 cpu_to_le16(ring_grps + 1);
6120 }
6121 req->num_stat_ctxs = cpu_to_le16(stats);
6122 req->num_vnics = cpu_to_le16(vnics);
6123 }
6124 req->enables = cpu_to_le32(enables);
6125 }
6126
6127 static void
6128 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6129 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6130 int rx_rings, int ring_grps, int cp_rings,
6131 int stats, int vnics)
6132 {
6133 u32 enables = 0;
6134
6135 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6136 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6137 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6138 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6139 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6140 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6141 enables |= tx_rings + ring_grps ?
6142 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6143 } else {
6144 enables |= cp_rings ?
6145 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6146 enables |= ring_grps ?
6147 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6148 }
6149 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6150 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6151
6152 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6153 req->num_tx_rings = cpu_to_le16(tx_rings);
6154 req->num_rx_rings = cpu_to_le16(rx_rings);
6155 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6156 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6157 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6158 } else {
6159 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6160 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6161 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6162 }
6163 req->num_stat_ctxs = cpu_to_le16(stats);
6164 req->num_vnics = cpu_to_le16(vnics);
6165
6166 req->enables = cpu_to_le32(enables);
6167 }
6168
6169 static int
6170 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6171 int ring_grps, int cp_rings, int stats, int vnics)
6172 {
6173 struct hwrm_func_cfg_input req = {0};
6174 int rc;
6175
6176 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6177 cp_rings, stats, vnics);
6178 if (!req.enables)
6179 return 0;
6180
6181 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6182 if (rc)
6183 return rc;
6184
6185 if (bp->hwrm_spec_code < 0x10601)
6186 bp->hw_resc.resv_tx_rings = tx_rings;
6187
6188 return bnxt_hwrm_get_rings(bp);
6189 }
6190
6191 static int
6192 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6193 int ring_grps, int cp_rings, int stats, int vnics)
6194 {
6195 struct hwrm_func_vf_cfg_input req = {0};
6196 int rc;
6197
6198 if (!BNXT_NEW_RM(bp)) {
6199 bp->hw_resc.resv_tx_rings = tx_rings;
6200 return 0;
6201 }
6202
6203 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6204 cp_rings, stats, vnics);
6205 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6206 if (rc)
6207 return rc;
6208
6209 return bnxt_hwrm_get_rings(bp);
6210 }
6211
6212 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6213 int cp, int stat, int vnic)
6214 {
6215 if (BNXT_PF(bp))
6216 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6217 vnic);
6218 else
6219 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6220 vnic);
6221 }
6222
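/* Return the number of NQs/MSI-X vectors in use, including any vectors
 * set aside for the RoCE (ULP) driver.
 */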
6223 int bnxt_nq_rings_in_use(struct bnxt *bp)
6224 {
6225 int cp = bp->cp_nr_rings;
6226 int ulp_msix, ulp_base;
6227
6228 ulp_msix = bnxt_get_ulp_msix_num(bp);
6229 if (ulp_msix) {
6230 ulp_base = bnxt_get_ulp_msix_base(bp);
6231 cp += ulp_msix;
6232 if ((ulp_base + ulp_msix) > cp)
6233 cp = ulp_base + ulp_msix;
6234 }
6235 return cp;
6236 }
6237
6238 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6239 {
6240 int cp;
6241
6242 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6243 return bnxt_nq_rings_in_use(bp);
6244
6245 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6246 return cp;
6247 }
6248
6249 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6250 {
6251 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6252 int cp = bp->cp_nr_rings;
6253
6254 if (!ulp_stat)
6255 return cp;
6256
6257 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6258 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6259
6260 return cp + ulp_stat;
6261 }
6262
6263 /* Check if a default RSS map needs to be set up. This function is only
6264 * used on older firmware that does not require reserving RX rings.
6265 */
6266 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6267 {
6268 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6269
6270 /* The RSS map is valid for RX rings set to resv_rx_rings */
6271 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6272 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6273 if (!netif_is_rxfh_configured(bp->dev))
6274 bnxt_set_dflt_rss_indir_tbl(bp);
6275 }
6276 }
6277
6278 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6279 {
6280 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6281 int cp = bnxt_cp_rings_in_use(bp);
6282 int nq = bnxt_nq_rings_in_use(bp);
6283 int rx = bp->rx_nr_rings, stat;
6284 int vnic = 1, grp = rx;
6285
6286 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6287 bp->hwrm_spec_code >= 0x10601)
6288 return true;
6289
6290 /* Old firmware does not need RX ring reservations but we still
6291 * need to set up a default RSS map when needed. With new firmware
6292 * we go through RX ring reservations first and then set up the
6293 * RSS map for the successfully reserved RX rings when needed.
6294 */
6295 if (!BNXT_NEW_RM(bp)) {
6296 bnxt_check_rss_tbl_no_rmgr(bp);
6297 return false;
6298 }
6299 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6300 vnic = rx + 1;
6301 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6302 rx <<= 1;
6303 stat = bnxt_get_func_stat_ctxs(bp);
6304 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6305 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6306 (hw_resc->resv_hw_ring_grps != grp &&
6307 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6308 return true;
6309 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6310 hw_resc->resv_irqs != nq)
6311 return true;
6312 return false;
6313 }
6314
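/* Reserve rings with firmware and adjust the driver's ring counts to
 * what was actually granted, trimming RX/TX/completion rings and
 * falling back to no aggregation rings if necessary.
 */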
6315 static int __bnxt_reserve_rings(struct bnxt *bp)
6316 {
6317 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6318 int cp = bnxt_nq_rings_in_use(bp);
6319 int tx = bp->tx_nr_rings;
6320 int rx = bp->rx_nr_rings;
6321 int grp, rx_rings, rc;
6322 int vnic = 1, stat;
6323 bool sh = false;
6324
6325 if (!bnxt_need_reserve_rings(bp))
6326 return 0;
6327
6328 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6329 sh = true;
6330 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6331 vnic = rx + 1;
6332 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6333 rx <<= 1;
6334 grp = bp->rx_nr_rings;
6335 stat = bnxt_get_func_stat_ctxs(bp);
6336
6337 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6338 if (rc)
6339 return rc;
6340
6341 tx = hw_resc->resv_tx_rings;
6342 if (BNXT_NEW_RM(bp)) {
6343 rx = hw_resc->resv_rx_rings;
6344 cp = hw_resc->resv_irqs;
6345 grp = hw_resc->resv_hw_ring_grps;
6346 vnic = hw_resc->resv_vnics;
6347 stat = hw_resc->resv_stat_ctxs;
6348 }
6349
6350 rx_rings = rx;
6351 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6352 if (rx >= 2) {
6353 rx_rings = rx >> 1;
6354 } else {
6355 if (netif_running(bp->dev))
6356 return -ENOMEM;
6357
6358 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6359 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6360 bp->dev->hw_features &= ~NETIF_F_LRO;
6361 bp->dev->features &= ~NETIF_F_LRO;
6362 bnxt_set_ring_params(bp);
6363 }
6364 }
6365 rx_rings = min_t(int, rx_rings, grp);
6366 cp = min_t(int, cp, bp->cp_nr_rings);
6367 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6368 stat -= bnxt_get_ulp_stat_ctxs(bp);
6369 cp = min_t(int, cp, stat);
6370 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6371 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6372 rx = rx_rings << 1;
6373 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6374 bp->tx_nr_rings = tx;
6375
6376 /* If we cannot reserve all the RX rings, reset the RSS map only
6377 * if absolutely necessary
6378 */
6379 if (rx_rings != bp->rx_nr_rings) {
6380 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6381 rx_rings, bp->rx_nr_rings);
6382 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6383 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6384 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6385 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6386 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6387 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6388 }
6389 }
6390 bp->rx_nr_rings = rx_rings;
6391 bp->cp_nr_rings = cp;
6392
6393 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6394 return -ENOMEM;
6395
6396 if (!netif_is_rxfh_configured(bp->dev))
6397 bnxt_set_dflt_rss_indir_tbl(bp);
6398
6399 return rc;
6400 }
6401
6402 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6403 int ring_grps, int cp_rings, int stats,
6404 int vnics)
6405 {
6406 struct hwrm_func_vf_cfg_input req = {0};
6407 u32 flags;
6408
6409 if (!BNXT_NEW_RM(bp))
6410 return 0;
6411
6412 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6413 cp_rings, stats, vnics);
6414 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6415 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6416 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6417 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6418 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6419 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6420 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6421 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6422
6423 req.flags = cpu_to_le32(flags);
6424 return hwrm_send_message_silent(bp, &req, sizeof(req),
6425 HWRM_CMD_TIMEOUT);
6426 }
6427
6428 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6429 int ring_grps, int cp_rings, int stats,
6430 int vnics)
6431 {
6432 struct hwrm_func_cfg_input req = {0};
6433 u32 flags;
6434
6435 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6436 cp_rings, stats, vnics);
6437 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6438 if (BNXT_NEW_RM(bp)) {
6439 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6440 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6441 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6442 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6443 if (bp->flags & BNXT_FLAG_CHIP_P5)
6444 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6445 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6446 else
6447 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6448 }
6449
6450 req.flags = cpu_to_le32(flags);
6451 return hwrm_send_message_silent(bp, &req, sizeof(req),
6452 HWRM_CMD_TIMEOUT);
6453 }
6454
6455 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6456 int ring_grps, int cp_rings, int stats,
6457 int vnics)
6458 {
6459 if (bp->hwrm_spec_code < 0x10801)
6460 return 0;
6461
6462 if (BNXT_PF(bp))
6463 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6464 ring_grps, cp_rings, stats,
6465 vnics);
6466
6467 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6468 cp_rings, stats, vnics);
6469 }
6470
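/* Query interrupt coalescing capabilities from firmware.  Legacy
 * defaults are used if the firmware is older than spec 0x10902 or the
 * query fails.
 */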
6471 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6472 {
6473 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6474 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6475 struct hwrm_ring_aggint_qcaps_input req = {0};
6476 int rc;
6477
6478 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6479 coal_cap->num_cmpl_dma_aggr_max = 63;
6480 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6481 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6482 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6483 coal_cap->int_lat_tmr_min_max = 65535;
6484 coal_cap->int_lat_tmr_max_max = 65535;
6485 coal_cap->num_cmpl_aggr_int_max = 65535;
6486 coal_cap->timer_units = 80;
6487
6488 if (bp->hwrm_spec_code < 0x10902)
6489 return;
6490
6491 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6492 mutex_lock(&bp->hwrm_cmd_lock);
6493 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6494 if (!rc) {
6495 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6496 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6497 coal_cap->num_cmpl_dma_aggr_max =
6498 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6499 coal_cap->num_cmpl_dma_aggr_during_int_max =
6500 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6501 coal_cap->cmpl_aggr_dma_tmr_max =
6502 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6503 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6504 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6505 coal_cap->int_lat_tmr_min_max =
6506 le16_to_cpu(resp->int_lat_tmr_min_max);
6507 coal_cap->int_lat_tmr_max_max =
6508 le16_to_cpu(resp->int_lat_tmr_max_max);
6509 coal_cap->num_cmpl_aggr_int_max =
6510 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6511 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6512 }
6513 mutex_unlock(&bp->hwrm_cmd_lock);
6514 }
6515
6516 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6517 {
6518 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6519
6520 return usec * 1000 / coal_cap->timer_units;
6521 }
6522
6523 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6524 struct bnxt_coal *hw_coal,
6525 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6526 {
6527 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6528 u32 cmpl_params = coal_cap->cmpl_params;
6529 u16 val, tmr, max, flags = 0;
6530
6531 max = hw_coal->bufs_per_record * 128;
6532 if (hw_coal->budget)
6533 max = hw_coal->bufs_per_record * hw_coal->budget;
6534 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6535
6536 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6537 req->num_cmpl_aggr_int = cpu_to_le16(val);
6538
6539 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6540 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6541
6542 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6543 coal_cap->num_cmpl_dma_aggr_during_int_max);
6544 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6545
6546 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6547 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6548 req->int_lat_tmr_max = cpu_to_le16(tmr);
6549
6550 /* min timer set to 1/2 of interrupt timer */
6551 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6552 val = tmr / 2;
6553 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6554 req->int_lat_tmr_min = cpu_to_le16(val);
6555 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6556 }
6557
6558 /* buf timer set to 1/4 of interrupt timer */
6559 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6560 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6561
6562 if (cmpl_params &
6563 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6564 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6565 val = clamp_t(u16, tmr, 1,
6566 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6567 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6568 req->enables |=
6569 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6570 }
6571
6572 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6573 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6574 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6575 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6576 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6577 req->flags = cpu_to_le16(flags);
6578 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6579 }
6580
6581 /* Caller holds bp->hwrm_cmd_lock */
6582 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6583 struct bnxt_coal *hw_coal)
6584 {
6585 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6586 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6587 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6588 u32 nq_params = coal_cap->nq_params;
6589 u16 tmr;
6590
6591 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6592 return 0;
6593
6594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6595 -1, -1);
6596 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6597 req.flags =
6598 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6599
6600 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6601 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6602 req.int_lat_tmr_min = cpu_to_le16(tmr);
6603 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6604 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6605 }
6606
6607 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6608 {
6609 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6610 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6611 struct bnxt_coal coal;
6612
6613 /* Tick values in microseconds.
6614 * 1 coal_buf x bufs_per_record = 1 completion record.
6615 */
6616 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6617
6618 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6619 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6620
6621 if (!bnapi->rx_ring)
6622 return -ENODEV;
6623
6624 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6625 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6626
6627 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6628
6629 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6630
6631 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6632 HWRM_CMD_TIMEOUT);
6633 }
6634
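/* Program interrupt coalescing for every completion ring, using the RX
 * or TX settings depending on the ring type.  On P5 chips the NQ
 * coalescing parameters are updated as well.
 */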
6635 int bnxt_hwrm_set_coal(struct bnxt *bp)
6636 {
6637 int i, rc = 0;
6638 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6639 req_tx = {0}, *req;
6640
6641 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6642 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6643 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6644 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6645
6646 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6647 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6648
6649 mutex_lock(&bp->hwrm_cmd_lock);
6650 for (i = 0; i < bp->cp_nr_rings; i++) {
6651 struct bnxt_napi *bnapi = bp->bnapi[i];
6652 struct bnxt_coal *hw_coal;
6653 u16 ring_id;
6654
6655 req = &req_rx;
6656 if (!bnapi->rx_ring) {
6657 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6658 req = &req_tx;
6659 } else {
6660 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6661 }
6662 req->ring_id = cpu_to_le16(ring_id);
6663
6664 rc = _hwrm_send_message(bp, req, sizeof(*req),
6665 HWRM_CMD_TIMEOUT);
6666 if (rc)
6667 break;
6668
6669 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6670 continue;
6671
6672 if (bnapi->rx_ring && bnapi->tx_ring) {
6673 req = &req_tx;
6674 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6675 req->ring_id = cpu_to_le16(ring_id);
6676 rc = _hwrm_send_message(bp, req, sizeof(*req),
6677 HWRM_CMD_TIMEOUT);
6678 if (rc)
6679 break;
6680 }
6681 if (bnapi->rx_ring)
6682 hw_coal = &bp->rx_coal;
6683 else
6684 hw_coal = &bp->tx_coal;
6685 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6686 }
6687 mutex_unlock(&bp->hwrm_cmd_lock);
6688 return rc;
6689 }
6690
6691 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6692 {
6693 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6694 struct hwrm_stat_ctx_free_input req = {0};
6695 int i;
6696
6697 if (!bp->bnapi)
6698 return;
6699
6700 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6701 return;
6702
6703 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6704 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6705
6706 mutex_lock(&bp->hwrm_cmd_lock);
6707 for (i = 0; i < bp->cp_nr_rings; i++) {
6708 struct bnxt_napi *bnapi = bp->bnapi[i];
6709 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6710
6711 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6712 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6713 if (BNXT_FW_MAJ(bp) <= 20) {
6714 req0.stat_ctx_id = req.stat_ctx_id;
6715 _hwrm_send_message(bp, &req0, sizeof(req0),
6716 HWRM_CMD_TIMEOUT);
6717 }
6718 _hwrm_send_message(bp, &req, sizeof(req),
6719 HWRM_CMD_TIMEOUT);
6720
6721 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6722 }
6723 }
6724 mutex_unlock(&bp->hwrm_cmd_lock);
6725 }
6726
6727 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6728 {
6729 int rc = 0, i;
6730 struct hwrm_stat_ctx_alloc_input req = {0};
6731 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6732
6733 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6734 return 0;
6735
6736 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6737
6738 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6739 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6740
6741 mutex_lock(&bp->hwrm_cmd_lock);
6742 for (i = 0; i < bp->cp_nr_rings; i++) {
6743 struct bnxt_napi *bnapi = bp->bnapi[i];
6744 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6745
6746 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6747
6748 rc = _hwrm_send_message(bp, &req, sizeof(req),
6749 HWRM_CMD_TIMEOUT);
6750 if (rc)
6751 break;
6752
6753 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6754
6755 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6756 }
6757 mutex_unlock(&bp->hwrm_cmd_lock);
6758 return rc;
6759 }
6760
6761 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6762 {
6763 struct hwrm_func_qcfg_input req = {0};
6764 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6765 u32 min_db_offset = 0;
6766 u16 flags;
6767 int rc;
6768
6769 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6770 req.fid = cpu_to_le16(0xffff);
6771 mutex_lock(&bp->hwrm_cmd_lock);
6772 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6773 if (rc)
6774 goto func_qcfg_exit;
6775
6776 #ifdef CONFIG_BNXT_SRIOV
6777 if (BNXT_VF(bp)) {
6778 struct bnxt_vf_info *vf = &bp->vf;
6779
6780 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6781 } else {
6782 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6783 }
6784 #endif
6785 flags = le16_to_cpu(resp->flags);
6786 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6787 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6788 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6789 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6790 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6791 }
6792 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6793 bp->flags |= BNXT_FLAG_MULTI_HOST;
6794 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6795 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6796
6797 switch (resp->port_partition_type) {
6798 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6799 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6800 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6801 bp->port_partition_type = resp->port_partition_type;
6802 break;
6803 }
6804 if (bp->hwrm_spec_code < 0x10707 ||
6805 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6806 bp->br_mode = BRIDGE_MODE_VEB;
6807 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6808 bp->br_mode = BRIDGE_MODE_VEPA;
6809 else
6810 bp->br_mode = BRIDGE_MODE_UNDEF;
6811
6812 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6813 if (!bp->max_mtu)
6814 bp->max_mtu = BNXT_MAX_MTU;
6815
6816 if (bp->db_size)
6817 goto func_qcfg_exit;
6818
6819 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6820 if (BNXT_PF(bp))
6821 min_db_offset = DB_PF_OFFSET_P5;
6822 else
6823 min_db_offset = DB_VF_OFFSET_P5;
6824 }
6825 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6826 1024);
6827 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6828 bp->db_size <= min_db_offset)
6829 bp->db_size = pci_resource_len(bp->pdev, 2);
6830
6831 func_qcfg_exit:
6832 mutex_unlock(&bp->hwrm_cmd_lock);
6833 return rc;
6834 }
6835
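/* Query the firmware backing store requirements (QP, SRQ, CQ, VNIC,
 * stats, TQM, MRAV and TIM context sizes) and allocate bp->ctx to hold
 * them.  Only done once, on PFs running firmware spec 0x10902 or later.
 */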
6836 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6837 {
6838 struct hwrm_func_backing_store_qcaps_input req = {0};
6839 struct hwrm_func_backing_store_qcaps_output *resp =
6840 bp->hwrm_cmd_resp_addr;
6841 int rc;
6842
6843 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6844 return 0;
6845
6846 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6847 mutex_lock(&bp->hwrm_cmd_lock);
6848 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6849 if (!rc) {
6850 struct bnxt_ctx_pg_info *ctx_pg;
6851 struct bnxt_ctx_mem_info *ctx;
6852 int i, tqm_rings;
6853
6854 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6855 if (!ctx) {
6856 rc = -ENOMEM;
6857 goto ctx_err;
6858 }
6859 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6860 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6861 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6862 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6863 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6864 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6865 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6866 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6867 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6868 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6869 ctx->vnic_max_vnic_entries =
6870 le16_to_cpu(resp->vnic_max_vnic_entries);
6871 ctx->vnic_max_ring_table_entries =
6872 le16_to_cpu(resp->vnic_max_ring_table_entries);
6873 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6874 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6875 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6876 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6877 ctx->tqm_min_entries_per_ring =
6878 le32_to_cpu(resp->tqm_min_entries_per_ring);
6879 ctx->tqm_max_entries_per_ring =
6880 le32_to_cpu(resp->tqm_max_entries_per_ring);
6881 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6882 if (!ctx->tqm_entries_multiple)
6883 ctx->tqm_entries_multiple = 1;
6884 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6885 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6886 ctx->mrav_num_entries_units =
6887 le16_to_cpu(resp->mrav_num_entries_units);
6888 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6889 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6890 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6891 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6892 if (!ctx->tqm_fp_rings_count)
6893 ctx->tqm_fp_rings_count = bp->max_q;
6894 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6895 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6896
6897 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6898 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6899 if (!ctx_pg) {
6900 kfree(ctx);
6901 rc = -ENOMEM;
6902 goto ctx_err;
6903 }
6904 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6905 ctx->tqm_mem[i] = ctx_pg;
6906 bp->ctx = ctx;
6907 } else {
6908 rc = 0;
6909 }
6910 ctx_err:
6911 mutex_unlock(&bp->hwrm_cmd_lock);
6912 return rc;
6913 }
6914
6915 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6916 __le64 *pg_dir)
6917 {
6918 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6919 if (rmem->depth >= 1) {
6920 if (rmem->depth == 2)
6921 *pg_attr |= 2;
6922 else
6923 *pg_attr |= 1;
6924 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6925 } else {
6926 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6927 }
6928 }
6929
6930 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6931 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6932 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6933 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6934 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6935 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6936
6937 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6938 {
6939 struct hwrm_func_backing_store_cfg_input req = {0};
6940 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6941 struct bnxt_ctx_pg_info *ctx_pg;
6942 __le32 *num_entries;
6943 __le64 *pg_dir;
6944 u32 flags = 0;
6945 u8 *pg_attr;
6946 u32 ena;
6947 int i;
6948
6949 if (!ctx)
6950 return 0;
6951
6952 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6953 req.enables = cpu_to_le32(enables);
6954
6955 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6956 ctx_pg = &ctx->qp_mem;
6957 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6958 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6959 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6960 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6961 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6962 &req.qpc_pg_size_qpc_lvl,
6963 &req.qpc_page_dir);
6964 }
6965 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6966 ctx_pg = &ctx->srq_mem;
6967 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6968 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6969 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6970 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6971 &req.srq_pg_size_srq_lvl,
6972 &req.srq_page_dir);
6973 }
6974 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6975 ctx_pg = &ctx->cq_mem;
6976 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6977 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6978 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6979 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6980 &req.cq_page_dir);
6981 }
6982 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6983 ctx_pg = &ctx->vnic_mem;
6984 req.vnic_num_vnic_entries =
6985 cpu_to_le16(ctx->vnic_max_vnic_entries);
6986 req.vnic_num_ring_table_entries =
6987 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6988 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6989 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6990 &req.vnic_pg_size_vnic_lvl,
6991 &req.vnic_page_dir);
6992 }
6993 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6994 ctx_pg = &ctx->stat_mem;
6995 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6996 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6997 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6998 &req.stat_pg_size_stat_lvl,
6999 &req.stat_page_dir);
7000 }
7001 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7002 ctx_pg = &ctx->mrav_mem;
7003 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7004 if (ctx->mrav_num_entries_units)
7005 flags |=
7006 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7007 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7008 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7009 &req.mrav_pg_size_mrav_lvl,
7010 &req.mrav_page_dir);
7011 }
7012 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7013 ctx_pg = &ctx->tim_mem;
7014 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7015 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7016 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7017 &req.tim_pg_size_tim_lvl,
7018 &req.tim_page_dir);
7019 }
7020 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7021 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7022 pg_dir = &req.tqm_sp_page_dir,
7023 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7024 i < BNXT_MAX_TQM_RINGS;
7025 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7026 if (!(enables & ena))
7027 continue;
7028
7029 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7030 ctx_pg = ctx->tqm_mem[i];
7031 *num_entries = cpu_to_le32(ctx_pg->entries);
7032 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7033 }
7034 req.flags = cpu_to_le32(flags);
7035 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7036 }
7037
7038 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7039 struct bnxt_ctx_pg_info *ctx_pg)
7040 {
7041 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7042
7043 rmem->page_size = BNXT_PAGE_SIZE;
7044 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7045 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7046 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7047 if (rmem->depth >= 1)
7048 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7049 return bnxt_alloc_ring(bp, rmem);
7050 }
7051
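/* Allocate the backing-store pages for one context type.  Small regions use
 * a single level of pages; regions larger than MAX_CTX_PAGES (or callers
 * requesting depth > 1) get a two-level page table, with the last page-table
 * block trimmed to the remaining page count.
 */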
7052 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7053 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7054 u8 depth, bool use_init_val)
7055 {
7056 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7057 int rc;
7058
7059 if (!mem_size)
7060 return -EINVAL;
7061
7062 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7063 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7064 ctx_pg->nr_pages = 0;
7065 return -EINVAL;
7066 }
7067 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7068 int nr_tbls, i;
7069
7070 rmem->depth = 2;
7071 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7072 GFP_KERNEL);
7073 if (!ctx_pg->ctx_pg_tbl)
7074 return -ENOMEM;
7075 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7076 rmem->nr_pages = nr_tbls;
7077 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7078 if (rc)
7079 return rc;
7080 for (i = 0; i < nr_tbls; i++) {
7081 struct bnxt_ctx_pg_info *pg_tbl;
7082
7083 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7084 if (!pg_tbl)
7085 return -ENOMEM;
7086 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7087 rmem = &pg_tbl->ring_mem;
7088 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7089 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7090 rmem->depth = 1;
7091 rmem->nr_pages = MAX_CTX_PAGES;
7092 if (use_init_val)
7093 rmem->init_val = bp->ctx->ctx_kind_initializer;
7094 if (i == (nr_tbls - 1)) {
7095 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7096
7097 if (rem)
7098 rmem->nr_pages = rem;
7099 }
7100 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7101 if (rc)
7102 break;
7103 }
7104 } else {
7105 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7106 if (rmem->nr_pages > 1 || depth)
7107 rmem->depth = 1;
7108 if (use_init_val)
7109 rmem->init_val = bp->ctx->ctx_kind_initializer;
7110 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7111 }
7112 return rc;
7113 }
7114
7115 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7116 struct bnxt_ctx_pg_info *ctx_pg)
7117 {
7118 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7119
7120 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7121 ctx_pg->ctx_pg_tbl) {
7122 int i, nr_tbls = rmem->nr_pages;
7123
7124 for (i = 0; i < nr_tbls; i++) {
7125 struct bnxt_ctx_pg_info *pg_tbl;
7126 struct bnxt_ring_mem_info *rmem2;
7127
7128 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7129 if (!pg_tbl)
7130 continue;
7131 rmem2 = &pg_tbl->ring_mem;
7132 bnxt_free_ring(bp, rmem2);
7133 ctx_pg->ctx_pg_arr[i] = NULL;
7134 kfree(pg_tbl);
7135 ctx_pg->ctx_pg_tbl[i] = NULL;
7136 }
7137 kfree(ctx_pg->ctx_pg_tbl);
7138 ctx_pg->ctx_pg_tbl = NULL;
7139 }
7140 bnxt_free_ring(bp, rmem);
7141 ctx_pg->nr_pages = 0;
7142 }
7143
7144 static void bnxt_free_ctx_mem(struct bnxt *bp)
7145 {
7146 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7147 int i;
7148
7149 if (!ctx)
7150 return;
7151
7152 if (ctx->tqm_mem[0]) {
7153 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7154 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7155 kfree(ctx->tqm_mem[0]);
7156 ctx->tqm_mem[0] = NULL;
7157 }
7158
7159 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7160 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7161 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7162 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7163 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7164 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7165 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7166 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7167 }
7168
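/* Query the firmware's backing-store requirements and allocate host memory
 * for every context type.  When RoCE is supported, MRAV and TIM contexts are
 * allocated as well, and (outside of kdump) extra QP/SRQ entries are
 * reserved.  The resulting layout is then programmed into the firmware with
 * bnxt_hwrm_func_backing_store_cfg().
 */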
7169 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7170 {
7171 struct bnxt_ctx_pg_info *ctx_pg;
7172 struct bnxt_ctx_mem_info *ctx;
7173 u32 mem_size, ena, entries;
7174 u32 entries_sp, min;
7175 u32 num_mr, num_ah;
7176 u32 extra_srqs = 0;
7177 u32 extra_qps = 0;
7178 u8 pg_lvl = 1;
7179 int i, rc;
7180
7181 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7182 if (rc) {
7183 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7184 rc);
7185 return rc;
7186 }
7187 ctx = bp->ctx;
7188 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7189 return 0;
7190
7191 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7192 pg_lvl = 2;
7193 extra_qps = 65536;
7194 extra_srqs = 8192;
7195 }
7196
7197 ctx_pg = &ctx->qp_mem;
7198 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7199 extra_qps;
7200 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7201 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7202 if (rc)
7203 return rc;
7204
7205 ctx_pg = &ctx->srq_mem;
7206 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7207 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7208 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7209 if (rc)
7210 return rc;
7211
7212 ctx_pg = &ctx->cq_mem;
7213 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7214 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7215 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
7216 if (rc)
7217 return rc;
7218
7219 ctx_pg = &ctx->vnic_mem;
7220 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7221 ctx->vnic_max_ring_table_entries;
7222 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7223 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7224 if (rc)
7225 return rc;
7226
7227 ctx_pg = &ctx->stat_mem;
7228 ctx_pg->entries = ctx->stat_max_entries;
7229 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7230 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
7231 if (rc)
7232 return rc;
7233
7234 ena = 0;
7235 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7236 goto skip_rdma;
7237
7238 ctx_pg = &ctx->mrav_mem;
7239 /* 128K extra is needed to accommodate static AH context
7240 * allocation by f/w.
7241 */
7242 num_mr = 1024 * 256;
7243 num_ah = 1024 * 128;
7244 ctx_pg->entries = num_mr + num_ah;
7245 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7246 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
7247 if (rc)
7248 return rc;
7249 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7250 if (ctx->mrav_num_entries_units)
7251 ctx_pg->entries =
7252 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7253 (num_ah / ctx->mrav_num_entries_units);
7254
7255 ctx_pg = &ctx->tim_mem;
7256 ctx_pg->entries = ctx->qp_mem.entries;
7257 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7258 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7259 if (rc)
7260 return rc;
7261 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7262
7263 skip_rdma:
7264 min = ctx->tqm_min_entries_per_ring;
7265 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7266 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7267 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7268 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7269 entries = roundup(entries, ctx->tqm_entries_multiple);
7270 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7271 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7272 ctx_pg = ctx->tqm_mem[i];
7273 ctx_pg->entries = i ? entries : entries_sp;
7274 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7275 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
7276 if (rc)
7277 return rc;
7278 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7279 }
7280 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7281 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7282 if (rc) {
7283 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7284 rc);
7285 return rc;
7286 }
7287 ctx->flags |= BNXT_CTX_FLAG_INITED;
7288 return 0;
7289 }
7290
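/* Query min/max resource counts (rings, VNICs, stat contexts, etc.) with
 * HWRM_FUNC_RESOURCE_QCAPS and cache them in bp->hw_resc.  With @all set to
 * false only max_tx_sch_inputs is refreshed.
 */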
7291 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7292 {
7293 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7294 struct hwrm_func_resource_qcaps_input req = {0};
7295 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7296 int rc;
7297
7298 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7299 req.fid = cpu_to_le16(0xffff);
7300
7301 mutex_lock(&bp->hwrm_cmd_lock);
7302 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7303 HWRM_CMD_TIMEOUT);
7304 if (rc)
7305 goto hwrm_func_resc_qcaps_exit;
7306
7307 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7308 if (!all)
7309 goto hwrm_func_resc_qcaps_exit;
7310
7311 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7312 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7313 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7314 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7315 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7316 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7317 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7318 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7319 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7320 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7321 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7322 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7323 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7324 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7325 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7326 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7327
7328 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7329 u16 max_msix = le16_to_cpu(resp->max_msix);
7330
7331 hw_resc->max_nqs = max_msix;
7332 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7333 }
7334
7335 if (BNXT_PF(bp)) {
7336 struct bnxt_pf_info *pf = &bp->pf;
7337
7338 pf->vf_resv_strategy =
7339 le16_to_cpu(resp->vf_reservation_strategy);
7340 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7341 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7342 }
7343 hwrm_func_resc_qcaps_exit:
7344 mutex_unlock(&bp->hwrm_cmd_lock);
7345 return rc;
7346 }
7347
7348 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7349 {
7350 int rc = 0;
7351 struct hwrm_func_qcaps_input req = {0};
7352 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7353 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7354 u32 flags, flags_ext;
7355
7356 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7357 req.fid = cpu_to_le16(0xffff);
7358
7359 mutex_lock(&bp->hwrm_cmd_lock);
7360 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7361 if (rc)
7362 goto hwrm_func_qcaps_exit;
7363
7364 flags = le32_to_cpu(resp->flags);
7365 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7366 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7367 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7368 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7369 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7370 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7371 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7372 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7373 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7374 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7375 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7376 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7377 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7378 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7379 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7380 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7381
7382 flags_ext = le32_to_cpu(resp->flags_ext);
7383 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7384 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7385
7386 bp->tx_push_thresh = 0;
7387 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7388 BNXT_FW_MAJ(bp) > 217)
7389 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7390
7391 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7392 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7393 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7394 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7395 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7396 if (!hw_resc->max_hw_ring_grps)
7397 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7398 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7399 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7400 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7401
7402 if (BNXT_PF(bp)) {
7403 struct bnxt_pf_info *pf = &bp->pf;
7404
7405 pf->fw_fid = le16_to_cpu(resp->fid);
7406 pf->port_id = le16_to_cpu(resp->port_id);
7407 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7408 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7409 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7410 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7411 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7412 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7413 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7414 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7415 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7416 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7417 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7418 bp->flags |= BNXT_FLAG_WOL_CAP;
7419 } else {
7420 #ifdef CONFIG_BNXT_SRIOV
7421 struct bnxt_vf_info *vf = &bp->vf;
7422
7423 vf->fw_fid = le16_to_cpu(resp->fid);
7424 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7425 #endif
7426 }
7427
7428 hwrm_func_qcaps_exit:
7429 mutex_unlock(&bp->hwrm_cmd_lock);
7430 return rc;
7431 }
7432
7433 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7434
7435 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7436 {
7437 int rc;
7438
7439 rc = __bnxt_hwrm_func_qcaps(bp);
7440 if (rc)
7441 return rc;
7442 rc = bnxt_hwrm_queue_qportcfg(bp);
7443 if (rc) {
7444 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7445 return rc;
7446 }
7447 if (bp->hwrm_spec_code >= 0x10803) {
7448 rc = bnxt_alloc_ctx_mem(bp);
7449 if (rc)
7450 return rc;
7451 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7452 if (!rc)
7453 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7454 }
7455 return 0;
7456 }
7457
7458 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7459 {
7460 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7461 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7462 int rc = 0;
7463 u32 flags;
7464
7465 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7466 return 0;
7467
7468 resp = bp->hwrm_cmd_resp_addr;
7469 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7470
7471 mutex_lock(&bp->hwrm_cmd_lock);
7472 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7473 if (rc)
7474 goto hwrm_cfa_adv_qcaps_exit;
7475
7476 flags = le32_to_cpu(resp->flags);
7477 if (flags &
7478 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7479 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7480
7481 hwrm_cfa_adv_qcaps_exit:
7482 mutex_unlock(&bp->hwrm_cmd_lock);
7483 return rc;
7484 }
7485
7486 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7487 {
7488 if (bp->fw_health)
7489 return 0;
7490
7491 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7492 if (!bp->fw_health)
7493 return -ENOMEM;
7494
7495 return 0;
7496 }
7497
7498 static int bnxt_alloc_fw_health(struct bnxt *bp)
7499 {
7500 int rc;
7501
7502 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7503 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7504 return 0;
7505
7506 rc = __bnxt_alloc_fw_health(bp);
7507 if (rc) {
7508 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7509 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7510 return rc;
7511 }
7512
7513 return 0;
7514 }
7515
7516 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7517 {
7518 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7519 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7520 BNXT_FW_HEALTH_WIN_MAP_OFF);
7521 }
7522
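/* Look for the hcomm status structure behind the GRC window.  If its
 * signature matches, record (and, for a GRC-type register, map) the firmware
 * health status register so that firmware status can be read reliably.
 */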
7523 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7524 {
7525 void __iomem *hs;
7526 u32 status_loc;
7527 u32 reg_type;
7528 u32 sig;
7529
7530 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7531 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7532
7533 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7534 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7535 if (bp->fw_health)
7536 bp->fw_health->status_reliable = false;
7537 return;
7538 }
7539
7540 if (__bnxt_alloc_fw_health(bp)) {
7541 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7542 return;
7543 }
7544
7545 status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
7546 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7547 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7548 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7549 __bnxt_map_fw_health_reg(bp, status_loc);
7550 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7551 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7552 }
7553
7554 bp->fw_health->status_reliable = true;
7555 }
7556
7557 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7558 {
7559 struct bnxt_fw_health *fw_health = bp->fw_health;
7560 u32 reg_base = 0xffffffff;
7561 int i;
7562
7563 /* Only pre-map the monitoring GRC registers using window 3 */
7564 for (i = 0; i < 4; i++) {
7565 u32 reg = fw_health->regs[i];
7566
7567 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7568 continue;
7569 if (reg_base == 0xffffffff)
7570 reg_base = reg & BNXT_GRC_BASE_MASK;
7571 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7572 return -ERANGE;
7573 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7574 }
7575 if (reg_base == 0xffffffff)
7576 return 0;
7577
7578 __bnxt_map_fw_health_reg(bp, reg_base);
7579 return 0;
7580 }
7581
7582 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7583 {
7584 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7585 struct bnxt_fw_health *fw_health = bp->fw_health;
7586 struct hwrm_error_recovery_qcfg_input req = {0};
7587 int rc, i;
7588
7589 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7590 return 0;
7591
7592 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7593 mutex_lock(&bp->hwrm_cmd_lock);
7594 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7595 if (rc)
7596 goto err_recovery_out;
7597 fw_health->flags = le32_to_cpu(resp->flags);
7598 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7599 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7600 rc = -EINVAL;
7601 goto err_recovery_out;
7602 }
7603 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7604 fw_health->master_func_wait_dsecs =
7605 le32_to_cpu(resp->master_func_wait_period);
7606 fw_health->normal_func_wait_dsecs =
7607 le32_to_cpu(resp->normal_func_wait_period);
7608 fw_health->post_reset_wait_dsecs =
7609 le32_to_cpu(resp->master_func_wait_period_after_reset);
7610 fw_health->post_reset_max_wait_dsecs =
7611 le32_to_cpu(resp->max_bailout_time_after_reset);
7612 fw_health->regs[BNXT_FW_HEALTH_REG] =
7613 le32_to_cpu(resp->fw_health_status_reg);
7614 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7615 le32_to_cpu(resp->fw_heartbeat_reg);
7616 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7617 le32_to_cpu(resp->fw_reset_cnt_reg);
7618 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7619 le32_to_cpu(resp->reset_inprogress_reg);
7620 fw_health->fw_reset_inprog_reg_mask =
7621 le32_to_cpu(resp->reset_inprogress_reg_mask);
7622 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7623 if (fw_health->fw_reset_seq_cnt >= 16) {
7624 rc = -EINVAL;
7625 goto err_recovery_out;
7626 }
7627 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7628 fw_health->fw_reset_seq_regs[i] =
7629 le32_to_cpu(resp->reset_reg[i]);
7630 fw_health->fw_reset_seq_vals[i] =
7631 le32_to_cpu(resp->reset_reg_val[i]);
7632 fw_health->fw_reset_seq_delay_msec[i] =
7633 resp->delay_after_reset[i];
7634 }
7635 err_recovery_out:
7636 mutex_unlock(&bp->hwrm_cmd_lock);
7637 if (!rc)
7638 rc = bnxt_map_fw_health_regs(bp);
7639 if (rc)
7640 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7641 return rc;
7642 }
7643
7644 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7645 {
7646 struct hwrm_func_reset_input req = {0};
7647
7648 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7649 req.enables = 0;
7650
7651 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7652 }
7653
7654 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7655 {
7656 struct hwrm_nvm_get_dev_info_output nvm_info;
7657
7658 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7659 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7660 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7661 nvm_info.nvm_cfg_ver_upd);
7662 }
7663
7664 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7665 {
7666 int rc = 0;
7667 struct hwrm_queue_qportcfg_input req = {0};
7668 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7669 u8 i, j, *qptr;
7670 bool no_rdma;
7671
7672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7673
7674 mutex_lock(&bp->hwrm_cmd_lock);
7675 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7676 if (rc)
7677 goto qportcfg_exit;
7678
7679 if (!resp->max_configurable_queues) {
7680 rc = -EINVAL;
7681 goto qportcfg_exit;
7682 }
7683 bp->max_tc = resp->max_configurable_queues;
7684 bp->max_lltc = resp->max_configurable_lossless_queues;
7685 if (bp->max_tc > BNXT_MAX_QUEUE)
7686 bp->max_tc = BNXT_MAX_QUEUE;
7687
7688 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7689 qptr = &resp->queue_id0;
7690 for (i = 0, j = 0; i < bp->max_tc; i++) {
7691 bp->q_info[j].queue_id = *qptr;
7692 bp->q_ids[i] = *qptr++;
7693 bp->q_info[j].queue_profile = *qptr++;
7694 bp->tc_to_qidx[j] = j;
7695 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7696 (no_rdma && BNXT_PF(bp)))
7697 j++;
7698 }
7699 bp->max_q = bp->max_tc;
7700 bp->max_tc = max_t(u8, j, 1);
7701
7702 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7703 bp->max_tc = 1;
7704
7705 if (bp->max_lltc > bp->max_tc)
7706 bp->max_lltc = bp->max_tc;
7707
7708 qportcfg_exit:
7709 mutex_unlock(&bp->hwrm_cmd_lock);
7710 return rc;
7711 }
7712
7713 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7714 {
7715 struct hwrm_ver_get_input req = {0};
7716 int rc;
7717
7718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7719 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7720 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7721 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7722
7723 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7724 silent);
7725 return rc;
7726 }
7727
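/* Query the firmware with HWRM_VER_GET and cache the interface spec version,
 * firmware version strings, chip identification and the device capability
 * flags advertised in dev_caps_cfg.
 */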
7728 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7729 {
7730 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7731 u16 fw_maj, fw_min, fw_bld, fw_rsv;
7732 u32 dev_caps_cfg, hwrm_ver;
7733 int rc, len;
7734
7735 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7736 mutex_lock(&bp->hwrm_cmd_lock);
7737 rc = __bnxt_hwrm_ver_get(bp, false);
7738 if (rc)
7739 goto hwrm_ver_get_exit;
7740
7741 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7742
7743 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7744 resp->hwrm_intf_min_8b << 8 |
7745 resp->hwrm_intf_upd_8b;
7746 if (resp->hwrm_intf_maj_8b < 1) {
7747 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7748 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7749 resp->hwrm_intf_upd_8b);
7750 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7751 }
7752
7753 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7754 HWRM_VERSION_UPDATE;
7755
7756 if (bp->hwrm_spec_code > hwrm_ver)
7757 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7758 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7759 HWRM_VERSION_UPDATE);
7760 else
7761 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7762 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7763 resp->hwrm_intf_upd_8b);
7764
7765 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7766 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7767 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7768 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7769 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7770 len = FW_VER_STR_LEN;
7771 } else {
7772 fw_maj = resp->hwrm_fw_maj_8b;
7773 fw_min = resp->hwrm_fw_min_8b;
7774 fw_bld = resp->hwrm_fw_bld_8b;
7775 fw_rsv = resp->hwrm_fw_rsvd_8b;
7776 len = BC_HWRM_STR_LEN;
7777 }
7778 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7779 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7780 fw_rsv);
7781
7782 if (strlen(resp->active_pkg_name)) {
7783 int fw_ver_len = strlen(bp->fw_ver_str);
7784
7785 snprintf(bp->fw_ver_str + fw_ver_len,
7786 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7787 resp->active_pkg_name);
7788 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7789 }
7790
7791 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7792 if (!bp->hwrm_cmd_timeout)
7793 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7794
7795 if (resp->hwrm_intf_maj_8b >= 1) {
7796 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7797 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7798 }
7799 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7800 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7801
7802 bp->chip_num = le16_to_cpu(resp->chip_num);
7803 bp->chip_rev = resp->chip_rev;
7804 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7805 !resp->chip_metal)
7806 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7807
7808 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7809 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7810 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7811 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7812
7813 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7814 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7815
7816 if (dev_caps_cfg &
7817 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7818 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7819
7820 if (dev_caps_cfg &
7821 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7822 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7823
7824 if (dev_caps_cfg &
7825 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7826 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7827
7828 hwrm_ver_get_exit:
7829 mutex_unlock(&bp->hwrm_cmd_lock);
7830 return rc;
7831 }
7832
7833 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7834 {
7835 struct hwrm_fw_set_time_input req = {0};
7836 struct tm tm;
7837 time64_t now = ktime_get_real_seconds();
7838
7839 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7840 bp->hwrm_spec_code < 0x10400)
7841 return -EOPNOTSUPP;
7842
7843 time64_to_tm(now, 0, &tm);
7844 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7845 req.year = cpu_to_le16(1900 + tm.tm_year);
7846 req.month = 1 + tm.tm_mon;
7847 req.day = tm.tm_mday;
7848 req.hour = tm.tm_hour;
7849 req.minute = tm.tm_min;
7850 req.second = tm.tm_sec;
7851 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7852 }
7853
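/* Fold a hardware counter that is narrower than 64 bits (width given by
 * @mask) into the 64-bit software counter, adding one full wrap when the
 * hardware value has rolled over since the last read.
 */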
7854 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
7855 {
7856 u64 sw_tmp;
7857
7858 hw &= mask;
7859 sw_tmp = (*sw & ~mask) | hw;
7860 if (hw < (*sw & mask))
7861 sw_tmp += mask + 1;
7862 WRITE_ONCE(*sw, sw_tmp);
7863 }
7864
7865 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
7866 int count, bool ignore_zero)
7867 {
7868 int i;
7869
7870 for (i = 0; i < count; i++) {
7871 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
7872
7873 if (ignore_zero && !hw)
7874 continue;
7875
7876 if (masks[i] == -1ULL)
7877 sw_stats[i] = hw;
7878 else
7879 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
7880 }
7881 }
7882
7883 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
7884 {
7885 if (!stats->hw_stats)
7886 return;
7887
7888 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7889 stats->hw_masks, stats->len / 8, false);
7890 }
7891
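/* Fold the per-ring completion-ring counters and, when enabled, the port and
 * extended port counters into their 64-bit software mirrors.  On P5 chips a
 * zero hardware value is skipped to work around the counter glitch noted
 * below.
 */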
7892 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7893 {
7894 struct bnxt_stats_mem *ring0_stats;
7895 bool ignore_zero = false;
7896 int i;
7897
7898 /* Chip bug. Counter intermittently becomes 0. */
7899 if (bp->flags & BNXT_FLAG_CHIP_P5)
7900 ignore_zero = true;
7901
7902 for (i = 0; i < bp->cp_nr_rings; i++) {
7903 struct bnxt_napi *bnapi = bp->bnapi[i];
7904 struct bnxt_cp_ring_info *cpr;
7905 struct bnxt_stats_mem *stats;
7906
7907 cpr = &bnapi->cp_ring;
7908 stats = &cpr->stats;
7909 if (!i)
7910 ring0_stats = stats;
7911 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7912 ring0_stats->hw_masks,
7913 ring0_stats->len / 8, ignore_zero);
7914 }
7915 if (bp->flags & BNXT_FLAG_PORT_STATS) {
7916 struct bnxt_stats_mem *stats = &bp->port_stats;
7917 __le64 *hw_stats = stats->hw_stats;
7918 u64 *sw_stats = stats->sw_stats;
7919 u64 *masks = stats->hw_masks;
7920 int cnt;
7921
7922 cnt = sizeof(struct rx_port_stats) / 8;
7923 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7924
7925 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7926 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7927 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
7928 cnt = sizeof(struct tx_port_stats) / 8;
7929 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
7930 }
7931 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
7932 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
7933 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
7934 }
7935 }
7936
7937 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
7938 {
7939 struct bnxt_pf_info *pf = &bp->pf;
7940 struct hwrm_port_qstats_input req = {0};
7941
7942 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7943 return 0;
7944
7945 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7946 return -EOPNOTSUPP;
7947
7948 req.flags = flags;
7949 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7950 req.port_id = cpu_to_le16(pf->port_id);
7951 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
7952 BNXT_TX_PORT_STATS_BYTE_OFFSET);
7953 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
7954 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7955 }
7956
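/* Request extended port statistics from the firmware and record the returned
 * rx/tx stat sizes.  When called without flags, also refresh the
 * priority-to-CoS queue mapping (pri2cos_idx/pri2cos_valid) used for
 * per-priority reporting.
 */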
7957 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
7958 {
7959 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7960 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7961 struct hwrm_port_qstats_ext_input req = {0};
7962 struct bnxt_pf_info *pf = &bp->pf;
7963 u32 tx_stat_size;
7964 int rc;
7965
7966 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7967 return 0;
7968
7969 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
7970 return -EOPNOTSUPP;
7971
7972 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7973 req.flags = flags;
7974 req.port_id = cpu_to_le16(pf->port_id);
7975 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7976 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
7977 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
7978 sizeof(struct tx_port_stats_ext) : 0;
7979 req.tx_stat_size = cpu_to_le16(tx_stat_size);
7980 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
7981 mutex_lock(&bp->hwrm_cmd_lock);
7982 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7983 if (!rc) {
7984 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7985 bp->fw_tx_stats_ext_size = tx_stat_size ?
7986 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7987 } else {
7988 bp->fw_rx_stats_ext_size = 0;
7989 bp->fw_tx_stats_ext_size = 0;
7990 }
7991 if (flags)
7992 goto qstats_done;
7993
7994 if (bp->fw_tx_stats_ext_size <=
7995 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7996 mutex_unlock(&bp->hwrm_cmd_lock);
7997 bp->pri2cos_valid = 0;
7998 return rc;
7999 }
8000
8001 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8002 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8003
8004 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8005 if (!rc) {
8006 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8007 u8 *pri2cos;
8008 int i, j;
8009
8010 resp2 = bp->hwrm_cmd_resp_addr;
8011 pri2cos = &resp2->pri0_cos_queue_id;
8012 for (i = 0; i < 8; i++) {
8013 u8 queue_id = pri2cos[i];
8014 u8 queue_idx;
8015
8016 /* Per port queue IDs start from 0, 10, 20, etc */
8017 queue_idx = queue_id % 10;
8018 if (queue_idx > BNXT_MAX_QUEUE) {
8019 bp->pri2cos_valid = false;
8020 goto qstats_done;
8021 }
8022 for (j = 0; j < bp->max_q; j++) {
8023 if (bp->q_ids[j] == queue_id)
8024 bp->pri2cos_idx[i] = queue_idx;
8025 }
8026 }
8027 bp->pri2cos_valid = 1;
8028 }
8029 qstats_done:
8030 mutex_unlock(&bp->hwrm_cmd_lock);
8031 return rc;
8032 }
8033
8034 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8035 {
8036 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8037 bnxt_hwrm_tunnel_dst_port_free(
8038 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8039 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8040 bnxt_hwrm_tunnel_dst_port_free(
8041 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8042 }
8043
8044 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8045 {
8046 int rc, i;
8047 u32 tpa_flags = 0;
8048
8049 if (set_tpa)
8050 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8051 else if (BNXT_NO_FW_ACCESS(bp))
8052 return 0;
8053 for (i = 0; i < bp->nr_vnics; i++) {
8054 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8055 if (rc) {
8056 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8057 i, rc);
8058 return rc;
8059 }
8060 }
8061 return 0;
8062 }
8063
8064 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8065 {
8066 int i;
8067
8068 for (i = 0; i < bp->nr_vnics; i++)
8069 bnxt_hwrm_vnic_set_rss(bp, i, false);
8070 }
8071
8072 static void bnxt_clear_vnic(struct bnxt *bp)
8073 {
8074 if (!bp->vnic_info)
8075 return;
8076
8077 bnxt_hwrm_clear_vnic_filter(bp);
8078 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8079 		/* clear all RSS settings before freeing the vnic ctx */
8080 bnxt_hwrm_clear_vnic_rss(bp);
8081 bnxt_hwrm_vnic_ctx_free(bp);
8082 }
8083 	/* undo the vnic TPA settings before freeing the vnic */
8084 if (bp->flags & BNXT_FLAG_TPA)
8085 bnxt_set_tpa(bp, false);
8086 bnxt_hwrm_vnic_free(bp);
8087 if (bp->flags & BNXT_FLAG_CHIP_P5)
8088 bnxt_hwrm_vnic_ctx_free(bp);
8089 }
8090
8091 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8092 bool irq_re_init)
8093 {
8094 bnxt_clear_vnic(bp);
8095 bnxt_hwrm_ring_free(bp, close_path);
8096 bnxt_hwrm_ring_grp_free(bp);
8097 if (irq_re_init) {
8098 bnxt_hwrm_stat_ctx_free(bp);
8099 bnxt_hwrm_free_tunnel_ports(bp);
8100 }
8101 }
8102
8103 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8104 {
8105 struct hwrm_func_cfg_input req = {0};
8106
8107 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8108 req.fid = cpu_to_le16(0xffff);
8109 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8110 if (br_mode == BRIDGE_MODE_VEB)
8111 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8112 else if (br_mode == BRIDGE_MODE_VEPA)
8113 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8114 else
8115 return -EINVAL;
8116 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8117 }
8118
8119 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8120 {
8121 struct hwrm_func_cfg_input req = {0};
8122
8123 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8124 return 0;
8125
8126 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8127 req.fid = cpu_to_le16(0xffff);
8128 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8129 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8130 if (size == 128)
8131 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8132
8133 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8134 }
8135
8136 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8137 {
8138 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8139 int rc;
8140
8141 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8142 goto skip_rss_ctx;
8143
8144 /* allocate context for vnic */
8145 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8146 if (rc) {
8147 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8148 vnic_id, rc);
8149 goto vnic_setup_err;
8150 }
8151 bp->rsscos_nr_ctxs++;
8152
8153 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8154 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8155 if (rc) {
8156 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8157 vnic_id, rc);
8158 goto vnic_setup_err;
8159 }
8160 bp->rsscos_nr_ctxs++;
8161 }
8162
8163 skip_rss_ctx:
8164 /* configure default vnic, ring grp */
8165 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8166 if (rc) {
8167 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8168 vnic_id, rc);
8169 goto vnic_setup_err;
8170 }
8171
8172 /* Enable RSS hashing on vnic */
8173 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8174 if (rc) {
8175 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8176 vnic_id, rc);
8177 goto vnic_setup_err;
8178 }
8179
8180 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8181 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8182 if (rc) {
8183 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8184 vnic_id, rc);
8185 }
8186 }
8187
8188 vnic_setup_err:
8189 return rc;
8190 }
8191
8192 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8193 {
8194 int rc, i, nr_ctxs;
8195
8196 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8197 for (i = 0; i < nr_ctxs; i++) {
8198 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8199 if (rc) {
8200 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8201 vnic_id, i, rc);
8202 break;
8203 }
8204 bp->rsscos_nr_ctxs++;
8205 }
8206 if (i < nr_ctxs)
8207 return -ENOMEM;
8208
8209 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8210 if (rc) {
8211 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8212 vnic_id, rc);
8213 return rc;
8214 }
8215 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8216 if (rc) {
8217 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8218 vnic_id, rc);
8219 return rc;
8220 }
8221 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8222 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8223 if (rc) {
8224 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8225 vnic_id, rc);
8226 }
8227 }
8228 return rc;
8229 }
8230
8231 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8232 {
8233 if (bp->flags & BNXT_FLAG_CHIP_P5)
8234 return __bnxt_setup_vnic_p5(bp, vnic_id);
8235 else
8236 return __bnxt_setup_vnic(bp, vnic_id);
8237 }
8238
8239 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8240 {
8241 #ifdef CONFIG_RFS_ACCEL
8242 int i, rc = 0;
8243
8244 if (bp->flags & BNXT_FLAG_CHIP_P5)
8245 return 0;
8246
8247 for (i = 0; i < bp->rx_nr_rings; i++) {
8248 struct bnxt_vnic_info *vnic;
8249 u16 vnic_id = i + 1;
8250 u16 ring_id = i;
8251
8252 if (vnic_id >= bp->nr_vnics)
8253 break;
8254
8255 vnic = &bp->vnic_info[vnic_id];
8256 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8257 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8258 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8259 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8260 if (rc) {
8261 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8262 vnic_id, rc);
8263 break;
8264 }
8265 rc = bnxt_setup_vnic(bp, vnic_id);
8266 if (rc)
8267 break;
8268 }
8269 return rc;
8270 #else
8271 return 0;
8272 #endif
8273 }
8274
8275 /* Allow PF and VF with default VLAN to be in promiscuous mode */
8276 static bool bnxt_promisc_ok(struct bnxt *bp)
8277 {
8278 #ifdef CONFIG_BNXT_SRIOV
8279 if (BNXT_VF(bp) && !bp->vf.vlan)
8280 return false;
8281 #endif
8282 return true;
8283 }
8284
8285 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8286 {
8287 unsigned int rc = 0;
8288
8289 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8290 if (rc) {
8291 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8292 rc);
8293 return rc;
8294 }
8295
8296 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8297 if (rc) {
8298 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8299 rc);
8300 return rc;
8301 }
8302 return rc;
8303 }
8304
8305 static int bnxt_cfg_rx_mode(struct bnxt *);
8306 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8307
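/* Bring up the device-side state for an open: allocate stat contexts (on IRQ
 * re-init), rings, ring groups and the default VNIC, program the default MAC
 * filter, RX mode, RSS and TPA settings, then apply interrupt coalescing.
 * On error, the firmware resources allocated here are freed again.
 */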
8308 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8309 {
8310 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8311 int rc = 0;
8312 unsigned int rx_nr_rings = bp->rx_nr_rings;
8313
8314 if (irq_re_init) {
8315 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8316 if (rc) {
8317 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8318 rc);
8319 goto err_out;
8320 }
8321 }
8322
8323 rc = bnxt_hwrm_ring_alloc(bp);
8324 if (rc) {
8325 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8326 goto err_out;
8327 }
8328
8329 rc = bnxt_hwrm_ring_grp_alloc(bp);
8330 if (rc) {
8331 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8332 goto err_out;
8333 }
8334
8335 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8336 rx_nr_rings--;
8337
8338 /* default vnic 0 */
8339 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8340 if (rc) {
8341 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8342 goto err_out;
8343 }
8344
8345 if (BNXT_VF(bp))
8346 bnxt_hwrm_func_qcfg(bp);
8347
8348 rc = bnxt_setup_vnic(bp, 0);
8349 if (rc)
8350 goto err_out;
8351
8352 if (bp->flags & BNXT_FLAG_RFS) {
8353 rc = bnxt_alloc_rfs_vnics(bp);
8354 if (rc)
8355 goto err_out;
8356 }
8357
8358 if (bp->flags & BNXT_FLAG_TPA) {
8359 rc = bnxt_set_tpa(bp, true);
8360 if (rc)
8361 goto err_out;
8362 }
8363
8364 if (BNXT_VF(bp))
8365 bnxt_update_vf_mac(bp);
8366
8367 /* Filter for default vnic 0 */
8368 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8369 if (rc) {
8370 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8371 goto err_out;
8372 }
8373 vnic->uc_filter_count = 1;
8374
8375 vnic->rx_mask = 0;
8376 if (bp->dev->flags & IFF_BROADCAST)
8377 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8378
8379 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8380 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8381
8382 if (bp->dev->flags & IFF_ALLMULTI) {
8383 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8384 vnic->mc_list_count = 0;
8385 } else {
8386 u32 mask = 0;
8387
8388 bnxt_mc_list_updated(bp, &mask);
8389 vnic->rx_mask |= mask;
8390 }
8391
8392 rc = bnxt_cfg_rx_mode(bp);
8393 if (rc)
8394 goto err_out;
8395
8396 rc = bnxt_hwrm_set_coal(bp);
8397 if (rc)
8398 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8399 rc);
8400
8401 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8402 rc = bnxt_setup_nitroa0_vnic(bp);
8403 if (rc)
8404 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8405 rc);
8406 }
8407
8408 if (BNXT_VF(bp)) {
8409 bnxt_hwrm_func_qcfg(bp);
8410 netdev_update_features(bp->dev);
8411 }
8412
8413 return 0;
8414
8415 err_out:
8416 bnxt_hwrm_resource_free(bp, 0, true);
8417
8418 return rc;
8419 }
8420
8421 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8422 {
8423 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8424 return 0;
8425 }
8426
8427 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8428 {
8429 bnxt_init_cp_rings(bp);
8430 bnxt_init_rx_rings(bp);
8431 bnxt_init_tx_rings(bp);
8432 bnxt_init_ring_grps(bp, irq_re_init);
8433 bnxt_init_vnics(bp);
8434
8435 return bnxt_init_chip(bp, irq_re_init);
8436 }
8437
8438 static int bnxt_set_real_num_queues(struct bnxt *bp)
8439 {
8440 int rc;
8441 struct net_device *dev = bp->dev;
8442
8443 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8444 bp->tx_nr_rings_xdp);
8445 if (rc)
8446 return rc;
8447
8448 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8449 if (rc)
8450 return rc;
8451
8452 #ifdef CONFIG_RFS_ACCEL
8453 if (bp->flags & BNXT_FLAG_RFS)
8454 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8455 #endif
8456
8457 return rc;
8458 }
8459
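/* Trim the requested RX and TX ring counts so they fit within @max
 * completion rings: shared rings are simply capped at @max, while dedicated
 * rings are reduced one at a time until rx + tx <= max.
 */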
8460 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8461 bool shared)
8462 {
8463 int _rx = *rx, _tx = *tx;
8464
8465 if (shared) {
8466 *rx = min_t(int, _rx, max);
8467 *tx = min_t(int, _tx, max);
8468 } else {
8469 if (max < 2)
8470 return -ENOMEM;
8471
8472 while (_rx + _tx > max) {
8473 if (_rx > _tx && _rx > 1)
8474 _rx--;
8475 else if (_tx > 1)
8476 _tx--;
8477 }
8478 *rx = _rx;
8479 *tx = _tx;
8480 }
8481 return 0;
8482 }
8483
8484 static void bnxt_setup_msix(struct bnxt *bp)
8485 {
8486 const int len = sizeof(bp->irq_tbl[0].name);
8487 struct net_device *dev = bp->dev;
8488 int tcs, i;
8489
8490 tcs = netdev_get_num_tc(dev);
8491 if (tcs) {
8492 int i, off, count;
8493
8494 for (i = 0; i < tcs; i++) {
8495 count = bp->tx_nr_rings_per_tc;
8496 off = i * count;
8497 netdev_set_tc_queue(dev, i, count, off);
8498 }
8499 }
8500
8501 for (i = 0; i < bp->cp_nr_rings; i++) {
8502 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8503 char *attr;
8504
8505 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8506 attr = "TxRx";
8507 else if (i < bp->rx_nr_rings)
8508 attr = "rx";
8509 else
8510 attr = "tx";
8511
8512 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8513 attr, i);
8514 bp->irq_tbl[map_idx].handler = bnxt_msix;
8515 }
8516 }
8517
8518 static void bnxt_setup_inta(struct bnxt *bp)
8519 {
8520 const int len = sizeof(bp->irq_tbl[0].name);
8521
8522 if (netdev_get_num_tc(bp->dev))
8523 netdev_reset_tc(bp->dev);
8524
8525 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8526 0);
8527 bp->irq_tbl[0].handler = bnxt_inta;
8528 }
8529
8530 static int bnxt_init_int_mode(struct bnxt *bp);
8531
8532 static int bnxt_setup_int_mode(struct bnxt *bp)
8533 {
8534 int rc;
8535
8536 if (!bp->irq_tbl) {
8537 rc = bnxt_init_int_mode(bp);
8538 if (rc || !bp->irq_tbl)
8539 return rc ?: -ENODEV;
8540 }
8541
8542 if (bp->flags & BNXT_FLAG_USING_MSIX)
8543 bnxt_setup_msix(bp);
8544 else
8545 bnxt_setup_inta(bp);
8546
8547 rc = bnxt_set_real_num_queues(bp);
8548 return rc;
8549 }
8550
8551 #ifdef CONFIG_RFS_ACCEL
8552 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8553 {
8554 return bp->hw_resc.max_rsscos_ctxs;
8555 }
8556
8557 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8558 {
8559 return bp->hw_resc.max_vnics;
8560 }
8561 #endif
8562
8563 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8564 {
8565 return bp->hw_resc.max_stat_ctxs;
8566 }
8567
8568 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8569 {
8570 return bp->hw_resc.max_cp_rings;
8571 }
8572
8573 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8574 {
8575 unsigned int cp = bp->hw_resc.max_cp_rings;
8576
8577 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8578 cp -= bnxt_get_ulp_msix_num(bp);
8579
8580 return cp;
8581 }
8582
8583 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8584 {
8585 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8586
8587 if (bp->flags & BNXT_FLAG_CHIP_P5)
8588 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8589
8590 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8591 }
8592
8593 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8594 {
8595 bp->hw_resc.max_irqs = max_irqs;
8596 }
8597
8598 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8599 {
8600 unsigned int cp;
8601
8602 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8603 if (bp->flags & BNXT_FLAG_CHIP_P5)
8604 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8605 else
8606 return cp - bp->cp_nr_rings;
8607 }
8608
8609 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8610 {
8611 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8612 }
8613
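/* Return how many MSI-X vectors can still be made available when @num more
 * are requested, based on the vectors currently used by the completion rings
 * and the function's IRQ and completion-ring limits.
 */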
8614 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8615 {
8616 int max_cp = bnxt_get_max_func_cp_rings(bp);
8617 int max_irq = bnxt_get_max_func_irqs(bp);
8618 int total_req = bp->cp_nr_rings + num;
8619 int max_idx, avail_msix;
8620
8621 max_idx = bp->total_irqs;
8622 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8623 max_idx = min_t(int, bp->total_irqs, max_cp);
8624 avail_msix = max_idx - bp->cp_nr_rings;
8625 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8626 return avail_msix;
8627
8628 if (max_irq < total_req) {
8629 num = max_irq - bp->cp_nr_rings;
8630 if (num <= 0)
8631 return 0;
8632 }
8633 return num;
8634 }
8635
8636 static int bnxt_get_num_msix(struct bnxt *bp)
8637 {
8638 if (!BNXT_NEW_RM(bp))
8639 return bnxt_get_max_func_irqs(bp);
8640
8641 return bnxt_nq_rings_in_use(bp);
8642 }
8643
8644 static int bnxt_init_msix(struct bnxt *bp)
8645 {
8646 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8647 struct msix_entry *msix_ent;
8648
8649 total_vecs = bnxt_get_num_msix(bp);
8650 max = bnxt_get_max_func_irqs(bp);
8651 if (total_vecs > max)
8652 total_vecs = max;
8653
8654 if (!total_vecs)
8655 return 0;
8656
8657 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8658 if (!msix_ent)
8659 return -ENOMEM;
8660
8661 for (i = 0; i < total_vecs; i++) {
8662 msix_ent[i].entry = i;
8663 msix_ent[i].vector = 0;
8664 }
8665
8666 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8667 min = 2;
8668
8669 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8670 ulp_msix = bnxt_get_ulp_msix_num(bp);
8671 if (total_vecs < 0 || total_vecs < ulp_msix) {
8672 rc = -ENODEV;
8673 goto msix_setup_exit;
8674 }
8675
8676 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8677 if (bp->irq_tbl) {
8678 for (i = 0; i < total_vecs; i++)
8679 bp->irq_tbl[i].vector = msix_ent[i].vector;
8680
8681 bp->total_irqs = total_vecs;
8682 		/* Trim rings based on the number of vectors allocated */
8683 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8684 total_vecs - ulp_msix, min == 1);
8685 if (rc)
8686 goto msix_setup_exit;
8687
8688 bp->cp_nr_rings = (min == 1) ?
8689 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8690 bp->tx_nr_rings + bp->rx_nr_rings;
8691
8692 } else {
8693 rc = -ENOMEM;
8694 goto msix_setup_exit;
8695 }
8696 bp->flags |= BNXT_FLAG_USING_MSIX;
8697 kfree(msix_ent);
8698 return 0;
8699
8700 msix_setup_exit:
8701 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8702 kfree(bp->irq_tbl);
8703 bp->irq_tbl = NULL;
8704 pci_disable_msix(bp->pdev);
8705 kfree(msix_ent);
8706 return rc;
8707 }
8708
8709 static int bnxt_init_inta(struct bnxt *bp)
8710 {
8711 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8712 if (!bp->irq_tbl)
8713 return -ENOMEM;
8714
8715 bp->total_irqs = 1;
8716 bp->rx_nr_rings = 1;
8717 bp->tx_nr_rings = 1;
8718 bp->cp_nr_rings = 1;
8719 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8720 bp->irq_tbl[0].vector = bp->pdev->irq;
8721 return 0;
8722 }
8723
8724 static int bnxt_init_int_mode(struct bnxt *bp)
8725 {
8726 int rc = -ENODEV;
8727
8728 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8729 rc = bnxt_init_msix(bp);
8730
8731 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8732 /* fallback to INTA */
8733 rc = bnxt_init_inta(bp);
8734 }
8735 return rc;
8736 }
8737
8738 static void bnxt_clear_int_mode(struct bnxt *bp)
8739 {
8740 if (bp->flags & BNXT_FLAG_USING_MSIX)
8741 pci_disable_msix(bp->pdev);
8742
8743 kfree(bp->irq_tbl);
8744 bp->irq_tbl = NULL;
8745 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8746 }
8747
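/* Reserve rings with the firmware if the current configuration requires it.
 * When the MSI-X count changes under the new resource manager, interrupts
 * are torn down and re-initialized around the reservation, and the
 * TC-to-TX-ring mapping is validated afterwards.
 */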
8748 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8749 {
8750 int tcs = netdev_get_num_tc(bp->dev);
8751 bool irq_cleared = false;
8752 int rc;
8753
8754 if (!bnxt_need_reserve_rings(bp))
8755 return 0;
8756
8757 if (irq_re_init && BNXT_NEW_RM(bp) &&
8758 bnxt_get_num_msix(bp) != bp->total_irqs) {
8759 bnxt_ulp_irq_stop(bp);
8760 bnxt_clear_int_mode(bp);
8761 irq_cleared = true;
8762 }
8763 rc = __bnxt_reserve_rings(bp);
8764 if (irq_cleared) {
8765 if (!rc)
8766 rc = bnxt_init_int_mode(bp);
8767 bnxt_ulp_irq_restart(bp, rc);
8768 }
8769 if (rc) {
8770 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8771 return rc;
8772 }
8773 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
8774 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
8775 netdev_err(bp->dev, "tx ring reservation failure\n");
8776 netdev_reset_tc(bp->dev);
8777 if (bp->tx_nr_rings_xdp)
8778 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
8779 else
8780 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8781 return -ENOMEM;
8782 }
8783 return 0;
8784 }
8785
8786 static void bnxt_free_irq(struct bnxt *bp)
8787 {
8788 struct bnxt_irq *irq;
8789 int i;
8790
8791 #ifdef CONFIG_RFS_ACCEL
8792 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8793 bp->dev->rx_cpu_rmap = NULL;
8794 #endif
8795 if (!bp->irq_tbl || !bp->bnapi)
8796 return;
8797
8798 for (i = 0; i < bp->cp_nr_rings; i++) {
8799 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8800
8801 irq = &bp->irq_tbl[map_idx];
8802 if (irq->requested) {
8803 if (irq->have_cpumask) {
8804 irq_set_affinity_hint(irq->vector, NULL);
8805 free_cpumask_var(irq->cpu_mask);
8806 irq->have_cpumask = 0;
8807 }
8808 free_irq(irq->vector, bp->bnapi[i]);
8809 }
8810
8811 irq->requested = 0;
8812 }
8813 }
8814
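/* Request one IRQ per completion ring, register RX vectors in the aRFS
 * reverse map, and spread affinity hints across CPUs local to the device's
 * NUMA node.
 */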
8815 static int bnxt_request_irq(struct bnxt *bp)
8816 {
8817 int i, j, rc = 0;
8818 unsigned long flags = 0;
8819 #ifdef CONFIG_RFS_ACCEL
8820 struct cpu_rmap *rmap;
8821 #endif
8822
8823 rc = bnxt_setup_int_mode(bp);
8824 if (rc) {
8825 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8826 rc);
8827 return rc;
8828 }
8829 #ifdef CONFIG_RFS_ACCEL
8830 rmap = bp->dev->rx_cpu_rmap;
8831 #endif
8832 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8833 flags = IRQF_SHARED;
8834
8835 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8836 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8837 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8838
8839 #ifdef CONFIG_RFS_ACCEL
8840 if (rmap && bp->bnapi[i]->rx_ring) {
8841 rc = irq_cpu_rmap_add(rmap, irq->vector);
8842 if (rc)
8843 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8844 j);
8845 j++;
8846 }
8847 #endif
8848 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8849 bp->bnapi[i]);
8850 if (rc)
8851 break;
8852
8853 irq->requested = 1;
8854
8855 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8856 int numa_node = dev_to_node(&bp->pdev->dev);
8857
8858 irq->have_cpumask = 1;
8859 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8860 irq->cpu_mask);
8861 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8862 if (rc) {
8863 netdev_warn(bp->dev,
8864 "Set affinity failed, IRQ = %d\n",
8865 irq->vector);
8866 break;
8867 }
8868 }
8869 }
8870 return rc;
8871 }
8872
8873 static void bnxt_del_napi(struct bnxt *bp)
8874 {
8875 int i;
8876
8877 if (!bp->bnapi)
8878 return;
8879
8880 for (i = 0; i < bp->cp_nr_rings; i++) {
8881 struct bnxt_napi *bnapi = bp->bnapi[i];
8882
8883 __netif_napi_del(&bnapi->napi);
8884 }
8885 /* We called __netif_napi_del(), so we need
8886 * to respect an RCU grace period before freeing napi structures.
8887 */
8888 synchronize_net();
8889 }
8890
8891 static void bnxt_init_napi(struct bnxt *bp)
8892 {
8893 int i;
8894 unsigned int cp_nr_rings = bp->cp_nr_rings;
8895 struct bnxt_napi *bnapi;
8896
8897 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8898 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8899
8900 if (bp->flags & BNXT_FLAG_CHIP_P5)
8901 poll_fn = bnxt_poll_p5;
8902 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8903 cp_nr_rings--;
8904 for (i = 0; i < cp_nr_rings; i++) {
8905 bnapi = bp->bnapi[i];
8906 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8907 }
8908 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8909 bnapi = bp->bnapi[cp_nr_rings];
8910 netif_napi_add(bp->dev, &bnapi->napi,
8911 bnxt_poll_nitroa0, 64);
8912 }
8913 } else {
8914 bnapi = bp->bnapi[0];
8915 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8916 }
8917 }
8918
8919 static void bnxt_disable_napi(struct bnxt *bp)
8920 {
8921 int i;
8922
8923 if (!bp->bnapi)
8924 return;
8925
8926 for (i = 0; i < bp->cp_nr_rings; i++) {
8927 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8928
8929 napi_disable(&bp->bnapi[i]->napi);
8930 if (bp->bnapi[i]->rx_ring)
8931 cancel_work_sync(&cpr->dim.work);
8932 }
8933 }
8934
8935 static void bnxt_enable_napi(struct bnxt *bp)
8936 {
8937 int i;
8938
8939 for (i = 0; i < bp->cp_nr_rings; i++) {
8940 struct bnxt_napi *bnapi = bp->bnapi[i];
8941 struct bnxt_cp_ring_info *cpr;
8942
8943 cpr = &bnapi->cp_ring;
8944 if (bnapi->in_reset)
8945 cpr->sw_stats.rx.rx_resets++;
8946 bnapi->in_reset = false;
8947
8948 if (bnapi->rx_ring) {
8949 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8950 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8951 }
8952 napi_enable(&bnapi->napi);
8953 }
8954 }
8955
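/* Mark all TX rings as closing and wait for in-flight NAPI polls to see the
 * new state before dropping the carrier and stopping the TX queues.
 */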
8956 void bnxt_tx_disable(struct bnxt *bp)
8957 {
8958 int i;
8959 struct bnxt_tx_ring_info *txr;
8960
8961 if (bp->tx_ring) {
8962 for (i = 0; i < bp->tx_nr_rings; i++) {
8963 txr = &bp->tx_ring[i];
8964 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
8965 }
8966 }
8967 /* Make sure napi polls see @dev_state change */
8968 synchronize_net();
8969 /* Drop carrier first to prevent TX timeout */
8970 netif_carrier_off(bp->dev);
8971 /* Stop all TX queues */
8972 netif_tx_disable(bp->dev);
8973 }
8974
8975 void bnxt_tx_enable(struct bnxt *bp)
8976 {
8977 int i;
8978 struct bnxt_tx_ring_info *txr;
8979
8980 for (i = 0; i < bp->tx_nr_rings; i++) {
8981 txr = &bp->tx_ring[i];
8982 WRITE_ONCE(txr->dev_state, 0);
8983 }
8984 /* Make sure napi polls see @dev_state change */
8985 synchronize_net();
8986 netif_tx_wake_all_queues(bp->dev);
8987 if (bp->link_info.link_up)
8988 netif_carrier_on(bp->dev);
8989 }
8990
8991 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
8992 {
8993 u8 active_fec = link_info->active_fec_sig_mode &
8994 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
8995
8996 switch (active_fec) {
8997 default:
8998 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
8999 return "None";
9000 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9001 return "Clause 74 BaseR";
9002 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9003 return "Clause 91 RS(528,514)";
9004 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9005 return "Clause 91 RS544_1XN";
9006 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9007 return "Clause 91 RS(544,514)";
9008 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9009 return "Clause 91 RS272_1XN";
9010 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9011 return "Clause 91 RS(272,257)";
9012 }
9013 }
9014
9015 static void bnxt_report_link(struct bnxt *bp)
9016 {
9017 if (bp->link_info.link_up) {
9018 const char *duplex;
9019 const char *flow_ctrl;
9020 u32 speed;
9021 u16 fec;
9022
9023 netif_carrier_on(bp->dev);
9024 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9025 if (speed == SPEED_UNKNOWN) {
9026 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9027 return;
9028 }
9029 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9030 duplex = "full";
9031 else
9032 duplex = "half";
9033 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9034 flow_ctrl = "ON - receive & transmit";
9035 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9036 flow_ctrl = "ON - transmit";
9037 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9038 flow_ctrl = "ON - receive";
9039 else
9040 flow_ctrl = "none";
9041 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
9042 speed, duplex, flow_ctrl);
9043 if (bp->flags & BNXT_FLAG_EEE_CAP)
9044 netdev_info(bp->dev, "EEE is %s\n",
9045 bp->eee.eee_active ? "active" :
9046 "not active");
9047 fec = bp->link_info.fec_cfg;
9048 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9049 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9050 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9051 bnxt_report_fec(&bp->link_info));
9052 } else {
9053 netif_carrier_off(bp->dev);
9054 netdev_err(bp->dev, "NIC Link is Down\n");
9055 }
9056 }
9057
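/* Return true if the PHY reports no supported NRZ or PAM4 speeds in either
 * auto or forced mode, i.e. the Ethernet link is effectively disabled.
 */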
9058 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9059 {
9060 if (!resp->supported_speeds_auto_mode &&
9061 !resp->supported_speeds_force_mode &&
9062 !resp->supported_pam4_speeds_auto_mode &&
9063 !resp->supported_pam4_speeds_force_mode)
9064 return true;
9065 return false;
9066 }
9067
9068 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9069 {
9070 int rc = 0;
9071 struct hwrm_port_phy_qcaps_input req = {0};
9072 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9073 struct bnxt_link_info *link_info = &bp->link_info;
9074
9075 bp->flags &= ~BNXT_FLAG_EEE_CAP;
9076 if (bp->test_info)
9077 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
9078 BNXT_TEST_FL_AN_PHY_LPBK);
9079 if (bp->hwrm_spec_code < 0x10201)
9080 return 0;
9081
9082 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9083
9084 mutex_lock(&bp->hwrm_cmd_lock);
9085 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9086 if (rc)
9087 goto hwrm_phy_qcaps_exit;
9088
9089 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9090 struct ethtool_eee *eee = &bp->eee;
9091 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9092
9093 bp->flags |= BNXT_FLAG_EEE_CAP;
9094 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9095 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9096 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9097 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9098 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9099 }
9100 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
9101 if (bp->test_info)
9102 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
9103 }
9104 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
9105 if (bp->test_info)
9106 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
9107 }
9108 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
9109 if (BNXT_PF(bp))
9110 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
9111 }
9112 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
9113 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
9114
9115 if (bp->hwrm_spec_code >= 0x10a01) {
9116 if (bnxt_phy_qcaps_no_speed(resp)) {
9117 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9118 netdev_warn(bp->dev, "Ethernet link disabled\n");
9119 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9120 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9121 netdev_info(bp->dev, "Ethernet link enabled\n");
9122 /* Phy re-enabled, reprobe the speeds */
9123 link_info->support_auto_speeds = 0;
9124 link_info->support_pam4_auto_speeds = 0;
9125 }
9126 }
9127 if (resp->supported_speeds_auto_mode)
9128 link_info->support_auto_speeds =
9129 le16_to_cpu(resp->supported_speeds_auto_mode);
9130 if (resp->supported_pam4_speeds_auto_mode)
9131 link_info->support_pam4_auto_speeds =
9132 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9133
9134 bp->port_count = resp->port_cnt;
9135
9136 hwrm_phy_qcaps_exit:
9137 mutex_unlock(&bp->hwrm_cmd_lock);
9138 return rc;
9139 }
9140
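/* Return true if @advertising contains any speed bit that is missing from
 * @supported.
 */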
9141 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9142 {
9143 u16 diff = advertising ^ supported;
9144
9145 return ((supported | diff) != supported);
9146 }
9147
9148 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9149 {
9150 int rc = 0;
9151 struct bnxt_link_info *link_info = &bp->link_info;
9152 struct hwrm_port_phy_qcfg_input req = {0};
9153 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9154 u8 link_up = link_info->link_up;
9155 bool support_changed = false;
9156
9157 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9158
9159 mutex_lock(&bp->hwrm_cmd_lock);
9160 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9161 if (rc) {
9162 mutex_unlock(&bp->hwrm_cmd_lock);
9163 return rc;
9164 }
9165
9166 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9167 link_info->phy_link_status = resp->link;
9168 link_info->duplex = resp->duplex_cfg;
9169 if (bp->hwrm_spec_code >= 0x10800)
9170 link_info->duplex = resp->duplex_state;
9171 link_info->pause = resp->pause;
9172 link_info->auto_mode = resp->auto_mode;
9173 link_info->auto_pause_setting = resp->auto_pause;
9174 link_info->lp_pause = resp->link_partner_adv_pause;
9175 link_info->force_pause_setting = resp->force_pause;
9176 link_info->duplex_setting = resp->duplex_cfg;
9177 if (link_info->phy_link_status == BNXT_LINK_LINK)
9178 link_info->link_speed = le16_to_cpu(resp->link_speed);
9179 else
9180 link_info->link_speed = 0;
9181 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9182 link_info->force_pam4_link_speed =
9183 le16_to_cpu(resp->force_pam4_link_speed);
9184 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9185 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9186 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9187 link_info->auto_pam4_link_speeds =
9188 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9189 link_info->lp_auto_link_speeds =
9190 le16_to_cpu(resp->link_partner_adv_speeds);
9191 link_info->lp_auto_pam4_link_speeds =
9192 resp->link_partner_pam4_adv_speeds;
9193 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9194 link_info->phy_ver[0] = resp->phy_maj;
9195 link_info->phy_ver[1] = resp->phy_min;
9196 link_info->phy_ver[2] = resp->phy_bld;
9197 link_info->media_type = resp->media_type;
9198 link_info->phy_type = resp->phy_type;
9199 link_info->transceiver = resp->xcvr_pkg_type;
9200 link_info->phy_addr = resp->eee_config_phy_addr &
9201 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9202 link_info->module_status = resp->module_status;
9203
9204 if (bp->flags & BNXT_FLAG_EEE_CAP) {
9205 struct ethtool_eee *eee = &bp->eee;
9206 u16 fw_speeds;
9207
9208 eee->eee_active = 0;
9209 if (resp->eee_config_phy_addr &
9210 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9211 eee->eee_active = 1;
9212 fw_speeds = le16_to_cpu(
9213 resp->link_partner_adv_eee_link_speed_mask);
9214 eee->lp_advertised =
9215 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9216 }
9217
9218 /* Pull initial EEE config */
9219 if (!chng_link_state) {
9220 if (resp->eee_config_phy_addr &
9221 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9222 eee->eee_enabled = 1;
9223
9224 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9225 eee->advertised =
9226 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9227
9228 if (resp->eee_config_phy_addr &
9229 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9230 __le32 tmr;
9231
9232 eee->tx_lpi_enabled = 1;
9233 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9234 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9235 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9236 }
9237 }
9238 }
9239
9240 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9241 if (bp->hwrm_spec_code >= 0x10504) {
9242 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9243 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9244 }
9245 /* TODO: need to add more logic to report VF link */
9246 if (chng_link_state) {
9247 if (link_info->phy_link_status == BNXT_LINK_LINK)
9248 link_info->link_up = 1;
9249 else
9250 link_info->link_up = 0;
9251 if (link_up != link_info->link_up)
9252 bnxt_report_link(bp);
9253 } else {
9254 /* always link down if not required to update link state */
9255 link_info->link_up = 0;
9256 }
9257 mutex_unlock(&bp->hwrm_cmd_lock);
9258
9259 if (!BNXT_PHY_CFG_ABLE(bp))
9260 return 0;
9261
9262 /* Check if any advertised speeds are no longer supported. The caller
9263 * holds the link_lock mutex, so we can modify link_info settings.
9264 */
9265 if (bnxt_support_dropped(link_info->advertising,
9266 link_info->support_auto_speeds)) {
9267 link_info->advertising = link_info->support_auto_speeds;
9268 support_changed = true;
9269 }
9270 if (bnxt_support_dropped(link_info->advertising_pam4,
9271 link_info->support_pam4_auto_speeds)) {
9272 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9273 support_changed = true;
9274 }
9275 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9276 bnxt_hwrm_set_link_setting(bp, true, false);
9277 return 0;
9278 }
9279
9280 static void bnxt_get_port_module_status(struct bnxt *bp)
9281 {
9282 struct bnxt_link_info *link_info = &bp->link_info;
9283 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9284 u8 module_status;
9285
9286 if (bnxt_update_link(bp, true))
9287 return;
9288
9289 module_status = link_info->module_status;
9290 switch (module_status) {
9291 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9292 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9293 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9294 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9295 bp->pf.port_id);
9296 if (bp->hwrm_spec_code >= 0x10201) {
9297 netdev_warn(bp->dev, "Module part number %s\n",
9298 resp->phy_vendor_partnumber);
9299 }
9300 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9301 netdev_warn(bp->dev, "TX is disabled\n");
9302 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9303 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9304 }
9305 }
9306
9307 static void
9308 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9309 {
9310 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9311 if (bp->hwrm_spec_code >= 0x10201)
9312 req->auto_pause =
9313 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9314 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9315 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9316 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9317 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9318 req->enables |=
9319 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9320 } else {
9321 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9322 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9323 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9324 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9325 req->enables |=
9326 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9327 if (bp->hwrm_spec_code >= 0x10201) {
9328 req->auto_pause = req->force_pause;
9329 req->enables |= cpu_to_le32(
9330 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9331 }
9332 }
9333 }
9334
9335 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9336 {
9337 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9338 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9339 if (bp->link_info.advertising) {
9340 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9341 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9342 }
9343 if (bp->link_info.advertising_pam4) {
9344 req->enables |=
9345 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9346 req->auto_link_pam4_speed_mask =
9347 cpu_to_le16(bp->link_info.advertising_pam4);
9348 }
9349 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9350 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9351 } else {
9352 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9353 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9354 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9355 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9356 } else {
9357 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9358 }
9359 }
9360
9361 /* tell chimp that the setting takes effect immediately */
9362 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9363 }
9364
9365 int bnxt_hwrm_set_pause(struct bnxt *bp)
9366 {
9367 struct hwrm_port_phy_cfg_input req = {0};
9368 int rc;
9369
9370 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9371 bnxt_hwrm_set_pause_common(bp, &req);
9372
9373 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9374 bp->link_info.force_link_chng)
9375 bnxt_hwrm_set_link_common(bp, &req);
9376
9377 mutex_lock(&bp->hwrm_cmd_lock);
9378 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9379 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9380 /* since changing the pause setting doesn't trigger any link
9381 * change event, the driver needs to update the current pause
9382 * result upon successful return of the phy_cfg command
9383 */
9384 bp->link_info.pause =
9385 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9386 bp->link_info.auto_pause_setting = 0;
9387 if (!bp->link_info.force_link_chng)
9388 bnxt_report_link(bp);
9389 }
9390 bp->link_info.force_link_chng = false;
9391 mutex_unlock(&bp->hwrm_cmd_lock);
9392 return rc;
9393 }
9394
9395 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9396 struct hwrm_port_phy_cfg_input *req)
9397 {
9398 struct ethtool_eee *eee = &bp->eee;
9399
9400 if (eee->eee_enabled) {
9401 u16 eee_speeds;
9402 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9403
9404 if (eee->tx_lpi_enabled)
9405 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9406 else
9407 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9408
9409 req->flags |= cpu_to_le32(flags);
9410 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9411 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9412 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9413 } else {
9414 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9415 }
9416 }
9417
9418 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9419 {
9420 struct hwrm_port_phy_cfg_input req = {0};
9421
9422 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9423 if (set_pause)
9424 bnxt_hwrm_set_pause_common(bp, &req);
9425
9426 bnxt_hwrm_set_link_common(bp, &req);
9427
9428 if (set_eee)
9429 bnxt_hwrm_set_eee(bp, &req);
9430 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9431 }
9432
9433 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9434 {
9435 struct hwrm_port_phy_cfg_input req = {0};
9436
9437 if (!BNXT_SINGLE_PF(bp))
9438 return 0;
9439
9440 if (pci_num_vf(bp->pdev))
9441 return 0;
9442
9443 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9444 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9445 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9446 }
9447
9448 static int bnxt_fw_init_one(struct bnxt *bp);
9449
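/* Tell the firmware that the interface is going up or down.  On the up path,
 * a reported resource change or completed hot firmware reset triggers
 * re-initialization of context memory, interrupts and reserved resources.
 */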
9450 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9451 {
9452 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9453 struct hwrm_func_drv_if_change_input req = {0};
9454 bool fw_reset = !bp->irq_tbl;
9455 bool resc_reinit = false;
9456 u32 flags = 0;
9457 int rc;
9458
9459 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9460 return 0;
9461
9462 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9463 if (up)
9464 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9465 mutex_lock(&bp->hwrm_cmd_lock);
9466 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9467 if (!rc)
9468 flags = le32_to_cpu(resp->flags);
9469 mutex_unlock(&bp->hwrm_cmd_lock);
9470 if (rc)
9471 return rc;
9472
9473 if (!up)
9474 return 0;
9475
9476 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9477 resc_reinit = true;
9478 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9479 fw_reset = true;
9480
9481 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9482 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9483 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9484 return -ENODEV;
9485 }
9486 if (resc_reinit || fw_reset) {
9487 if (fw_reset) {
9488 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9489 bnxt_ulp_stop(bp);
9490 bnxt_free_ctx_mem(bp);
9491 kfree(bp->ctx);
9492 bp->ctx = NULL;
9493 bnxt_dcb_free(bp);
9494 rc = bnxt_fw_init_one(bp);
9495 if (rc) {
9496 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9497 return rc;
9498 }
9499 bnxt_clear_int_mode(bp);
9500 rc = bnxt_init_int_mode(bp);
9501 if (rc) {
9502 netdev_err(bp->dev, "init int mode failed\n");
9503 return rc;
9504 }
9505 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9506 }
9507 if (BNXT_NEW_RM(bp)) {
9508 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9509
9510 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9511 hw_resc->resv_cp_rings = 0;
9512 hw_resc->resv_stat_ctxs = 0;
9513 hw_resc->resv_irqs = 0;
9514 hw_resc->resv_tx_rings = 0;
9515 hw_resc->resv_rx_rings = 0;
9516 hw_resc->resv_hw_ring_grps = 0;
9517 hw_resc->resv_vnics = 0;
9518 if (!fw_reset) {
9519 bp->tx_nr_rings = 0;
9520 bp->rx_nr_rings = 0;
9521 }
9522 }
9523 }
9524 return 0;
9525 }
9526
9527 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9528 {
9529 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9530 struct hwrm_port_led_qcaps_input req = {0};
9531 struct bnxt_pf_info *pf = &bp->pf;
9532 int rc;
9533
9534 bp->num_leds = 0;
9535 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9536 return 0;
9537
9538 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9539 req.port_id = cpu_to_le16(pf->port_id);
9540 mutex_lock(&bp->hwrm_cmd_lock);
9541 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9542 if (rc) {
9543 mutex_unlock(&bp->hwrm_cmd_lock);
9544 return rc;
9545 }
9546 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9547 int i;
9548
9549 bp->num_leds = resp->num_leds;
9550 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9551 bp->num_leds);
9552 for (i = 0; i < bp->num_leds; i++) {
9553 struct bnxt_led_info *led = &bp->leds[i];
9554 __le16 caps = led->led_state_caps;
9555
9556 if (!led->led_group_id ||
9557 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9558 bp->num_leds = 0;
9559 break;
9560 }
9561 }
9562 }
9563 mutex_unlock(&bp->hwrm_cmd_lock);
9564 return 0;
9565 }
9566
9567 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9568 {
9569 struct hwrm_wol_filter_alloc_input req = {0};
9570 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9571 int rc;
9572
9573 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9574 req.port_id = cpu_to_le16(bp->pf.port_id);
9575 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9576 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9577 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9578 mutex_lock(&bp->hwrm_cmd_lock);
9579 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9580 if (!rc)
9581 bp->wol_filter_id = resp->wol_filter_id;
9582 mutex_unlock(&bp->hwrm_cmd_lock);
9583 return rc;
9584 }
9585
9586 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9587 {
9588 struct hwrm_wol_filter_free_input req = {0};
9589
9590 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9591 req.port_id = cpu_to_le16(bp->pf.port_id);
9592 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9593 req.wol_filter_id = bp->wol_filter_id;
9594 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9595 }
9596
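/* Query one WoL filter from the firmware starting at @handle.  A Magic Packet
 * filter is recorded in @bp, and the next handle is returned so the caller
 * can walk the list (0 or 0xffff terminates the scan).
 */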
9597 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9598 {
9599 struct hwrm_wol_filter_qcfg_input req = {0};
9600 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9601 u16 next_handle = 0;
9602 int rc;
9603
9604 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9605 req.port_id = cpu_to_le16(bp->pf.port_id);
9606 req.handle = cpu_to_le16(handle);
9607 mutex_lock(&bp->hwrm_cmd_lock);
9608 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9609 if (!rc) {
9610 next_handle = le16_to_cpu(resp->next_handle);
9611 if (next_handle != 0) {
9612 if (resp->wol_type ==
9613 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9614 bp->wol = 1;
9615 bp->wol_filter_id = resp->wol_filter_id;
9616 }
9617 }
9618 }
9619 mutex_unlock(&bp->hwrm_cmd_lock);
9620 return next_handle;
9621 }
9622
9623 static void bnxt_get_wol_settings(struct bnxt *bp)
9624 {
9625 u16 handle = 0;
9626
9627 bp->wol = 0;
9628 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9629 return;
9630
9631 do {
9632 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9633 } while (handle && handle != 0xffff);
9634 }
9635
9636 #ifdef CONFIG_BNXT_HWMON
9637 static ssize_t bnxt_show_temp(struct device *dev,
9638 struct device_attribute *devattr, char *buf)
9639 {
9640 struct hwrm_temp_monitor_query_input req = {0};
9641 struct hwrm_temp_monitor_query_output *resp;
9642 struct bnxt *bp = dev_get_drvdata(dev);
9643 u32 len = 0;
9644 int rc;
9645
9646 resp = bp->hwrm_cmd_resp_addr;
9647 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9648 mutex_lock(&bp->hwrm_cmd_lock);
9649 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9650 if (!rc)
9651 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9652 mutex_unlock(&bp->hwrm_cmd_lock);
9653 if (rc)
9654 return rc;
9655 return len;
9656 }
9657 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9658
9659 static struct attribute *bnxt_attrs[] = {
9660 &sensor_dev_attr_temp1_input.dev_attr.attr,
9661 NULL
9662 };
9663 ATTRIBUTE_GROUPS(bnxt);
9664
9665 static void bnxt_hwmon_close(struct bnxt *bp)
9666 {
9667 if (bp->hwmon_dev) {
9668 hwmon_device_unregister(bp->hwmon_dev);
9669 bp->hwmon_dev = NULL;
9670 }
9671 }
9672
9673 static void bnxt_hwmon_open(struct bnxt *bp)
9674 {
9675 struct hwrm_temp_monitor_query_input req = {0};
9676 struct pci_dev *pdev = bp->pdev;
9677 int rc;
9678
9679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9680 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9681 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9682 bnxt_hwmon_close(bp);
9683 return;
9684 }
9685
9686 if (bp->hwmon_dev)
9687 return;
9688
9689 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9690 DRV_MODULE_NAME, bp,
9691 bnxt_groups);
9692 if (IS_ERR(bp->hwmon_dev)) {
9693 bp->hwmon_dev = NULL;
9694 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9695 }
9696 }
9697 #else
9698 static void bnxt_hwmon_close(struct bnxt *bp)
9699 {
9700 }
9701
9702 static void bnxt_hwmon_open(struct bnxt *bp)
9703 {
9704 }
9705 #endif
9706
9707 static bool bnxt_eee_config_ok(struct bnxt *bp)
9708 {
9709 struct ethtool_eee *eee = &bp->eee;
9710 struct bnxt_link_info *link_info = &bp->link_info;
9711
9712 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9713 return true;
9714
9715 if (eee->eee_enabled) {
9716 u32 advertising =
9717 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9718
9719 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9720 eee->eee_enabled = 0;
9721 return false;
9722 }
9723 if (eee->advertised & ~advertising) {
9724 eee->advertised = advertising & eee->supported;
9725 return false;
9726 }
9727 }
9728 return true;
9729 }
9730
9731 static int bnxt_update_phy_setting(struct bnxt *bp)
9732 {
9733 int rc;
9734 bool update_link = false;
9735 bool update_pause = false;
9736 bool update_eee = false;
9737 struct bnxt_link_info *link_info = &bp->link_info;
9738
9739 rc = bnxt_update_link(bp, true);
9740 if (rc) {
9741 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9742 rc);
9743 return rc;
9744 }
9745 if (!BNXT_SINGLE_PF(bp))
9746 return 0;
9747
9748 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9749 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9750 link_info->req_flow_ctrl)
9751 update_pause = true;
9752 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9753 link_info->force_pause_setting != link_info->req_flow_ctrl)
9754 update_pause = true;
9755 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9756 if (BNXT_AUTO_MODE(link_info->auto_mode))
9757 update_link = true;
9758 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
9759 link_info->req_link_speed != link_info->force_link_speed)
9760 update_link = true;
9761 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
9762 link_info->req_link_speed != link_info->force_pam4_link_speed)
9763 update_link = true;
9764 if (link_info->req_duplex != link_info->duplex_setting)
9765 update_link = true;
9766 } else {
9767 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9768 update_link = true;
9769 if (link_info->advertising != link_info->auto_link_speeds ||
9770 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
9771 update_link = true;
9772 }
9773
9774 /* The last close may have shut down the link, so we need to call
9775 * PHY_CFG to bring it back up.
9776 */
9777 if (!bp->link_info.link_up)
9778 update_link = true;
9779
9780 if (!bnxt_eee_config_ok(bp))
9781 update_eee = true;
9782
9783 if (update_link)
9784 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9785 else if (update_pause)
9786 rc = bnxt_hwrm_set_pause(bp);
9787 if (rc) {
9788 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9789 rc);
9790 return rc;
9791 }
9792
9793 return rc;
9794 }
9795
9796 /* Common routine to pre-map certain register block to different GRC window.
9797 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9798 * in the PF and 3 windows in the VF can be customized to map in different
9799 * register blocks.
9800 */
9801 static void bnxt_preset_reg_win(struct bnxt *bp)
9802 {
9803 if (BNXT_PF(bp)) {
9804 /* CAG registers map to GRC window #4 */
9805 writel(BNXT_CAG_REG_BASE,
9806 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9807 }
9808 }
9809
9810 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9811
9812 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9813 {
9814 int rc = 0;
9815
9816 bnxt_preset_reg_win(bp);
9817 netif_carrier_off(bp->dev);
9818 if (irq_re_init) {
9819 /* Reserve rings now if none were reserved at driver probe. */
9820 rc = bnxt_init_dflt_ring_mode(bp);
9821 if (rc) {
9822 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9823 return rc;
9824 }
9825 }
9826 rc = bnxt_reserve_rings(bp, irq_re_init);
9827 if (rc)
9828 return rc;
9829 if ((bp->flags & BNXT_FLAG_RFS) &&
9830 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9831 /* disable RFS if falling back to INTA */
9832 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9833 bp->flags &= ~BNXT_FLAG_RFS;
9834 }
9835
9836 rc = bnxt_alloc_mem(bp, irq_re_init);
9837 if (rc) {
9838 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9839 goto open_err_free_mem;
9840 }
9841
9842 if (irq_re_init) {
9843 bnxt_init_napi(bp);
9844 rc = bnxt_request_irq(bp);
9845 if (rc) {
9846 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9847 goto open_err_irq;
9848 }
9849 }
9850
9851 rc = bnxt_init_nic(bp, irq_re_init);
9852 if (rc) {
9853 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9854 goto open_err_irq;
9855 }
9856
9857 bnxt_enable_napi(bp);
9858 bnxt_debug_dev_init(bp);
9859
9860 if (link_re_init) {
9861 mutex_lock(&bp->link_lock);
9862 rc = bnxt_update_phy_setting(bp);
9863 mutex_unlock(&bp->link_lock);
9864 if (rc) {
9865 netdev_warn(bp->dev, "failed to update phy settings\n");
9866 if (BNXT_SINGLE_PF(bp)) {
9867 bp->link_info.phy_retry = true;
9868 bp->link_info.phy_retry_expires =
9869 jiffies + 5 * HZ;
9870 }
9871 }
9872 }
9873
9874 if (irq_re_init)
9875 udp_tunnel_nic_reset_ntf(bp->dev);
9876
9877 set_bit(BNXT_STATE_OPEN, &bp->state);
9878 bnxt_enable_int(bp);
9879 /* Enable TX queues */
9880 bnxt_tx_enable(bp);
9881 mod_timer(&bp->timer, jiffies + bp->current_interval);
9882 /* Poll link status and check for SFP+ module status */
9883 bnxt_get_port_module_status(bp);
9884
9885 /* VF-reps may need to be re-opened after the PF is re-opened */
9886 if (BNXT_PF(bp))
9887 bnxt_vf_reps_open(bp);
9888 return 0;
9889
9890 open_err_irq:
9891 bnxt_del_napi(bp);
9892
9893 open_err_free_mem:
9894 bnxt_free_skbs(bp);
9895 bnxt_free_irq(bp);
9896 bnxt_free_mem(bp, true);
9897 return rc;
9898 }
9899
9900 /* rtnl_lock held */
9901 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9902 {
9903 int rc = 0;
9904
9905 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
9906 rc = -EIO;
9907 if (!rc)
9908 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9909 if (rc) {
9910 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9911 dev_close(bp->dev);
9912 }
9913 return rc;
9914 }
9915
9916 /* rtnl_lock held, open the NIC half way by allocating all resources, but
9917 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
9918 * self tests.
9919 */
9920 int bnxt_half_open_nic(struct bnxt *bp)
9921 {
9922 int rc = 0;
9923
9924 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9925 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
9926 rc = -ENODEV;
9927 goto half_open_err;
9928 }
9929
9930 rc = bnxt_alloc_mem(bp, false);
9931 if (rc) {
9932 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9933 goto half_open_err;
9934 }
9935 rc = bnxt_init_nic(bp, false);
9936 if (rc) {
9937 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9938 goto half_open_err;
9939 }
9940 return 0;
9941
9942 half_open_err:
9943 bnxt_free_skbs(bp);
9944 bnxt_free_mem(bp, false);
9945 dev_close(bp->dev);
9946 return rc;
9947 }
9948
9949 /* rtnl_lock held, this call can only be made after a previous successful
9950 * call to bnxt_half_open_nic().
9951 */
9952 void bnxt_half_close_nic(struct bnxt *bp)
9953 {
9954 bnxt_hwrm_resource_free(bp, false, false);
9955 bnxt_free_skbs(bp);
9956 bnxt_free_mem(bp, false);
9957 }
9958
9959 static void bnxt_reenable_sriov(struct bnxt *bp)
9960 {
9961 if (BNXT_PF(bp)) {
9962 struct bnxt_pf_info *pf = &bp->pf;
9963 int n = pf->active_vfs;
9964
9965 if (n)
9966 bnxt_cfg_hw_sriov(bp, &n, true);
9967 }
9968 }
9969
9970 static int bnxt_open(struct net_device *dev)
9971 {
9972 struct bnxt *bp = netdev_priv(dev);
9973 int rc;
9974
9975 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9976 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9977 return -ENODEV;
9978 }
9979
9980 rc = bnxt_hwrm_if_change(bp, true);
9981 if (rc)
9982 return rc;
9983 rc = __bnxt_open_nic(bp, true, true);
9984 if (rc) {
9985 bnxt_hwrm_if_change(bp, false);
9986 } else {
9987 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9988 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9989 bnxt_ulp_start(bp, 0);
9990 bnxt_reenable_sriov(bp);
9991 }
9992 }
9993 bnxt_hwmon_open(bp);
9994 }
9995
9996 return rc;
9997 }
9998
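/* Return true while the service task or a stats read still references the
 * rings, so that the close path can wait for them to finish.
 */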
9999 static bool bnxt_drv_busy(struct bnxt *bp)
10000 {
10001 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10002 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10003 }
10004
10005 static void bnxt_get_ring_stats(struct bnxt *bp,
10006 struct rtnl_link_stats64 *stats);
10007
10008 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10009 bool link_re_init)
10010 {
10011 /* Close the VF-reps before closing PF */
10012 if (BNXT_PF(bp))
10013 bnxt_vf_reps_close(bp);
10014
10015 /* Change device state to avoid TX queue wake-ups */
10016 bnxt_tx_disable(bp);
10017
10018 clear_bit(BNXT_STATE_OPEN, &bp->state);
10019 smp_mb__after_atomic();
10020 while (bnxt_drv_busy(bp))
10021 msleep(20);
10022
10023 /* Flush rings and disable interrupts */
10024 bnxt_shutdown_nic(bp, irq_re_init);
10025
10026 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10027
10028 bnxt_debug_dev_exit(bp);
10029 bnxt_disable_napi(bp);
10030 del_timer_sync(&bp->timer);
10031 bnxt_free_skbs(bp);
10032
10033 /* Save ring stats before shutdown */
10034 if (bp->bnapi && irq_re_init)
10035 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10036 if (irq_re_init) {
10037 bnxt_free_irq(bp);
10038 bnxt_del_napi(bp);
10039 }
10040 bnxt_free_mem(bp, irq_re_init);
10041 }
10042
10043 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10044 {
10045 int rc = 0;
10046
10047 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10048 /* If we get here, it means firmware reset is in progress
10049 * while we are trying to close. We can safely proceed with
10050 * the close because we are holding rtnl_lock(). Some firmware
10051 * messages may fail as we proceed to close. We set the
10052 * ABORT_ERR flag here so that the FW reset thread will later
10053 * abort when it gets the rtnl_lock() and sees the flag.
10054 */
10055 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10056 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10057 }
10058
10059 #ifdef CONFIG_BNXT_SRIOV
10060 if (bp->sriov_cfg) {
10061 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10062 !bp->sriov_cfg,
10063 BNXT_SRIOV_CFG_WAIT_TMO);
10064 if (!rc)
10065 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10066 }
10067 #endif
10068 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10069 return rc;
10070 }
10071
10072 static int bnxt_close(struct net_device *dev)
10073 {
10074 struct bnxt *bp = netdev_priv(dev);
10075
10076 bnxt_hwmon_close(bp);
10077 bnxt_close_nic(bp, true, true);
10078 bnxt_hwrm_shutdown_link(bp);
10079 bnxt_hwrm_if_change(bp, false);
10080 return 0;
10081 }
10082
10083 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10084 u16 *val)
10085 {
10086 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10087 struct hwrm_port_phy_mdio_read_input req = {0};
10088 int rc;
10089
10090 if (bp->hwrm_spec_code < 0x10a00)
10091 return -EOPNOTSUPP;
10092
10093 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10094 req.port_id = cpu_to_le16(bp->pf.port_id);
10095 req.phy_addr = phy_addr;
10096 req.reg_addr = cpu_to_le16(reg & 0x1f);
10097 if (mdio_phy_id_is_c45(phy_addr)) {
10098 req.cl45_mdio = 1;
10099 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10100 req.dev_addr = mdio_phy_id_devad(phy_addr);
10101 req.reg_addr = cpu_to_le16(reg);
10102 }
10103
10104 mutex_lock(&bp->hwrm_cmd_lock);
10105 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10106 if (!rc)
10107 *val = le16_to_cpu(resp->reg_data);
10108 mutex_unlock(&bp->hwrm_cmd_lock);
10109 return rc;
10110 }
10111
10112 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10113 u16 val)
10114 {
10115 struct hwrm_port_phy_mdio_write_input req = {0};
10116
10117 if (bp->hwrm_spec_code < 0x10a00)
10118 return -EOPNOTSUPP;
10119
10120 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10121 req.port_id = cpu_to_le16(bp->pf.port_id);
10122 req.phy_addr = phy_addr;
10123 req.reg_addr = cpu_to_le16(reg & 0x1f);
10124 if (mdio_phy_id_is_c45(phy_addr)) {
10125 req.cl45_mdio = 1;
10126 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10127 req.dev_addr = mdio_phy_id_devad(phy_addr);
10128 req.reg_addr = cpu_to_le16(reg);
10129 }
10130 req.reg_data = cpu_to_le16(val);
10131
10132 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10133 }
10134
10135 /* rtnl_lock held */
10136 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10137 {
10138 struct mii_ioctl_data *mdio = if_mii(ifr);
10139 struct bnxt *bp = netdev_priv(dev);
10140 int rc;
10141
10142 switch (cmd) {
10143 case SIOCGMIIPHY:
10144 mdio->phy_id = bp->link_info.phy_addr;
10145
10146 fallthrough;
10147 case SIOCGMIIREG: {
10148 u16 mii_regval = 0;
10149
10150 if (!netif_running(dev))
10151 return -EAGAIN;
10152
10153 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10154 &mii_regval);
10155 mdio->val_out = mii_regval;
10156 return rc;
10157 }
10158
10159 case SIOCSMIIREG:
10160 if (!netif_running(dev))
10161 return -EAGAIN;
10162
10163 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10164 mdio->val_in);
10165
10166 default:
10167 /* do nothing */
10168 break;
10169 }
10170 return -EOPNOTSUPP;
10171 }
10172
10173 static void bnxt_get_ring_stats(struct bnxt *bp,
10174 struct rtnl_link_stats64 *stats)
10175 {
10176 int i;
10177
10178 for (i = 0; i < bp->cp_nr_rings; i++) {
10179 struct bnxt_napi *bnapi = bp->bnapi[i];
10180 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10181 u64 *sw = cpr->stats.sw_stats;
10182
10183 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10184 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10185 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10186
10187 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10188 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10189 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10190
10191 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10192 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10193 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10194
10195 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10196 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10197 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10198
10199 stats->rx_missed_errors +=
10200 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10201
10202 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10203
10204 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10205 }
10206 }
10207
10208 static void bnxt_add_prev_stats(struct bnxt *bp,
10209 struct rtnl_link_stats64 *stats)
10210 {
10211 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10212
10213 stats->rx_packets += prev_stats->rx_packets;
10214 stats->tx_packets += prev_stats->tx_packets;
10215 stats->rx_bytes += prev_stats->rx_bytes;
10216 stats->tx_bytes += prev_stats->tx_bytes;
10217 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10218 stats->multicast += prev_stats->multicast;
10219 stats->tx_dropped += prev_stats->tx_dropped;
10220 }
10221
10222 static void
10223 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10224 {
10225 struct bnxt *bp = netdev_priv(dev);
10226
10227 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10228 /* Make sure bnxt_close_nic() sees that we are reading stats before
10229 * we check the BNXT_STATE_OPEN flag.
10230 */
10231 smp_mb__after_atomic();
10232 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10233 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10234 *stats = bp->net_stats_prev;
10235 return;
10236 }
10237
10238 bnxt_get_ring_stats(bp, stats);
10239 bnxt_add_prev_stats(bp, stats);
10240
10241 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10242 u64 *rx = bp->port_stats.sw_stats;
10243 u64 *tx = bp->port_stats.sw_stats +
10244 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10245
10246 stats->rx_crc_errors =
10247 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10248 stats->rx_frame_errors =
10249 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10250 stats->rx_length_errors =
10251 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10252 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10253 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10254 stats->rx_errors =
10255 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10256 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10257 stats->collisions =
10258 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10259 stats->tx_fifo_errors =
10260 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10261 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10262 }
10263 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10264 }
10265
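/* Sync the netdev multicast list into the default VNIC.  Returns true if the
 * list changed; switches to ALL_MCAST when there are too many addresses.
 */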
10266 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10267 {
10268 struct net_device *dev = bp->dev;
10269 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10270 struct netdev_hw_addr *ha;
10271 u8 *haddr;
10272 int mc_count = 0;
10273 bool update = false;
10274 int off = 0;
10275
10276 netdev_for_each_mc_addr(ha, dev) {
10277 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10278 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10279 vnic->mc_list_count = 0;
10280 return false;
10281 }
10282 haddr = ha->addr;
10283 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10284 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10285 update = true;
10286 }
10287 off += ETH_ALEN;
10288 mc_count++;
10289 }
10290 if (mc_count)
10291 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10292
10293 if (mc_count != vnic->mc_list_count) {
10294 vnic->mc_list_count = mc_count;
10295 update = true;
10296 }
10297 return update;
10298 }
10299
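/* Return true if the netdev unicast list no longer matches the L2 filters
 * programmed in the default VNIC.
 */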
10300 static bool bnxt_uc_list_updated(struct bnxt *bp)
10301 {
10302 struct net_device *dev = bp->dev;
10303 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10304 struct netdev_hw_addr *ha;
10305 int off = 0;
10306
10307 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10308 return true;
10309
10310 netdev_for_each_uc_addr(ha, dev) {
10311 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10312 return true;
10313
10314 off += ETH_ALEN;
10315 }
10316 return false;
10317 }
10318
10319 static void bnxt_set_rx_mode(struct net_device *dev)
10320 {
10321 struct bnxt *bp = netdev_priv(dev);
10322 struct bnxt_vnic_info *vnic;
10323 bool mc_update = false;
10324 bool uc_update;
10325 u32 mask;
10326
10327 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10328 return;
10329
10330 vnic = &bp->vnic_info[0];
10331 mask = vnic->rx_mask;
10332 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10333 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10334 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10335 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10336
10337 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
10338 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10339
10340 uc_update = bnxt_uc_list_updated(bp);
10341
10342 if (dev->flags & IFF_BROADCAST)
10343 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10344 if (dev->flags & IFF_ALLMULTI) {
10345 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10346 vnic->mc_list_count = 0;
10347 } else {
10348 mc_update = bnxt_mc_list_updated(bp, &mask);
10349 }
10350
10351 if (mask != vnic->rx_mask || uc_update || mc_update) {
10352 vnic->rx_mask = mask;
10353
10354 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10355 bnxt_queue_sp_work(bp);
10356 }
10357 }
10358
10359 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10360 {
10361 struct net_device *dev = bp->dev;
10362 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10363 struct netdev_hw_addr *ha;
10364 int i, off = 0, rc;
10365 bool uc_update;
10366
10367 netif_addr_lock_bh(dev);
10368 uc_update = bnxt_uc_list_updated(bp);
10369 netif_addr_unlock_bh(dev);
10370
10371 if (!uc_update)
10372 goto skip_uc;
10373
10374 mutex_lock(&bp->hwrm_cmd_lock);
10375 for (i = 1; i < vnic->uc_filter_count; i++) {
10376 struct hwrm_cfa_l2_filter_free_input req = {0};
10377
10378 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10379 -1);
10380
10381 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10382
10383 rc = _hwrm_send_message(bp, &req, sizeof(req),
10384 HWRM_CMD_TIMEOUT);
10385 }
10386 mutex_unlock(&bp->hwrm_cmd_lock);
10387
10388 vnic->uc_filter_count = 1;
10389
10390 netif_addr_lock_bh(dev);
10391 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10392 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10393 } else {
10394 netdev_for_each_uc_addr(ha, dev) {
10395 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10396 off += ETH_ALEN;
10397 vnic->uc_filter_count++;
10398 }
10399 }
10400 netif_addr_unlock_bh(dev);
10401
10402 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10403 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10404 if (rc) {
10405 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10406 rc);
10407 vnic->uc_filter_count = i;
10408 return rc;
10409 }
10410 }
10411
10412 skip_uc:
10413 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10414 if (rc && vnic->mc_list_count) {
10415 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10416 rc);
10417 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10418 vnic->mc_list_count = 0;
10419 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10420 }
10421 if (rc)
10422 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10423 rc);
10424
10425 return rc;
10426 }
10427
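/* On a VF using the new resource manager, don't reserve rings while the
 * device is down unless the PF provisioned a minimum or rings are already
 * reserved.
 */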
10428 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10429 {
10430 #ifdef CONFIG_BNXT_SRIOV
10431 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10432 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10433
10434 /* No minimum rings were provisioned by the PF. Don't
10435 * reserve rings by default when device is down.
10436 */
10437 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10438 return true;
10439
10440 if (!netif_running(bp->dev))
10441 return false;
10442 }
10443 #endif
10444 return true;
10445 }
10446
10447 /* If the chip and firmware support RFS */
10448 static bool bnxt_rfs_supported(struct bnxt *bp)
10449 {
10450 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10451 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10452 return true;
10453 return false;
10454 }
10455 /* 212 firmware is broken for aRFS */
10456 if (BNXT_FW_MAJ(bp) == 212)
10457 return false;
10458 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10459 return true;
10460 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10461 return true;
10462 return false;
10463 }
10464
10465 /* If runtime conditions support RFS */
10466 static bool bnxt_rfs_capable(struct bnxt *bp)
10467 {
10468 #ifdef CONFIG_RFS_ACCEL
10469 int vnics, max_vnics, max_rss_ctxs;
10470
10471 if (bp->flags & BNXT_FLAG_CHIP_P5)
10472 return bnxt_rfs_supported(bp);
10473 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
10474 return false;
10475
10476 vnics = 1 + bp->rx_nr_rings;
10477 max_vnics = bnxt_get_max_func_vnics(bp);
10478 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10479
10480 /* RSS contexts not a limiting factor */
10481 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10482 max_rss_ctxs = max_vnics;
10483 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10484 if (bp->rx_nr_rings > 1)
10485 netdev_warn(bp->dev,
10486 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10487 min(max_rss_ctxs - 1, max_vnics - 1));
10488 return false;
10489 }
10490
10491 if (!BNXT_NEW_RM(bp))
10492 return true;
10493
10494 if (vnics == bp->hw_resc.resv_vnics)
10495 return true;
10496
10497 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10498 if (vnics <= bp->hw_resc.resv_vnics)
10499 return true;
10500
10501 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10502 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10503 return false;
10504 #else
10505 return false;
10506 #endif
10507 }
10508
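/* Constrain the requested features: NTUPLE requires aRFS support, GRO_HW
 * depends on GRO and excludes LRO, and RX VLAN CTAG/STAG stripping must be
 * toggled together.
 */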
10509 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10510 netdev_features_t features)
10511 {
10512 struct bnxt *bp = netdev_priv(dev);
10513 netdev_features_t vlan_features;
10514
10515 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10516 features &= ~NETIF_F_NTUPLE;
10517
10518 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10519 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10520
10521 if (!(features & NETIF_F_GRO))
10522 features &= ~NETIF_F_GRO_HW;
10523
10524 if (features & NETIF_F_GRO_HW)
10525 features &= ~NETIF_F_LRO;
10526
10527 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10528 * turned on or off together.
10529 */
10530 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10531 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10532 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10533 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10534 else if (vlan_features)
10535 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10536 }
10537 #ifdef CONFIG_BNXT_SRIOV
10538 if (BNXT_VF(bp) && bp->vf.vlan)
10539 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10540 #endif
10541 return features;
10542 }
10543
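/* ndo_set_features handler: translate the requested features into bp->flags
 * and either update TPA on the fly or close and reopen the NIC when a full
 * reconfiguration is required.
 */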
10544 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10545 {
10546 struct bnxt *bp = netdev_priv(dev);
10547 u32 flags = bp->flags;
10548 u32 changes;
10549 int rc = 0;
10550 bool re_init = false;
10551 bool update_tpa = false;
10552
10553 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10554 if (features & NETIF_F_GRO_HW)
10555 flags |= BNXT_FLAG_GRO;
10556 else if (features & NETIF_F_LRO)
10557 flags |= BNXT_FLAG_LRO;
10558
10559 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10560 flags &= ~BNXT_FLAG_TPA;
10561
10562 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10563 flags |= BNXT_FLAG_STRIP_VLAN;
10564
10565 if (features & NETIF_F_NTUPLE)
10566 flags |= BNXT_FLAG_RFS;
10567
10568 changes = flags ^ bp->flags;
10569 if (changes & BNXT_FLAG_TPA) {
10570 update_tpa = true;
10571 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10572 (flags & BNXT_FLAG_TPA) == 0 ||
10573 (bp->flags & BNXT_FLAG_CHIP_P5))
10574 re_init = true;
10575 }
10576
10577 if (changes & ~BNXT_FLAG_TPA)
10578 re_init = true;
10579
10580 if (flags != bp->flags) {
10581 u32 old_flags = bp->flags;
10582
10583 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10584 bp->flags = flags;
10585 if (update_tpa)
10586 bnxt_set_ring_params(bp);
10587 return rc;
10588 }
10589
10590 if (re_init) {
10591 bnxt_close_nic(bp, false, false);
10592 bp->flags = flags;
10593 if (update_tpa)
10594 bnxt_set_ring_params(bp);
10595
10596 return bnxt_open_nic(bp, false, false);
10597 }
10598 if (update_tpa) {
10599 bp->flags = flags;
10600 rc = bnxt_set_tpa(bp,
10601 (flags & BNXT_FLAG_TPA) ?
10602 true : false);
10603 if (rc)
10604 bp->flags = old_flags;
10605 }
10606 }
10607 return rc;
10608 }
10609
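/* Read num_words 32-bit words of chip register space starting at reg_off
 * into reg_buf using the HWRM_DBG_READ_DIRECT firmware command.
 */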
10610 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10611 u32 *reg_buf)
10612 {
10613 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10614 struct hwrm_dbg_read_direct_input req = {0};
10615 __le32 *dbg_reg_buf;
10616 dma_addr_t mapping;
10617 int rc, i;
10618
10619 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10620 &mapping, GFP_KERNEL);
10621 if (!dbg_reg_buf)
10622 return -ENOMEM;
10623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10624 req.host_dest_addr = cpu_to_le64(mapping);
10625 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10626 req.read_len32 = cpu_to_le32(num_words);
10627 mutex_lock(&bp->hwrm_cmd_lock);
10628 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10629 if (rc || resp->error_code) {
10630 rc = -EIO;
10631 goto dbg_rd_reg_exit;
10632 }
10633 for (i = 0; i < num_words; i++)
10634 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10635
10636 dbg_rd_reg_exit:
10637 mutex_unlock(&bp->hwrm_cmd_lock);
10638 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10639 return rc;
10640 }
10641
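/* Query the firmware for the current producer and consumer indices of the
 * given ring.
 */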
10642 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10643 u32 ring_id, u32 *prod, u32 *cons)
10644 {
10645 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10646 struct hwrm_dbg_ring_info_get_input req = {0};
10647 int rc;
10648
10649 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10650 req.ring_type = ring_type;
10651 req.fw_ring_id = cpu_to_le32(ring_id);
10652 mutex_lock(&bp->hwrm_cmd_lock);
10653 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10654 if (!rc) {
10655 *prod = le32_to_cpu(resp->producer_index);
10656 *cons = le32_to_cpu(resp->consumer_index);
10657 }
10658 mutex_unlock(&bp->hwrm_cmd_lock);
10659 return rc;
10660 }
10661
10662 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10663 {
10664 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10665 int i = bnapi->index;
10666
10667 if (!txr)
10668 return;
10669
10670 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10671 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10672 txr->tx_cons);
10673 }
10674
10675 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10676 {
10677 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10678 int i = bnapi->index;
10679
10680 if (!rxr)
10681 return;
10682
10683 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10684 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10685 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10686 rxr->rx_sw_agg_prod);
10687 }
10688
10689 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10690 {
10691 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10692 int i = bnapi->index;
10693
10694 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10695 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10696 }
10697
10698 static void bnxt_dbg_dump_states(struct bnxt *bp)
10699 {
10700 int i;
10701 struct bnxt_napi *bnapi;
10702
10703 for (i = 0; i < bp->cp_nr_rings; i++) {
10704 bnapi = bp->bnapi[i];
10705 if (netif_msg_drv(bp)) {
10706 bnxt_dump_tx_sw_state(bnapi);
10707 bnxt_dump_rx_sw_state(bnapi);
10708 bnxt_dump_cp_sw_state(bnapi);
10709 }
10710 }
10711 }
10712
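/* Ask the firmware to reset the RX ring group backing RX ring ring_nr. */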
10713 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
10714 {
10715 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
10716 struct hwrm_ring_reset_input req = {0};
10717 struct bnxt_napi *bnapi = rxr->bnapi;
10718 struct bnxt_cp_ring_info *cpr;
10719 u16 cp_ring_id;
10720
10721 cpr = &bnapi->cp_ring;
10722 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
10723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
10724 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
10725 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
10726 return hwrm_send_message_silent(bp, &req, sizeof(req),
10727 HWRM_CMD_TIMEOUT);
10728 }
10729
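/* Recover the device by closing and reopening the NIC.  A silent reset
 * skips the state dump and leaves IRQ assignments and the ULP untouched.
 */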
10730 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10731 {
10732 if (!silent)
10733 bnxt_dbg_dump_states(bp);
10734 if (netif_running(bp->dev)) {
10735 int rc;
10736
10737 if (silent) {
10738 bnxt_close_nic(bp, false, false);
10739 bnxt_open_nic(bp, false, false);
10740 } else {
10741 bnxt_ulp_stop(bp);
10742 bnxt_close_nic(bp, true, false);
10743 rc = bnxt_open_nic(bp, true, false);
10744 bnxt_ulp_start(bp, rc);
10745 }
10746 }
10747 }
10748
10749 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10750 {
10751 struct bnxt *bp = netdev_priv(dev);
10752
10753 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
10754 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10755 bnxt_queue_sp_work(bp);
10756 }
10757
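/* Called from bnxt_timer(): check the firmware heartbeat and reset counter
 * and schedule a firmware exception event if the heartbeat has stalled or
 * the firmware has reset itself.
 */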
10758 static void bnxt_fw_health_check(struct bnxt *bp)
10759 {
10760 struct bnxt_fw_health *fw_health = bp->fw_health;
10761 u32 val;
10762
10763 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10764 return;
10765
10766 /* Make sure it is enabled before checking the tmr_counter. */
10767 smp_rmb();
10768 if (fw_health->tmr_counter) {
10769 fw_health->tmr_counter--;
10770 return;
10771 }
10772
10773 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10774 if (val == fw_health->last_fw_heartbeat)
10775 goto fw_reset;
10776
10777 fw_health->last_fw_heartbeat = val;
10778
10779 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10780 if (val != fw_health->last_fw_reset_cnt)
10781 goto fw_reset;
10782
10783 fw_health->tmr_counter = fw_health->tmr_multiplier;
10784 return;
10785
10786 fw_reset:
10787 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10788 bnxt_queue_sp_work(bp);
10789 }
10790
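/* Periodic timer: most work (stats, flower stats, NTUPLE filters, PHY
 * retries, health checks) is deferred to bnxt_sp_task() via sp_event bits.
 */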
10791 static void bnxt_timer(struct timer_list *t)
10792 {
10793 struct bnxt *bp = from_timer(bp, t, timer);
10794 struct net_device *dev = bp->dev;
10795
10796 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10797 return;
10798
10799 if (atomic_read(&bp->intr_sem) != 0)
10800 goto bnxt_restart_timer;
10801
10802 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10803 bnxt_fw_health_check(bp);
10804
10805 if (bp->link_info.link_up && bp->stats_coal_ticks) {
10806 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10807 bnxt_queue_sp_work(bp);
10808 }
10809
10810 if (bnxt_tc_flower_enabled(bp)) {
10811 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10812 bnxt_queue_sp_work(bp);
10813 }
10814
10815 #ifdef CONFIG_RFS_ACCEL
10816 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10817 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10818 bnxt_queue_sp_work(bp);
10819 }
10820 #endif /*CONFIG_RFS_ACCEL*/
10821
10822 if (bp->link_info.phy_retry) {
10823 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10824 bp->link_info.phy_retry = false;
10825 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10826 } else {
10827 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10828 bnxt_queue_sp_work(bp);
10829 }
10830 }
10831
10832 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10833 netif_carrier_ok(dev)) {
10834 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10835 bnxt_queue_sp_work(bp);
10836 }
10837 bnxt_restart_timer:
10838 mod_timer(&bp->timer, jiffies + bp->current_interval);
10839 }
10840
10841 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10842 {
10843 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10844 * set. If the device is being closed, bnxt_close() may be holding
10845 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10846 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10847 */
10848 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10849 rtnl_lock();
10850 }
10851
10852 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10853 {
10854 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10855 rtnl_unlock();
10856 }
10857
10858 /* Only called from bnxt_sp_task() */
10859 static void bnxt_reset(struct bnxt *bp, bool silent)
10860 {
10861 bnxt_rtnl_lock_sp(bp);
10862 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10863 bnxt_reset_task(bp, silent);
10864 bnxt_rtnl_unlock_sp(bp);
10865 }
10866
10867 /* Only called from bnxt_sp_task() */
10868 static void bnxt_rx_ring_reset(struct bnxt *bp)
10869 {
10870 int i;
10871
10872 bnxt_rtnl_lock_sp(bp);
10873 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10874 bnxt_rtnl_unlock_sp(bp);
10875 return;
10876 }
10877 /* Disable and flush TPA before resetting the RX ring */
10878 if (bp->flags & BNXT_FLAG_TPA)
10879 bnxt_set_tpa(bp, false);
10880 for (i = 0; i < bp->rx_nr_rings; i++) {
10881 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
10882 struct bnxt_cp_ring_info *cpr;
10883 int rc;
10884
10885 if (!rxr->bnapi->in_reset)
10886 continue;
10887
10888 rc = bnxt_hwrm_rx_ring_reset(bp, i);
10889 if (rc) {
10890 if (rc == -EINVAL || rc == -EOPNOTSUPP)
10891 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
10892 else
10893 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
10894 rc);
10895 bnxt_reset_task(bp, true);
10896 break;
10897 }
10898 bnxt_free_one_rx_ring_skbs(bp, i);
10899 rxr->rx_prod = 0;
10900 rxr->rx_agg_prod = 0;
10901 rxr->rx_sw_agg_prod = 0;
10902 rxr->rx_next_cons = 0;
10903 rxr->bnapi->in_reset = false;
10904 bnxt_alloc_one_rx_ring(bp, i);
10905 cpr = &rxr->bnapi->cp_ring;
10906 cpr->sw_stats.rx.rx_resets++;
10907 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10908 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10909 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10910 }
10911 if (bp->flags & BNXT_FLAG_TPA)
10912 bnxt_set_tpa(bp, true);
10913 bnxt_rtnl_unlock_sp(bp);
10914 }
10915
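/* Quiesce and tear down the device in preparation for a firmware reset. */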
10916 static void bnxt_fw_reset_close(struct bnxt *bp)
10917 {
10918 bnxt_ulp_stop(bp);
10919 /* When firmware is in fatal state, disable PCI device to prevent
10920 * any potential bad DMAs before freeing kernel memory.
10921 */
10922 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10923 pci_disable_device(bp->pdev);
10924 __bnxt_close_nic(bp, true, false);
10925 bnxt_clear_int_mode(bp);
10926 bnxt_hwrm_func_drv_unrgtr(bp);
10927 if (pci_is_enabled(bp->pdev))
10928 pci_disable_device(bp->pdev);
10929 bnxt_free_ctx_mem(bp);
10930 kfree(bp->ctx);
10931 bp->ctx = NULL;
10932 }
10933
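/* After a firmware exception, consider the firmware OK if its heartbeat is
 * still advancing and its reset counter shows it has gone through a reset.
 */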
10934 static bool is_bnxt_fw_ok(struct bnxt *bp)
10935 {
10936 struct bnxt_fw_health *fw_health = bp->fw_health;
10937 bool no_heartbeat = false, has_reset = false;
10938 u32 val;
10939
10940 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10941 if (val == fw_health->last_fw_heartbeat)
10942 no_heartbeat = true;
10943
10944 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10945 if (val != fw_health->last_fw_reset_cnt)
10946 has_reset = true;
10947
10948 if (!no_heartbeat && has_reset)
10949 return true;
10950
10951 return false;
10952 }
10953
10954 /* rtnl_lock is acquired before calling this function */
10955 static void bnxt_force_fw_reset(struct bnxt *bp)
10956 {
10957 struct bnxt_fw_health *fw_health = bp->fw_health;
10958 u32 wait_dsecs;
10959
10960 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10961 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10962 return;
10963
10964 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10965 bnxt_fw_reset_close(bp);
10966 wait_dsecs = fw_health->master_func_wait_dsecs;
10967 if (fw_health->master) {
10968 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10969 wait_dsecs = 0;
10970 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10971 } else {
10972 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10973 wait_dsecs = fw_health->normal_func_wait_dsecs;
10974 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10975 }
10976
10977 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10978 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10979 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10980 }
10981
10982 void bnxt_fw_exception(struct bnxt *bp)
10983 {
10984 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10985 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10986 bnxt_rtnl_lock_sp(bp);
10987 bnxt_force_fw_reset(bp);
10988 bnxt_rtnl_unlock_sp(bp);
10989 }
10990
10991 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10992 * < 0 on error.
10993 */
10994 static int bnxt_get_registered_vfs(struct bnxt *bp)
10995 {
10996 #ifdef CONFIG_BNXT_SRIOV
10997 int rc;
10998
10999 if (!BNXT_PF(bp))
11000 return 0;
11001
11002 rc = bnxt_hwrm_func_qcfg(bp);
11003 if (rc) {
11004 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11005 return rc;
11006 }
11007 if (bp->pf.registered_vfs)
11008 return bp->pf.registered_vfs;
11009 if (bp->sriov_cfg)
11010 return 1;
11011 #endif
11012 return 0;
11013 }
11014
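/* Initiate a coordinated firmware reset.  If VFs are still registered, poll
 * for them to unload first (BNXT_FW_RESET_STATE_POLL_VF).
 */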
11015 void bnxt_fw_reset(struct bnxt *bp)
11016 {
11017 bnxt_rtnl_lock_sp(bp);
11018 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11019 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11020 int n = 0, tmo;
11021
11022 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11023 if (bp->pf.active_vfs &&
11024 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11025 n = bnxt_get_registered_vfs(bp);
11026 if (n < 0) {
11027 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11028 n);
11029 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11030 dev_close(bp->dev);
11031 goto fw_reset_exit;
11032 } else if (n > 0) {
11033 u16 vf_tmo_dsecs = n * 10;
11034
11035 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11036 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11037 bp->fw_reset_state =
11038 BNXT_FW_RESET_STATE_POLL_VF;
11039 bnxt_queue_fw_reset_work(bp, HZ / 10);
11040 goto fw_reset_exit;
11041 }
11042 bnxt_fw_reset_close(bp);
11043 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11044 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11045 tmo = HZ / 10;
11046 } else {
11047 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11048 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11049 }
11050 bnxt_queue_fw_reset_work(bp, tmo);
11051 }
11052 fw_reset_exit:
11053 bnxt_rtnl_unlock_sp(bp);
11054 }
11055
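/* On P5 chips, look for completion rings that have pending work but whose
 * consumer index has not advanced, which indicates a missed NQ interrupt.
 */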
11056 static void bnxt_chk_missed_irq(struct bnxt *bp)
11057 {
11058 int i;
11059
11060 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11061 return;
11062
11063 for (i = 0; i < bp->cp_nr_rings; i++) {
11064 struct bnxt_napi *bnapi = bp->bnapi[i];
11065 struct bnxt_cp_ring_info *cpr;
11066 u32 fw_ring_id;
11067 int j;
11068
11069 if (!bnapi)
11070 continue;
11071
11072 cpr = &bnapi->cp_ring;
11073 for (j = 0; j < 2; j++) {
11074 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11075 u32 val[2];
11076
11077 if (!cpr2 || cpr2->has_more_work ||
11078 !bnxt_has_work(bp, cpr2))
11079 continue;
11080
11081 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11082 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11083 continue;
11084 }
11085 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11086 bnxt_dbg_hwrm_ring_info_get(bp,
11087 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11088 fw_ring_id, &val[0], &val[1]);
11089 cpr->sw_stats.cmn.missed_irqs++;
11090 }
11091 }
11092 }
11093
11094 static void bnxt_cfg_ntp_filters(struct bnxt *);
11095
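/* Derive the requested link speed, duplex and flow control settings from
 * the current firmware PHY configuration.
 */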
11096 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11097 {
11098 struct bnxt_link_info *link_info = &bp->link_info;
11099
11100 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11101 link_info->autoneg = BNXT_AUTONEG_SPEED;
11102 if (bp->hwrm_spec_code >= 0x10201) {
11103 if (link_info->auto_pause_setting &
11104 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11105 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11106 } else {
11107 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11108 }
11109 link_info->advertising = link_info->auto_link_speeds;
11110 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11111 } else {
11112 link_info->req_link_speed = link_info->force_link_speed;
11113 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11114 if (link_info->force_pam4_link_speed) {
11115 link_info->req_link_speed =
11116 link_info->force_pam4_link_speed;
11117 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11118 }
11119 link_info->req_duplex = link_info->duplex_setting;
11120 }
11121 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11122 link_info->req_flow_ctrl =
11123 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11124 else
11125 link_info->req_flow_ctrl = link_info->force_pause_setting;
11126 }
11127
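/* Slow path work handler: process the deferred events queued in bp->sp_event. */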
11128 static void bnxt_sp_task(struct work_struct *work)
11129 {
11130 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11131
11132 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11133 smp_mb__after_atomic();
11134 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11135 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11136 return;
11137 }
11138
11139 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11140 bnxt_cfg_rx_mode(bp);
11141
11142 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11143 bnxt_cfg_ntp_filters(bp);
11144 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11145 bnxt_hwrm_exec_fwd_req(bp);
11146 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11147 netdev_info(bp->dev, "Receive PF driver unload event!\n");
11148 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11149 bnxt_hwrm_port_qstats(bp, 0);
11150 bnxt_hwrm_port_qstats_ext(bp, 0);
11151 bnxt_accumulate_all_stats(bp);
11152 }
11153
11154 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11155 int rc;
11156
11157 mutex_lock(&bp->link_lock);
11158 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11159 &bp->sp_event))
11160 bnxt_hwrm_phy_qcaps(bp);
11161
11162 rc = bnxt_update_link(bp, true);
11163 if (rc)
11164 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11165 rc);
11166
11167 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11168 &bp->sp_event))
11169 bnxt_init_ethtool_link_settings(bp);
11170 mutex_unlock(&bp->link_lock);
11171 }
11172 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11173 int rc;
11174
11175 mutex_lock(&bp->link_lock);
11176 rc = bnxt_update_phy_setting(bp);
11177 mutex_unlock(&bp->link_lock);
11178 if (rc) {
11179 netdev_warn(bp->dev, "update phy settings retry failed\n");
11180 } else {
11181 bp->link_info.phy_retry = false;
11182 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11183 }
11184 }
11185 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11186 mutex_lock(&bp->link_lock);
11187 bnxt_get_port_module_status(bp);
11188 mutex_unlock(&bp->link_lock);
11189 }
11190
11191 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11192 bnxt_tc_flow_stats_work(bp);
11193
11194 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11195 bnxt_chk_missed_irq(bp);
11196
11197 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11198 * must be the last functions to be called before exiting.
11199 */
11200 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11201 bnxt_reset(bp, false);
11202
11203 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11204 bnxt_reset(bp, true);
11205
11206 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11207 bnxt_rx_ring_reset(bp);
11208
11209 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11210 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11211
11212 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11213 if (!is_bnxt_fw_ok(bp))
11214 bnxt_devlink_health_report(bp,
11215 BNXT_FW_EXCEPTION_SP_EVENT);
11216 }
11217
11218 smp_mb__before_atomic();
11219 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11220 }
11221
11222 /* Under rtnl_lock */
11223 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11224 int tx_xdp)
11225 {
11226 int max_rx, max_tx, tx_sets = 1;
11227 int tx_rings_needed, stats;
11228 int rx_rings = rx;
11229 int cp, vnics, rc;
11230
11231 if (tcs)
11232 tx_sets = tcs;
11233
11234 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11235 if (rc)
11236 return rc;
11237
11238 if (max_rx < rx)
11239 return -ENOMEM;
11240
11241 tx_rings_needed = tx * tx_sets + tx_xdp;
11242 if (max_tx < tx_rings_needed)
11243 return -ENOMEM;
11244
11245 vnics = 1;
11246 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11247 vnics += rx_rings;
11248
11249 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11250 rx_rings <<= 1;
11251 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11252 stats = cp;
11253 if (BNXT_NEW_RM(bp)) {
11254 cp += bnxt_get_ulp_msix_num(bp);
11255 stats += bnxt_get_ulp_stat_ctxs(bp);
11256 }
11257 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11258 stats, vnics);
11259 }
11260
11261 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11262 {
11263 if (bp->bar2) {
11264 pci_iounmap(pdev, bp->bar2);
11265 bp->bar2 = NULL;
11266 }
11267
11268 if (bp->bar1) {
11269 pci_iounmap(pdev, bp->bar1);
11270 bp->bar1 = NULL;
11271 }
11272
11273 if (bp->bar0) {
11274 pci_iounmap(pdev, bp->bar0);
11275 bp->bar0 = NULL;
11276 }
11277 }
11278
11279 static void bnxt_cleanup_pci(struct bnxt *bp)
11280 {
11281 bnxt_unmap_bars(bp, bp->pdev);
11282 pci_release_regions(bp->pdev);
11283 if (pci_is_enabled(bp->pdev))
11284 pci_disable_device(bp->pdev);
11285 }
11286
11287 static void bnxt_init_dflt_coal(struct bnxt *bp)
11288 {
11289 struct bnxt_coal *coal;
11290
11291 /* Tick values in microseconds.
11292 * 1 coal_buf x bufs_per_record = 1 completion record.
11293 */
11294 coal = &bp->rx_coal;
11295 coal->coal_ticks = 10;
11296 coal->coal_bufs = 30;
11297 coal->coal_ticks_irq = 1;
11298 coal->coal_bufs_irq = 2;
11299 coal->idle_thresh = 50;
11300 coal->bufs_per_record = 2;
11301 coal->budget = 64; /* NAPI budget */
11302
11303 coal = &bp->tx_coal;
11304 coal->coal_ticks = 28;
11305 coal->coal_bufs = 30;
11306 coal->coal_ticks_irq = 2;
11307 coal->coal_bufs_irq = 2;
11308 coal->bufs_per_record = 1;
11309
11310 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11311 }
11312
11313 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11314 {
11315 #ifdef CONFIG_TEE_BNXT_FW
11316 int rc = tee_bnxt_fw_load();
11317
11318 if (rc)
11319 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11320
11321 return rc;
11322 #else
11323 netdev_err(bp->dev, "OP-TEE not supported\n");
11324 return -ENODEV;
11325 #endif
11326 }
11327
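/* Firmware init phase 1: establish HWRM communication with the firmware
 * (recovering via OP-TEE if it has crashed), allocate the command channel
 * resources and reset the function.
 */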
11328 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11329 {
11330 int rc;
11331
11332 bp->fw_cap = 0;
11333 rc = bnxt_hwrm_ver_get(bp);
11334 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
11335 * so wait before continuing with recovery.
11336 */
11337 if (rc)
11338 msleep(100);
11339 bnxt_try_map_fw_health_reg(bp);
11340 if (rc) {
11341 if (bp->fw_health && bp->fw_health->status_reliable) {
11342 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11343
11344 netdev_err(bp->dev,
11345 "Firmware not responding, status: 0x%x\n",
11346 sts);
11347 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11348 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11349 rc = bnxt_fw_reset_via_optee(bp);
11350 if (!rc)
11351 rc = bnxt_hwrm_ver_get(bp);
11352 }
11353 }
11354 if (rc)
11355 return rc;
11356 }
11357
11358 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11359 rc = bnxt_alloc_kong_hwrm_resources(bp);
11360 if (rc)
11361 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11362 }
11363
11364 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11365 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11366 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11367 if (rc)
11368 return rc;
11369 }
11370 bnxt_nvm_cfg_ver_get(bp);
11371
11372 rc = bnxt_hwrm_func_reset(bp);
11373 if (rc)
11374 return -ENODEV;
11375
11376 bnxt_hwrm_fw_set_time(bp);
11377 return 0;
11378 }
11379
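/* Firmware init phase 2: query device capabilities, set up error recovery
 * and register the driver with the firmware.
 */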
11380 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11381 {
11382 int rc;
11383
11384 /* Get the MAX capabilities for this function */
11385 rc = bnxt_hwrm_func_qcaps(bp);
11386 if (rc) {
11387 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11388 rc);
11389 return -ENODEV;
11390 }
11391
11392 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11393 if (rc)
11394 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11395 rc);
11396
11397 if (bnxt_alloc_fw_health(bp)) {
11398 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11399 } else {
11400 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11401 if (rc)
11402 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11403 rc);
11404 }
11405
11406 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11407 if (rc)
11408 return -ENODEV;
11409
11410 bnxt_hwrm_func_qcfg(bp);
11411 bnxt_hwrm_vnic_qcaps(bp);
11412 bnxt_hwrm_port_led_qcaps(bp);
11413 bnxt_ethtool_init(bp);
11414 bnxt_dcb_init(bp);
11415 return 0;
11416 }
11417
11418 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11419 {
11420 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11421 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11422 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11423 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11424 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11425 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11426 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11427 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11428 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11429 }
11430 }
11431
11432 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11433 {
11434 struct net_device *dev = bp->dev;
11435
11436 dev->hw_features &= ~NETIF_F_NTUPLE;
11437 dev->features &= ~NETIF_F_NTUPLE;
11438 bp->flags &= ~BNXT_FLAG_RFS;
11439 if (bnxt_rfs_supported(bp)) {
11440 dev->hw_features |= NETIF_F_NTUPLE;
11441 if (bnxt_rfs_capable(bp)) {
11442 bp->flags |= BNXT_FLAG_RFS;
11443 dev->features |= NETIF_F_NTUPLE;
11444 }
11445 }
11446 }
11447
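/* Firmware init phase 3: apply default RSS, RFS, WoL and coalescing
 * settings.
 */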
11448 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11449 {
11450 struct pci_dev *pdev = bp->pdev;
11451
11452 bnxt_set_dflt_rss_hash_type(bp);
11453 bnxt_set_dflt_rfs(bp);
11454
11455 bnxt_get_wol_settings(bp);
11456 if (bp->flags & BNXT_FLAG_WOL_CAP)
11457 device_set_wakeup_enable(&pdev->dev, bp->wol);
11458 else
11459 device_set_wakeup_capable(&pdev->dev, false);
11460
11461 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11462 bnxt_hwrm_coal_params_qcaps(bp);
11463 }
11464
11465 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11466
11467 static int bnxt_fw_init_one(struct bnxt *bp)
11468 {
11469 int rc;
11470
11471 rc = bnxt_fw_init_one_p1(bp);
11472 if (rc) {
11473 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11474 return rc;
11475 }
11476 rc = bnxt_fw_init_one_p2(bp);
11477 if (rc) {
11478 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11479 return rc;
11480 }
11481 rc = bnxt_probe_phy(bp, false);
11482 if (rc)
11483 return rc;
11484 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11485 if (rc)
11486 return rc;
11487
11488 /* In case fw capabilities have changed, destroy the unneeded
11489 * reporters and create newly capable ones.
11490 */
11491 bnxt_dl_fw_reporters_destroy(bp, false);
11492 bnxt_dl_fw_reporters_create(bp);
11493 bnxt_fw_init_one_p3(bp);
11494 return 0;
11495 }
11496
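/* Execute one step of the firmware-provided host reset sequence: write the
 * value to the target config space, GRC or BAR register, then optionally
 * delay.
 */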
11497 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11498 {
11499 struct bnxt_fw_health *fw_health = bp->fw_health;
11500 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11501 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11502 u32 reg_type, reg_off, delay_msecs;
11503
11504 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11505 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11506 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11507 switch (reg_type) {
11508 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11509 pci_write_config_dword(bp->pdev, reg_off, val);
11510 break;
11511 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11512 writel(reg_off & BNXT_GRC_BASE_MASK,
11513 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11514 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11515 fallthrough;
11516 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11517 writel(val, bp->bar0 + reg_off);
11518 break;
11519 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11520 writel(val, bp->bar1 + reg_off);
11521 break;
11522 }
11523 if (delay_msecs) {
11524 pci_read_config_dword(bp->pdev, 0, &val);
11525 msleep(delay_msecs);
11526 }
11527 }
11528
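/* Reset the firmware using whichever method it advertised: OP-TEE reload,
 * the host register write sequence, or an HWRM_FW_RESET to the co-processor.
 */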
11529 static void bnxt_reset_all(struct bnxt *bp)
11530 {
11531 struct bnxt_fw_health *fw_health = bp->fw_health;
11532 int i, rc;
11533
11534 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11535 bnxt_fw_reset_via_optee(bp);
11536 bp->fw_reset_timestamp = jiffies;
11537 return;
11538 }
11539
11540 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11541 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11542 bnxt_fw_reset_writel(bp, i);
11543 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11544 struct hwrm_fw_reset_input req = {0};
11545
11546 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11547 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11548 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11549 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11550 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11551 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11552 if (rc)
11553 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11554 }
11555 bp->fw_reset_timestamp = jiffies;
11556 }
11557
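/* Delayed work that drives the firmware reset state machine through the
 * BNXT_FW_RESET_STATE_* states until the device is reopened or aborted.
 */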
11558 static void bnxt_fw_reset_task(struct work_struct *work)
11559 {
11560 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11561 int rc;
11562
11563 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11564 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11565 return;
11566 }
11567
11568 switch (bp->fw_reset_state) {
11569 case BNXT_FW_RESET_STATE_POLL_VF: {
11570 int n = bnxt_get_registered_vfs(bp);
11571 int tmo;
11572
11573 if (n < 0) {
11574 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11575 n, jiffies_to_msecs(jiffies -
11576 bp->fw_reset_timestamp));
11577 goto fw_reset_abort;
11578 } else if (n > 0) {
11579 if (time_after(jiffies, bp->fw_reset_timestamp +
11580 (bp->fw_reset_max_dsecs * HZ / 10))) {
11581 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11582 bp->fw_reset_state = 0;
11583 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11584 n);
11585 return;
11586 }
11587 bnxt_queue_fw_reset_work(bp, HZ / 10);
11588 return;
11589 }
11590 bp->fw_reset_timestamp = jiffies;
11591 rtnl_lock();
11592 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11593 rtnl_unlock();
11594 goto fw_reset_abort;
11595 }
11596 bnxt_fw_reset_close(bp);
11597 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11598 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11599 tmo = HZ / 10;
11600 } else {
11601 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11602 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11603 }
11604 rtnl_unlock();
11605 bnxt_queue_fw_reset_work(bp, tmo);
11606 return;
11607 }
11608 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11609 u32 val;
11610
11611 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11612 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11613 !time_after(jiffies, bp->fw_reset_timestamp +
11614 (bp->fw_reset_max_dsecs * HZ / 10))) {
11615 bnxt_queue_fw_reset_work(bp, HZ / 5);
11616 return;
11617 }
11618
11619 if (!bp->fw_health->master) {
11620 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11621
11622 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11623 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11624 return;
11625 }
11626 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11627 }
11628 fallthrough;
11629 case BNXT_FW_RESET_STATE_RESET_FW:
11630 bnxt_reset_all(bp);
11631 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11632 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11633 return;
11634 case BNXT_FW_RESET_STATE_ENABLE_DEV:
11635 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11636 u32 val;
11637
11638 val = bnxt_fw_health_readl(bp,
11639 BNXT_FW_RESET_INPROG_REG);
11640 if (val)
11641 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
11642 val);
11643 }
11644 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11645 if (pci_enable_device(bp->pdev)) {
11646 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11647 goto fw_reset_abort;
11648 }
11649 pci_set_master(bp->pdev);
11650 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11651 fallthrough;
11652 case BNXT_FW_RESET_STATE_POLL_FW:
11653 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11654 rc = __bnxt_hwrm_ver_get(bp, true);
11655 if (rc) {
11656 if (time_after(jiffies, bp->fw_reset_timestamp +
11657 (bp->fw_reset_max_dsecs * HZ / 10))) {
11658 netdev_err(bp->dev, "Firmware reset aborted\n");
11659 goto fw_reset_abort_status;
11660 }
11661 bnxt_queue_fw_reset_work(bp, HZ / 5);
11662 return;
11663 }
11664 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11665 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11666 fallthrough;
11667 case BNXT_FW_RESET_STATE_OPENING:
11668 while (!rtnl_trylock()) {
11669 bnxt_queue_fw_reset_work(bp, HZ / 10);
11670 return;
11671 }
11672 rc = bnxt_open(bp->dev);
11673 if (rc) {
11674 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
11675 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11676 dev_close(bp->dev);
11677 }
11678
11679 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
11680 bp->fw_health->enabled) {
11681 bp->fw_health->last_fw_reset_cnt =
11682 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11683 }
11684 bp->fw_reset_state = 0;
11685 /* Make sure fw_reset_state is 0 before clearing the flag */
11686 smp_mb__before_atomic();
11687 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11688 bnxt_ulp_start(bp, rc);
11689 if (!rc)
11690 bnxt_reenable_sriov(bp);
11691 bnxt_dl_health_recovery_done(bp);
11692 bnxt_dl_health_status_update(bp, true);
11693 rtnl_unlock();
11694 break;
11695 }
11696 return;
11697
11698 fw_reset_abort_status:
11699 if (bp->fw_health->status_reliable ||
11700 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
11701 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11702
11703 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
11704 }
11705 fw_reset_abort:
11706 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11707 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11708 bnxt_dl_health_status_update(bp, false);
11709 bp->fw_reset_state = 0;
11710 rtnl_lock();
11711 dev_close(bp->dev);
11712 rtnl_unlock();
11713 }
11714
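/* One-time PCI setup: enable the device, map BARs 0 and 4, set the DMA mask
 * and initialize locks, work items and the periodic timer.
 */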
11715 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
11716 {
11717 int rc;
11718 struct bnxt *bp = netdev_priv(dev);
11719
11720 SET_NETDEV_DEV(dev, &pdev->dev);
11721
11722 /* enable device (incl. PCI PM wakeup), and bus-mastering */
11723 rc = pci_enable_device(pdev);
11724 if (rc) {
11725 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
11726 goto init_err;
11727 }
11728
11729 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11730 dev_err(&pdev->dev,
11731 "Cannot find PCI device base address, aborting\n");
11732 rc = -ENODEV;
11733 goto init_err_disable;
11734 }
11735
11736 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11737 if (rc) {
11738 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
11739 goto init_err_disable;
11740 }
11741
11742 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
11743 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11744 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
11745 rc = -EIO;
11746 goto init_err_release;
11747 }
11748
11749 pci_set_master(pdev);
11750
11751 bp->dev = dev;
11752 bp->pdev = pdev;
11753
11754 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11755 * determines the BAR size.
11756 */
11757 bp->bar0 = pci_ioremap_bar(pdev, 0);
11758 if (!bp->bar0) {
11759 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
11760 rc = -ENOMEM;
11761 goto init_err_release;
11762 }
11763
11764 bp->bar2 = pci_ioremap_bar(pdev, 4);
11765 if (!bp->bar2) {
11766 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
11767 rc = -ENOMEM;
11768 goto init_err_release;
11769 }
11770
11771 pci_enable_pcie_error_reporting(pdev);
11772
11773 INIT_WORK(&bp->sp_task, bnxt_sp_task);
11774 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11775
11776 spin_lock_init(&bp->ntp_fltr_lock);
11777 #if BITS_PER_LONG == 32
11778 spin_lock_init(&bp->db_lock);
11779 #endif
11780
11781 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11782 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11783
11784 bnxt_init_dflt_coal(bp);
11785
11786 timer_setup(&bp->timer, bnxt_timer, 0);
11787 bp->current_interval = BNXT_TIMER_INTERVAL;
11788
11789 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11790 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11791
11792 clear_bit(BNXT_STATE_OPEN, &bp->state);
11793 return 0;
11794
11795 init_err_release:
11796 bnxt_unmap_bars(bp, pdev);
11797 pci_release_regions(pdev);
11798
11799 init_err_disable:
11800 pci_disable_device(pdev);
11801
11802 init_err:
11803 return rc;
11804 }
11805
11806 /* rtnl_lock held */
11807 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
11808 {
11809 struct sockaddr *addr = p;
11810 struct bnxt *bp = netdev_priv(dev);
11811 int rc = 0;
11812
11813 if (!is_valid_ether_addr(addr->sa_data))
11814 return -EADDRNOTAVAIL;
11815
11816 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
11817 return 0;
11818
11819 rc = bnxt_approve_mac(bp, addr->sa_data, true);
11820 if (rc)
11821 return rc;
11822
11823 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11824 if (netif_running(dev)) {
11825 bnxt_close_nic(bp, false, false);
11826 rc = bnxt_open_nic(bp, false, false);
11827 }
11828
11829 return rc;
11830 }
11831
11832 /* rtnl_lock held */
11833 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
11834 {
11835 struct bnxt *bp = netdev_priv(dev);
11836
11837 if (netif_running(dev))
11838 bnxt_close_nic(bp, true, false);
11839
11840 dev->mtu = new_mtu;
11841 bnxt_set_ring_params(bp);
11842
11843 if (netif_running(dev))
11844 return bnxt_open_nic(bp, true, false);
11845
11846 return 0;
11847 }
11848
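/* Configure the number of MQPRIO traffic classes, resizing the TX and
 * completion rings accordingly (may close and reopen the device).
 */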
11849 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
11850 {
11851 struct bnxt *bp = netdev_priv(dev);
11852 bool sh = false;
11853 int rc;
11854
11855 if (tc > bp->max_tc) {
11856 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
11857 tc, bp->max_tc);
11858 return -EINVAL;
11859 }
11860
11861 if (netdev_get_num_tc(dev) == tc)
11862 return 0;
11863
11864 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11865 sh = true;
11866
11867 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11868 sh, tc, bp->tx_nr_rings_xdp);
11869 if (rc)
11870 return rc;
11871
11872 /* Needs to close the device and do hw resource re-allocations */
11873 if (netif_running(bp->dev))
11874 bnxt_close_nic(bp, true, false);
11875
11876 if (tc) {
11877 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11878 netdev_set_num_tc(dev, tc);
11879 } else {
11880 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11881 netdev_reset_tc(dev);
11882 }
11883 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11884 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11885 bp->tx_nr_rings + bp->rx_nr_rings;
11886
11887 if (netif_running(bp->dev))
11888 return bnxt_open_nic(bp, true, false);
11889
11890 return 0;
11891 }
11892
11893 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11894 void *cb_priv)
11895 {
11896 struct bnxt *bp = cb_priv;
11897
11898 if (!bnxt_tc_flower_enabled(bp) ||
11899 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11900 return -EOPNOTSUPP;
11901
11902 switch (type) {
11903 case TC_SETUP_CLSFLOWER:
11904 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11905 default:
11906 return -EOPNOTSUPP;
11907 }
11908 }
11909
11910 LIST_HEAD(bnxt_block_cb_list);
11911
11912 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11913 void *type_data)
11914 {
11915 struct bnxt *bp = netdev_priv(dev);
11916
11917 switch (type) {
11918 case TC_SETUP_BLOCK:
11919 return flow_block_cb_setup_simple(type_data,
11920 &bnxt_block_cb_list,
11921 bnxt_setup_tc_block_cb,
11922 bp, bp, true);
11923 case TC_SETUP_QDISC_MQPRIO: {
11924 struct tc_mqprio_qopt *mqprio = type_data;
11925
11926 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11927
11928 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11929 }
11930 default:
11931 return -EOPNOTSUPP;
11932 }
11933 }
11934
11935 #ifdef CONFIG_RFS_ACCEL
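/* Return true if two ntuple filters describe the same flow. */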
11936 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11937 struct bnxt_ntuple_filter *f2)
11938 {
11939 struct flow_keys *keys1 = &f1->fkeys;
11940 struct flow_keys *keys2 = &f2->fkeys;
11941
11942 if (keys1->basic.n_proto != keys2->basic.n_proto ||
11943 keys1->basic.ip_proto != keys2->basic.ip_proto)
11944 return false;
11945
11946 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11947 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11948 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11949 return false;
11950 } else {
11951 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11952 sizeof(keys1->addrs.v6addrs.src)) ||
11953 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11954 sizeof(keys1->addrs.v6addrs.dst)))
11955 return false;
11956 }
11957
11958 if (keys1->ports.ports == keys2->ports.ports &&
11959 keys1->control.flags == keys2->control.flags &&
11960 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11961 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11962 return true;
11963
11964 return false;
11965 }
11966
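/* ndo_rx_flow_steer handler: build an ntuple filter for the flow (reusing an
 * existing one if it matches) and defer programming it into the firmware to
 * bnxt_cfg_ntp_filters().
 */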
11967 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11968 u16 rxq_index, u32 flow_id)
11969 {
11970 struct bnxt *bp = netdev_priv(dev);
11971 struct bnxt_ntuple_filter *fltr, *new_fltr;
11972 struct flow_keys *fkeys;
11973 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11974 int rc = 0, idx, bit_id, l2_idx = 0;
11975 struct hlist_head *head;
11976 u32 flags;
11977
11978 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11979 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11980 int off = 0, j;
11981
11982 netif_addr_lock_bh(dev);
11983 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11984 if (ether_addr_equal(eth->h_dest,
11985 vnic->uc_list + off)) {
11986 l2_idx = j + 1;
11987 break;
11988 }
11989 }
11990 netif_addr_unlock_bh(dev);
11991 if (!l2_idx)
11992 return -EINVAL;
11993 }
11994 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11995 if (!new_fltr)
11996 return -ENOMEM;
11997
11998 fkeys = &new_fltr->fkeys;
11999 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12000 rc = -EPROTONOSUPPORT;
12001 goto err_free;
12002 }
12003
12004 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12005 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12006 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12007 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12008 rc = -EPROTONOSUPPORT;
12009 goto err_free;
12010 }
12011 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12012 bp->hwrm_spec_code < 0x10601) {
12013 rc = -EPROTONOSUPPORT;
12014 goto err_free;
12015 }
12016 flags = fkeys->control.flags;
12017 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12018 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12019 rc = -EPROTONOSUPPORT;
12020 goto err_free;
12021 }
12022
12023 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12024 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12025
12026 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12027 head = &bp->ntp_fltr_hash_tbl[idx];
12028 rcu_read_lock();
12029 hlist_for_each_entry_rcu(fltr, head, hash) {
12030 if (bnxt_fltr_match(fltr, new_fltr)) {
12031 rc = fltr->sw_id;
12032 rcu_read_unlock();
12033 goto err_free;
12034 }
12035 }
12036 rcu_read_unlock();
12037
12038 spin_lock_bh(&bp->ntp_fltr_lock);
12039 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12040 BNXT_NTP_FLTR_MAX_FLTR, 0);
12041 if (bit_id < 0) {
12042 spin_unlock_bh(&bp->ntp_fltr_lock);
12043 rc = -ENOMEM;
12044 goto err_free;
12045 }
12046
12047 new_fltr->sw_id = (u16)bit_id;
12048 new_fltr->flow_id = flow_id;
12049 new_fltr->l2_fltr_idx = l2_idx;
12050 new_fltr->rxq = rxq_index;
12051 hlist_add_head_rcu(&new_fltr->hash, head);
12052 bp->ntp_fltr_count++;
12053 spin_unlock_bh(&bp->ntp_fltr_lock);
12054
12055 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12056 bnxt_queue_sp_work(bp);
12057
12058 return new_fltr->sw_id;
12059
12060 err_free:
12061 kfree(new_fltr);
12062 return rc;
12063 }
12064
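/* Deferred from bnxt_sp_task(): program newly added ntuple filters into the
 * firmware and free filters whose flows have expired from RPS.
 */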
12065 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12066 {
12067 int i;
12068
12069 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12070 struct hlist_head *head;
12071 struct hlist_node *tmp;
12072 struct bnxt_ntuple_filter *fltr;
12073 int rc;
12074
12075 head = &bp->ntp_fltr_hash_tbl[i];
12076 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12077 bool del = false;
12078
12079 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12080 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12081 fltr->flow_id,
12082 fltr->sw_id)) {
12083 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12084 fltr);
12085 del = true;
12086 }
12087 } else {
12088 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12089 fltr);
12090 if (rc)
12091 del = true;
12092 else
12093 set_bit(BNXT_FLTR_VALID, &fltr->state);
12094 }
12095
12096 if (del) {
12097 spin_lock_bh(&bp->ntp_fltr_lock);
12098 hlist_del_rcu(&fltr->hash);
12099 bp->ntp_fltr_count--;
12100 spin_unlock_bh(&bp->ntp_fltr_lock);
12101 synchronize_rcu();
12102 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12103 kfree(fltr);
12104 }
12105 }
12106 }
12107 }
12108
12109 #else
12110
12111 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12112 {
12113 }
12114
12115 #endif /* CONFIG_RFS_ACCEL */
12116
12117 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
12118 unsigned int entry, struct udp_tunnel_info *ti)
12119 {
12120 struct bnxt *bp = netdev_priv(netdev);
12121 unsigned int cmd;
12122
12123 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
12124 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12125 else
12126 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12127
12128 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
12129 }
12130
12131 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
12132 unsigned int entry, struct udp_tunnel_info *ti)
12133 {
12134 struct bnxt *bp = netdev_priv(netdev);
12135 unsigned int cmd;
12136
12137 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
12138 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12139 else
12140 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12141
12142 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12143 }
12144
12145 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12146 .set_port = bnxt_udp_tunnel_set_port,
12147 .unset_port = bnxt_udp_tunnel_unset_port,
12148 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12149 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12150 .tables = {
12151 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12152 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12153 },
12154 };
12155
12156 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12157 struct net_device *dev, u32 filter_mask,
12158 int nlflags)
12159 {
12160 struct bnxt *bp = netdev_priv(dev);
12161
12162 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12163 nlflags, filter_mask, NULL);
12164 }
12165
12166 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12167 u16 flags, struct netlink_ext_ack *extack)
12168 {
12169 struct bnxt *bp = netdev_priv(dev);
12170 struct nlattr *attr, *br_spec;
12171 int rem, rc = 0;
12172
12173 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12174 return -EOPNOTSUPP;
12175
12176 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12177 if (!br_spec)
12178 return -EINVAL;
12179
12180 nla_for_each_nested(attr, br_spec, rem) {
12181 u16 mode;
12182
12183 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12184 continue;
12185
12186 if (nla_len(attr) < sizeof(mode))
12187 return -EINVAL;
12188
12189 mode = nla_get_u16(attr);
12190 if (mode == bp->br_mode)
12191 break;
12192
12193 rc = bnxt_hwrm_set_br_mode(bp, mode);
12194 if (!rc)
12195 bp->br_mode = mode;
12196 break;
12197 }
12198 return rc;
12199 }
12200
12201 int bnxt_get_port_parent_id(struct net_device *dev,
12202 struct netdev_phys_item_id *ppid)
12203 {
12204 struct bnxt *bp = netdev_priv(dev);
12205
12206 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12207 return -EOPNOTSUPP;
12208
12209 /* The PF and its VF-reps only support the switchdev framework */
12210 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12211 return -EOPNOTSUPP;
12212
12213 ppid->id_len = sizeof(bp->dsn);
12214 memcpy(ppid->id, bp->dsn, ppid->id_len);
12215
12216 return 0;
12217 }
12218
12219 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12220 {
12221 struct bnxt *bp = netdev_priv(dev);
12222
12223 return &bp->dl_port;
12224 }
12225
12226 static const struct net_device_ops bnxt_netdev_ops = {
12227 .ndo_open = bnxt_open,
12228 .ndo_start_xmit = bnxt_start_xmit,
12229 .ndo_stop = bnxt_close,
12230 .ndo_get_stats64 = bnxt_get_stats64,
12231 .ndo_set_rx_mode = bnxt_set_rx_mode,
12232 .ndo_do_ioctl = bnxt_ioctl,
12233 .ndo_validate_addr = eth_validate_addr,
12234 .ndo_set_mac_address = bnxt_change_mac_addr,
12235 .ndo_change_mtu = bnxt_change_mtu,
12236 .ndo_fix_features = bnxt_fix_features,
12237 .ndo_set_features = bnxt_set_features,
12238 .ndo_tx_timeout = bnxt_tx_timeout,
12239 #ifdef CONFIG_BNXT_SRIOV
12240 .ndo_get_vf_config = bnxt_get_vf_config,
12241 .ndo_set_vf_mac = bnxt_set_vf_mac,
12242 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12243 .ndo_set_vf_rate = bnxt_set_vf_bw,
12244 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12245 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12246 .ndo_set_vf_trust = bnxt_set_vf_trust,
12247 #endif
12248 .ndo_setup_tc = bnxt_setup_tc,
12249 #ifdef CONFIG_RFS_ACCEL
12250 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12251 #endif
12252 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
12253 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
12254 .ndo_bpf = bnxt_xdp,
12255 .ndo_xdp_xmit = bnxt_xdp_xmit,
12256 .ndo_bridge_getlink = bnxt_bridge_getlink,
12257 .ndo_bridge_setlink = bnxt_bridge_setlink,
12258 .ndo_get_devlink_port = bnxt_get_devlink_port,
12259 };
12260
12261 static void bnxt_remove_one(struct pci_dev *pdev)
12262 {
12263 struct net_device *dev = pci_get_drvdata(pdev);
12264 struct bnxt *bp = netdev_priv(dev);
12265
12266 if (BNXT_PF(bp))
12267 bnxt_sriov_disable(bp);
12268
12269 if (BNXT_PF(bp))
12270 devlink_port_type_clear(&bp->dl_port);
12271 pci_disable_pcie_error_reporting(pdev);
12272 unregister_netdev(dev);
12273 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12274 /* Flush any pending tasks */
12275 cancel_work_sync(&bp->sp_task);
12276 cancel_delayed_work_sync(&bp->fw_reset_task);
12277 bp->sp_event = 0;
12278
12279 bnxt_dl_fw_reporters_destroy(bp, true);
12280 bnxt_dl_unregister(bp);
12281 bnxt_shutdown_tc(bp);
12282
12283 bnxt_clear_int_mode(bp);
12284 bnxt_hwrm_func_drv_unrgtr(bp);
12285 bnxt_free_hwrm_resources(bp);
12286 bnxt_free_hwrm_short_cmd_req(bp);
12287 bnxt_ethtool_free(bp);
12288 bnxt_dcb_free(bp);
12289 kfree(bp->edev);
12290 bp->edev = NULL;
12291 kfree(bp->fw_health);
12292 bp->fw_health = NULL;
12293 bnxt_cleanup_pci(bp);
12294 bnxt_free_ctx_mem(bp);
12295 kfree(bp->ctx);
12296 bp->ctx = NULL;
12297 kfree(bp->rss_indir_tbl);
12298 bp->rss_indir_tbl = NULL;
12299 bnxt_free_port_stats(bp);
12300 free_netdev(dev);
12301 }
12302
12303 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12304 {
12305 int rc = 0;
12306 struct bnxt_link_info *link_info = &bp->link_info;
12307
12308 rc = bnxt_hwrm_phy_qcaps(bp);
12309 if (rc) {
12310 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12311 rc);
12312 return rc;
12313 }
12314 if (!fw_dflt)
12315 return 0;
12316
12317 rc = bnxt_update_link(bp, false);
12318 if (rc) {
12319 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12320 rc);
12321 return rc;
12322 }
12323
12324 /* Older firmware does not have supported_auto_speeds, so assume
12325 * that all supported speeds can be autonegotiated.
12326 */
12327 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12328 link_info->support_auto_speeds = link_info->support_speeds;
12329
12330 bnxt_init_ethtool_link_settings(bp);
12331 return 0;
12332 }
12333
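/* Return the number of MSI-X vectors supported by the device, or 1 if the
 * device has no MSI-X capability (the PCI_MSIX_FLAGS_QSIZE field is
 * 0-based, hence the +1).
 */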
12334 static int bnxt_get_max_irq(struct pci_dev *pdev)
12335 {
12336 u16 ctrl;
12337
12338 if (!pdev->msix_cap)
12339 return 1;
12340
12341 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12342 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12343 }
12344
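/* Compute the raw upper bounds on RX, TX and completion rings from the
 * resources reported by firmware, accounting for ULP (RoCE) MSI-X usage,
 * aggregation rings and hardware ring groups.  On P5 chips, *max_cp is
 * the number of available NQs instead.
 */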
12345 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12346 int *max_cp)
12347 {
12348 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12349 int max_ring_grps = 0, max_irq;
12350
12351 *max_tx = hw_resc->max_tx_rings;
12352 *max_rx = hw_resc->max_rx_rings;
12353 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12354 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12355 bnxt_get_ulp_msix_num(bp),
12356 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12357 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12358 *max_cp = min_t(int, *max_cp, max_irq);
12359 max_ring_grps = hw_resc->max_hw_ring_grps;
12360 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12361 *max_cp -= 1;
12362 *max_rx -= 2;
12363 }
12364 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12365 *max_rx >>= 1;
12366 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12367 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12368 /* On P5 chips, max_cp output param should be available NQs */
12369 *max_cp = max_irq;
12370 }
12371 *max_rx = min_t(int, *max_rx, max_ring_grps);
12372 }
12373
12374 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12375 {
12376 int rx, tx, cp;
12377
12378 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12379 *max_rx = rx;
12380 *max_tx = tx;
12381 if (!rx || !tx || !cp)
12382 return -ENOMEM;
12383
12384 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12385 }
12386
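/* Like bnxt_get_max_rings(), but used for the default configuration: if
 * there are not enough rings, retry with aggregation rings disabled, and
 * set aside a minimum number of completion rings, stat contexts and IRQs
 * for RoCE when the RoCE capability is present.
 */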
12387 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12388 bool shared)
12389 {
12390 int rc;
12391
12392 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12393 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12394 /* Not enough rings, try disabling agg rings. */
12395 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12396 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12397 if (rc) {
12398 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12399 bp->flags |= BNXT_FLAG_AGG_RINGS;
12400 return rc;
12401 }
12402 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12403 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12404 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12405 bnxt_set_ring_params(bp);
12406 }
12407
12408 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12409 int max_cp, max_stat, max_irq;
12410
12411 /* Reserve minimum resources for RoCE */
12412 max_cp = bnxt_get_max_func_cp_rings(bp);
12413 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12414 max_irq = bnxt_get_max_func_irqs(bp);
12415 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12416 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12417 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12418 return 0;
12419
12420 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12421 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12422 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12423 max_cp = min_t(int, max_cp, max_irq);
12424 max_cp = min_t(int, max_cp, max_stat);
12425 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12426 if (rc)
12427 rc = 0;
12428 }
12429 return rc;
12430 }
12431
12432 /* In initial default shared ring setting, each shared ring must have a
12433 * RX/TX ring pair.
12434 */
12435 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12436 {
12437 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12438 bp->rx_nr_rings = bp->cp_nr_rings;
12439 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12440 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12441 }
12442
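/* Pick the default RX/TX/completion ring counts (bounded by the CPU count,
 * the port count and the available hardware resources) and reserve them
 * with firmware.  With shared rings, each completion ring serves one
 * RX/TX ring pair.
 */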
12443 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12444 {
12445 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12446
12447 if (!bnxt_can_reserve_rings(bp))
12448 return 0;
12449
12450 if (sh)
12451 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12452 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12453 /* Reduce default rings on multi-port cards so that total default
12454 * rings do not exceed CPU count.
12455 */
12456 if (bp->port_count > 1) {
12457 int max_rings =
12458 max_t(int, num_online_cpus() / bp->port_count, 1);
12459
12460 dflt_rings = min_t(int, dflt_rings, max_rings);
12461 }
12462 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12463 if (rc)
12464 return rc;
12465 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12466 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12467 if (sh)
12468 bnxt_trim_dflt_sh_rings(bp);
12469 else
12470 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12471 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12472
12473 rc = __bnxt_reserve_rings(bp);
12474 if (rc)
12475 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12476 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12477 if (sh)
12478 bnxt_trim_dflt_sh_rings(bp);
12479
12480 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12481 if (bnxt_need_reserve_rings(bp)) {
12482 rc = __bnxt_reserve_rings(bp);
12483 if (rc)
12484 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12485 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12486 }
12487 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12488 bp->rx_nr_rings++;
12489 bp->cp_nr_rings++;
12490 }
12491 if (rc) {
12492 bp->tx_nr_rings = 0;
12493 bp->rx_nr_rings = 0;
12494 }
12495 return rc;
12496 }
12497
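/* Called when no rings are currently configured (bp->tx_nr_rings == 0):
 * stop ULP IRQs, set up the default shared rings, re-initialize the
 * interrupt mode and restore the default RFS setting.
 */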
12498 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12499 {
12500 int rc;
12501
12502 if (bp->tx_nr_rings)
12503 return 0;
12504
12505 bnxt_ulp_irq_stop(bp);
12506 bnxt_clear_int_mode(bp);
12507 rc = bnxt_set_dflt_rings(bp, true);
12508 if (rc) {
12509 netdev_err(bp->dev, "Not enough rings available.\n");
12510 goto init_dflt_ring_err;
12511 }
12512 rc = bnxt_init_int_mode(bp);
12513 if (rc)
12514 goto init_dflt_ring_err;
12515
12516 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12517
12518 bnxt_set_dflt_rfs(bp);
12519
12520 init_dflt_ring_err:
12521 bnxt_ulp_irq_restart(bp, rc);
12522 return rc;
12523 }
12524
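/* Re-query function resources from firmware and re-initialize the
 * interrupt mode, closing and re-opening the netdev around the change if
 * it is currently running.  Called with RTNL held, e.g. after SR-IOV
 * configuration changes.
 */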
12525 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12526 {
12527 int rc;
12528
12529 ASSERT_RTNL();
12530 bnxt_hwrm_func_qcaps(bp);
12531
12532 if (netif_running(bp->dev))
12533 __bnxt_close_nic(bp, true, false);
12534
12535 bnxt_ulp_irq_stop(bp);
12536 bnxt_clear_int_mode(bp);
12537 rc = bnxt_init_int_mode(bp);
12538 bnxt_ulp_irq_restart(bp, rc);
12539
12540 if (netif_running(bp->dev)) {
12541 if (rc)
12542 dev_close(bp->dev);
12543 else
12544 rc = bnxt_open_nic(bp, true, false);
12545 }
12546
12547 return rc;
12548 }
12549
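/* Set the netdev MAC address: the PF uses the firmware-provided address,
 * while a VF uses the administratively assigned address if valid (or a
 * random one otherwise) and asks the PF/firmware to approve it.
 */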
12550 static int bnxt_init_mac_addr(struct bnxt *bp)
12551 {
12552 int rc = 0;
12553
12554 if (BNXT_PF(bp)) {
12555 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12556 } else {
12557 #ifdef CONFIG_BNXT_SRIOV
12558 struct bnxt_vf_info *vf = &bp->vf;
12559 bool strict_approval = true;
12560
12561 if (is_valid_ether_addr(vf->mac_addr)) {
12562 /* overwrite netdev dev_addr with admin VF MAC */
12563 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12564 /* Older PF driver or firmware may not approve this
12565 * correctly.
12566 */
12567 strict_approval = false;
12568 } else {
12569 eth_hw_addr_random(bp->dev);
12570 }
12571 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12572 #endif
12573 }
12574 return rc;
12575 }
12576
12577 #define BNXT_VPD_LEN 512
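/* Read the board part number and serial number from the PCI VPD read-only
 * section, if present, into bp->board_partno and bp->board_serialno.
 */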
12578 static void bnxt_vpd_read_info(struct bnxt *bp)
12579 {
12580 struct pci_dev *pdev = bp->pdev;
12581 int i, len, pos, ro_size, size;
12582 ssize_t vpd_size;
12583 u8 *vpd_data;
12584
12585 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12586 if (!vpd_data)
12587 return;
12588
12589 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12590 if (vpd_size <= 0) {
12591 netdev_err(bp->dev, "Unable to read VPD\n");
12592 goto exit;
12593 }
12594
12595 i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
12596 if (i < 0) {
12597 netdev_err(bp->dev, "VPD READ-Only not found\n");
12598 goto exit;
12599 }
12600
12601 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12602 i += PCI_VPD_LRDT_TAG_SIZE;
12603 if (i + ro_size > vpd_size)
12604 goto exit;
12605
12606 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12607 PCI_VPD_RO_KEYWORD_PARTNO);
12608 if (pos < 0)
12609 goto read_sn;
12610
12611 len = pci_vpd_info_field_size(&vpd_data[pos]);
12612 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12613 if (len + pos > vpd_size)
12614 goto read_sn;
12615
12616 size = min(len, BNXT_VPD_FLD_LEN - 1);
12617 memcpy(bp->board_partno, &vpd_data[pos], size);
12618
12619 read_sn:
12620 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12621 PCI_VPD_RO_KEYWORD_SERIALNO);
12622 if (pos < 0)
12623 goto exit;
12624
12625 len = pci_vpd_info_field_size(&vpd_data[pos]);
12626 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12627 if (len + pos > vpd_size)
12628 goto exit;
12629
12630 size = min(len, BNXT_VPD_FLD_LEN - 1);
12631 memcpy(bp->board_serialno, &vpd_data[pos], size);
12632 exit:
12633 kfree(vpd_data);
12634 }
12635
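/* Read the PCIe Device Serial Number into dsn[] (little-endian) and mark
 * it valid; the DSN is later used as the switchdev parent port ID.
 */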
12636 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12637 {
12638 struct pci_dev *pdev = bp->pdev;
12639 u64 qword;
12640
12641 qword = pci_get_dsn(pdev);
12642 if (!qword) {
12643 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12644 return -EOPNOTSUPP;
12645 }
12646
12647 put_unaligned_le64(qword, dsn);
12648
12649 bp->flags |= BNXT_FLAG_DSN_VALID;
12650 return 0;
12651 }
12652
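/* Map the doorbell BAR (BAR 2).  bp->db_size must already have been
 * determined from firmware before this is called.
 */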
12653 static int bnxt_map_db_bar(struct bnxt *bp)
12654 {
12655 if (!bp->db_size)
12656 return -ENODEV;
12657 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12658 if (!bp->bar1)
12659 return -ENOMEM;
12660 return 0;
12661 }
12662
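/* PCI probe callback: allocate the netdev, initialize the board and HWRM
 * channel, query firmware capabilities, set up netdev features, default
 * rings and interrupts, and finally register the netdev and devlink port.
 */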
12663 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12664 {
12665 struct net_device *dev;
12666 struct bnxt *bp;
12667 int rc, max_irqs;
12668
12669 if (pci_is_bridge(pdev))
12670 return -ENODEV;
12671
12672 /* Clear any pending DMA transactions left over from the crashed
12673 * kernel before loading the driver in the kdump capture kernel.
12674 */
12675 if (is_kdump_kernel()) {
12676 pci_clear_master(pdev);
12677 pcie_flr(pdev);
12678 }
12679
12680 max_irqs = bnxt_get_max_irq(pdev);
12681 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12682 if (!dev)
12683 return -ENOMEM;
12684
12685 bp = netdev_priv(dev);
12686 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
12687 bnxt_set_max_func_irqs(bp, max_irqs);
12688
12689 if (bnxt_vf_pciid(ent->driver_data))
12690 bp->flags |= BNXT_FLAG_VF;
12691
12692 if (pdev->msix_cap)
12693 bp->flags |= BNXT_FLAG_MSIX_CAP;
12694
12695 rc = bnxt_init_board(pdev, dev);
12696 if (rc < 0)
12697 goto init_err_free;
12698
12699 dev->netdev_ops = &bnxt_netdev_ops;
12700 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12701 dev->ethtool_ops = &bnxt_ethtool_ops;
12702 pci_set_drvdata(pdev, dev);
12703
12704 if (BNXT_PF(bp))
12705 bnxt_vpd_read_info(bp);
12706
12707 rc = bnxt_alloc_hwrm_resources(bp);
12708 if (rc)
12709 goto init_err_pci_clean;
12710
12711 mutex_init(&bp->hwrm_cmd_lock);
12712 mutex_init(&bp->link_lock);
12713
12714 rc = bnxt_fw_init_one_p1(bp);
12715 if (rc)
12716 goto init_err_pci_clean;
12717
12718 if (BNXT_CHIP_P5(bp)) {
12719 bp->flags |= BNXT_FLAG_CHIP_P5;
12720 if (BNXT_CHIP_SR2(bp))
12721 bp->flags |= BNXT_FLAG_CHIP_SR2;
12722 }
12723
12724 rc = bnxt_alloc_rss_indir_tbl(bp);
12725 if (rc)
12726 goto init_err_pci_clean;
12727
12728 rc = bnxt_fw_init_one_p2(bp);
12729 if (rc)
12730 goto init_err_pci_clean;
12731
12732 rc = bnxt_map_db_bar(bp);
12733 if (rc) {
12734 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
12735 rc);
12736 goto init_err_pci_clean;
12737 }
12738
12739 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12740 NETIF_F_TSO | NETIF_F_TSO6 |
12741 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12742 NETIF_F_GSO_IPXIP4 |
12743 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12744 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
12745 NETIF_F_RXCSUM | NETIF_F_GRO;
12746
12747 if (BNXT_SUPPORTS_TPA(bp))
12748 dev->hw_features |= NETIF_F_LRO;
12749
12750 dev->hw_enc_features =
12751 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12752 NETIF_F_TSO | NETIF_F_TSO6 |
12753 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12754 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12755 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
12756 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
12757
12758 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
12759 NETIF_F_GSO_GRE_CSUM;
12760 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
12761 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12762 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12763 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12764 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
12765 if (BNXT_SUPPORTS_TPA(bp))
12766 dev->hw_features |= NETIF_F_GRO_HW;
12767 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
12768 if (dev->features & NETIF_F_GRO_HW)
12769 dev->features &= ~NETIF_F_LRO;
12770 dev->priv_flags |= IFF_UNICAST_FLT;
12771
12772 #ifdef CONFIG_BNXT_SRIOV
12773 init_waitqueue_head(&bp->sriov_cfg_wait);
12774 mutex_init(&bp->sriov_lock);
12775 #endif
12776 if (BNXT_SUPPORTS_TPA(bp)) {
12777 bp->gro_func = bnxt_gro_func_5730x;
12778 if (BNXT_CHIP_P4(bp))
12779 bp->gro_func = bnxt_gro_func_5731x;
12780 else if (BNXT_CHIP_P5(bp))
12781 bp->gro_func = bnxt_gro_func_5750x;
12782 }
12783 if (!BNXT_CHIP_P4_PLUS(bp))
12784 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12785
12786 bp->ulp_probe = bnxt_ulp_probe;
12787
12788 rc = bnxt_init_mac_addr(bp);
12789 if (rc) {
12790 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
12791 rc = -EADDRNOTAVAIL;
12792 goto init_err_pci_clean;
12793 }
12794
12795 if (BNXT_PF(bp)) {
12796 /* Read the adapter's DSN to use as the eswitch switch_id */
12797 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
12798 }
12799
12800 /* MTU range: 60 - FW defined max */
12801 dev->min_mtu = ETH_ZLEN;
12802 dev->max_mtu = bp->max_mtu;
12803
12804 rc = bnxt_probe_phy(bp, true);
12805 if (rc)
12806 goto init_err_pci_clean;
12807
12808 bnxt_set_rx_skb_mode(bp, false);
12809 bnxt_set_tpa_flags(bp);
12810 bnxt_set_ring_params(bp);
12811 rc = bnxt_set_dflt_rings(bp, true);
12812 if (rc) {
12813 netdev_err(bp->dev, "Not enough rings available.\n");
12814 rc = -ENOMEM;
12815 goto init_err_pci_clean;
12816 }
12817
12818 bnxt_fw_init_one_p3(bp);
12819
12820 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12821 bp->flags |= BNXT_FLAG_STRIP_VLAN;
12822
12823 rc = bnxt_init_int_mode(bp);
12824 if (rc)
12825 goto init_err_pci_clean;
12826
12827 /* No TC has been set yet and rings may have been trimmed due to
12828 * limited MSIX, so we re-initialize the TX rings per TC.
12829 */
12830 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12831
12832 if (BNXT_PF(bp)) {
12833 if (!bnxt_pf_wq) {
12834 bnxt_pf_wq =
12835 create_singlethread_workqueue("bnxt_pf_wq");
12836 if (!bnxt_pf_wq) {
12837 dev_err(&pdev->dev, "Unable to create workqueue.\n");
12838 rc = -ENOMEM;
12839 goto init_err_pci_clean;
12840 }
12841 }
12842 rc = bnxt_init_tc(bp);
12843 if (rc)
12844 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
12845 rc);
12846 }
12847
12848 bnxt_dl_register(bp);
12849
12850 rc = register_netdev(dev);
12851 if (rc)
12852 goto init_err_cleanup;
12853
12854 if (BNXT_PF(bp))
12855 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
12856 bnxt_dl_fw_reporters_create(bp);
12857
12858 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
12859 board_info[ent->driver_data].name,
12860 (long)pci_resource_start(pdev, 0), dev->dev_addr);
12861 pcie_print_link_status(pdev);
12862
12863 pci_save_state(pdev);
12864 return 0;
12865
12866 init_err_cleanup:
12867 bnxt_dl_unregister(bp);
12868 bnxt_shutdown_tc(bp);
12869 bnxt_clear_int_mode(bp);
12870
12871 init_err_pci_clean:
12872 bnxt_hwrm_func_drv_unrgtr(bp);
12873 bnxt_free_hwrm_short_cmd_req(bp);
12874 bnxt_free_hwrm_resources(bp);
12875 bnxt_ethtool_free(bp);
12876 kfree(bp->fw_health);
12877 bp->fw_health = NULL;
12878 bnxt_cleanup_pci(bp);
12879 bnxt_free_ctx_mem(bp);
12880 kfree(bp->ctx);
12881 bp->ctx = NULL;
12882 kfree(bp->rss_indir_tbl);
12883 bp->rss_indir_tbl = NULL;
12884
12885 init_err_free:
12886 free_netdev(dev);
12887 return rc;
12888 }
12889
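/* PCI shutdown callback: close the netdev, quiesce ULPs and interrupts,
 * and arm wake-on-LAN before the system powers off.
 */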
12890 static void bnxt_shutdown(struct pci_dev *pdev)
12891 {
12892 struct net_device *dev = pci_get_drvdata(pdev);
12893 struct bnxt *bp;
12894
12895 if (!dev)
12896 return;
12897
12898 rtnl_lock();
12899 bp = netdev_priv(dev);
12900 if (!bp)
12901 goto shutdown_exit;
12902
12903 if (netif_running(dev))
12904 dev_close(dev);
12905
12906 bnxt_ulp_shutdown(bp);
12907 bnxt_clear_int_mode(bp);
12908 pci_disable_device(pdev);
12909
12910 if (system_state == SYSTEM_POWER_OFF) {
12911 pci_wake_from_d3(pdev, bp->wol);
12912 pci_set_power_state(pdev, PCI_D3hot);
12913 }
12914
12915 shutdown_exit:
12916 rtnl_unlock();
12917 }
12918
12919 #ifdef CONFIG_PM_SLEEP
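/* System suspend: quiesce ULPs, detach and close the netdev, unregister
 * the driver from firmware, disable the PCI device and release firmware
 * context memory.
 */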
12920 static int bnxt_suspend(struct device *device)
12921 {
12922 struct net_device *dev = dev_get_drvdata(device);
12923 struct bnxt *bp = netdev_priv(dev);
12924 int rc = 0;
12925
12926 rtnl_lock();
12927 bnxt_ulp_stop(bp);
12928 if (netif_running(dev)) {
12929 netif_device_detach(dev);
12930 rc = bnxt_close(dev);
12931 }
12932 bnxt_hwrm_func_drv_unrgtr(bp);
12933 pci_disable_device(bp->pdev);
12934 bnxt_free_ctx_mem(bp);
12935 kfree(bp->ctx);
12936 bp->ctx = NULL;
12937 rtnl_unlock();
12938 return rc;
12939 }
12940
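/* System resume: re-enable the PCI device, re-establish communication with
 * firmware (version query, function reset, capability query, driver
 * registration), re-open the netdev if it was running at suspend time,
 * then restart ULPs and re-enable SR-IOV VFs on success.
 */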
12941 static int bnxt_resume(struct device *device)
12942 {
12943 struct net_device *dev = dev_get_drvdata(device);
12944 struct bnxt *bp = netdev_priv(dev);
12945 int rc = 0;
12946
12947 rtnl_lock();
12948 rc = pci_enable_device(bp->pdev);
12949 if (rc) {
12950 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12951 rc);
12952 goto resume_exit;
12953 }
12954 pci_set_master(bp->pdev);
12955 if (bnxt_hwrm_ver_get(bp)) {
12956 rc = -ENODEV;
12957 goto resume_exit;
12958 }
12959 rc = bnxt_hwrm_func_reset(bp);
12960 if (rc) {
12961 rc = -EBUSY;
12962 goto resume_exit;
12963 }
12964
12965 rc = bnxt_hwrm_func_qcaps(bp);
12966 if (rc)
12967 goto resume_exit;
12968
12969 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12970 rc = -ENODEV;
12971 goto resume_exit;
12972 }
12973
12974 bnxt_get_wol_settings(bp);
12975 if (netif_running(dev)) {
12976 rc = bnxt_open(dev);
12977 if (!rc)
12978 netif_device_attach(dev);
12979 }
12980
12981 resume_exit:
12982 bnxt_ulp_start(bp, rc);
12983 if (!rc)
12984 bnxt_reenable_sriov(bp);
12985 rtnl_unlock();
12986 return rc;
12987 }
12988
12989 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12990 #define BNXT_PM_OPS (&bnxt_pm_ops)
12991
12992 #else
12993
12994 #define BNXT_PM_OPS NULL
12995
12996 #endif /* CONFIG_PM_SLEEP */
12997
12998 /**
12999 * bnxt_io_error_detected - called when PCI error is detected
13000 * @pdev: Pointer to PCI device
13001 * @state: The current pci connection state
13002 *
13003 * This function is called after a PCI bus error affecting
13004 * this device has been detected.
13005 */
13006 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13007 pci_channel_state_t state)
13008 {
13009 struct net_device *netdev = pci_get_drvdata(pdev);
13010 struct bnxt *bp = netdev_priv(netdev);
13011
13012 netdev_info(netdev, "PCI I/O error detected\n");
13013
13014 rtnl_lock();
13015 netif_device_detach(netdev);
13016
13017 bnxt_ulp_stop(bp);
13018
13019 if (state == pci_channel_io_perm_failure) {
13020 rtnl_unlock();
13021 return PCI_ERS_RESULT_DISCONNECT;
13022 }
13023
13024 if (state == pci_channel_io_frozen)
13025 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13026
13027 if (netif_running(netdev))
13028 bnxt_close(netdev);
13029
13030 if (pci_is_enabled(pdev))
13031 pci_disable_device(pdev);
13032 bnxt_free_ctx_mem(bp);
13033 kfree(bp->ctx);
13034 bp->ctx = NULL;
13035 rtnl_unlock();
13036
13037 /* Request a slot reset. */
13038 return PCI_ERS_RESULT_NEED_RESET;
13039 }
13040
13041 /**
13042 * bnxt_io_slot_reset - called after the pci bus has been reset.
13043 * @pdev: Pointer to PCI device
13044 *
13045 * Restart the card from scratch, as if from a cold-boot.
13046 * At this point, the card has experienced a hard reset,
13047 * followed by fixups by BIOS, and has its config space
13048 * set up identically to what it was at cold boot.
13049 */
13050 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13051 {
13052 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13053 struct net_device *netdev = pci_get_drvdata(pdev);
13054 struct bnxt *bp = netdev_priv(netdev);
13055 int err = 0, off;
13056
13057 netdev_info(bp->dev, "PCI Slot Reset\n");
13058
13059 rtnl_lock();
13060
13061 if (pci_enable_device(pdev)) {
13062 dev_err(&pdev->dev,
13063 "Cannot re-enable PCI device after reset.\n");
13064 } else {
13065 pci_set_master(pdev);
13066 /* Upon fatal error, the device's internal logic that latches the
13067 * BAR values is reset and is only restored by rewriting the
13068 * BARs.
13069 *
13070 * Since pci_restore_state() does not re-write the BARs when their
13071 * values match the previously saved values, the driver must write
13072 * the BARs to 0 to force a restore after a fatal error.
13073 */
13074 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13075 &bp->state)) {
13076 for (off = PCI_BASE_ADDRESS_0;
13077 off <= PCI_BASE_ADDRESS_5; off += 4)
13078 pci_write_config_dword(bp->pdev, off, 0);
13079 }
13080 pci_restore_state(pdev);
13081 pci_save_state(pdev);
13082
13083 err = bnxt_hwrm_func_reset(bp);
13084 if (!err)
13085 result = PCI_ERS_RESULT_RECOVERED;
13086 }
13087
13088 rtnl_unlock();
13089
13090 return result;
13091 }
13092
13093 /**
13094 * bnxt_io_resume - called when traffic can start flowing again.
13095 * @pdev: Pointer to PCI device
13096 *
13097 * This callback is called when the error recovery driver tells
13098 * us that it's OK to resume normal operation.
13099 */
13100 static void bnxt_io_resume(struct pci_dev *pdev)
13101 {
13102 struct net_device *netdev = pci_get_drvdata(pdev);
13103 struct bnxt *bp = netdev_priv(netdev);
13104 int err;
13105
13106 netdev_info(bp->dev, "PCI Slot Resume\n");
13107 rtnl_lock();
13108
13109 err = bnxt_hwrm_func_qcaps(bp);
13110 if (!err && netif_running(netdev))
13111 err = bnxt_open(netdev);
13112
13113 bnxt_ulp_start(bp, err);
13114 if (!err) {
13115 bnxt_reenable_sriov(bp);
13116 netif_device_attach(netdev);
13117 }
13118
13119 rtnl_unlock();
13120 }
13121
13122 static const struct pci_error_handlers bnxt_err_handler = {
13123 .error_detected = bnxt_io_error_detected,
13124 .slot_reset = bnxt_io_slot_reset,
13125 .resume = bnxt_io_resume
13126 };
13127
13128 static struct pci_driver bnxt_pci_driver = {
13129 .name = DRV_MODULE_NAME,
13130 .id_table = bnxt_pci_tbl,
13131 .probe = bnxt_init_one,
13132 .remove = bnxt_remove_one,
13133 .shutdown = bnxt_shutdown,
13134 .driver.pm = BNXT_PM_OPS,
13135 .err_handler = &bnxt_err_handler,
13136 #if defined(CONFIG_BNXT_SRIOV)
13137 .sriov_configure = bnxt_sriov_configure,
13138 #endif
13139 };
13140
13141 static int __init bnxt_init(void)
13142 {
13143 int err;
13144
13145 bnxt_debug_init();
13146 err = pci_register_driver(&bnxt_pci_driver);
13147 if (err) {
13148 bnxt_debug_exit();
13149 return err;
13150 }
13151
13152 return 0;
13153 }
13154
13155 static void __exit bnxt_exit(void)
13156 {
13157 pci_unregister_driver(&bnxt_pci_driver);
13158 if (bnxt_pf_wq)
13159 destroy_workqueue(bnxt_pf_wq);
13160 bnxt_debug_exit();
13161 }
13162
13163 module_init(bnxt_init);
13164 module_exit(bnxt_exit);
13165