/* bnx2x_stats.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

extern const u32 dmae_reg_go_c[];

/* Statistics */

/*
 * General service functions
 */
34static inline long bnx2x_hilo(u32 *hiref)
35{
36	u32 lo = *(hiref + 1);
37#if (BITS_PER_LONG == 64)
38	u32 hi = *hiref;
39
40	return HILO_U64(hi, lo);
41#else
42	return lo;
43#endif
44}
45
46static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
47{
48	u16 res = 0;
49
50	/* 'newest' convention - shmem2 cotains the size of the port stats */
51	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
52		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
53		if (size)
54			res = size;
55
56		/* prevent newer BC from causing buffer overflow */
57		if (res > sizeof(struct host_port_stats))
58			res = sizeof(struct host_port_stats);
59	}
60
61	/* Older convention - all BCs support the port stats' fields up until
62	 * the 'not_used' field
63	 */
64	if (!res) {
65		res = offsetof(struct host_port_stats, not_used) + 4;
66
67		/* if PFC stats are supported by the MFW, DMA them as well */
68		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
69			res += offsetof(struct host_port_stats,
70					pfc_frames_rx_lo) -
71			       offsetof(struct host_port_stats,
72					pfc_frames_tx_hi) + 4 ;
73		}
74	}
75
76	res >>= 2;
77
78	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
79	return res;
80}
81
82/*
83 * Init service functions
84 */
85
/* Dump the FW statistics ramrod request - the header plus every
 * per-client query entry - to the debug log.  Diagnostic only; no
 * driver state is modified.
 */
static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	/* one query entry per client the FW is asked to report on */
	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "              kind %d\n"
		   "              index %d\n"
		   "              funcID %d\n"
		   "              reserved %d\n"
		   "              address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}
121
/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	int rc;

	/* a previous statistics ramrod is still in flight - don't post
	 * another one
	 */
	if (bp->stats_pending)
		return;

	/* stamp the request with the next sequence number; the storms echo
	 * it back so bnx2x_storm_stats_validate_counters() can check it
	 */
	bp->fw_stats_req->hdr.drv_stats_counter =
		cpu_to_le16(bp->stats_counter++);

	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

	/* adjust the ramrod to include VF queues statistics */
	bnx2x_iov_adjust_stats_req(bp);
	bnx2x_dp_stats(bp);

	/* send FW stats ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			   U64_HI(bp->fw_stats_req_mapping),
			   U64_LO(bp->fw_stats_req_mapping),
			   NONE_CONNECTION_TYPE);
	if (rc == 0)
		bp->stats_pending = 1;	/* mark a ramrod as in flight */
}
153
/* Kick off the HW (DMAE) part of a statistics cycle.
 *
 * stats_comp is pre-set to DMAE_COMP_VAL so that, if nothing is posted
 * below, a subsequent bnx2x_stats_comp() returns immediately; it is
 * cleared to 0 right before a DMAE transaction is actually issued.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	/* no HW statistics DMA on slow (non-ASIC) chip revisions */
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader: when a chain of DMAE commands was prepared in the
	 * slowpath dmae[] array (executer_idx != 0), build a command that
	 * copies the first prepared command into DMAE command memory slot
	 * loader_idx + 1 and triggers it through its GO register
	 * (comp_addr = dmae_reg_go_c[loader_idx + 1], comp_val = 1);
	 * presumably the prepared commands then chain one another the same
	 * way - see how they are built in bnx2x_port_stats_init()
	 */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
						 true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		/* single command (function stats only): run it synchronously
		 * with a PCI completion
		 */
		*stats_comp = 0;
		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
	}
}
198
/* Wait (sleeping) until the DMAE completion value shows up in the
 * stats_comp slowpath word, or give up after ~10-20ms of polling.
 */
static void bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;	/* up to 10 polls of 1-2ms each */

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
}
214
215/*
216 * Statistics service functions
217 */
218
/* should be called under stats_sema */
/* Read the port statistics accumulated in the MFW area (port_stx) back
 * into the slowpath port_stats buffer.  NOTE(review): this appears to
 * run when this function takes over as PMF (it requires bp->port.pmf
 * and a valid port_stx) so it continues from the counters left by its
 * predecessor - confirm against the stats FSM.
 *
 * The read is split into two chained DMAE commands because a single
 * read is limited to DMAE_LEN32_RD_MAX dwords; the first completes to
 * GRC, the second completes to PCI (stats_comp), which is then waited
 * on synchronously.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completes to GRC */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completes to PCI via stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
266
/* Build the chain of DMAE commands that each statistics cycle executes
 * for the PMF:
 *   - write the host port/function stats out to the MCP areas
 *     (port_stx / func_stx), when those addresses are set
 *   - read the MAC statistics (EMAC, BMAC1/BMAC2 or MSTAT, depending on
 *     chip family and the active MAC) into the slowpath mac_stats buffer
 *   - read the NIG per-port counters into the slowpath nig_stats buffer
 * Every command but the last completes to GRC (a DMAE GO register); the
 * final NIG read completes to PCI, writing DMAE_COMP_VAL into stats_comp.
 * The commands are only prepared here - bnx2x_hw_stats_post() executes
 * them.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity - only the PMF with link up should get here */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				    true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special - its stats live in three separate register
	 * ranges, so three reads are needed
	 */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 - single register, lands at
		 * the rx_stat_falsecarriererrors slot of struct emac_stats
		 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats - placed right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* last command in the chain: completes to PCI, raising stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
						 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
488
/* Prepare the single DMAE command that writes the host function
 * statistics to the MFW's func_stx area.  Used by non-PMF functions
 * (the PMF builds the full chain in bnx2x_port_stats_init() instead);
 * the command completes to PCI via stats_comp.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity - the MFW must have provided a function stats address */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
516
517/* should be called under stats_sema */
518static void bnx2x_stats_start(struct bnx2x *bp)
519{
520	if (IS_PF(bp)) {
521		if (bp->port.pmf)
522			bnx2x_port_stats_init(bp);
523
524		else if (bp->func_stx)
525			bnx2x_func_stats_init(bp);
526
527		bnx2x_hw_stats_post(bp);
528		bnx2x_storm_stats_post(bp);
529	}
530}
531
/* Handle the PMF-start statistics event: wait for any outstanding DMAE
 * completion, pull the port stats accumulated so far from the MFW area
 * (bnx2x_stats_pmf_update), then begin a fresh collection cycle.
 * The call order here is strict.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
538
/* Restart the statistics machinery: wait for the previous HW pass to
 * finish, then kick off a new collection cycle.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* VFs travel through here as part of the statistics FSM, but no
	 * action is required on their part
	 */
	if (!IS_VF(bp)) {
		bnx2x_stats_comp(bp);
		bnx2x_stats_start(bp);
	}
}
550
/* Fold the freshly DMAed BigMAC counters into the slowpath port stats
 * and mirror the pause/PFC counters into bp->eth_stats.
 *
 * NOTE(review): the UPDATE_STAT64() macros presumably expand against
 * the locals named 'new' (the snapshot, typed bmac1_stats or
 * bmac2_stats per chip family), 'pstats' and the 'diff' scratch pair -
 * so these local names must not change; verify in bnx2x_stats.h.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch for the UPDATE_STAT64() macros */

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats (BMAC2 only - BMAC1 has none) */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* mirror pause/PFC counters into the ethernet stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
656
/* Fold the freshly DMAed MSTAT counters (E3 chips) into the slowpath
 * port stats and mirror the derived values into bp->eth_stats.
 *
 * NOTE(review): the ADD_STAT64() macros presumably expand against the
 * locals named 'new' and 'pstats' - these names must not change; verify
 * in bnx2x_stats.h.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	/* "over 1522" = sum of the 2047/4095/9216/16383 buckets */
	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	/* mirror pause/PFC counters into the ethernet stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
746
/* Fold the freshly DMAed EMAC counters into the slowpath port stats and
 * derive the pause-frame totals in bp->eth_stats (received = xon + xoff
 * received; sent = xon + xoff sent).
 *
 * NOTE(review): the UPDATE_EXTEND_STAT() macros presumably expand
 * against the locals named 'new' and 'pstats' - these names must not
 * change; verify in bnx2x_stats.h.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon received + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
803
/* Consume the results of the HW (DMAE) statistics pass: dispatch to the
 * per-MAC update routine, extend the NIG counters to 64 bit using the
 * previous snapshot in bp->port.old_nig_stats, and mirror the results
 * into bp->eth_stats.
 *
 * Returns 0 on success, -1 when no MAC was active (stats not updated).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch, presumably used by the UPDATE/ADD macros below */

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
		/* no return - the NIG counters below are still updated */
	}

	/* extend BRB drop/truncate to 64 bit via the delta vs. old snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	/* E3: accumulate the EEE TX LPI entry counter from the CPMU */
	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	/* track the maximum NIG timer value reported by the MFW */
	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
877
/* Check that all four storm processors (x/u/c/t) have posted statistics
 * for the most recently sent ramrod, i.e. each storm's counter matches
 * stats_counter - 1 (the sequence number stamped on the last request in
 * bnx2x_storm_stats_post()).
 *
 * Returns 0 when all storms are up to date, -EAGAIN otherwise.
 */
static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}
917
/* Consume the storm (FW) statistics DMAed into bp->fw_stats_data and fold
 * them into the per-queue, per-function and global ethtool counters.
 *
 * Returns 0 on success, or -EAGAIN when the storm counters show that the
 * last statistics ramrod has not yet been fully processed (PF only - a
 * VF's counter is managed by its PF, so VFs skip the validation).
 *
 * NOTE(review): several locals below (tport, fstats, estats_old and the
 * old_*client / qstats_old queue pointers) appear unused in this body;
 * they are presumably referenced by name inside the UPDATE_*()/SUB_*()
 * helper macros - confirm against bnx2x_stats.h before removing any.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
				&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
				&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	/* Fold each queue's x/u/t-storm statistics into its qstats */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		/* Packets dropped for lack of buffers were still counted as
		 * received above - subtract them back out
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		/* Propagate the per-queue totals into the function stats */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* Account MAC-level bad octets and FW-reported error bytes in the
	 * global received/error byte totals
	 */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* Only the PMF maintains the port-wide FW discard counters */
	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
1115
1116static void bnx2x_net_stats_update(struct bnx2x *bp)
1117{
1118	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1119	struct net_device_stats *nstats = &bp->dev->stats;
1120	unsigned long tmp;
1121	int i;
1122
1123	nstats->rx_packets =
1124		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
1125		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
1126		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
1127
1128	nstats->tx_packets =
1129		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
1130		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
1131		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
1132
1133	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1134
1135	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1136
1137	tmp = estats->mac_discard;
1138	for_each_rx_queue(bp, i) {
1139		struct tstorm_per_queue_stats *old_tclient =
1140			&bp->fp_stats[i].old_tclient;
1141		tmp += le32_to_cpu(old_tclient->checksum_discard);
1142	}
1143	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1144
1145	nstats->tx_dropped = 0;
1146
1147	nstats->multicast =
1148		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1149
1150	nstats->collisions =
1151		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1152
1153	nstats->rx_length_errors =
1154		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1155		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1156	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1157				 bnx2x_hilo(&estats->brb_truncate_hi);
1158	nstats->rx_crc_errors =
1159		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1160	nstats->rx_frame_errors =
1161		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1162	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1163	nstats->rx_missed_errors = 0;
1164
1165	nstats->rx_errors = nstats->rx_length_errors +
1166			    nstats->rx_over_errors +
1167			    nstats->rx_crc_errors +
1168			    nstats->rx_frame_errors +
1169			    nstats->rx_fifo_errors +
1170			    nstats->rx_missed_errors;
1171
1172	nstats->tx_aborted_errors =
1173		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1174		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1175	nstats->tx_carrier_errors =
1176		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1177	nstats->tx_fifo_errors = 0;
1178	nstats->tx_heartbeat_errors = 0;
1179	nstats->tx_window_errors = 0;
1180
1181	nstats->tx_errors = nstats->tx_aborted_errors +
1182			    nstats->tx_carrier_errors +
1183	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1184}
1185
/* Fold the driver-maintained (software, not FW/HW) per-queue counters
 * into the global ethtool statistics.
 *
 * NOTE(review): UPDATE_ESTAT_QSTAT expands using the local identifiers
 * estats/qstats/qstats_old by name - do not rename them.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}
1203
1204static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
1205{
1206	u32 val;
1207
1208	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
1209		val = SHMEM2_RD(bp, edebug_driver_if[1]);
1210
1211		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
1212			return true;
1213	}
1214
1215	return false;
1216}
1217
1218static void bnx2x_stats_update(struct bnx2x *bp)
1219{
1220	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1221
1222	if (bnx2x_edebug_stats_stopped(bp))
1223		return;
1224
1225	if (IS_PF(bp)) {
1226		if (*stats_comp != DMAE_COMP_VAL)
1227			return;
1228
1229		if (bp->port.pmf)
1230			bnx2x_hw_stats_update(bp);
1231
1232		if (bnx2x_storm_stats_update(bp)) {
1233			if (bp->stats_pending++ == 3) {
1234				BNX2X_ERR("storm stats were not updated for 3 times\n");
1235				bnx2x_panic();
1236			}
1237			return;
1238		}
1239	} else {
1240		/* vf doesn't collect HW statistics, and doesn't get completions
1241		 * perform only update
1242		 */
1243		bnx2x_storm_stats_update(bp);
1244	}
1245
1246	bnx2x_net_stats_update(bp);
1247	bnx2x_drv_stats_update(bp);
1248
1249	/* vf is done */
1250	if (IS_VF(bp))
1251		return;
1252
1253	if (netif_msg_timer(bp)) {
1254		struct bnx2x_eth_stats *estats = &bp->eth_stats;
1255
1256		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
1257		       estats->brb_drop_lo, estats->brb_truncate_lo);
1258	}
1259
1260	bnx2x_hw_stats_post(bp);
1261	bnx2x_storm_stats_post(bp);
1262}
1263
/* Program the final DMAE transfers that copy the host-resident port and
 * function statistics buffers out to their shmem locations (port_stx /
 * func_stx), so the management FW keeps the latest values after the
 * driver stops collecting.
 *
 * When both transfers are needed they are chained: the port-stats DMAE
 * completes into GRC (triggering the next loader channel), and only the
 * last DMAE completes into PCI memory (stats_comp).
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* GRC completion when chained to the func-stats DMAE below,
		 * PCI completion when this is the only transfer
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
1322
1323static void bnx2x_stats_stop(struct bnx2x *bp)
1324{
1325	bool update = false;
1326
1327	bnx2x_stats_comp(bp);
1328
1329	if (bp->port.pmf)
1330		update = (bnx2x_hw_stats_update(bp) == 0);
1331
1332	update |= (bnx2x_storm_stats_update(bp) == 0);
1333
1334	if (update) {
1335		bnx2x_net_stats_update(bp);
1336
1337		if (bp->port.pmf)
1338			bnx2x_port_stats_stop(bp);
1339
1340		bnx2x_hw_stats_post(bp);
1341		bnx2x_stats_comp(bp);
1342	}
1343}
1344
/* No-op action for state/event pairs in bnx2x_stats_stm that require
 * no work (e.g. UPDATE while statistics are disabled).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1348
/* Statistics state machine: bnx2x_stats_stm[state][event] gives the
 * action to execute and the state to transition to.  Rows are the
 * current state (DISABLED, ENABLED); columns are the triggering event
 * (PMF, LINK_UP, UPDATE, STOP).  Driven by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
1367
/* Deliver @event to the statistics state machine under bp->stats_lock:
 * run the action for the current state and advance the state.
 *
 * UPDATE events arrive from timer context and are simply dropped on
 * lock contention (the next timer tick retries); all other events wait
 * briefly for the lock so a state transition is never lost silently.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	/* Statistics update run from timer context, and we don't want to stop
	 * that context in case someone is in the middle of a transition.
	 * For other events, wait a bit until lock is taken.
	 */
	if (down_trylock(&bp->stats_lock)) {
		if (event == STATS_EVENT_UPDATE)
			return;

		DP(BNX2X_MSG_STATS,
		   "Unlikely stats' lock contention [event %d]\n", event);
		if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
			BNX2X_ERR("Failed to take stats lock [event %d]\n",
				  event);
			return;
		}
	}

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	up(&bp->stats_lock);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
1401
/* DMA the current host port statistics buffer to its shmem location
 * (port_stx) once, synchronously, as the baseline for the management FW.
 * Only valid on the PMF with a configured port_stx address.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI->GRC transfer, completion written to stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
1431
/* Prepare the statistics ramrod data once, so that afterwards we only
 * have to increment the statistics counter and send the ramrod each
 * time statistics are collected.
 *
 * Builds one query entry per statistics client: port, PF, optionally
 * FCoE, then one per ethernet queue (plus the FCoE queue), each pointing
 * at its slot in the fw_stats_data DMA buffer.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * stale (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (which will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends on whether an FCoE offloaded
	 * request will be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
1556
1557void bnx2x_memset_stats(struct bnx2x *bp)
1558{
1559	int i;
1560
1561	/* function stats */
1562	for_each_queue(bp, i) {
1563		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1564
1565		memset(&fp_stats->old_tclient, 0,
1566		       sizeof(fp_stats->old_tclient));
1567		memset(&fp_stats->old_uclient, 0,
1568		       sizeof(fp_stats->old_uclient));
1569		memset(&fp_stats->old_xclient, 0,
1570		       sizeof(fp_stats->old_xclient));
1571		if (bp->stats_init) {
1572			memset(&fp_stats->eth_q_stats, 0,
1573			       sizeof(fp_stats->eth_q_stats));
1574			memset(&fp_stats->eth_q_stats_old, 0,
1575			       sizeof(fp_stats->eth_q_stats_old));
1576		}
1577	}
1578
1579	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1580
1581	if (bp->stats_init) {
1582		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1583		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1584		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1585		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1586		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1587	}
1588
1589	bp->stats_state = STATS_STATE_DISABLED;
1590
1591	if (bp->port.pmf && bp->port.port_stx)
1592		bnx2x_port_stats_base_init(bp);
1593
1594	/* mark the end of statistics initialization */
1595	bp->stats_init = false;
1596}
1597
/* One-time statistics setup on device load: read the shmem statistics
 * addresses, snapshot the NIG baseline counters, build the FW statistics
 * ramrod request, and clear the driver's statistics buffers.  VFs only
 * clear buffers - they have no MCP/shmem or NIG access.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);	/* absolute port number */
	int mb_idx = BP_FW_MB_IDX(bp);

	if (IS_VF(bp)) {
		bnx2x_memset_stats(bp);
		return;
	}

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-initial load */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats: snapshot the current NIG counters as the baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}
1658
/* Snapshot the current statistics into the *_old buffers so the counters
 * survive a reload/reconfiguration of the queues.
 *
 * NOTE(review): UPDATE_QSTAT_OLD and UPDATE_FW_STAT_OLD expand using the
 * local identifiers qstats/qstats_old and estats/fwstats by name - do
 * not rename them.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}
1701
1702void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1703			      u32 stats_type)
1704{
1705	int i;
1706	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1707	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1708	struct per_queue_stats *fcoe_q_stats =
1709		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1710
1711	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1712		&fcoe_q_stats->tstorm_queue_statistics;
1713
1714	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1715		&fcoe_q_stats->ustorm_queue_statistics;
1716
1717	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1718		&fcoe_q_stats->xstorm_queue_statistics;
1719
1720	struct fcoe_statistics_params *fw_fcoe_stat =
1721		&bp->fw_stats_data->fcoe;
1722
1723	memset(afex_stats, 0, sizeof(struct afex_stats));
1724
1725	for_each_eth_queue(bp, i) {
1726		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1727
1728		ADD_64(afex_stats->rx_unicast_bytes_hi,
1729		       qstats->total_unicast_bytes_received_hi,
1730		       afex_stats->rx_unicast_bytes_lo,
1731		       qstats->total_unicast_bytes_received_lo);
1732
1733		ADD_64(afex_stats->rx_broadcast_bytes_hi,
1734		       qstats->total_broadcast_bytes_received_hi,
1735		       afex_stats->rx_broadcast_bytes_lo,
1736		       qstats->total_broadcast_bytes_received_lo);
1737
1738		ADD_64(afex_stats->rx_multicast_bytes_hi,
1739		       qstats->total_multicast_bytes_received_hi,
1740		       afex_stats->rx_multicast_bytes_lo,
1741		       qstats->total_multicast_bytes_received_lo);
1742
1743		ADD_64(afex_stats->rx_unicast_frames_hi,
1744		       qstats->total_unicast_packets_received_hi,
1745		       afex_stats->rx_unicast_frames_lo,
1746		       qstats->total_unicast_packets_received_lo);
1747
1748		ADD_64(afex_stats->rx_broadcast_frames_hi,
1749		       qstats->total_broadcast_packets_received_hi,
1750		       afex_stats->rx_broadcast_frames_lo,
1751		       qstats->total_broadcast_packets_received_lo);
1752
1753		ADD_64(afex_stats->rx_multicast_frames_hi,
1754		       qstats->total_multicast_packets_received_hi,
1755		       afex_stats->rx_multicast_frames_lo,
1756		       qstats->total_multicast_packets_received_lo);
1757
1758		/* sum to rx_frames_discarded all discraded
1759		 * packets due to size, ttl0 and checksum
1760		 */
1761		ADD_64(afex_stats->rx_frames_discarded_hi,
1762		       qstats->total_packets_received_checksum_discarded_hi,
1763		       afex_stats->rx_frames_discarded_lo,
1764		       qstats->total_packets_received_checksum_discarded_lo);
1765
1766		ADD_64(afex_stats->rx_frames_discarded_hi,
1767		       qstats->total_packets_received_ttl0_discarded_hi,
1768		       afex_stats->rx_frames_discarded_lo,
1769		       qstats->total_packets_received_ttl0_discarded_lo);
1770
1771		ADD_64(afex_stats->rx_frames_discarded_hi,
1772		       qstats->etherstatsoverrsizepkts_hi,
1773		       afex_stats->rx_frames_discarded_lo,
1774		       qstats->etherstatsoverrsizepkts_lo);
1775
1776		ADD_64(afex_stats->rx_frames_dropped_hi,
1777		       qstats->no_buff_discard_hi,
1778		       afex_stats->rx_frames_dropped_lo,
1779		       qstats->no_buff_discard_lo);
1780
1781		ADD_64(afex_stats->tx_unicast_bytes_hi,
1782		       qstats->total_unicast_bytes_transmitted_hi,
1783		       afex_stats->tx_unicast_bytes_lo,
1784		       qstats->total_unicast_bytes_transmitted_lo);
1785
1786		ADD_64(afex_stats->tx_broadcast_bytes_hi,
1787		       qstats->total_broadcast_bytes_transmitted_hi,
1788		       afex_stats->tx_broadcast_bytes_lo,
1789		       qstats->total_broadcast_bytes_transmitted_lo);
1790
1791		ADD_64(afex_stats->tx_multicast_bytes_hi,
1792		       qstats->total_multicast_bytes_transmitted_hi,
1793		       afex_stats->tx_multicast_bytes_lo,
1794		       qstats->total_multicast_bytes_transmitted_lo);
1795
1796		ADD_64(afex_stats->tx_unicast_frames_hi,
1797		       qstats->total_unicast_packets_transmitted_hi,
1798		       afex_stats->tx_unicast_frames_lo,
1799		       qstats->total_unicast_packets_transmitted_lo);
1800
1801		ADD_64(afex_stats->tx_broadcast_frames_hi,
1802		       qstats->total_broadcast_packets_transmitted_hi,
1803		       afex_stats->tx_broadcast_frames_lo,
1804		       qstats->total_broadcast_packets_transmitted_lo);
1805
1806		ADD_64(afex_stats->tx_multicast_frames_hi,
1807		       qstats->total_multicast_packets_transmitted_hi,
1808		       afex_stats->tx_multicast_frames_lo,
1809		       qstats->total_multicast_packets_transmitted_lo);
1810
1811		ADD_64(afex_stats->tx_frames_dropped_hi,
1812		       qstats->total_transmitted_dropped_packets_error_hi,
1813		       afex_stats->tx_frames_dropped_lo,
1814		       qstats->total_transmitted_dropped_packets_error_lo);
1815	}
1816
1817	/* now add FCoE statistics which are collected separately
1818	 * (both offloaded and non offloaded)
1819	 */
1820	if (!NO_FCOE(bp)) {
1821		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1822			  LE32_0,
1823			  afex_stats->rx_unicast_bytes_lo,
1824			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1825
1826		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1827			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1828			  afex_stats->rx_unicast_bytes_lo,
1829			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1830
1831		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1832			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1833			  afex_stats->rx_broadcast_bytes_lo,
1834			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1835
1836		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1837			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1838			  afex_stats->rx_multicast_bytes_lo,
1839			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1840
1841		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1842			  LE32_0,
1843			  afex_stats->rx_unicast_frames_lo,
1844			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1845
1846		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1847			  LE32_0,
1848			  afex_stats->rx_unicast_frames_lo,
1849			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1850
1851		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1852			  LE32_0,
1853			  afex_stats->rx_broadcast_frames_lo,
1854			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
1855
1856		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1857			  LE32_0,
1858			  afex_stats->rx_multicast_frames_lo,
1859			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1860
1861		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1862			  LE32_0,
1863			  afex_stats->rx_frames_discarded_lo,
1864			  fcoe_q_tstorm_stats->checksum_discard);
1865
1866		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1867			  LE32_0,
1868			  afex_stats->rx_frames_discarded_lo,
1869			  fcoe_q_tstorm_stats->pkts_too_big_discard);
1870
1871		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1872			  LE32_0,
1873			  afex_stats->rx_frames_discarded_lo,
1874			  fcoe_q_tstorm_stats->ttl0_discard);
1875
1876		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1877			    LE16_0,
1878			    afex_stats->rx_frames_dropped_lo,
1879			    fcoe_q_tstorm_stats->no_buff_discard);
1880
1881		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1882			  LE32_0,
1883			  afex_stats->rx_frames_dropped_lo,
1884			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1885
1886		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1887			  LE32_0,
1888			  afex_stats->rx_frames_dropped_lo,
1889			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1890
1891		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1892			  LE32_0,
1893			  afex_stats->rx_frames_dropped_lo,
1894			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1895
1896		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1897			  LE32_0,
1898			  afex_stats->rx_frames_dropped_lo,
1899			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1900
1901		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1902			  LE32_0,
1903			  afex_stats->rx_frames_dropped_lo,
1904			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1905
1906		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1907			  LE32_0,
1908			  afex_stats->tx_unicast_bytes_lo,
1909			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1910
1911		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1912			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1913			  afex_stats->tx_unicast_bytes_lo,
1914			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1915
1916		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1917			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1918			  afex_stats->tx_broadcast_bytes_lo,
1919			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1920
1921		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1922			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1923			  afex_stats->tx_multicast_bytes_lo,
1924			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1925
1926		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1927			  LE32_0,
1928			  afex_stats->tx_unicast_frames_lo,
1929			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1930
1931		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1932			  LE32_0,
1933			  afex_stats->tx_unicast_frames_lo,
1934			  fcoe_q_xstorm_stats->ucast_pkts_sent);
1935
1936		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1937			  LE32_0,
1938			  afex_stats->tx_broadcast_frames_lo,
1939			  fcoe_q_xstorm_stats->bcast_pkts_sent);
1940
1941		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1942			  LE32_0,
1943			  afex_stats->tx_multicast_frames_lo,
1944			  fcoe_q_xstorm_stats->mcast_pkts_sent);
1945
1946		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1947			  LE32_0,
1948			  afex_stats->tx_frames_dropped_lo,
1949			  fcoe_q_xstorm_stats->error_drop_pkts);
1950	}
1951
1952	/* if port stats are requested, add them to the PMF
1953	 * stats, as anyway they will be accumulated by the
1954	 * MCP before sent to the switch
1955	 */
1956	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1957		ADD_64(afex_stats->rx_frames_dropped_hi,
1958		       0,
1959		       afex_stats->rx_frames_dropped_lo,
1960		       estats->mac_filter_discard);
1961		ADD_64(afex_stats->rx_frames_dropped_hi,
1962		       0,
1963		       afex_stats->rx_frames_dropped_lo,
1964		       estats->brb_truncate_discard);
1965		ADD_64(afex_stats->rx_frames_discarded_hi,
1966		       0,
1967		       afex_stats->rx_frames_discarded_lo,
1968		       estats->mac_discard);
1969	}
1970}
1971
1972int bnx2x_stats_safe_exec(struct bnx2x *bp,
1973			  void (func_to_exec)(void *cookie),
1974			  void *cookie)
1975{
1976	int cnt = 10, rc = 0;
1977
1978	/* Wait for statistics to end [while blocking further requests],
1979	 * then run supplied function 'safely'.
1980	 */
1981	rc = down_timeout(&bp->stats_lock, HZ / 10);
1982	if (unlikely(rc)) {
1983		BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1984		goto out_no_lock;
1985	}
1986
1987	bnx2x_stats_comp(bp);
1988	while (bp->stats_pending && cnt--)
1989		if (bnx2x_storm_stats_update(bp))
1990			usleep_range(1000, 2000);
1991	if (bp->stats_pending) {
1992		BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1993		rc = -EBUSY;
1994		goto out;
1995	}
1996
1997	func_to_exec(cookie);
1998
1999out:
2000	/* No need to restart statistics - if they're enabled, the timer
2001	 * will restart the statistics.
2002	 */
2003	up(&bp->stats_lock);
2004out_no_lock:
2005	return rc;
2006}
2007