1/* bnx2x_vfpf.c: QLogic Everest network driver.
2 *
3 * Copyright 2009-2013 Broadcom Corporation
4 * Copyright 2014 QLogic Corporation
5 * All rights reserved
6 *
7 * Unless you and QLogic execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2, available
10 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
11 *
12 * Notwithstanding the above, under no circumstances may you combine this
13 * software in any way with any other QLogic software provided under a
14 * license other than the GPL, without QLogic's express prior written
15 * consent.
16 *
17 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
18 * Written by: Shmulik Ravid
19 *	       Ariel Elior <ariel.elior@qlogic.com>
20 */
21
22#include "bnx2x.h"
23#include "bnx2x_cmn.h"
24#include <linux/crc32.h>
25
26static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
27
28/* place a given tlv on the tlv buffer at a given offset */
29static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
30			  u16 offset, u16 type, u16 length)
31{
32	struct channel_tlv *tl =
33		(struct channel_tlv *)(tlvs_list + offset);
34
35	tl->type = type;
36	tl->length = length;
37}
38
39/* Clear the mailbox and init the header of the first tlv */
40static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
41			    u16 type, u16 length)
42{
43	mutex_lock(&bp->vf2pf_mutex);
44
45	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
46	   type);
47
48	/* Clear mailbox */
49	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
50
51	/* init type and length */
52	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
53
54	/* init first tlv header */
55	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
56}
57
58/* releases the mailbox */
59static void bnx2x_vfpf_finalize(struct bnx2x *bp,
60				struct vfpf_first_tlv *first_tlv)
61{
62	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
63	   first_tlv->tl.type);
64
65	mutex_unlock(&bp->vf2pf_mutex);
66}
67
68/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
69static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
70				   enum channel_tlvs req_tlv)
71{
72	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
73
74	do {
75		if (tlv->type == req_tlv)
76			return tlv;
77
78		if (!tlv->length) {
79			BNX2X_ERR("Found TLV with length 0\n");
80			return NULL;
81		}
82
83		tlvs_list += tlv->length;
84		tlv = (struct channel_tlv *)tlvs_list;
85	} while (tlv->type != CHANNEL_TLV_LIST_END);
86
87	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
88
89	return NULL;
90}
91
92/* list the types and lengths of the tlvs on the buffer */
93static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
94{
95	int i = 1;
96	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
97
98	while (tlv->type != CHANNEL_TLV_LIST_END) {
99		/* output tlv */
100		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
101		   tlv->type, tlv->length);
102
103		/* advance to next tlv */
104		tlvs_list += tlv->length;
105
106		/* cast general tlv list pointer to channel tlv header*/
107		tlv = (struct channel_tlv *)tlvs_list;
108
109		i++;
110
111		/* break condition for this loop */
112		if (i > MAX_TLVS_IN_LIST) {
113			WARN(true, "corrupt tlvs");
114			return;
115		}
116	}
117
118	/* output last tlv */
119	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
120	   tlv->type, tlv->length);
121}
122
123/* test whether we support a tlv type */
124bool bnx2x_tlv_supported(u16 tlvtype)
125{
126	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
127}
128
129static inline int bnx2x_pfvf_status_codes(int rc)
130{
131	switch (rc) {
132	case 0:
133		return PFVF_STATUS_SUCCESS;
134	case -ENOMEM:
135		return PFVF_STATUS_NO_RESOURCE;
136	default:
137		return PFVF_STATUS_FAILURE;
138	}
139}
140
141static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
142{
143	struct cstorm_vf_zone_data __iomem *zone_data =
144		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
145	int tout = 100, interval = 100; /* wait for 10 seconds */
146
147	if (*done) {
148		BNX2X_ERR("done was non zero before message to pf was sent\n");
149		WARN_ON(true);
150		return -EINVAL;
151	}
152
153	/* if PF indicated channel is down avoid sending message. Return success
154	 * so calling flow can continue
155	 */
156	bnx2x_sample_bulletin(bp);
157	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
158		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
159		*done = PFVF_STATUS_SUCCESS;
160		return -EINVAL;
161	}
162
163	/* Write message address */
164	writel(U64_LO(msg_mapping),
165	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
166	writel(U64_HI(msg_mapping),
167	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
168
169	/* make sure the address is written before FW accesses it */
170	wmb();
171
172	/* Trigger the PF FW */
173	writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
174
175	/* Wait for PF to complete */
176	while ((tout >= 0) && (!*done)) {
177		msleep(interval);
178		tout -= 1;
179
180		/* progress indicator - HV can take its own sweet time in
181		 * answering VFs...
182		 */
183		DP_CONT(BNX2X_MSG_IOV, ".");
184	}
185
186	if (!*done) {
187		BNX2X_ERR("PF response has timed out\n");
188		return -EAGAIN;
189	}
190	DP(BNX2X_MSG_SP, "Got a response from PF\n");
191	return 0;
192}
193
194static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
195{
196	u32 me_reg;
197	int tout = 10, interval = 100; /* Wait for 1 sec */
198
199	do {
200		/* pxp traps vf read of doorbells and returns me reg value */
201		me_reg = readl(bp->doorbells);
202		if (GOOD_ME_REG(me_reg))
203			break;
204
205		msleep(interval);
206
207		BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
208			  me_reg);
209	} while (tout-- > 0);
210
211	if (!GOOD_ME_REG(me_reg)) {
212		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
213		return -EINVAL;
214	}
215
216	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
217
218	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
219
220	return 0;
221}
222
223int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
224{
225	int rc = 0, attempts = 0;
226	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
227	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
228	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
229	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
230	u32 vf_id;
231	bool resources_acquired = false;
232
233	/* clear mailbox and prep first tlv */
234	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
235
236	if (bnx2x_get_vf_id(bp, &vf_id)) {
237		rc = -EAGAIN;
238		goto out;
239	}
240
241	req->vfdev_info.vf_id = vf_id;
242	req->vfdev_info.vf_os = 0;
243	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
244
245	req->resc_request.num_rxqs = rx_count;
246	req->resc_request.num_txqs = tx_count;
247	req->resc_request.num_sbs = bp->igu_sb_cnt;
248	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
249	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
250	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
251
252	/* pf 2 vf bulletin board address */
253	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
254
255	/* Request physical port identifier */
256	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
257		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
258
259	/* Bulletin support for bulletin board with length > legacy length */
260	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
261	/* vlan filtering is supported */
262	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
263
264	/* add list termination tlv */
265	bnx2x_add_tlv(bp, req,
266		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
267		      CHANNEL_TLV_LIST_END,
268		      sizeof(struct channel_list_end_tlv));
269
270	/* output tlvs list */
271	bnx2x_dp_tlv_list(bp, req);
272
273	while (!resources_acquired) {
274		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
275
276		/* send acquire request */
277		rc = bnx2x_send_msg2pf(bp,
278				       &resp->hdr.status,
279				       bp->vf2pf_mbox_mapping);
280
281		/* PF timeout */
282		if (rc)
283			goto out;
284
285		/* copy acquire response from buffer to bp */
286		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
287
288		attempts++;
289
290		/* test whether the PF accepted our request. If not, humble
291		 * the request and try again.
292		 */
293		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
294			DP(BNX2X_MSG_SP, "resources acquired\n");
295			resources_acquired = true;
296		} else if (bp->acquire_resp.hdr.status ==
297			   PFVF_STATUS_NO_RESOURCE &&
298			   attempts < VF_ACQUIRE_THRESH) {
299			DP(BNX2X_MSG_SP,
300			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");
301
302			/* humble our request */
303			req->resc_request.num_txqs =
304				min(req->resc_request.num_txqs,
305				    bp->acquire_resp.resc.num_txqs);
306			req->resc_request.num_rxqs =
307				min(req->resc_request.num_rxqs,
308				    bp->acquire_resp.resc.num_rxqs);
309			req->resc_request.num_sbs =
310				min(req->resc_request.num_sbs,
311				    bp->acquire_resp.resc.num_sbs);
312			req->resc_request.num_mac_filters =
313				min(req->resc_request.num_mac_filters,
314				    bp->acquire_resp.resc.num_mac_filters);
315			req->resc_request.num_vlan_filters =
316				min(req->resc_request.num_vlan_filters,
317				    bp->acquire_resp.resc.num_vlan_filters);
318			req->resc_request.num_mc_filters =
319				min(req->resc_request.num_mc_filters,
320				    bp->acquire_resp.resc.num_mc_filters);
321
322			/* Clear response buffer */
323			memset(&bp->vf2pf_mbox->resp, 0,
324			       sizeof(union pfvf_tlvs));
325		} else {
326			/* Determine reason of PF failure of acquire process */
327			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
328							    CHANNEL_TLV_FP_HSI_SUPPORT);
329			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
330				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
331			else
332				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
333					  bp->acquire_resp.hdr.status);
334			rc = -EAGAIN;
335			goto out;
336		}
337	}
338
339	/* Retrieve physical port id (if possible) */
340	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
341			 bnx2x_search_tlv_list(bp, resp,
342					       CHANNEL_TLV_PHYS_PORT_ID);
343	if (phys_port_resp) {
344		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
345		bp->flags |= HAS_PHYS_PORT_ID;
346	}
347
348	/* Old Hypevisors might not even support the FP_HSI_SUPPORT TLV.
349	 * If that's the case, we need to make certain required FW was
350	 * supported by such a hypervisor [i.e., v0-v2].
351	 */
352	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
353					    CHANNEL_TLV_FP_HSI_SUPPORT);
354	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
355		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
356
357		/* Since acquire succeeded on the PF side, we need to send a
358		 * release message in order to allow future probes.
359		 */
360		bnx2x_vfpf_finalize(bp, &req->first_tlv);
361		bnx2x_vfpf_release(bp);
362
363		rc = -EINVAL;
364		goto out;
365	}
366
367	/* get HW info */
368	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
369	bp->link_params.chip_id = bp->common.chip_id;
370	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
371	bp->common.int_block = INT_BLOCK_IGU;
372	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
373	bp->igu_dsb_id = -1;
374	bp->mf_ov = 0;
375	bp->mf_mode = 0;
376	bp->common.flash_size = 0;
377	bp->flags |=
378		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
379	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
380	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
381	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
382
383	strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
384		sizeof(bp->fw_ver));
385
386	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
387		eth_hw_addr_set(bp->dev,
388				bp->acquire_resp.resc.current_mac_addr);
389
390out:
391	bnx2x_vfpf_finalize(bp, &req->first_tlv);
392	return rc;
393}
394
395int bnx2x_vfpf_release(struct bnx2x *bp)
396{
397	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
398	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
399	u32 rc, vf_id;
400
401	/* clear mailbox and prep first tlv */
402	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
403
404	if (bnx2x_get_vf_id(bp, &vf_id)) {
405		rc = -EAGAIN;
406		goto out;
407	}
408
409	req->vf_id = vf_id;
410
411	/* add list termination tlv */
412	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
413		      sizeof(struct channel_list_end_tlv));
414
415	/* output tlvs list */
416	bnx2x_dp_tlv_list(bp, req);
417
418	/* send release request */
419	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
420
421	if (rc)
422		/* PF timeout */
423		goto out;
424
425	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
426		/* PF released us */
427		DP(BNX2X_MSG_SP, "vf released\n");
428	} else {
429		/* PF reports error */
430		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
431			  resp->hdr.status);
432		rc = -EAGAIN;
433		goto out;
434	}
435out:
436	bnx2x_vfpf_finalize(bp, &req->first_tlv);
437
438	return rc;
439}
440
441/* Tell PF about SB addresses */
442int bnx2x_vfpf_init(struct bnx2x *bp)
443{
444	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
445	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
446	int rc, i;
447
448	/* clear mailbox and prep first tlv */
449	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
450
451	/* status blocks */
452	for_each_eth_queue(bp, i)
453		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
454						       status_blk_mapping);
455
456	/* statistics - requests only supports single queue for now */
457	req->stats_addr = bp->fw_stats_data_mapping +
458			  offsetof(struct bnx2x_fw_stats_data, queue_stats);
459
460	req->stats_stride = sizeof(struct per_queue_stats);
461
462	/* add list termination tlv */
463	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
464		      sizeof(struct channel_list_end_tlv));
465
466	/* output tlvs list */
467	bnx2x_dp_tlv_list(bp, req);
468
469	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
470	if (rc)
471		goto out;
472
473	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
474		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
475			  resp->hdr.status);
476		rc = -EAGAIN;
477		goto out;
478	}
479
480	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
481out:
482	bnx2x_vfpf_finalize(bp, &req->first_tlv);
483
484	return rc;
485}
486
487/* CLOSE VF - opposite to INIT_VF */
488void bnx2x_vfpf_close_vf(struct bnx2x *bp)
489{
490	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
491	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
492	int i, rc;
493	u32 vf_id;
494
495	/* If we haven't got a valid VF id, there is no sense to
496	 * continue with sending messages
497	 */
498	if (bnx2x_get_vf_id(bp, &vf_id))
499		goto free_irq;
500
501	/* Close the queues */
502	for_each_queue(bp, i)
503		bnx2x_vfpf_teardown_queue(bp, i);
504
505	/* remove mac */
506	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
507
508	/* clear mailbox and prep first tlv */
509	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
510
511	req->vf_id = vf_id;
512
513	/* add list termination tlv */
514	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
515		      sizeof(struct channel_list_end_tlv));
516
517	/* output tlvs list */
518	bnx2x_dp_tlv_list(bp, req);
519
520	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
521
522	if (rc)
523		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
524
525	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
526		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
527			  resp->hdr.status);
528
529	bnx2x_vfpf_finalize(bp, &req->first_tlv);
530
531free_irq:
532	if (!bp->nic_stopped) {
533		/* Disable HW interrupts, NAPI */
534		bnx2x_netif_stop(bp, 0);
535		/* Delete all NAPI objects */
536		bnx2x_del_all_napi(bp);
537
538		/* Release IRQs */
539		bnx2x_free_irq(bp);
540		bp->nic_stopped = true;
541	}
542}
543
544static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
545				   struct bnx2x_vf_queue *q)
546{
547	u8 cl_id = vfq_cl_id(vf, q);
548	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
549
550	/* mac */
551	bnx2x_init_mac_obj(bp, &q->mac_obj,
552			   cl_id, q->cid, func_id,
553			   bnx2x_vf_sp(bp, vf, mac_rdata),
554			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
555			   BNX2X_FILTER_MAC_PENDING,
556			   &vf->filter_state,
557			   BNX2X_OBJ_TYPE_RX_TX,
558			   &vf->vf_macs_pool);
559	/* vlan */
560	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
561			    cl_id, q->cid, func_id,
562			    bnx2x_vf_sp(bp, vf, vlan_rdata),
563			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
564			    BNX2X_FILTER_VLAN_PENDING,
565			    &vf->filter_state,
566			    BNX2X_OBJ_TYPE_RX_TX,
567			    &vf->vf_vlans_pool);
568	/* vlan-mac */
569	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
570				cl_id, q->cid, func_id,
571				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
572				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
573				BNX2X_FILTER_VLAN_MAC_PENDING,
574				&vf->filter_state,
575				BNX2X_OBJ_TYPE_RX_TX,
576				&vf->vf_macs_pool,
577				&vf->vf_vlans_pool);
578	/* mcast */
579	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
580			     q->cid, func_id, func_id,
581			     bnx2x_vf_sp(bp, vf, mcast_rdata),
582			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
583			     BNX2X_FILTER_MCAST_PENDING,
584			     &vf->filter_state,
585			     BNX2X_OBJ_TYPE_RX_TX);
586
587	/* rss */
588	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
589				  func_id, func_id,
590				  bnx2x_vf_sp(bp, vf, rss_rdata),
591				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
592				  BNX2X_FILTER_RSS_CONF_PENDING,
593				  &vf->filter_state,
594				  BNX2X_OBJ_TYPE_RX_TX);
595
596	vf->leading_rss = cl_id;
597	q->is_leading = true;
598	q->sp_initialized = true;
599}
600
601/* ask the pf to open a queue for the vf */
602int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
603		       bool is_leading)
604{
605	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
606	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
607	u8 fp_idx = fp->index;
608	u16 tpa_agg_size = 0, flags = 0;
609	int rc;
610
611	/* clear mailbox and prep first tlv */
612	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
613
614	/* select tpa mode to request */
615	if (fp->mode != TPA_MODE_DISABLED) {
616		flags |= VFPF_QUEUE_FLG_TPA;
617		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
618		if (fp->mode == TPA_MODE_GRO)
619			flags |= VFPF_QUEUE_FLG_TPA_GRO;
620		tpa_agg_size = TPA_AGG_SIZE;
621	}
622
623	if (is_leading)
624		flags |= VFPF_QUEUE_FLG_LEADING_RSS;
625
626	/* calculate queue flags */
627	flags |= VFPF_QUEUE_FLG_STATS;
628	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
629	flags |= VFPF_QUEUE_FLG_VLAN;
630
631	/* Common */
632	req->vf_qid = fp_idx;
633	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
634
635	/* Rx */
636	req->rxq.rcq_addr = fp->rx_comp_mapping;
637	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
638	req->rxq.rxq_addr = fp->rx_desc_mapping;
639	req->rxq.sge_addr = fp->rx_sge_mapping;
640	req->rxq.vf_sb = fp_idx;
641	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
642	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
643	req->rxq.mtu = bp->dev->mtu;
644	req->rxq.buf_sz = fp->rx_buf_size;
645	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
646	req->rxq.tpa_agg_sz = tpa_agg_size;
647	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
648	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
649			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
650	req->rxq.flags = flags;
651	req->rxq.drop_flags = 0;
652	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
653	req->rxq.stat_id = -1; /* No stats at the moment */
654
655	/* Tx */
656	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
657	req->txq.vf_sb = fp_idx;
658	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
659	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
660	req->txq.flags = flags;
661	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
662
663	/* add list termination tlv */
664	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
665		      sizeof(struct channel_list_end_tlv));
666
667	/* output tlvs list */
668	bnx2x_dp_tlv_list(bp, req);
669
670	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
671	if (rc)
672		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
673			  fp_idx);
674
675	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
676		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
677			  fp_idx, resp->hdr.status);
678		rc = -EINVAL;
679	}
680
681	bnx2x_vfpf_finalize(bp, &req->first_tlv);
682
683	return rc;
684}
685
686static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
687{
688	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
689	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
690	int rc;
691
692	/* clear mailbox and prep first tlv */
693	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
694			sizeof(*req));
695
696	req->vf_qid = qidx;
697
698	/* add list termination tlv */
699	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
700		      sizeof(struct channel_list_end_tlv));
701
702	/* output tlvs list */
703	bnx2x_dp_tlv_list(bp, req);
704
705	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
706
707	if (rc) {
708		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
709			  rc);
710		goto out;
711	}
712
713	/* PF failed the transaction */
714	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
715		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
716			  resp->hdr.status);
717		rc = -EINVAL;
718	}
719
720out:
721	bnx2x_vfpf_finalize(bp, &req->first_tlv);
722
723	return rc;
724}
725
726/* request pf to add a mac for the vf */
727int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
728{
729	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
730	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
731	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
732	int rc = 0;
733
734	/* clear mailbox and prep first tlv */
735	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
736			sizeof(*req));
737
738	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
739	req->vf_qid = vf_qid;
740	req->n_mac_vlan_filters = 1;
741
742	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
743	if (set)
744		req->filters[0].flags |= VFPF_Q_FILTER_SET;
745
746	/* sample bulletin board for new mac */
747	bnx2x_sample_bulletin(bp);
748
749	/* copy mac from device to request */
750	memcpy(req->filters[0].mac, addr, ETH_ALEN);
751
752	/* add list termination tlv */
753	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
754		      sizeof(struct channel_list_end_tlv));
755
756	/* output tlvs list */
757	bnx2x_dp_tlv_list(bp, req);
758
759	/* send message to pf */
760	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
761	if (rc) {
762		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
763		goto out;
764	}
765
766	/* failure may mean PF was configured with a new mac for us */
767	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
768		DP(BNX2X_MSG_IOV,
769		   "vfpf SET MAC failed. Check bulletin board for new posts\n");
770
771		/* copy mac from bulletin to device */
772		eth_hw_addr_set(bp->dev, bulletin.mac);
773
774		/* check if bulletin board was updated */
775		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
776			/* copy mac from device to request */
777			memcpy(req->filters[0].mac, bp->dev->dev_addr,
778			       ETH_ALEN);
779
780			/* send message to pf */
781			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
782					       bp->vf2pf_mbox_mapping);
783		} else {
784			/* no new info in bulletin */
785			break;
786		}
787	}
788
789	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
790		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
791		rc = -EINVAL;
792	}
793out:
794	bnx2x_vfpf_finalize(bp, &req->first_tlv);
795
796	return rc;
797}
798
799/* request pf to config rss table for vf queues*/
800int bnx2x_vfpf_config_rss(struct bnx2x *bp,
801			  struct bnx2x_config_rss_params *params)
802{
803	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
804	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
805	int rc = 0;
806
807	/* clear mailbox and prep first tlv */
808	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
809			sizeof(*req));
810
811	/* add list termination tlv */
812	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
813		      sizeof(struct channel_list_end_tlv));
814
815	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
816	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
817	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
818	req->rss_key_size = T_ETH_RSS_KEY;
819	req->rss_result_mask = params->rss_result_mask;
820
821	/* flags handled individually for backward/forward compatibility */
822	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
823		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
824	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
825		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
826	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
827		req->rss_flags |= VFPF_RSS_SET_SRCH;
828	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
829		req->rss_flags |= VFPF_RSS_IPV4;
830	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
831		req->rss_flags |= VFPF_RSS_IPV4_TCP;
832	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
833		req->rss_flags |= VFPF_RSS_IPV4_UDP;
834	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
835		req->rss_flags |= VFPF_RSS_IPV6;
836	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
837		req->rss_flags |= VFPF_RSS_IPV6_TCP;
838	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
839		req->rss_flags |= VFPF_RSS_IPV6_UDP;
840
841	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
842
843	/* output tlvs list */
844	bnx2x_dp_tlv_list(bp, req);
845
846	/* send message to pf */
847	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
848	if (rc) {
849		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
850		goto out;
851	}
852
853	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
854		/* Since older drivers don't support this feature (and VF has
855		 * no way of knowing other than failing this), don't propagate
856		 * an error in this case.
857		 */
858		DP(BNX2X_MSG_IOV,
859		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
860		   resp->hdr.status);
861	}
862out:
863	bnx2x_vfpf_finalize(bp, &req->first_tlv);
864
865	return rc;
866}
867
868int bnx2x_vfpf_set_mcast(struct net_device *dev)
869{
870	struct bnx2x *bp = netdev_priv(dev);
871	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
872	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
873	int rc = 0, i = 0;
874	struct netdev_hw_addr *ha;
875
876	if (bp->state != BNX2X_STATE_OPEN) {
877		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
878		return -EINVAL;
879	}
880
881	/* clear mailbox and prep first tlv */
882	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
883			sizeof(*req));
884
885	/* Get Rx mode requested */
886	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
887
888	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
889	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
890		DP(NETIF_MSG_IFUP,
891		   "VF supports not more than %d multicast MAC addresses\n",
892		   PFVF_MAX_MULTICAST_PER_VF);
893		rc = -EINVAL;
894		goto out;
895	}
896
897	netdev_for_each_mc_addr(ha, dev) {
898		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
899		   bnx2x_mc_addr(ha));
900		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
901		i++;
902	}
903
904	req->n_multicast = i;
905	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
906	req->vf_qid = 0;
907
908	/* add list termination tlv */
909	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
910		      sizeof(struct channel_list_end_tlv));
911
912	/* output tlvs list */
913	bnx2x_dp_tlv_list(bp, req);
914	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
915	if (rc) {
916		BNX2X_ERR("Sending a message failed: %d\n", rc);
917		goto out;
918	}
919
920	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
921		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
922			  resp->hdr.status);
923		rc = -EINVAL;
924	}
925out:
926	bnx2x_vfpf_finalize(bp, &req->first_tlv);
927
928	return rc;
929}
930
931/* request pf to add a vlan for the vf */
932int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
933{
934	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
935	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
936	int rc = 0;
937
938	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
939		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
940		return 0;
941	}
942
943	/* clear mailbox and prep first tlv */
944	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
945			sizeof(*req));
946
947	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
948	req->vf_qid = vf_qid;
949	req->n_mac_vlan_filters = 1;
950
951	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
952
953	if (add)
954		req->filters[0].flags |= VFPF_Q_FILTER_SET;
955
956	/* sample bulletin board for hypervisor vlan */
957	bnx2x_sample_bulletin(bp);
958
959	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
960		BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
961		rc = -EINVAL;
962		goto out;
963	}
964
965	req->filters[0].vlan_tag = vid;
966
967	/* add list termination tlv */
968	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
969		      sizeof(struct channel_list_end_tlv));
970
971	/* output tlvs list */
972	bnx2x_dp_tlv_list(bp, req);
973
974	/* send message to pf */
975	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
976	if (rc) {
977		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
978		goto out;
979	}
980
981	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
982		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
983			  vid);
984		rc = -EINVAL;
985	}
986out:
987	bnx2x_vfpf_finalize(bp, &req->first_tlv);
988
989	return rc;
990}
991
992int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
993{
994	int mode = bp->rx_mode;
995	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
996	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
997	int rc;
998
999	/* clear mailbox and prep first tlv */
1000	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
1001			sizeof(*req));
1002
1003	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
1004
1005	/* Ignore everything accept MODE_NONE */
1006	if (mode  == BNX2X_RX_MODE_NONE) {
1007		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
1008	} else {
1009		/* Current PF driver will not look at the specific flags,
1010		 * but they are required when working with older drivers on hv.
1011		 */
1012		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
1013		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
1014		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
1015		if (mode == BNX2X_RX_MODE_PROMISC)
1016			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1017	}
1018
1019	if (bp->accept_any_vlan)
1020		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1021
1022	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
1023	req->vf_qid = 0;
1024
1025	/* add list termination tlv */
1026	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
1027		      sizeof(struct channel_list_end_tlv));
1028
1029	/* output tlvs list */
1030	bnx2x_dp_tlv_list(bp, req);
1031
1032	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
1033	if (rc)
1034		BNX2X_ERR("Sending a message failed: %d\n", rc);
1035
1036	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1037		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
1038		rc = -EINVAL;
1039	}
1040
1041	bnx2x_vfpf_finalize(bp, &req->first_tlv);
1042
1043	return rc;
1044}
1045
1046/* General service functions */
1047static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
1048{
1049	u32 addr = BAR_CSTRORM_INTMEM +
1050		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
1051
1052	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
1053}
1054
1055static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
1056{
1057	u32 addr = BAR_CSTRORM_INTMEM +
1058		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
1059
1060	REG_WR8(bp, addr, 1);
1061}
1062
1063/* enable vf_pf mailbox (aka vf-pf-channel) */
1064void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
1065{
1066	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
1067
1068	/* enable the mailbox in the FW */
1069	storm_memset_vf_mbx_ack(bp, abs_vfid);
1070	storm_memset_vf_mbx_valid(bp, abs_vfid);
1071
1072	/* enable the VF access to the mailbox */
1073	bnx2x_vf_enable_access(bp, abs_vfid);
1074}
1075
1076/* this works only on !E1h */
1077static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
1078				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
1079				u32 vf_addr_lo, u32 len32)
1080{
1081	struct dmae_command dmae;
1082
1083	if (CHIP_IS_E1x(bp)) {
1084		BNX2X_ERR("Chip revision does not support VFs\n");
1085		return DMAE_NOT_RDY;
1086	}
1087
1088	if (!bp->dmae_ready) {
1089		BNX2X_ERR("DMAE is not ready, can not copy\n");
1090		return DMAE_NOT_RDY;
1091	}
1092
1093	/* set opcode and fixed command fields */
1094	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
1095
1096	if (from_vf) {
1097		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
1098			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
1099			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
1100
1101		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
1102
1103		dmae.src_addr_lo = vf_addr_lo;
1104		dmae.src_addr_hi = vf_addr_hi;
1105		dmae.dst_addr_lo = U64_LO(pf_addr);
1106		dmae.dst_addr_hi = U64_HI(pf_addr);
1107	} else {
1108		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
1109			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
1110			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
1111
1112		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
1113
1114		dmae.src_addr_lo = U64_LO(pf_addr);
1115		dmae.src_addr_hi = U64_HI(pf_addr);
1116		dmae.dst_addr_lo = vf_addr_lo;
1117		dmae.dst_addr_hi = vf_addr_hi;
1118	}
1119	dmae.len = len32;
1120
1121	/* issue the command and wait for completion */
1122	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
1123}
1124
1125static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
1126					 struct bnx2x_virtf *vf)
1127{
1128	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1129	u16 length, type;
1130
1131	/* prepare response */
1132	type = mbx->first_tlv.tl.type;
1133	length = type == CHANNEL_TLV_ACQUIRE ?
1134		sizeof(struct pfvf_acquire_resp_tlv) :
1135		sizeof(struct pfvf_general_resp_tlv);
1136	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
1137	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1138		      sizeof(struct channel_list_end_tlv));
1139}
1140
1141static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1142				       struct bnx2x_virtf *vf,
1143				       int vf_rc)
1144{
1145	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1146	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
1147	dma_addr_t pf_addr;
1148	u64 vf_addr;
1149	int rc;
1150
1151	bnx2x_dp_tlv_list(bp, resp);
1152	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1153	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1154
1155	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
1156
1157	/* send response */
1158	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
1159		  mbx->first_tlv.resp_msg_offset;
1160	pf_addr = mbx->msg_mapping +
1161		  offsetof(struct bnx2x_vf_mbx_msg, resp);
1162
1163	/* Copy the response buffer. The first u64 is written afterwards, as
1164	 * the vf is sensitive to the header being written
1165	 */
1166	vf_addr += sizeof(u64);
1167	pf_addr += sizeof(u64);
1168	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1169				  U64_HI(vf_addr),
1170				  U64_LO(vf_addr),
1171				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
1172	if (rc) {
1173		BNX2X_ERR("Failed to copy response body to VF %d\n",
1174			  vf->abs_vfid);
1175		goto mbx_error;
1176	}
1177	vf_addr -= sizeof(u64);
1178	pf_addr -= sizeof(u64);
1179
1180	/* ack the FW */
1181	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1182
1183	/* copy the response header including status-done field,
1184	 * must be last dmae, must be after FW is acked
1185	 */
1186	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1187				  U64_HI(vf_addr),
1188				  U64_LO(vf_addr),
1189				  sizeof(u64)/4);
1190
1191	/* unlock channel mutex */
1192	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1193
1194	if (rc) {
1195		BNX2X_ERR("Failed to copy response status to VF %d\n",
1196			  vf->abs_vfid);
1197		goto mbx_error;
1198	}
1199	return;
1200
1201mbx_error:
1202	bnx2x_vf_release(bp, vf);
1203}
1204
1205static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
1206			      struct bnx2x_virtf *vf,
1207			      int rc)
1208{
1209	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
1210	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
1211}
1212
1213static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
1214					struct bnx2x_virtf *vf,
1215					void *buffer,
1216					u16 *offset)
1217{
1218	struct vfpf_port_phys_id_resp_tlv *port_id;
1219
1220	if (!(bp->flags & HAS_PHYS_PORT_ID))
1221		return;
1222
1223	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
1224		      sizeof(struct vfpf_port_phys_id_resp_tlv));
1225
1226	port_id = (struct vfpf_port_phys_id_resp_tlv *)
1227		  (((u8 *)buffer) + *offset);
1228	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
1229
1230	/* Offset should continue representing the offset to the tail
1231	 * of TLV data (outside this function scope)
1232	 */
1233	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
1234}
1235
1236static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
1237					 struct bnx2x_virtf *vf,
1238					 void *buffer,
1239					 u16 *offset)
1240{
1241	struct vfpf_fp_hsi_resp_tlv *fp_hsi;
1242
1243	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
1244		      sizeof(struct vfpf_fp_hsi_resp_tlv));
1245
1246	fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
1247		 (((u8 *)buffer) + *offset);
1248	fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
1249
1250	/* Offset should continue representing the offset to the tail
1251	 * of TLV data (outside this function scope)
1252	 */
1253	*offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
1254}
1255
1256static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1257				      struct bnx2x_vf_mbx *mbx, int vfop_status)
1258{
1259	int i;
1260	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
1261	struct pf_vf_resc *resc = &resp->resc;
1262	u8 status = bnx2x_pfvf_status_codes(vfop_status);
1263	u16 length;
1264
1265	memset(resp, 0, sizeof(*resp));
1266
1267	/* fill in pfdev info */
1268	resp->pfdev_info.chip_num = bp->common.chip_id;
1269	resp->pfdev_info.db_size = bp->db_size;
1270	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1271	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1272				   PFVF_CAP_TPA |
1273				   PFVF_CAP_TPA_UPDATE |
1274				   PFVF_CAP_VLAN_FILTER);
1275	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1276			  sizeof(resp->pfdev_info.fw_ver));
1277
1278	if (status == PFVF_STATUS_NO_RESOURCE ||
1279	    status == PFVF_STATUS_SUCCESS) {
1280		/* set resources numbers, if status equals NO_RESOURCE these
1281		 * are max possible numbers
1282		 */
1283		resc->num_rxqs = vf_rxq_count(vf) ? :
1284			bnx2x_vf_max_queue_cnt(bp, vf);
1285		resc->num_txqs = vf_txq_count(vf) ? :
1286			bnx2x_vf_max_queue_cnt(bp, vf);
1287		resc->num_sbs = vf_sb_count(vf);
1288		resc->num_mac_filters = vf_mac_rules_cnt(vf);
1289		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
1290		resc->num_mc_filters = 0;
1291
1292		if (status == PFVF_STATUS_SUCCESS) {
1293			/* fill in the allocated resources */
1294			struct pf_vf_bulletin_content *bulletin =
1295				BP_VF_BULLETIN(bp, vf->index);
1296
1297			for_each_vfq(vf, i)
1298				resc->hw_qid[i] =
1299					vfq_qzone_id(vf, vfq_get(vf, i));
1300
1301			for_each_vf_sb(vf, i) {
1302				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
1303				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
1304			}
1305
1306			/* if a mac has been set for this vf, supply it */
1307			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1308				memcpy(resc->current_mac_addr, bulletin->mac,
1309				       ETH_ALEN);
1310			}
1311		}
1312	}
1313
1314	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
1315	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
1316	   vf->abs_vfid,
1317	   resp->pfdev_info.chip_num,
1318	   resp->pfdev_info.db_size,
1319	   resp->pfdev_info.indices_per_sb,
1320	   resp->pfdev_info.pf_cap,
1321	   resc->num_rxqs,
1322	   resc->num_txqs,
1323	   resc->num_sbs,
1324	   resc->num_mac_filters,
1325	   resc->num_vlan_filters,
1326	   resc->num_mc_filters,
1327	   resp->pfdev_info.fw_ver);
1328
1329	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
1330	for (i = 0; i < vf_rxq_count(vf); i++)
1331		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
1332	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
1333	for (i = 0; i < vf_sb_count(vf); i++)
1334		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
1335			resc->hw_sbs[i].hw_sb_id,
1336			resc->hw_sbs[i].sb_qid);
1337	DP_CONT(BNX2X_MSG_IOV, "]\n");
1338
1339	/* prepare response */
1340	length = sizeof(struct pfvf_acquire_resp_tlv);
1341	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
1342
1343	/* Handle possible VF requests for physical port identifiers.
1344	 * 'length' should continue to indicate the offset of the first empty
1345	 * place in the buffer (i.e., where next TLV should be inserted)
1346	 */
1347	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1348				  CHANNEL_TLV_PHYS_PORT_ID))
1349		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
1350
1351	/* `New' vfs will want to know if fastpath HSI is supported, since
1352	 * if that's not the case they could print into system log the fact
1353	 * the driver version must be updated.
1354	 */
1355	bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
1356
1357	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1358		      sizeof(struct channel_list_end_tlv));
1359
1360	/* send the response */
1361	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
1362}
1363
1364static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
1365				       struct vfpf_acquire_tlv *acquire)
1366{
1367	/* Windows driver does one of three things:
1368	 * 1. Old driver doesn't have bulletin board address set.
1369	 * 2. 'Middle' driver sends mc_num == 32.
1370	 * 3. New driver sets the OS field.
1371	 */
1372	if (!acquire->bulletin_addr ||
1373	    acquire->resc_request.num_mc_filters == 32 ||
1374	    ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
1375	     VF_OS_WINDOWS))
1376		return true;
1377
1378	return false;
1379}
1380
1381static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
1382					 struct bnx2x_virtf *vf,
1383					 struct bnx2x_vf_mbx *mbx)
1384{
1385	/* Linux drivers which correctly set the doorbell size also
1386	 * send a physical port request
1387	 */
1388	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1389				  CHANNEL_TLV_PHYS_PORT_ID))
1390		return 0;
1391
1392	/* Issue does not exist in windows VMs */
1393	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1394		return 0;
1395
1396	return -EOPNOTSUPP;
1397}
1398
1399static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1400				 struct bnx2x_vf_mbx *mbx)
1401{
1402	int rc;
1403	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
1404
1405	/* log vfdef info */
1406	DP(BNX2X_MSG_IOV,
1407	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
1408	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
1409	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
1410	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
1411	   acquire->resc_request.num_vlan_filters,
1412	   acquire->resc_request.num_mc_filters);
1413
1414	/* Prevent VFs with old drivers from loading, since they calculate
1415	 * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
1416	 * while being upgraded.
1417	 */
1418	rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
1419	if (rc) {
1420		DP(BNX2X_MSG_IOV,
1421		   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
1422		   vf->abs_vfid);
1423		goto out;
1424	}
1425
1426	/* Verify the VF fastpath HSI can be supported by the loaded FW.
1427	 * Linux vfs should be oblivious to changes between v0 and v2.
1428	 */
1429	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1430		vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
1431	else
1432		vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
1433				   ETH_FP_HSI_VER_2);
1434	if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
1435		DP(BNX2X_MSG_IOV,
1436		   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
1437		   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
1438		   ETH_FP_HSI_VERSION);
1439		rc = -EINVAL;
1440		goto out;
1441	}
1442
1443	/* acquire the resources */
1444	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
1445
1446	/* store address of vf's bulletin board */
1447	vf->bulletin_map = acquire->bulletin_addr;
1448	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
1449		DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
1450		   vf->abs_vfid);
1451		vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
1452	} else {
1453		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
1454	}
1455
1456	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
1457		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
1458		   vf->abs_vfid);
1459		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
1460	} else {
1461		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
1462	}
1463
1464out:
1465	/* response */
1466	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
1467}
1468
1469static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1470			      struct bnx2x_vf_mbx *mbx)
1471{
1472	struct vfpf_init_tlv *init = &mbx->msg->req.init;
1473	int rc;
1474
1475	/* record ghost addresses from vf message */
1476	vf->fw_stat_map = init->stats_addr;
1477	vf->stats_stride = init->stats_stride;
1478	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1479
1480	/* set VF multiqueue statistics collection mode */
1481	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1482		vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1483
1484	/* Update VF's view of link state */
1485	if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
1486		bnx2x_iov_link_update_vf(bp, vf->index);
1487
1488	/* response */
1489	bnx2x_vf_mbx_resp(bp, vf, rc);
1490}
1491
1492/* convert MBX queue-flags to standard SP queue-flags */
1493static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1494				     unsigned long *sp_q_flags)
1495{
1496	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
1497		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
1498	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
1499		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
1500	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
1501		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1502	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1503		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1504	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1505		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1506	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
1507		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
1508	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
1509		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1510	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1511		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1512	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1513		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1514
1515	/* outer vlan removal is set according to PF's multi function mode */
1516	if (IS_MF_SD(bp))
1517		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1518}
1519
1520static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1521				 struct bnx2x_vf_mbx *mbx)
1522{
1523	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
1524	struct bnx2x_vf_queue_construct_params qctor;
1525	int rc = 0;
1526
1527	/* verify vf_qid */
1528	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
1529		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
1530			  setup_q->vf_qid, vf_rxq_count(vf));
1531		rc = -EINVAL;
1532		goto response;
1533	}
1534
1535	/* tx queues must be setup alongside rx queues thus if the rx queue
1536	 * is not marked as valid there's nothing to do.
1537	 */
1538	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
1539		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
1540		unsigned long q_type = 0;
1541
1542		struct bnx2x_queue_init_params *init_p;
1543		struct bnx2x_queue_setup_params *setup_p;
1544
1545		if (bnx2x_vfq_is_leading(q))
1546			bnx2x_leading_vfq_init(bp, vf, q);
1547
1548		/* re-init the VF operation context */
1549		memset(&qctor, 0 ,
1550		       sizeof(struct bnx2x_vf_queue_construct_params));
1551		setup_p = &qctor.prep_qsetup;
1552		init_p =  &qctor.qstate.params.init;
1553
1554		/* activate immediately */
1555		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
1556
1557		if (setup_q->param_valid & VFPF_TXQ_VALID) {
1558			struct bnx2x_txq_setup_params *txq_params =
1559				&setup_p->txq_params;
1560
1561			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1562
1563			/* save sb resource index */
1564			q->sb_idx = setup_q->txq.vf_sb;
1565
1566			/* tx init */
1567			init_p->tx.hc_rate = setup_q->txq.hc_rate;
1568			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
1569
1570			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1571						 &init_p->tx.flags);
1572
1573			/* tx setup - flags */
1574			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1575						 &setup_p->flags);
1576
1577			/* tx setup - general, nothing */
1578
1579			/* tx setup - tx */
1580			txq_params->dscr_map = setup_q->txq.txq_addr;
1581			txq_params->sb_cq_index = setup_q->txq.sb_index;
1582			txq_params->traffic_type = setup_q->txq.traffic_type;
1583
1584			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
1585						 q->index, q->sb_idx);
1586		}
1587
1588		if (setup_q->param_valid & VFPF_RXQ_VALID) {
1589			struct bnx2x_rxq_setup_params *rxq_params =
1590							&setup_p->rxq_params;
1591
1592			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1593
1594			/* Note: there is no support for different SBs
1595			 * for TX and RX
1596			 */
1597			q->sb_idx = setup_q->rxq.vf_sb;
1598
1599			/* rx init */
1600			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
1601			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
1602			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1603						 &init_p->rx.flags);
1604
1605			/* rx setup - flags */
1606			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1607						 &setup_p->flags);
1608
1609			/* rx setup - general */
1610			setup_p->gen_params.mtu = setup_q->rxq.mtu;
1611
1612			/* rx setup - rx */
1613			rxq_params->drop_flags = setup_q->rxq.drop_flags;
1614			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
1615			rxq_params->sge_map = setup_q->rxq.sge_addr;
1616			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
1617			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
1618			rxq_params->buf_sz = setup_q->rxq.buf_sz;
1619			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
1620			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
1621			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
1622			rxq_params->cache_line_log =
1623				setup_q->rxq.cache_line_log;
1624			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
1625
1626			/* rx setup - multicast engine */
1627			if (bnx2x_vfq_is_leading(q)) {
1628				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
1629
1630				rxq_params->mcast_engine_id = mcast_id;
1631				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
1632			}
1633
1634			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
1635						 q->index, q->sb_idx);
1636		}
1637		/* complete the preparations */
1638		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
1639
1640		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
1641		if (rc)
1642			goto response;
1643	}
1644response:
1645	bnx2x_vf_mbx_resp(bp, vf, rc);
1646}
1647
1648static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1649				     struct bnx2x_virtf *vf,
1650				     struct vfpf_set_q_filters_tlv *tlv,
1651				     struct bnx2x_vf_mac_vlan_filters **pfl,
1652				     u32 type_flag)
1653{
1654	int i, j;
1655	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1656
1657	fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
1658		     GFP_KERNEL);
1659	if (!fl)
1660		return -ENOMEM;
1661
1662	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
1663		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
1664
1665		if ((msg_filter->flags & type_flag) != type_flag)
1666			continue;
1667		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
1668		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
1669			fl->filters[j].mac = msg_filter->mac;
1670			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
1671		}
1672		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
1673			fl->filters[j].vid = msg_filter->vlan_tag;
1674			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
1675		}
1676		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
1677		fl->count++;
1678		j++;
1679	}
1680	if (!fl->count)
1681		kfree(fl);
1682	else
1683		*pfl = fl;
1684
1685	return 0;
1686}
1687
1688static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
1689				    u32 flags)
1690{
1691	int i, cnt = 0;
1692
1693	for (i = 0; i < filters->n_mac_vlan_filters; i++)
1694		if  ((filters->filters[i].flags & flags) == flags)
1695			cnt++;
1696
1697	return cnt;
1698}
1699
1700static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
1701				       struct vfpf_q_mac_vlan_filter *filter)
1702{
1703	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
1704	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
1705		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
1706	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
1707		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
1708	DP_CONT(msglvl, "\n");
1709}
1710
1711static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1712				       struct vfpf_set_q_filters_tlv *filters)
1713{
1714	int i;
1715
1716	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
1717		for (i = 0; i < filters->n_mac_vlan_filters; i++)
1718			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
1719						 &filters->filters[i]);
1720
1721	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
1722		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
1723
1724	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
1725		for (i = 0; i < filters->n_multicast; i++)
1726			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
1727}
1728
1729#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
1730#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
1731#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
1732
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		/* build vlan-mac list */
		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan-mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build mac list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
					BP_VF_BULLETIN(bp, vf->index);

		/* Ignore the VF's requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* any_vlan is not configured if the HV is forcing a VLAN;
		 * any_vlan is configured if
		 *   1. the VF does not support vlan filtering
		 *   OR
		 *   2. the VF supports vlan filtering and explicitly requested it
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}

static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* If a MAC was already set for this VF via the set-vf-mac ndo, we only
	 * accept configurations of that MAC. Why accept them at all? Because
	 * the PF may have been unable to configure the MAC at the time, since
	 * the queue was not yet set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		struct vfpf_q_mac_vlan_filter *filter = NULL;
		int i;

		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (!(filters->filters[i].flags &
			      VFPF_Q_FILTER_DEST_MAC_VALID))
				continue;

			/* once a MAC was set by the ndo, we can only accept
			 * a single MAC...
			 */
			if (filter) {
				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
					  vf->abs_vfid,
					  filters->n_mac_vlan_filters);
				rc = -EPERM;
				goto response;
			}

			filter = &filters->filters[i];
		}

		/* ...and only the MAC set by the ndo */
		if (filter &&
		    !ether_addr_equal(filter->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);

			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}

static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a vlan was set by the hypervisor, the guest is not allowed to
	 * configure vlans
	 */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		/* search for vlan filters */
		if (bnx2x_vf_filters_contain(filters,
					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

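/* Validate the RSS TLV (indirection table and key sizes must match what
 * the PF expects) and translate it into ramrod parameters. Flags are
 * translated bit by bit to keep backward/forward compatibility with VF
 * drivers that know a different flag set.
 */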
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

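	/* The firmware asserts if UDP RSS is requested without the matching
	 * TCP RSS flag, so reject such a configuration up front.
	 */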
	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

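/* Bounds-check the TPA parameters supplied by the VF against the
 * hardware/driver limits before they are applied.
 */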
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}

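/* Copy the TPA client parameters from the VF's TLV into the queue-update
 * ramrod parameters and execute the update, after validating the request.
 */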
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc = 0;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	if (bnx2x_validate_tpa_params(bp, tpa_tlv))
		goto mbx_resp;

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	if (vf->state == VF_LOST) {
		/* Just ack the FW and return if VFs were lost due to a parity
		 * error. The VF is expected to time out waiting for the PF
		 * response.
		 */
		DP(BNX2X_MSG_IOV,
		   "VF 0x%x lost, not handling the request\n", vf->abs_vfid);

		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		return;
	}

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}

	} else {
		/* unknown TLV - this may belong to a VF driver from the
		 * future, one written after this PF driver and supporting
		 * features we don't know of yet, which we cannot serve.
		 * Or it may be a buggy VF driver sending garbage over the
		 * channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us;
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}

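/* Note the arrival of a VF-PF message: record the VF's mailbox address
 * from the event under the event mutex and defer the actual handling to
 * the IOV task.
 */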
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks; consider removing later */

	/* check that the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
		le32_to_cpu(vfpf_event->msg_addr_hi);
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
		le32_to_cpu(vfpf_event->msg_addr_lo);
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;
	int rc;

	if (!vfdb)
		return;

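	/* Snapshot the pending-event bitmap and clear it under the mutex so
	 * events arriving while we process are kept for the next run.
	 */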
	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs)/4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clear the response buffer so stale TLVs are not mistaken
		 * for a chained response.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}

void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
				bool support_long)
{
	/* Older VFs contain a bug where they can't check the CRC of bulletin
	 * boards longer than the legacy size.
	 */
	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
					  BULLETIN_CONTENT_LEGACY_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}

/* propagate the local bulletin board to the vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
		vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* we can only update the vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment the bulletin board version and compute the crc */
	bulletin->version++;
	bnx2x_vf_bulletin_finalize(bulletin,
				   (bnx2x_vf(bp, vf, cfg_flags) &
				    VF_CFG_EXT_BULLETIN) ? true : false);

	/* propagate the bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}