/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA) definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
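
/* Illustrative sketch (not part of the upstream header): the power-of-2
 * table size is what lets an RSS packet hash be folded into a table slot
 * with a single AND instead of a modulo, roughly:
 *
 *	u32 slot = pkt_hash & MANA_INDIRECT_TABLE_MASK;
 *	u32 rxq_idx = apc->indir_table[slot];
 */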

/* The Toeplitz hash key's length in bytes: it should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* DMA mappings of skb data and frags */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
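
/* Illustrative sketch (an assumption about the driver, not part of this
 * header): MANA_HEADROOM is the extra headroom the netdev requests so that
 * a struct mana_skb_head can be stashed at skb->head to remember the DMA
 * mappings until the TX CQE arrives, roughly:
 *
 *	ndev->needed_headroom = MANA_HEADROOM;
 *	...
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, len,
 *					    DMA_TO_DEVICE);
 *	ash->size[0] = len;
 */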

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
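
/* Illustrative sketch (not part of the upstream header): the short OOB is
 * always present; the long format is used when the 8-bit short_vp_offset
 * cannot encode the vPort offset (see MANA_SHORT_VPORT_OFFSET_MAX below)
 * or when VLAN/encapsulation metadata must be carried, roughly:
 *
 *	struct mana_tx_oob oob = {};
 *
 *	if (apc->tx_shortform_allowed && !skb_vlan_tag_present(skb)) {
 *		oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *		oob.s_oob.short_vp_offset = apc->tx_vp_offset;
 *	} else {
 *		oob.s_oob.pkt_fmt = MANA_LONG_PKT_FMT;
 *		oob.l_oob.long_vp_offset = apc->tx_vp_offset;
 *	}
 */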

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
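
/* Illustrative sketch (not part of the upstream header): on RX, the 9-bit
 * rx_hashtype in the completion OOB below tells the stack how strong the
 * reported hash is, roughly:
 *
 *	if (cqe->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, cqe->ppi[0].pkt_hash, PKT_HASH_TYPE_L4);
 *	else if (cqe->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, cqe->ppi[0].pkt_hash, PKT_HASH_TYPE_L3);
 */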

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
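
/* Illustrative sketch (not part of the upstream header): the csum
 * succeed/fail bits let the driver decide whether to vouch for the packet's
 * checksum, roughly:
 *
 *	if (cqe->rx_iphdr_csum_succeed &&
 *	    (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */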

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
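
/* Illustrative arithmetic (an assumption, not part of the upstream header):
 * for XDP, one page must hold headroom + Ethernet frame + skb_shared_info,
 * which is where MANA_XDP_MTU_MAX comes from. The driver sizes RX buffers
 * along these lines:
 *
 *	if (mtu > MANA_XDP_MTU_MAX)
 *		headroom = 0;
 *	else
 *		headroom = XDP_PACKET_HEADROOM;
 *
 *	datasize   = mtu + ETH_HLEN;
 *	alloc_size = ALIGN(mtu + MANA_RXBUF_PAD + headroom,
 *			   MANA_RX_DATA_ALIGN);
 */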

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);
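
/* Illustrative usage (an assumption, not part of this header): re-plumb RSS
 * after updating the hash key and/or the indirection table, e.g.:
 *
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 *	if (err)
 *		netdev_err(ndev, "Failed to configure RSS: %d\n", err);
 */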

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ  0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */
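
/* Illustrative sketch (an assumption based on the GDMA helpers declared in
 * gdma.h, not part of this header): request/response pairs like the one
 * above travel over the HW channel, roughly:
 *
 *	struct mana_query_device_cfg_req req = {};
 *	struct mana_query_device_cfg_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 *	req.proto_major_ver = MANA_MAJOR_VERSION;
 *	req.proto_minor_ver = MANA_MINOR_VERSION;
 *	req.proto_micro_ver = MANA_MICRO_VERSION;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 */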

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
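
/* Illustrative sketch (an assumption, not part of this header): fencing an
 * RQ flushes its in-flight RX work; the hardware signals completion with a
 * CQE_RX_OBJECT_FENCE CQE, which the driver turns into rxq->fence_event:
 *
 *	struct mana_fence_rq_req req = {};
 *	struct mana_fence_rq_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
 *			     sizeof(req), sizeof(resp));
 *	req.wq_obj_handle = rxq->rxobj;
 *
 *	init_completion(&rxq->fence_event);
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 *	if (!err)
 *		wait_for_completion_timeout(&rxq->fence_event, 10 * HZ);
 */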

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 discard_rx_nowqe;
	u64 err_rx_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 err_tx_gf_disabled;
	u64 err_tx_vport_disabled;
	u64 err_tx_inval_vport_offset_pkt;
	u64 err_tx_vlan_enforcement;
	u64 err_tx_ethtype_enforcement;
	u64 err_tx_SA_enforecement;
	u64 err_tx_SQPDID_enforcement;
	u64 err_tx_CQPDID_enforcement;
	u64 err_tx_mtu_violation;
	u64 err_tx_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 err_tx_gdma;
}; /* HW DATA */

/* Configure vPort RX Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
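
/* Illustrative sketch (an assumption, not part of this header): the
 * indirection table of mana_handle_t entries is not a member of the v2
 * request; it is appended after the struct, at indir_tab_offset bytes from
 * the start of the request, roughly:
 *
 *	req_buf_size = sizeof(*req) +
 *		       sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 *
 *	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 *	req->indir_tab_offset = sizeof(*req);
 *	req_indir_tab = (mana_handle_t *)(req + 1);
 *	memcpy(req_indir_tab, apc->rxobj_table,
 *	       sizeof(*req_indir_tab) * MANA_INDIRECT_TABLE_SIZE);
 */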

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* RX discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* RX bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* TX errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* TX bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* TX error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
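
/* Illustrative usage (an assumption, not part of this header): req_stats in
 * mana_query_gf_stat_req is a bitmask of the flags above selecting which
 * counters the hardware should report, e.g. only RX byte/packet counters:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
 *			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
 *			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS;
 */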

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */