1/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 * Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2021 Google, Inc.
5 */
6
7#ifndef _GVE_ADMINQ_H
8#define _GVE_ADMINQ_H
9
10#include <linux/build_bug.h>
11
/* Admin queue opcodes
 *
 * Written to the 'opcode' field of union gve_adminq_command. These values
 * are device ABI; do not renumber. 0xA is unassigned here -- presumably
 * reserved by the device spec (TODO: confirm).
 */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE		= 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES	= 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST		= 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST		= 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE		= 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE		= 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE		= 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE		= 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES	= 0x9,
	GVE_ADMINQ_SET_DRIVER_PARAMETER		= 0xB,
	GVE_ADMINQ_REPORT_STATS			= 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED		= 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP		= 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY	= 0xF,
};
29
/* Admin queue status codes
 *
 * Returned by the device in the 'status' field of union gve_adminq_command.
 * 0 means the command has not been executed yet, 1 is success, and the
 * 0xFFFFFFFx values are errors (the set resembles canonical RPC error
 * codes -- TODO: confirm against the device spec).
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET			= 0x0,
	GVE_ADMINQ_COMMAND_PASSED			= 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED		= 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS		= 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED		= 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS		= 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED	= 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION	= 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR		= 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT	= 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND		= 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE		= 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED	= 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED	= 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED	= 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE		= 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED		= 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR		= 0xFFFFFFFF,
};
51
/* Descriptor format version the driver requests in
 * device_descriptor_version below.
 */
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* GVE_ADMINQ_DESCRIBE_DEVICE command: the device fills the DMA buffer at
 * device_descriptor_addr with a struct gve_device_descriptor (plus trailing
 * device options) -- presumably truncated to available_length bytes
 * (TODO: confirm).
 */
struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;	/* DMA address of the output buffer */
	__be32 device_descriptor_version;
	__be32 available_length;	/* output buffer size in bytes */
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
65
/* Descriptor the device writes in response to GVE_ADMINQ_DESCRIBE_DEVICE.
 * All multi-byte fields are big-endian.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;	/* limit on registered QPL pages -- TODO confirm */
	__be16 reserved1;
	__be16 tx_queue_entries;	/* TX ring size -- presumably the default */
	__be16 rx_queue_entries;	/* RX ring size -- presumably the default */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;		/* number of event counters -- TODO confirm */
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8  mac[ETH_ALEN];		/* device MAC address */
	__be16 num_device_options;	/* count of trailing gve_device_option entries */
	__be16 total_length;		/* descriptor + options length in bytes -- TODO confirm */
	u8  reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);
83
/* Header of one device option entry; option_length bytes of option-specific
 * payload (one of the structs below) follow inline.
 */
struct gve_device_option {
	__be16 option_id;		/* enum gve_dev_opt_id */
	__be16 option_length;		/* payload length in bytes */
	__be32 required_features_mask;	/* checked against enum gve_dev_opt_req_feat_mask */
};

static_assert(sizeof(struct gve_device_option) == 8);
91
/* Payload for GVE_DEV_OPT_ID_GQI_RDA. */
struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);

/* Payload for GVE_DEV_OPT_ID_GQI_QPL. */
struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);

/* Payload for GVE_DEV_OPT_ID_DQO_RDA. */
struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be16 tx_comp_ring_entries;	/* TX completion ring size */
	__be16 rx_buff_ring_entries;	/* RX buffer ring size */
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);

/* Payload for GVE_DEV_OPT_ID_DQO_QPL. */
struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);

/* Payload for GVE_DEV_OPT_ID_JUMBO_FRAMES. */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;		/* largest MTU supported with this option */
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
127
/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 *       mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
 *       the device for read/write and data is copied from/to SKBs.
 */

/* Option IDs appearing in gve_device_option.option_id. Device ABI; do not
 * renumber. 0x5 and 0x6 are unassigned here -- presumably reserved
 * (TODO: confirm).
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
	GVE_DEV_OPT_ID_DQO_QPL = 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
144
/* Expected values of gve_device_option.required_features_mask, per option.
 * All currently zero: no option requires any driver feature.
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
};
153
/* Bits the driver may test in an option's supported_features_mask. */
enum gve_sup_feature_mask {
	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
};

/* The GQI raw-addressing option carries no payload. */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

/* Size of the os_version_str buffers in struct gve_driver_info. */
#define GVE_VERSION_STR_LEN 128
161
/* Bit positions within gve_driver_info.driver_capability_flags[].
 * NOTE(review): enum tag is misspelled ("capbility"); left unchanged since
 * other files may reference it by this name.
 */
enum gve_driver_capbility {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
};
169
/* Map a capability bit index into the corresponding 64-bit word of
 * gve_driver_info.driver_capability_flags[]: CAP1 covers bits 0-63,
 * CAP2 bits 64-127, CAP3 bits 128-191, CAP4 bits 192-255.
 *
 * Fix: the macro argument is now fully parenthesized ("(int)(a)" instead of
 * "(int)a") so that expression arguments expand correctly -- with the old
 * form the cast bound only to the first operand of the argument.
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)

/* Capability words advertised to the device via struct gve_driver_info. */
#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl))

/* No capabilities defined yet in words 2-4. */
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
184
/* Driver identity blob DMA'd to the device for the
 * GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY command.
 */
struct gve_driver_info {
	u8 os_type;	/* 0x01 = Linux */
	u8 driver_major;
	u8 driver_minor;
	u8 driver_sub;
	__be32 os_version_major;
	__be32 os_version_minor;
	__be32 os_version_sub;
	__be64 driver_capability_flags[4];	/* GVE_DRIVER_CAPABILITY_FLAGS1..4 */
	u8 os_version_str1[GVE_VERSION_STR_LEN];
	u8 os_version_str2[GVE_VERSION_STR_LEN];
};
197
/* GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY command: points the device at a
 * DMA'd struct gve_driver_info of driver_info_len bytes.
 */
struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
204
/* GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES command: hands the device the DMA
 * locations of the counter array and the interrupt doorbells.
 */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;		/* DMA address of the event counter array */
	__be64 irq_db_addr;		/* DMA address of the IRQ doorbell array */
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;		/* spacing between doorbells, in bytes -- TODO confirm */
	__be32 ntfy_blk_msix_base_idx;	/* first MSI-X vector used for notify blocks -- TODO confirm */
	u8 queue_format;
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
217
/* GVE_ADMINQ_REGISTER_PAGE_LIST command: registers a queue page list whose
 * per-page DMA addresses sit in an array at page_address_list_addr.
 */
struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;	/* DMA address of the page-address array */
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
225
/* GVE_ADMINQ_UNREGISTER_PAGE_LIST command. */
struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

/* Sentinel queue_page_list_id meaning raw DMA addressing (no QPL). */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
233
/* GVE_ADMINQ_CREATE_TX_QUEUE command. */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* DMA address of struct gve_queue_resources */
	__be64 tx_ring_addr;		/* DMA address of the TX descriptor ring */
	__be32 queue_page_list_id;	/* QPL id, or GVE_RAW_ADDRESSING_QPL_ID */
	__be32 ntfy_id;			/* notification block index -- TODO confirm */
	__be64 tx_comp_ring_addr;	/* completion ring -- presumably DQO-only; TODO confirm */
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
248
/* GVE_ADMINQ_CREATE_RX_QUEUE command. */
struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;			/* notification block index -- TODO confirm */
	__be64 queue_resources_addr;	/* DMA address of struct gve_queue_resources */
	__be64 rx_desc_ring_addr;	/* DMA address of the RX descriptor ring */
	__be64 rx_data_ring_addr;	/* DMA address of the RX data ring */
	__be32 queue_page_list_id;	/* QPL id, or GVE_RAW_ADDRESSING_QPL_ID */
	__be16 rx_ring_size;
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;	/* buffer ring -- presumably DQO-only; TODO confirm */
	u8 enable_rsc;			/* nonzero enables receive coalescing -- TODO confirm */
	u8 padding[5];
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
266
/* Queue resources that are shared with the device. The union pads the
 * structure to a fixed 64 bytes regardless of how many fields are defined.
 */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);
279
/* GVE_ADMINQ_DESTROY_TX_QUEUE command. */
struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);

/* GVE_ADMINQ_DESTROY_RX_QUEUE command. */
struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
291
/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU	= 0x1,
};

/* GVE_ADMINQ_SET_DRIVER_PARAMETER command: sets one parameter
 * (enum gve_set_driver_param_types) to parameter_value.
 */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
304
/* GVE_ADMINQ_REPORT_STATS command: points the device at a DMA buffer for a
 * struct gve_stats_report of stats_report_len bytes.
 */
struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;	/* DMA address of the stats report buffer */
	__be64 interval;		/* reporting interval -- units not shown here; TODO confirm */
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);

/* GVE_ADMINQ_REPORT_LINK_SPEED command: device writes the link speed to the
 * DMA address given.
 */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
318
/* One statistic entry within a stats report. */
struct stats {
	__be32 stat_name;	/* enum gve_stat_names */
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

/* Stats report buffer shared with the device: a count followed by a
 * flexible array of entries.
 */
struct gve_stats_report {
	__be64 written_count;	/* number of valid entries in stats[] -- TODO confirm */
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
333
/* Values of stats.stat_name. Device ABI; do not renumber. Driver-side
 * stats start at 1, NIC-side stats at 65.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT			= 1,
	TX_STOP_CNT			= 2,
	TX_FRAMES_SENT			= 3,
	TX_BYTES_SENT			= 4,
	TX_LAST_COMPLETION_PROCESSED	= 5,
	RX_NEXT_EXPECTED_SEQUENCE	= 6,
	RX_BUFFERS_POSTED		= 7,
	TX_TIMEOUT_CNT			= 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT		= 65,
	RX_NO_BUFFERS_POSTED		= 66,
	RX_DROPS_PACKET_OVER_MRU	= 67,
	RX_DROPS_INVALID_CHECKSUM	= 68,
};
350
/* L3 protocol classification used in struct gve_ptype_entry. */
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

/* L4 protocol classification used in struct gve_ptype_entry. */
enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};
368
369/* These are control path types for PTYPE which are the same as the data path
370 * types.
371 */
372struct gve_ptype_entry {
373	u8 l3_type;
374	u8 l4_type;
375};
376
377struct gve_ptype_map {
378	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
379};
380
381struct gve_adminq_get_ptype_map {
382	__be64 ptype_map_len;
383	__be64 ptype_map_addr;
384};
385
/* One admin queue command slot: a 4-byte opcode (enum gve_adminq_opcodes),
 * a 4-byte status (enum gve_adminq_statuses) written back by the device,
 * and an opcode-specific payload, padded to a fixed 64 bytes.
 */
union gve_adminq_command {
	struct {
		__be32 opcode;
		__be32 status;
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
		};
	};
	u8 reserved[64];
};

static_assert(sizeof(union gve_adminq_command) == 64);
412
/* AdminQ entry points implemented in the corresponding .c file. Unless
 * noted otherwise they return 0 on success or a negative errno -- TODO
 * confirm against the implementation.
 */

/* Allocate/free/release the admin queue itself. */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
/* Issue GVE_ADMINQ_DESCRIBE_DEVICE and parse the result. */
int gve_adminq_describe_device(struct gve_priv *priv);
/* Issue GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES / its deconfigure pair. */
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
/* Create/destroy ranges of queues (note the RX variants take a single
 * count/id rather than a start_id -- see implementation).
 */
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
/* Register/unregister queue page lists. */
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
/* Issue GVE_ADMINQ_SET_DRIVER_PARAMETER with GVE_SET_PARAM_MTU. */
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
/* Issue GVE_ADMINQ_REPORT_STATS. */
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
/* Issue GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY. */
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
/* Issue GVE_ADMINQ_REPORT_LINK_SPEED. */
int gve_adminq_report_link_speed(struct gve_priv *priv);

struct gve_ptype_lut;
/* Issue GVE_ADMINQ_GET_PTYPE_MAP and fill *ptype_lut (DQO only). */
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
441
442#endif /* _GVE_ADMINQ_H */
443