1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/export.h>
7#include <linux/err.h>
8#include <linux/device.h>
9#include <linux/pci.h>
10#include <linux/interrupt.h>
11#include <linux/wait.h>
12#include <linux/types.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/log2.h>
16#include <linux/string.h>
17
18#include "pci_hw.h"
19#include "pci.h"
20#include "core.h"
21#include "cmd.h"
22#include "port.h"
23#include "resources.h"
24
25#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
26	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
27#define mlxsw_pci_read32(mlxsw_pci, reg) \
28	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
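
/* Register access helpers. The register name is token-pasted onto the
 * MLXSW_PCI_ prefix, so e.g. mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, val)
 * expands to iowrite32be(val, (mlxsw_pci)->hw_addr + MLXSW_PCI_CIR_CTRL).
 * Device registers are big-endian, hence the *be accessors.
 */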
29
30enum mlxsw_pci_queue_type {
31	MLXSW_PCI_QUEUE_TYPE_SDQ,
32	MLXSW_PCI_QUEUE_TYPE_RDQ,
33	MLXSW_PCI_QUEUE_TYPE_CQ,
34	MLXSW_PCI_QUEUE_TYPE_EQ,
35};
36
37#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
38
39static const u16 mlxsw_pci_doorbell_type_offset[] = {
40	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
41	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
42	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
43	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
44};
45
46static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
47	0, /* unused */
48	0, /* unused */
49	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
50	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
51};
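
/* Only CQs and EQs have "arm" doorbells; ringing one re-enables event
 * reporting for that queue after it has been serviced. SDQs and RDQs are
 * driven through the regular producer doorbells alone.
 */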
52
53struct mlxsw_pci_mem_item {
54	char *buf;
55	dma_addr_t mapaddr;
56	size_t size;
57};
58
59struct mlxsw_pci_queue_elem_info {
60	char *elem; /* pointer to actual dma mapped element mem chunk */
61	union {
62		struct {
63			struct sk_buff *skb;
64		} sdq;
65		struct {
66			struct sk_buff *skb;
67		} rdq;
68	} u;
69};
70
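/* One instance of this structure describes any of the four queue types
 * (SDQ, RDQ, CQ, EQ); the type-specific state lives in the union at the end.
 */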
71struct mlxsw_pci_queue {
72	spinlock_t lock; /* for queue accesses */
73	struct mlxsw_pci_mem_item mem_item;
74	struct mlxsw_pci_queue_elem_info *elem_info;
75	u16 producer_counter;
76	u16 consumer_counter;
77	u16 count; /* number of elements in queue */
78	u8 num; /* queue number */
79	u8 elem_size; /* size of one element */
80	enum mlxsw_pci_queue_type type;
81	struct tasklet_struct tasklet; /* queue processing tasklet */
82	struct mlxsw_pci *pci;
83	union {
84		struct {
85			u32 comp_sdq_count;
86			u32 comp_rdq_count;
87			enum mlxsw_pci_cqe_v v;
88		} cq;
89		struct {
90			u32 ev_cmd_count;
91			u32 ev_comp_count;
92			u32 ev_other_count;
93		} eq;
94	} u;
95};
96
97struct mlxsw_pci_queue_type_group {
98	struct mlxsw_pci_queue *q;
99	u8 count; /* number of queues in group */
100};
101
102struct mlxsw_pci {
103	struct pci_dev *pdev;
104	u8 __iomem *hw_addr;
105	u64 free_running_clock_offset;
106	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
107	u32 doorbell_offset;
108	struct mlxsw_core *core;
109	struct {
110		struct mlxsw_pci_mem_item *items;
111		unsigned int count;
112	} fw_area;
113	struct {
114		struct mlxsw_pci_mem_item out_mbox;
115		struct mlxsw_pci_mem_item in_mbox;
116		struct mutex lock; /* Lock access to command registers */
117		bool nopoll;
118		wait_queue_head_t wait;
119		bool wait_done;
120		struct {
121			u8 status;
122			u64 out_param;
123		} comp;
124	} cmd;
125	struct mlxsw_bus_info bus_info;
126	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximum supported CQE version */
128	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
129};
130
131static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
132{
133	tasklet_schedule(&q->tasklet);
134}
135
136static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
137					size_t elem_size, int elem_index)
138{
139	return q->mem_item.buf + (elem_size * elem_index);
140}
141
142static struct mlxsw_pci_queue_elem_info *
143mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
144{
145	return &q->elem_info[elem_index];
146}
147
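/* Queue element counts are powers of two, so the free-running 16-bit
 * producer/consumer counters are masked with (count - 1) to obtain the ring
 * index. The producer side returns NULL once producer - consumer == count,
 * i.e. when the queue is full.
 */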
148static struct mlxsw_pci_queue_elem_info *
149mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
150{
151	int index = q->producer_counter & (q->count - 1);
152
153	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
154		return NULL;
155	return mlxsw_pci_queue_elem_info_get(q, index);
156}
157
158static struct mlxsw_pci_queue_elem_info *
159mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
160{
161	int index = q->consumer_counter & (q->count - 1);
162
163	return mlxsw_pci_queue_elem_info_get(q, index);
164}
165
166static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
167{
168	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
169}
170
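/* The owner bit of an element toggles each time the hardware wraps around
 * the queue. !!(consumer_counter & count) is the value software expects for
 * the current pass, so a mismatch means the element is still owned by the
 * hardware and holds no new completion/event.
 */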
171static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
172{
173	return owner_bit != !!(q->consumer_counter & q->count);
174}
175
176static struct mlxsw_pci_queue_type_group *
177mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
178			       enum mlxsw_pci_queue_type q_type)
179{
180	return &mlxsw_pci->queues[q_type];
181}
182
183static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
184				  enum mlxsw_pci_queue_type q_type)
185{
186	struct mlxsw_pci_queue_type_group *queue_group;
187
188	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
189	return queue_group->count;
190}
191
192static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
193{
194	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
195}
196
197static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
198{
199	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
200}
201
202static struct mlxsw_pci_queue *
203__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
204		      enum mlxsw_pci_queue_type q_type, u8 q_num)
205{
206	return &mlxsw_pci->queues[q_type].q[q_num];
207}
208
209static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
210						 u8 q_num)
211{
212	return __mlxsw_pci_queue_get(mlxsw_pci,
213				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
214}
215
216static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
217						 u8 q_num)
218{
219	return __mlxsw_pci_queue_get(mlxsw_pci,
220				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
221}
222
223static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
224						u8 q_num)
225{
226	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
227}
228
229static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
230						u8 q_num)
231{
232	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
233}
234
235static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
236					   struct mlxsw_pci_queue *q,
237					   u16 val)
238{
239	mlxsw_pci_write32(mlxsw_pci,
240			  DOORBELL(mlxsw_pci->doorbell_offset,
241				   mlxsw_pci_doorbell_type_offset[q->type],
242				   q->num), val);
243}
244
245static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
246					       struct mlxsw_pci_queue *q,
247					       u16 val)
248{
249	mlxsw_pci_write32(mlxsw_pci,
250			  DOORBELL(mlxsw_pci->doorbell_offset,
251				   mlxsw_pci_doorbell_arm_type_offset[q->type],
252				   q->num), val);
253}
254
255static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
256						   struct mlxsw_pci_queue *q)
257{
258	wmb(); /* ensure all writes are done before we ring a bell */
259	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
260}
261
262static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
263						   struct mlxsw_pci_queue *q)
264{
265	wmb(); /* ensure all writes are done before we ring a bell */
266	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
267				       q->consumer_counter + q->count);
268}
269
270static void
271mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
272					   struct mlxsw_pci_queue *q)
273{
274	wmb(); /* ensure all writes are done before we ring a bell */
275	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
276}
277
278static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
279					     int page_index)
280{
281	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
282}
283
284static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
285			      struct mlxsw_pci_queue *q)
286{
287	int tclass;
288	int lp;
289	int i;
290	int err;
291
292	q->producer_counter = 0;
293	q->consumer_counter = 0;
294	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
295						      MLXSW_PCI_SDQ_CTL_TC;
296	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
297						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
298
	/* Set CQ of the same number as this SDQ. */
300	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
301	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
302	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
303	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
304	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
305		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
306
307		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
308	}
309
310	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
311	if (err)
312		return err;
313	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
314	return 0;
315}
316
317static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
318			       struct mlxsw_pci_queue *q)
319{
320	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
321}
322
323static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
324				  int index, char *frag_data, size_t frag_len,
325				  int direction)
326{
327	struct pci_dev *pdev = mlxsw_pci->pdev;
328	dma_addr_t mapaddr;
329
330	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
331	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
332		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
333		return -EIO;
334	}
335	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
336	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
337	return 0;
338}
339
340static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
341				     int index, int direction)
342{
343	struct pci_dev *pdev = mlxsw_pci->pdev;
344	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
345	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
346
347	if (!frag_len)
348		return;
349	pci_unmap_single(pdev, mapaddr, frag_len, direction);
350}
351
352static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
353				   struct mlxsw_pci_queue_elem_info *elem_info)
354{
355	size_t buf_len = MLXSW_PORT_MAX_MTU;
356	char *wqe = elem_info->elem;
357	struct sk_buff *skb;
358	int err;
359
360	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
361	if (!skb)
362		return -ENOMEM;
363
364	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
365				     buf_len, DMA_FROM_DEVICE);
366	if (err)
367		goto err_frag_map;
368
369	elem_info->u.rdq.skb = skb;
370	return 0;
371
372err_frag_map:
373	dev_kfree_skb_any(skb);
374	return err;
375}
376
377static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
378				   struct mlxsw_pci_queue_elem_info *elem_info)
379{
380	struct sk_buff *skb;
381	char *wqe;
382
383	skb = elem_info->u.rdq.skb;
384	wqe = elem_info->elem;
385
386	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
387	dev_kfree_skb_any(skb);
388}
389
390static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
391			      struct mlxsw_pci_queue *q)
392{
393	struct mlxsw_pci_queue_elem_info *elem_info;
394	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
395	int i;
396	int err;
397
398	q->producer_counter = 0;
399	q->consumer_counter = 0;
400
	/* Set CQ of the same number as this RDQ, with a base above the SDQ
	 * count, since the lower CQs are assigned to the SDQs.
	 */
404	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
405	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
406	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
407		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
408
409		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
410	}
411
412	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
413	if (err)
414		return err;
415
416	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
417
418	for (i = 0; i < q->count; i++) {
419		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
420		BUG_ON(!elem_info);
421		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
422		if (err)
423			goto rollback;
424		/* Everything is set up, ring doorbell to pass elem to HW */
425		q->producer_counter++;
426		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
427	}
428
429	return 0;
430
431rollback:
432	for (i--; i >= 0; i--) {
433		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
434		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
435	}
436	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
437
438	return err;
439}
440
441static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
442			       struct mlxsw_pci_queue *q)
443{
444	struct mlxsw_pci_queue_elem_info *elem_info;
445	int i;
446
447	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
448	for (i = 0; i < q->count; i++) {
449		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
450		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
451	}
452}
453
454static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
455				  struct mlxsw_pci_queue *q)
456{
457	q->u.cq.v = mlxsw_pci->max_cqe_ver;
458
459	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
460	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
461	    q->num < mlxsw_pci->num_sdq_cqs)
462		q->u.cq.v = MLXSW_PCI_CQE_V1;
463}
464
465static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
466			     struct mlxsw_pci_queue *q)
467{
468	int i;
469	int err;
470
471	q->consumer_counter = 0;
472
473	for (i = 0; i < q->count; i++) {
474		char *elem = mlxsw_pci_queue_elem_get(q, i);
475
476		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
477	}
478
479	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
480		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
481				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
482	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
483		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
484				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
485
486	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
487	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
488	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
489	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
490		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
491
492		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
493	}
494	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
495	if (err)
496		return err;
497	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
498	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
499	return 0;
500}
501
502static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
503			      struct mlxsw_pci_queue *q)
504{
505	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
506}
507
508static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
509				     struct mlxsw_pci_queue *q,
510				     u16 consumer_counter_limit,
511				     char *cqe)
512{
513	struct pci_dev *pdev = mlxsw_pci->pdev;
514	struct mlxsw_pci_queue_elem_info *elem_info;
515	struct mlxsw_tx_info tx_info;
516	char *wqe;
517	struct sk_buff *skb;
518	int i;
519
520	spin_lock(&q->lock);
521	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
522	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
523	skb = elem_info->u.sdq.skb;
524	wqe = elem_info->elem;
525	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
526		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
527
528	if (unlikely(!tx_info.is_emad &&
529		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
530		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
531					   tx_info.local_port);
532		skb = NULL;
533	}
534
535	if (skb)
536		dev_kfree_skb_any(skb);
537	elem_info->u.sdq.skb = NULL;
538
539	if (q->consumer_counter++ != consumer_counter_limit)
540		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
541	spin_unlock(&q->lock);
542}
543
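/* Receive completion handling. The WQE is copied aside first because
 * mlxsw_pci_rdq_skb_alloc() immediately maps a fresh buffer into the same
 * descriptor slot; the copy preserves the old DMA address so the completed
 * buffer can still be unmapped before being passed up the stack.
 */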
544static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
545				     struct mlxsw_pci_queue *q,
546				     u16 consumer_counter_limit,
547				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
548{
549	struct pci_dev *pdev = mlxsw_pci->pdev;
550	struct mlxsw_pci_queue_elem_info *elem_info;
551	struct mlxsw_rx_info rx_info = {};
552	char wqe[MLXSW_PCI_WQE_SIZE];
553	struct sk_buff *skb;
554	u16 byte_count;
555	int err;
556
557	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
558	skb = elem_info->u.rdq.skb;
559	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
560
561	if (q->consumer_counter++ != consumer_counter_limit)
562		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
563
564	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
565	if (err) {
566		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
567		goto out;
568	}
569
570	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
571
572	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
573		rx_info.is_lag = true;
574		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
575		rx_info.lag_port_index =
576			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
577	} else {
578		rx_info.is_lag = false;
579		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
580	}
581
582	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
583
584	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
585	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
586		u32 cookie_index = 0;
587
588		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
589			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
590		mlxsw_skb_cb(skb)->cookie_index = cookie_index;
591	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
592		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
593		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
594		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
595	}
596
597	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
598	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
599		byte_count -= ETH_FCS_LEN;
600	skb_put(skb, byte_count);
601	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
602
603out:
604	/* Everything is set up, ring doorbell to pass elem to HW */
605	q->producer_counter++;
606	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
607	return;
608}
609
610static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
611{
612	struct mlxsw_pci_queue_elem_info *elem_info;
613	char *elem;
614	bool owner_bit;
615
616	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
617	elem = elem_info->elem;
618	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
619	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
620		return NULL;
621	q->consumer_counter++;
622	rmb(); /* make sure we read owned bit before the rest of elem */
623	return elem;
624}
625
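/* CQ processing tasklet. Each CQE is copied to a local buffer and the
 * consumer doorbell is rung right away, so the hardware may reuse the slot
 * while the copy is being handled. At most half of the queue ("credits") is
 * processed per run to bound the time spent in the tasklet; the CQ is
 * re-armed afterwards so that further completions generate a new event.
 */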
626static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
627{
628	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
629	struct mlxsw_pci *mlxsw_pci = q->pci;
630	char *cqe;
631	int items = 0;
632	int credits = q->count >> 1;
633
634	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
635		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
636		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
637		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
638		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
639
640		memcpy(ncqe, cqe, q->elem_size);
641		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
642
643		if (sendq) {
644			struct mlxsw_pci_queue *sdq;
645
646			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
647			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
648						 wqe_counter, ncqe);
649			q->u.cq.comp_sdq_count++;
650		} else {
651			struct mlxsw_pci_queue *rdq;
652
653			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
654			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
655						 wqe_counter, q->u.cq.v, ncqe);
656			q->u.cq.comp_rdq_count++;
657		}
658		if (++items == credits)
659			break;
660	}
661	if (items)
662		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
663}
664
665static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
666{
667	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
668					       MLXSW_PCI_CQE01_COUNT;
669}
670
671static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
672{
673	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
674					       MLXSW_PCI_CQE01_SIZE;
675}
676
677static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
678			     struct mlxsw_pci_queue *q)
679{
680	int i;
681	int err;
682
683	q->consumer_counter = 0;
684
685	for (i = 0; i < q->count; i++) {
686		char *elem = mlxsw_pci_queue_elem_get(q, i);
687
688		mlxsw_pci_eqe_owner_set(elem, 1);
689	}
690
691	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
692	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
693	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
694	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
695		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
696
697		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
698	}
699	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
700	if (err)
701		return err;
702	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
703	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
704	return 0;
705}
706
707static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
708			      struct mlxsw_pci_queue *q)
709{
710	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
711}
712
713static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
714{
715	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
716	mlxsw_pci->cmd.comp.out_param =
717		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
718		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
719	mlxsw_pci->cmd.wait_done = true;
720	wake_up(&mlxsw_pci->cmd.wait);
721}
722
723static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
724{
725	struct mlxsw_pci_queue_elem_info *elem_info;
726	char *elem;
727	bool owner_bit;
728
729	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
730	elem = elem_info->elem;
731	owner_bit = mlxsw_pci_eqe_owner_get(elem);
732	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
733		return NULL;
734	q->consumer_counter++;
735	rmb(); /* make sure we read owned bit before the rest of elem */
736	return elem;
737}
738
739static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
740{
741	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
742	struct mlxsw_pci *mlxsw_pci = q->pci;
743	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
744	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
745	char *eqe;
746	u8 cqn;
747	bool cq_handle = false;
748	int items = 0;
749	int credits = q->count >> 1;
750
751	memset(&active_cqns, 0, sizeof(active_cqns));
752
753	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
754
		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0), while completion queue
		 * events are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
759		switch (q->num) {
760		case MLXSW_PCI_EQ_ASYNC_NUM:
761			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
762			q->u.eq.ev_cmd_count++;
763			break;
764		case MLXSW_PCI_EQ_COMP_NUM:
765			cqn = mlxsw_pci_eqe_cqn_get(eqe);
766			set_bit(cqn, active_cqns);
767			cq_handle = true;
768			q->u.eq.ev_comp_count++;
769			break;
770		default:
771			q->u.eq.ev_other_count++;
772		}
773		if (++items == credits)
774			break;
775	}
776	if (items) {
777		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
778		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
779	}
780
781	if (!cq_handle)
782		return;
783	for_each_set_bit(cqn, active_cqns, cq_count) {
784		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
785		mlxsw_pci_queue_tasklet_schedule(q);
786	}
787}
788
789struct mlxsw_pci_queue_ops {
790	const char *name;
791	enum mlxsw_pci_queue_type type;
792	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
793			 struct mlxsw_pci_queue *q);
794	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
795		    struct mlxsw_pci_queue *q);
796	void (*fini)(struct mlxsw_pci *mlxsw_pci,
797		     struct mlxsw_pci_queue *q);
798	void (*tasklet)(struct tasklet_struct *t);
799	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
800	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
801	u16 elem_count;
802	u8 elem_size;
803};
804
805static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
806	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
807	.init		= mlxsw_pci_sdq_init,
808	.fini		= mlxsw_pci_sdq_fini,
809	.elem_count	= MLXSW_PCI_WQE_COUNT,
810	.elem_size	= MLXSW_PCI_WQE_SIZE,
811};
812
813static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
814	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
815	.init		= mlxsw_pci_rdq_init,
816	.fini		= mlxsw_pci_rdq_fini,
817	.elem_count	= MLXSW_PCI_WQE_COUNT,
818	.elem_size	= MLXSW_PCI_WQE_SIZE
819};
820
821static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
822	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
823	.pre_init	= mlxsw_pci_cq_pre_init,
824	.init		= mlxsw_pci_cq_init,
825	.fini		= mlxsw_pci_cq_fini,
826	.tasklet	= mlxsw_pci_cq_tasklet,
827	.elem_count_f	= mlxsw_pci_cq_elem_count,
828	.elem_size_f	= mlxsw_pci_cq_elem_size
829};
830
831static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
832	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
833	.init		= mlxsw_pci_eq_init,
834	.fini		= mlxsw_pci_eq_fini,
835	.tasklet	= mlxsw_pci_eq_tasklet,
836	.elem_count	= MLXSW_PCI_EQE_COUNT,
837	.elem_size	= MLXSW_PCI_EQE_SIZE
838};
839
840static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
841				const struct mlxsw_pci_queue_ops *q_ops,
842				struct mlxsw_pci_queue *q, u8 q_num)
843{
844	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
845	int i;
846	int err;
847
848	q->num = q_num;
849	if (q_ops->pre_init)
850		q_ops->pre_init(mlxsw_pci, q);
851
852	spin_lock_init(&q->lock);
853	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
854					 q_ops->elem_count;
855	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
856					    q_ops->elem_size;
857	q->type = q_ops->type;
858	q->pci = mlxsw_pci;
859
860	if (q_ops->tasklet)
861		tasklet_setup(&q->tasklet, q_ops->tasklet);
862
863	mem_item->size = MLXSW_PCI_AQ_SIZE;
864	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
865					     mem_item->size,
866					     &mem_item->mapaddr);
867	if (!mem_item->buf)
868		return -ENOMEM;
869
870	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
871	if (!q->elem_info) {
872		err = -ENOMEM;
873		goto err_elem_info_alloc;
874	}
875
	/* Initialize per-element info (elem_info) with pointers into the
	 * DMA-mapped element memory for easy access later.
	 */
879	for (i = 0; i < q->count; i++) {
880		struct mlxsw_pci_queue_elem_info *elem_info;
881
882		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
883		elem_info->elem =
884			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
885	}
886
887	mlxsw_cmd_mbox_zero(mbox);
888	err = q_ops->init(mlxsw_pci, mbox, q);
889	if (err)
890		goto err_q_ops_init;
891	return 0;
892
893err_q_ops_init:
894	kfree(q->elem_info);
895err_elem_info_alloc:
896	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
897			    mem_item->buf, mem_item->mapaddr);
898	return err;
899}
900
901static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
902				 const struct mlxsw_pci_queue_ops *q_ops,
903				 struct mlxsw_pci_queue *q)
904{
905	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
906
907	q_ops->fini(mlxsw_pci, q);
908	kfree(q->elem_info);
909	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
910			    mem_item->buf, mem_item->mapaddr);
911}
912
913static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
914				      const struct mlxsw_pci_queue_ops *q_ops,
915				      u8 num_qs)
916{
917	struct mlxsw_pci_queue_type_group *queue_group;
918	int i;
919	int err;
920
921	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
922	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
923	if (!queue_group->q)
924		return -ENOMEM;
925
926	for (i = 0; i < num_qs; i++) {
927		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
928					   &queue_group->q[i], i);
929		if (err)
930			goto err_queue_init;
931	}
932	queue_group->count = num_qs;
933
934	return 0;
935
936err_queue_init:
937	for (i--; i >= 0; i--)
938		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
939	kfree(queue_group->q);
940	return err;
941}
942
943static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
944				       const struct mlxsw_pci_queue_ops *q_ops)
945{
946	struct mlxsw_pci_queue_type_group *queue_group;
947	int i;
948
949	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
950	for (i = 0; i < queue_group->count; i++)
951		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
952	kfree(queue_group->q);
953}
954
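/* Bring up all asynchronous queues. EQs are created before the CQs that
 * report through them, and CQs before the SDQs/RDQs they serve. By
 * convention the first num_sdqs CQs belong to the SDQs and the following
 * ones to the RDQs, which is why mlxsw_pci_rdq_init() offsets its CQ number
 * by the SDQ count.
 */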
955static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
956{
957	struct pci_dev *pdev = mlxsw_pci->pdev;
958	u8 num_sdqs;
959	u8 sdq_log2sz;
960	u8 num_rdqs;
961	u8 rdq_log2sz;
962	u8 num_cqs;
963	u8 cq_log2sz;
964	u8 cqv2_log2sz;
965	u8 num_eqs;
966	u8 eq_log2sz;
967	int err;
968
969	mlxsw_cmd_mbox_zero(mbox);
970	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
971	if (err)
972		return err;
973
974	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
975	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
976	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
977	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
978	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
979	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
980	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
981	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
982	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
983
984	if (num_sdqs + num_rdqs > num_cqs ||
985	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
986	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
987		dev_err(&pdev->dev, "Unsupported number of queues\n");
988		return -EINVAL;
989	}
990
991	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
992	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
993	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
994	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
995	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
996	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
997		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
998		return -EINVAL;
999	}
1000
1001	mlxsw_pci->num_sdq_cqs = num_sdqs;
1002
1003	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
1004					 num_eqs);
1005	if (err) {
1006		dev_err(&pdev->dev, "Failed to initialize event queues\n");
1007		return err;
1008	}
1009
1010	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
1011					 num_cqs);
1012	if (err) {
1013		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
1014		goto err_cqs_init;
1015	}
1016
1017	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
1018					 num_sdqs);
1019	if (err) {
1020		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
1021		goto err_sdqs_init;
1022	}
1023
1024	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
1025					 num_rdqs);
1026	if (err) {
1027		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
1028		goto err_rdqs_init;
1029	}
1030
	/* Until now the command interface had to be polled; with the queues
	 * initialized, switch to event-driven command completions.
	 */
1032	mlxsw_pci->cmd.nopoll = true;
1033	return 0;
1034
1035err_rdqs_init:
1036	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1037err_sdqs_init:
1038	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1039err_cqs_init:
1040	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1041	return err;
1042}
1043
1044static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
1045{
1046	mlxsw_pci->cmd.nopoll = false;
1047	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
1048	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1049	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1050	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1051}
1052
1053static void
1054mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
1055				     char *mbox, int index,
1056				     const struct mlxsw_swid_config *swid)
1057{
1058	u8 mask = 0;
1059
1060	if (swid->used_type) {
1061		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
1062			mbox, index, swid->type);
1063		mask |= 1;
1064	}
1065	if (swid->used_properties) {
1066		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
1067			mbox, index, swid->properties);
1068		mask |= 2;
1069	}
1070	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
1071}
1072
1073static int
1074mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
1075				const struct mlxsw_config_profile *profile,
1076				struct mlxsw_res *res)
1077{
1078	u64 single_size, double_size, linear_size;
1079	int err;
1080
1081	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
1082				       &single_size, &double_size,
1083				       &linear_size);
1084	if (err)
1085		return err;
1086
1087	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
1088	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
1089	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);
1090
1091	return 0;
1092}
1093
1094static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
1095				    const struct mlxsw_config_profile *profile,
1096				    struct mlxsw_res *res)
1097{
1098	int i;
1099	int err;
1100
1101	mlxsw_cmd_mbox_zero(mbox);
1102
1103	if (profile->used_max_vepa_channels) {
1104		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
1105			mbox, 1);
1106		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
1107			mbox, profile->max_vepa_channels);
1108	}
1109	if (profile->used_max_mid) {
1110		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
1111			mbox, 1);
1112		mlxsw_cmd_mbox_config_profile_max_mid_set(
1113			mbox, profile->max_mid);
1114	}
1115	if (profile->used_max_pgt) {
1116		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
1117			mbox, 1);
1118		mlxsw_cmd_mbox_config_profile_max_pgt_set(
1119			mbox, profile->max_pgt);
1120	}
1121	if (profile->used_max_system_port) {
1122		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
1123			mbox, 1);
1124		mlxsw_cmd_mbox_config_profile_max_system_port_set(
1125			mbox, profile->max_system_port);
1126	}
1127	if (profile->used_max_vlan_groups) {
1128		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
1129			mbox, 1);
1130		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
1131			mbox, profile->max_vlan_groups);
1132	}
1133	if (profile->used_max_regions) {
1134		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
1135			mbox, 1);
1136		mlxsw_cmd_mbox_config_profile_max_regions_set(
1137			mbox, profile->max_regions);
1138	}
1139	if (profile->used_flood_tables) {
1140		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
1141			mbox, 1);
1142		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
1143			mbox, profile->max_flood_tables);
1144		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
1145			mbox, profile->max_vid_flood_tables);
1146		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
1147			mbox, profile->max_fid_offset_flood_tables);
1148		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
1149			mbox, profile->fid_offset_flood_table_size);
1150		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
1151			mbox, profile->max_fid_flood_tables);
1152		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
1153			mbox, profile->fid_flood_table_size);
1154	}
1155	if (profile->used_flood_mode) {
1156		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
1157			mbox, 1);
1158		mlxsw_cmd_mbox_config_profile_flood_mode_set(
1159			mbox, profile->flood_mode);
1160	}
1161	if (profile->used_max_ib_mc) {
1162		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
1163			mbox, 1);
1164		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
1165			mbox, profile->max_ib_mc);
1166	}
1167	if (profile->used_max_pkey) {
1168		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
1169			mbox, 1);
1170		mlxsw_cmd_mbox_config_profile_max_pkey_set(
1171			mbox, profile->max_pkey);
1172	}
1173	if (profile->used_ar_sec) {
1174		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
1175			mbox, 1);
1176		mlxsw_cmd_mbox_config_profile_ar_sec_set(
1177			mbox, profile->ar_sec);
1178	}
1179	if (profile->used_adaptive_routing_group_cap) {
1180		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
1181			mbox, 1);
1182		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
1183			mbox, profile->adaptive_routing_group_cap);
1184	}
1185	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
1186		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
1187		if (err)
1188			return err;
1189
1190		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
1191		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
1192					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
1193		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
1194									   1);
1195		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
1196					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
1197		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
1198								mbox, 1);
1199		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
1200					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
1201	}
1202
1203	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
1204		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
1205						     &profile->swid_config[i]);
1206
1207	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
1208		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
1209		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
1210	}
1211
1212	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
1213}
1214
1215static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
1216{
1217	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
1218	int err;
1219
1220	mlxsw_cmd_mbox_zero(mbox);
1221	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
1222	if (err)
1223		return err;
1224	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
1225	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
1226	return 0;
1227}
1228
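/* Map the firmware area: allocate num_pages DMA-coherent pages and hand
 * them to the device with MAP_FA, batching at most
 * MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX page entries per mailbox.
 */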
1229static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1230				  u16 num_pages)
1231{
1232	struct mlxsw_pci_mem_item *mem_item;
1233	int nent = 0;
1234	int i;
1235	int err;
1236
1237	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
1238					   GFP_KERNEL);
1239	if (!mlxsw_pci->fw_area.items)
1240		return -ENOMEM;
1241	mlxsw_pci->fw_area.count = num_pages;
1242
1243	mlxsw_cmd_mbox_zero(mbox);
1244	for (i = 0; i < num_pages; i++) {
1245		mem_item = &mlxsw_pci->fw_area.items[i];
1246
1247		mem_item->size = MLXSW_PCI_PAGE_SIZE;
1248		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
1249						     mem_item->size,
1250						     &mem_item->mapaddr);
1251		if (!mem_item->buf) {
1252			err = -ENOMEM;
1253			goto err_alloc;
1254		}
1255		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
1256		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
1257		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
1258			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1259			if (err)
1260				goto err_cmd_map_fa;
1261			nent = 0;
1262			mlxsw_cmd_mbox_zero(mbox);
1263		}
1264	}
1265
1266	if (nent) {
1267		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
1268		if (err)
1269			goto err_cmd_map_fa;
1270	}
1271
1272	return 0;
1273
1274err_cmd_map_fa:
1275err_alloc:
1276	for (i--; i >= 0; i--) {
1277		mem_item = &mlxsw_pci->fw_area.items[i];
1278
1279		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1280				    mem_item->buf, mem_item->mapaddr);
1281	}
1282	kfree(mlxsw_pci->fw_area.items);
1283	return err;
1284}
1285
1286static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
1287{
1288	struct mlxsw_pci_mem_item *mem_item;
1289	int i;
1290
1291	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
1292
1293	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
1294		mem_item = &mlxsw_pci->fw_area.items[i];
1295
1296		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1297				    mem_item->buf, mem_item->mapaddr);
1298	}
1299	kfree(mlxsw_pci->fw_area.items);
1300}
1301
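/* A single MSI-X vector is used (see mlxsw_pci_alloc_irq_vectors()), so the
 * interrupt handler just schedules the tasklets of both event queues and
 * lets them find out what actually happened.
 */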
1302static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
1303{
1304	struct mlxsw_pci *mlxsw_pci = dev_id;
1305	struct mlxsw_pci_queue *q;
1306	int i;
1307
1308	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
1309		q = mlxsw_pci_eq_get(mlxsw_pci, i);
1310		mlxsw_pci_queue_tasklet_schedule(q);
1311	}
1312	return IRQ_HANDLED;
1313}
1314
1315static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
1316				struct mlxsw_pci_mem_item *mbox)
1317{
1318	struct pci_dev *pdev = mlxsw_pci->pdev;
1319	int err = 0;
1320
1321	mbox->size = MLXSW_CMD_MBOX_SIZE;
1322	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
1323					 &mbox->mapaddr);
1324	if (!mbox->buf) {
1325		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
1326		err = -ENOMEM;
1327	}
1328
1329	return err;
1330}
1331
1332static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
1333				struct mlxsw_pci_mem_item *mbox)
1334{
1335	struct pci_dev *pdev = mlxsw_pci->pdev;
1336
1337	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
1338			    mbox->mapaddr);
1339}
1340
1341static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
1342				    const struct pci_device_id *id,
1343				    u32 *p_sys_status)
1344{
1345	unsigned long end;
1346	u32 val;
1347
1348	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
1349		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1350		return 0;
1351	}
1352
1353	/* We must wait for the HW to become responsive. */
1354	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1355
1356	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1357	do {
1358		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1359		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1360			return 0;
1361		cond_resched();
1362	} while (time_before(jiffies, end));
1363
1364	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
1365
1366	return -EBUSY;
1367}
1368
1369static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1370			      const struct pci_device_id *id)
1371{
1372	struct pci_dev *pdev = mlxsw_pci->pdev;
1373	char mrsr_pl[MLXSW_REG_MRSR_LEN];
1374	u32 sys_status;
1375	int err;
1376
1377	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
1378	if (err) {
1379		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
1380			sys_status);
1381		return err;
1382	}
1383
1384	mlxsw_reg_mrsr_pack(mrsr_pl);
1385	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
1386	if (err)
1387		return err;
1388
1389	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
1390	if (err) {
1391		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
1392			sys_status);
1393		return err;
1394	}
1395
1396	return 0;
1397}
1398
1399static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
1400{
1401	int err;
1402
1403	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
1404	if (err < 0)
1405		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
1406	return err;
1407}
1408
1409static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
1410{
1411	pci_free_irq_vectors(mlxsw_pci->pdev);
1412}
1413
1414static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
1415			  const struct mlxsw_config_profile *profile,
1416			  struct mlxsw_res *res)
1417{
1418	struct mlxsw_pci *mlxsw_pci = bus_priv;
1419	struct pci_dev *pdev = mlxsw_pci->pdev;
1420	char *mbox;
1421	u16 num_pages;
1422	int err;
1423
1424	mlxsw_pci->core = mlxsw_core;
1425
1426	mbox = mlxsw_cmd_mbox_alloc();
1427	if (!mbox)
1428		return -ENOMEM;
1429
1430	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
1431	if (err)
1432		goto err_sw_reset;
1433
1434	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
1435	if (err < 0) {
1436		dev_err(&pdev->dev, "MSI-X init failed\n");
1437		goto err_alloc_irq;
1438	}
1439
1440	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
1441	if (err)
1442		goto err_query_fw;
1443
1444	mlxsw_pci->bus_info.fw_rev.major =
1445		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
1446	mlxsw_pci->bus_info.fw_rev.minor =
1447		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
1448	mlxsw_pci->bus_info.fw_rev.subminor =
1449		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
1450
1451	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
1452		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
1453		err = -EINVAL;
1454		goto err_iface_rev;
1455	}
1456	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
1457		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
1458		err = -EINVAL;
1459		goto err_doorbell_page_bar;
1460	}
1461
1462	mlxsw_pci->doorbell_offset =
1463		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
1464
1465	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
1466		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
1467		err = -EINVAL;
1468		goto err_fr_rn_clk_bar;
1469	}
1470
1471	mlxsw_pci->free_running_clock_offset =
1472		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
1473
1474	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
1475	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
1476	if (err)
1477		goto err_fw_area_init;
1478
1479	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
1480	if (err)
1481		goto err_boardinfo;
1482
1483	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
1484	if (err)
1485		goto err_query_resources;
1486
1487	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
1488	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
1489		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
1490	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
1491		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
1492		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
1493	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
1494		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
1495		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
1496		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
1500	}
1501
1502	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
1503	if (err)
1504		goto err_config_profile;
1505
1506	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
1507	if (err)
1508		goto err_aqs_init;
1509
1510	err = request_irq(pci_irq_vector(pdev, 0),
1511			  mlxsw_pci_eq_irq_handler, 0,
1512			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
1513	if (err) {
1514		dev_err(&pdev->dev, "IRQ request failed\n");
1515		goto err_request_eq_irq;
1516	}
1517
1518	goto mbox_put;
1519
1520err_request_eq_irq:
1521	mlxsw_pci_aqs_fini(mlxsw_pci);
1522err_aqs_init:
1523err_config_profile:
1524err_cqe_v_check:
1525err_query_resources:
1526err_boardinfo:
1527	mlxsw_pci_fw_area_fini(mlxsw_pci);
1528err_fw_area_init:
1529err_fr_rn_clk_bar:
1530err_doorbell_page_bar:
1531err_iface_rev:
1532err_query_fw:
1533	mlxsw_pci_free_irq_vectors(mlxsw_pci);
1534err_alloc_irq:
1535err_sw_reset:
1536mbox_put:
1537	mlxsw_cmd_mbox_free(mbox);
1538	return err;
1539}
1540
1541static void mlxsw_pci_fini(void *bus_priv)
1542{
1543	struct mlxsw_pci *mlxsw_pci = bus_priv;
1544
1545	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
1546	mlxsw_pci_aqs_fini(mlxsw_pci);
1547	mlxsw_pci_fw_area_fini(mlxsw_pci);
1548	mlxsw_pci_free_irq_vectors(mlxsw_pci);
1549}
1550
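/* EMAD traffic gets the dedicated SDQ 0; all other traffic is spread over
 * the remaining control SDQs by local port number.
 */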
1551static struct mlxsw_pci_queue *
1552mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
1553		   const struct mlxsw_tx_info *tx_info)
1554{
1555	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
1556	u8 sdqn;
1557
1558	if (tx_info->is_emad) {
1559		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
1560	} else {
1561		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
1562		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
1563	}
1564
1565	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
1566}
1567
1568static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
1569					const struct mlxsw_tx_info *tx_info)
1570{
1571	struct mlxsw_pci *mlxsw_pci = bus_priv;
1572	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
1573
1574	return !mlxsw_pci_queue_elem_info_producer_get(q);
1575}
1576
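/* Transmit an skb on the SDQ picked for tx_info. If the skb has more
 * fragments than a WQE has scatter/gather entries, it is linearized first.
 * -EAGAIN is returned when the queue is full, so the caller may retry later.
 */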
1577static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
1578				  const struct mlxsw_tx_info *tx_info)
1579{
1580	struct mlxsw_pci *mlxsw_pci = bus_priv;
1581	struct mlxsw_pci_queue *q;
1582	struct mlxsw_pci_queue_elem_info *elem_info;
1583	char *wqe;
1584	int i;
1585	int err;
1586
1587	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
1588		err = skb_linearize(skb);
1589		if (err)
1590			return err;
1591	}
1592
1593	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
1594	spin_lock_bh(&q->lock);
1595	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
1596	if (!elem_info) {
1597		/* queue is full */
1598		err = -EAGAIN;
1599		goto unlock;
1600	}
1601	mlxsw_skb_cb(skb)->tx_info = *tx_info;
1602	elem_info->u.sdq.skb = skb;
1603
1604	wqe = elem_info->elem;
1605	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
1606	mlxsw_pci_wqe_lp_set(wqe, 0);
1607	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
1608
1609	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
1610				     skb_headlen(skb), DMA_TO_DEVICE);
1611	if (err)
1612		goto unlock;
1613
1614	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1615		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1616
1617		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
1618					     skb_frag_address(frag),
1619					     skb_frag_size(frag),
1620					     DMA_TO_DEVICE);
1621		if (err)
1622			goto unmap_frags;
1623	}
1624
1625	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1626		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1627
	/* Set unused SG entries' byte count to zero. */
1629	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
1630		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
1631
1632	/* Everything is set up, ring producer doorbell to get HW going */
1633	q->producer_counter++;
1634	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
1635
1636	goto unlock;
1637
1638unmap_frags:
1639	for (; i >= 0; i--)
1640		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
1641unlock:
1642	spin_unlock_bh(&q->lock);
1643	return err;
1644}
1645
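/* Execute a command through the command interface registers (CIR). Before
 * the event queues are up (cmd.nopoll is false), completion is detected by
 * polling the GO bit; afterwards the command is issued with the event
 * request bit set and its completion arrives as a command EQE on EQ0.
 */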
1646static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
1647			      u32 in_mod, bool out_mbox_direct,
1648			      char *in_mbox, size_t in_mbox_size,
1649			      char *out_mbox, size_t out_mbox_size,
1650			      u8 *p_status)
1651{
1652	struct mlxsw_pci *mlxsw_pci = bus_priv;
1653	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
1654	bool evreq = mlxsw_pci->cmd.nopoll;
1655	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
1656	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
1657	int err;
1658
1659	*p_status = MLXSW_CMD_STATUS_OK;
1660
1661	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
1662	if (err)
1663		return err;
1664
1665	if (in_mbox) {
1666		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
1667		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
1668	}
1669	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
1670	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
1671
1672	if (out_mbox)
1673		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
1674	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
1675	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
1676
1677	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
1678	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
1679
1680	*p_wait_done = false;
1681
1682	wmb(); /* all needs to be written before we write control register */
1683	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
1684			  MLXSW_PCI_CIR_CTRL_GO_BIT |
1685			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
1686			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
1687			  opcode);
1688
1689	if (!evreq) {
1690		unsigned long end;
1691
1692		end = jiffies + timeout;
1693		do {
1694			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
1695
1696			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
1697				*p_wait_done = true;
1698				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
1699				break;
1700			}
1701			cond_resched();
1702		} while (time_before(jiffies, end));
1703	} else {
1704		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
1705		*p_status = mlxsw_pci->cmd.comp.status;
1706	}
1707
1708	err = 0;
1709	if (*p_wait_done) {
1710		if (*p_status)
1711			err = -EIO;
1712	} else {
1713		err = -ETIMEDOUT;
1714	}
1715
1716	if (!err && out_mbox && out_mbox_direct) {
1717		/* Some commands don't use output param as address to mailbox
1718		 * but they store output directly into registers. In that case,
1719		 * copy registers into mbox buffer.
1720		 */
1721		__be32 tmp;
1722
1723		if (!evreq) {
1724			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
1725							   CIR_OUT_PARAM_HI));
1726			memcpy(out_mbox, &tmp, sizeof(tmp));
1727			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
1728							   CIR_OUT_PARAM_LO));
1729			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
1730		}
1731	} else if (!err && out_mbox) {
1732		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
1733	}
1734
1735	mutex_unlock(&mlxsw_pci->cmd.lock);
1736
1737	return err;
1738}
1739
1740static u32 mlxsw_pci_read_frc_h(void *bus_priv)
1741{
1742	struct mlxsw_pci *mlxsw_pci = bus_priv;
1743	u64 frc_offset;
1744
1745	frc_offset = mlxsw_pci->free_running_clock_offset;
1746	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
1747}
1748
1749static u32 mlxsw_pci_read_frc_l(void *bus_priv)
1750{
1751	struct mlxsw_pci *mlxsw_pci = bus_priv;
1752	u64 frc_offset;
1753
1754	frc_offset = mlxsw_pci->free_running_clock_offset;
1755	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
1756}
1757
1758static const struct mlxsw_bus mlxsw_pci_bus = {
1759	.kind			= "pci",
1760	.init			= mlxsw_pci_init,
1761	.fini			= mlxsw_pci_fini,
1762	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
1763	.skb_transmit		= mlxsw_pci_skb_transmit,
1764	.cmd_exec		= mlxsw_pci_cmd_exec,
1765	.read_frc_h		= mlxsw_pci_read_frc_h,
1766	.read_frc_l		= mlxsw_pci_read_frc_l,
1767	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
1768};
1769
1770static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
1771{
1772	int err;
1773
1774	mutex_init(&mlxsw_pci->cmd.lock);
1775	init_waitqueue_head(&mlxsw_pci->cmd.wait);
1776
1777	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
1778	if (err)
1779		goto err_in_mbox_alloc;
1780
1781	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
1782	if (err)
1783		goto err_out_mbox_alloc;
1784
1785	return 0;
1786
1787err_out_mbox_alloc:
1788	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
1789err_in_mbox_alloc:
1790	mutex_destroy(&mlxsw_pci->cmd.lock);
1791	return err;
1792}
1793
1794static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
1795{
1796	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
1797	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
1798	mutex_destroy(&mlxsw_pci->cmd.lock);
1799}
1800
1801static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1802{
1803	const char *driver_name = pdev->driver->name;
1804	struct mlxsw_pci *mlxsw_pci;
1805	int err;
1806
1807	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
1808	if (!mlxsw_pci)
1809		return -ENOMEM;
1810
1811	err = pci_enable_device(pdev);
1812	if (err) {
1813		dev_err(&pdev->dev, "pci_enable_device failed\n");
1814		goto err_pci_enable_device;
1815	}
1816
1817	err = pci_request_regions(pdev, driver_name);
1818	if (err) {
1819		dev_err(&pdev->dev, "pci_request_regions failed\n");
1820		goto err_pci_request_regions;
1821	}
1822
1823	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1824	if (!err) {
1825		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1826		if (err) {
1827			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
1828			goto err_pci_set_dma_mask;
1829		}
1830	} else {
1831		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1832		if (err) {
1833			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
1834			goto err_pci_set_dma_mask;
1835		}
1836	}
1837
1838	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
1839		dev_err(&pdev->dev, "invalid PCI region size\n");
1840		err = -EINVAL;
1841		goto err_pci_resource_len_check;
1842	}
1843
1844	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
1845				     pci_resource_len(pdev, 0));
1846	if (!mlxsw_pci->hw_addr) {
1847		dev_err(&pdev->dev, "ioremap failed\n");
1848		err = -EIO;
1849		goto err_ioremap;
1850	}
1851	pci_set_master(pdev);
1852
1853	mlxsw_pci->pdev = pdev;
1854	pci_set_drvdata(pdev, mlxsw_pci);
1855
1856	err = mlxsw_pci_cmd_init(mlxsw_pci);
1857	if (err)
1858		goto err_pci_cmd_init;
1859
1860	mlxsw_pci->bus_info.device_kind = driver_name;
1861	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
1862	mlxsw_pci->bus_info.dev = &pdev->dev;
1863	mlxsw_pci->bus_info.read_frc_capable = true;
1864	mlxsw_pci->id = id;
1865
1866	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
1867					     &mlxsw_pci_bus, mlxsw_pci, false,
1868					     NULL, NULL);
1869	if (err) {
1870		dev_err(&pdev->dev, "cannot register bus device\n");
1871		goto err_bus_device_register;
1872	}
1873
1874	return 0;
1875
1876err_bus_device_register:
1877	mlxsw_pci_cmd_fini(mlxsw_pci);
1878err_pci_cmd_init:
1879	iounmap(mlxsw_pci->hw_addr);
1880err_ioremap:
1881err_pci_resource_len_check:
1882err_pci_set_dma_mask:
1883	pci_release_regions(pdev);
1884err_pci_request_regions:
1885	pci_disable_device(pdev);
1886err_pci_enable_device:
1887	kfree(mlxsw_pci);
1888	return err;
1889}
1890
1891static void mlxsw_pci_remove(struct pci_dev *pdev)
1892{
1893	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
1894
1895	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
1896	mlxsw_pci_cmd_fini(mlxsw_pci);
1897	iounmap(mlxsw_pci->hw_addr);
1898	pci_release_regions(mlxsw_pci->pdev);
1899	pci_disable_device(mlxsw_pci->pdev);
1900	kfree(mlxsw_pci);
1901}
1902
1903int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
1904{
1905	pci_driver->probe = mlxsw_pci_probe;
1906	pci_driver->remove = mlxsw_pci_remove;
1907	pci_driver->shutdown = mlxsw_pci_remove;
1908	return pci_register_driver(pci_driver);
1909}
1910EXPORT_SYMBOL(mlxsw_pci_driver_register);
1911
1912void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
1913{
1914	pci_unregister_driver(pci_driver);
1915}
1916EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
1917
1918static int __init mlxsw_pci_module_init(void)
1919{
1920	return 0;
1921}
1922
1923static void __exit mlxsw_pci_module_exit(void)
1924{
1925}
1926
1927module_init(mlxsw_pci_module_init);
1928module_exit(mlxsw_pci_module_exit);
1929
1930MODULE_LICENSE("Dual BSD/GPL");
1931MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1932MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
1933