1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4#include <linux/ethtool.h>
5#include <linux/printk.h>
6#include <linux/dynamic_debug.h>
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/if_vlan.h>
10#include <linux/rtnetlink.h>
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13#include <linux/cpumask.h>
14#include <linux/crash_dump.h>
15#include <linux/vmalloc.h>
16
17#include "ionic.h"
18#include "ionic_bus.h"
19#include "ionic_dev.h"
20#include "ionic_lif.h"
21#include "ionic_txrx.h"
22#include "ionic_ethtool.h"
23#include "ionic_debugfs.h"
24
25/* queuetype support level */
26static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
27	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
28	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
29	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
30				      * 2 =       ... with CMB rings
31				      */
32	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
33				      * 1 =       ... with Tx SG version 1
34				      * 3 =       ... with CMB rings
35				      */
36};
37
38static void ionic_link_status_check(struct ionic_lif *lif);
39static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
40static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
41static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
42
43static void ionic_txrx_deinit(struct ionic_lif *lif);
44static int ionic_txrx_init(struct ionic_lif *lif);
45static int ionic_start_queues(struct ionic_lif *lif);
46static void ionic_stop_queues(struct ionic_lif *lif);
47static void ionic_lif_queue_identify(struct ionic_lif *lif);
48
49static void ionic_dim_work(struct work_struct *work)
50{
51	struct dim *dim = container_of(work, struct dim, work);
52	struct ionic_intr_info *intr;
53	struct dim_cq_moder cur_moder;
54	struct ionic_qcq *qcq;
55	struct ionic_lif *lif;
56	u32 new_coal;
57
58	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
59	qcq = container_of(dim, struct ionic_qcq, dim);
60	lif = qcq->q.lif;
61	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
62	new_coal = new_coal ? new_coal : 1;
63
64	intr = &qcq->intr;
65	if (intr->dim_coal_hw != new_coal) {
66		intr->dim_coal_hw = new_coal;
67
68		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
69				     intr->index, intr->dim_coal_hw);
70	}
71
72	dim->state = DIM_START_MEASURE;
73}
74
75static void ionic_lif_deferred_work(struct work_struct *work)
76{
77	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
78	struct ionic_deferred *def = &lif->deferred;
79	struct ionic_deferred_work *w = NULL;
80
81	do {
82		spin_lock_bh(&def->lock);
83		if (!list_empty(&def->list)) {
84			w = list_first_entry(&def->list,
85					     struct ionic_deferred_work, list);
86			list_del(&w->list);
87		}
88		spin_unlock_bh(&def->lock);
89
90		if (!w)
91			break;
92
93		switch (w->type) {
94		case IONIC_DW_TYPE_RX_MODE:
95			ionic_lif_rx_mode(lif);
96			break;
97		case IONIC_DW_TYPE_LINK_STATUS:
98			ionic_link_status_check(lif);
99			break;
100		case IONIC_DW_TYPE_LIF_RESET:
101			if (w->fw_status) {
102				ionic_lif_handle_fw_up(lif);
103			} else {
104				ionic_lif_handle_fw_down(lif);
105
106				/* Fire off another watchdog to see
107				 * if the FW is already back rather than
108				 * waiting another whole cycle
109				 */
110				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
111			}
112			break;
113		default:
114			break;
115		}
116		kfree(w);
117		w = NULL;
118	} while (true);
119}
120
121void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
122				struct ionic_deferred_work *work)
123{
124	spin_lock_bh(&def->lock);
125	list_add_tail(&work->list, &def->list);
126	spin_unlock_bh(&def->lock);
127	schedule_work(&def->work);
128}
129
130static void ionic_link_status_check(struct ionic_lif *lif)
131{
132	struct net_device *netdev = lif->netdev;
133	u16 link_status;
134	bool link_up;
135
136	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
137		return;
138
139	/* Don't put carrier back up if we're in a broken state */
140	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
141		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
142		return;
143	}
144
145	link_status = le16_to_cpu(lif->info->status.link_status);
146	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
147
148	if (link_up) {
149		int err = 0;
150
151		if (netdev->flags & IFF_UP && netif_running(netdev)) {
152			mutex_lock(&lif->queue_lock);
153			err = ionic_start_queues(lif);
154			if (err && err != -EBUSY) {
155				netdev_err(netdev,
156					   "Failed to start queues: %d\n", err);
157				set_bit(IONIC_LIF_F_BROKEN, lif->state);
158				netif_carrier_off(lif->netdev);
159			}
160			mutex_unlock(&lif->queue_lock);
161		}
162
163		if (!err && !netif_carrier_ok(netdev)) {
164			ionic_port_identify(lif->ionic);
165			netdev_info(netdev, "Link up - %d Gbps\n",
166				    le32_to_cpu(lif->info->status.link_speed) / 1000);
167			netif_carrier_on(netdev);
168		}
169	} else {
170		if (netif_carrier_ok(netdev)) {
171			lif->link_down_count++;
172			netdev_info(netdev, "Link down\n");
173			netif_carrier_off(netdev);
174		}
175
176		if (netdev->flags & IFF_UP && netif_running(netdev)) {
177			mutex_lock(&lif->queue_lock);
178			ionic_stop_queues(lif);
179			mutex_unlock(&lif->queue_lock);
180		}
181	}
182
183	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
184}
185
186void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
187{
188	struct ionic_deferred_work *work;
189
190	/* we only need one request outstanding at a time */
191	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
192		return;
193
194	if (!can_sleep) {
195		work = kzalloc(sizeof(*work), GFP_ATOMIC);
196		if (!work) {
197			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
198			return;
199		}
200
201		work->type = IONIC_DW_TYPE_LINK_STATUS;
202		ionic_lif_deferred_enqueue(&lif->deferred, work);
203	} else {
204		ionic_link_status_check(lif);
205	}
206}
207
208static void ionic_napi_deadline(struct timer_list *timer)
209{
210	struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
211
212	napi_schedule(&qcq->napi);
213}
214
215static irqreturn_t ionic_isr(int irq, void *data)
216{
217	struct napi_struct *napi = data;
218
219	napi_schedule_irqoff(napi);
220
221	return IRQ_HANDLED;
222}
223
224static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
225{
226	struct ionic_intr_info *intr = &qcq->intr;
227	struct device *dev = lif->ionic->dev;
228	struct ionic_queue *q = &qcq->q;
229	const char *name;
230
231	if (lif->registered)
232		name = lif->netdev->name;
233	else
234		name = dev_name(dev);
235
236	snprintf(intr->name, sizeof(intr->name),
237		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
238
239	return devm_request_irq(dev, intr->vector, ionic_isr,
240				0, intr->name, &qcq->napi);
241}
242
243static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
244{
245	struct ionic *ionic = lif->ionic;
246	int index;
247
248	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
249	if (index == ionic->nintrs) {
250		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
251			    __func__, index, ionic->nintrs);
252		return -ENOSPC;
253	}
254
255	set_bit(index, ionic->intrs);
256	ionic_intr_init(&ionic->idev, intr, index);
257
258	return 0;
259}
260
261static void ionic_intr_free(struct ionic *ionic, int index)
262{
263	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
264		clear_bit(index, ionic->intrs);
265}
266
267static int ionic_qcq_enable(struct ionic_qcq *qcq)
268{
269	struct ionic_queue *q = &qcq->q;
270	struct ionic_lif *lif = q->lif;
271	struct ionic_dev *idev;
272	struct device *dev;
273
274	struct ionic_admin_ctx ctx = {
275		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
276		.cmd.q_control = {
277			.opcode = IONIC_CMD_Q_CONTROL,
278			.lif_index = cpu_to_le16(lif->index),
279			.type = q->type,
280			.index = cpu_to_le32(q->index),
281			.oper = IONIC_Q_ENABLE,
282		},
283	};
284	int ret;
285
286	idev = &lif->ionic->idev;
287	dev = lif->ionic->dev;
288
289	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
290		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
291
292	if (qcq->flags & IONIC_QCQ_F_INTR)
293		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
294
295	ret = ionic_adminq_post_wait(lif, &ctx);
296	if (ret)
297		return ret;
298
299	if (qcq->napi.poll)
300		napi_enable(&qcq->napi);
301
302	if (qcq->flags & IONIC_QCQ_F_INTR) {
303		irq_set_affinity_hint(qcq->intr.vector,
304				      &qcq->intr.affinity_mask);
305		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
306				IONIC_INTR_MASK_CLEAR);
307	}
308
309	return 0;
310}
311
312static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
313{
314	struct ionic_queue *q;
315
316	struct ionic_admin_ctx ctx = {
317		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
318		.cmd.q_control = {
319			.opcode = IONIC_CMD_Q_CONTROL,
320			.oper = IONIC_Q_DISABLE,
321		},
322	};
323
324	if (!qcq) {
325		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
326		return -ENXIO;
327	}
328
329	q = &qcq->q;
330
331	if (qcq->flags & IONIC_QCQ_F_INTR) {
332		struct ionic_dev *idev = &lif->ionic->idev;
333
334		cancel_work_sync(&qcq->dim.work);
335		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
336				IONIC_INTR_MASK_SET);
337		synchronize_irq(qcq->intr.vector);
338		irq_set_affinity_hint(qcq->intr.vector, NULL);
339		napi_disable(&qcq->napi);
340		del_timer_sync(&qcq->napi_deadline);
341	}
342
343	/* If there was a previous fw communcation error, don't bother with
344	 * sending the adminq command and just return the same error value.
345	 */
346	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
347		return fw_err;
348
349	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
350	ctx.cmd.q_control.type = q->type;
351	ctx.cmd.q_control.index = cpu_to_le32(q->index);
352	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
353		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
354
355	return ionic_adminq_post_wait(lif, &ctx);
356}
357
358static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
359{
360	struct ionic_dev *idev = &lif->ionic->idev;
361
362	if (!qcq)
363		return;
364
365	if (!(qcq->flags & IONIC_QCQ_F_INITED))
366		return;
367
368	if (qcq->flags & IONIC_QCQ_F_INTR) {
369		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
370				IONIC_INTR_MASK_SET);
371		netif_napi_del(&qcq->napi);
372	}
373
374	qcq->flags &= ~IONIC_QCQ_F_INITED;
375}
376
377static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
378{
379	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
380		return;
381
382	irq_set_affinity_hint(qcq->intr.vector, NULL);
383	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
384	qcq->intr.vector = 0;
385	ionic_intr_free(lif->ionic, qcq->intr.index);
386	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
387}
388
389static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
390{
391	struct device *dev = lif->ionic->dev;
392
393	if (!qcq)
394		return;
395
396	ionic_debugfs_del_qcq(qcq);
397
398	if (qcq->q_base) {
399		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
400		qcq->q_base = NULL;
401		qcq->q_base_pa = 0;
402	}
403
404	if (qcq->cmb_q_base) {
405		iounmap(qcq->cmb_q_base);
406		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
407		qcq->cmb_pgid = 0;
408		qcq->cmb_order = 0;
409		qcq->cmb_q_base = NULL;
410		qcq->cmb_q_base_pa = 0;
411	}
412
413	if (qcq->cq_base) {
414		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
415		qcq->cq_base = NULL;
416		qcq->cq_base_pa = 0;
417	}
418
419	if (qcq->sg_base) {
420		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
421		qcq->sg_base = NULL;
422		qcq->sg_base_pa = 0;
423	}
424
425	ionic_qcq_intr_free(lif, qcq);
426
427	if (qcq->cq.info) {
428		vfree(qcq->cq.info);
429		qcq->cq.info = NULL;
430	}
431	if (qcq->q.info) {
432		vfree(qcq->q.info);
433		qcq->q.info = NULL;
434	}
435}
436
437void ionic_qcqs_free(struct ionic_lif *lif)
438{
439	struct device *dev = lif->ionic->dev;
440	struct ionic_qcq *adminqcq;
441	unsigned long irqflags;
442
443	if (lif->notifyqcq) {
444		ionic_qcq_free(lif, lif->notifyqcq);
445		devm_kfree(dev, lif->notifyqcq);
446		lif->notifyqcq = NULL;
447	}
448
449	if (lif->adminqcq) {
450		spin_lock_irqsave(&lif->adminq_lock, irqflags);
451		adminqcq = READ_ONCE(lif->adminqcq);
452		lif->adminqcq = NULL;
453		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
454		if (adminqcq) {
455			ionic_qcq_free(lif, adminqcq);
456			devm_kfree(dev, adminqcq);
457		}
458	}
459
460	if (lif->rxqcqs) {
461		devm_kfree(dev, lif->rxqstats);
462		lif->rxqstats = NULL;
463		devm_kfree(dev, lif->rxqcqs);
464		lif->rxqcqs = NULL;
465	}
466
467	if (lif->txqcqs) {
468		devm_kfree(dev, lif->txqstats);
469		lif->txqstats = NULL;
470		devm_kfree(dev, lif->txqcqs);
471		lif->txqcqs = NULL;
472	}
473}
474
475static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
476				      struct ionic_qcq *n_qcq)
477{
478	n_qcq->intr.vector = src_qcq->intr.vector;
479	n_qcq->intr.index = src_qcq->intr.index;
480	n_qcq->napi_qcq = src_qcq->napi_qcq;
481}
482
483static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
484{
485	int err;
486
487	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
488		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
489		return 0;
490	}
491
492	err = ionic_intr_alloc(lif, &qcq->intr);
493	if (err) {
494		netdev_warn(lif->netdev, "no intr for %s: %d\n",
495			    qcq->q.name, err);
496		goto err_out;
497	}
498
499	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
500	if (err < 0) {
501		netdev_warn(lif->netdev, "no vector for %s: %d\n",
502			    qcq->q.name, err);
503		goto err_out_free_intr;
504	}
505	qcq->intr.vector = err;
506	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
507			       IONIC_INTR_MASK_SET);
508
509	err = ionic_request_irq(lif, qcq);
510	if (err) {
511		netdev_warn(lif->netdev, "irq request failed %d\n", err);
512		goto err_out_free_intr;
513	}
514
515	/* try to get the irq on the local numa node first */
516	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
517					     dev_to_node(lif->ionic->dev));
518	if (qcq->intr.cpu != -1)
519		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
520
521	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
522	return 0;
523
524err_out_free_intr:
525	ionic_intr_free(lif->ionic, qcq->intr.index);
526err_out:
527	return err;
528}
529
530static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
531			   unsigned int index,
532			   const char *name, unsigned int flags,
533			   unsigned int num_descs, unsigned int desc_size,
534			   unsigned int cq_desc_size,
535			   unsigned int sg_desc_size,
536			   unsigned int pid, struct ionic_qcq **qcq)
537{
538	struct ionic_dev *idev = &lif->ionic->idev;
539	struct device *dev = lif->ionic->dev;
540	void *q_base, *cq_base, *sg_base;
541	dma_addr_t cq_base_pa = 0;
542	dma_addr_t sg_base_pa = 0;
543	dma_addr_t q_base_pa = 0;
544	struct ionic_qcq *new;
545	int err;
546
547	*qcq = NULL;
548
549	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
550	if (!new) {
551		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
552		err = -ENOMEM;
553		goto err_out;
554	}
555
556	new->q.dev = dev;
557	new->flags = flags;
558
559	new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
560	if (!new->q.info) {
561		netdev_err(lif->netdev, "Cannot allocate queue info\n");
562		err = -ENOMEM;
563		goto err_out_free_qcq;
564	}
565
566	new->q.type = type;
567	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
568
569	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
570			   desc_size, sg_desc_size, pid);
571	if (err) {
572		netdev_err(lif->netdev, "Cannot initialize queue\n");
573		goto err_out_free_q_info;
574	}
575
576	err = ionic_alloc_qcq_interrupt(lif, new);
577	if (err)
578		goto err_out;
579
580	new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
581	if (!new->cq.info) {
582		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
583		err = -ENOMEM;
584		goto err_out_free_irq;
585	}
586
587	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
588	if (err) {
589		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
590		goto err_out_free_cq_info;
591	}
592
593	if (flags & IONIC_QCQ_F_NOTIFYQ) {
594		int q_size;
595
596		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
597		 * and don't alloc qc.  We leave new->qc_size and new->qc_base
598		 * as 0 to be sure we don't try to free it later.
599		 */
600		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
601		new->q_size = PAGE_SIZE + q_size +
602			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
603		new->q_base = dma_alloc_coherent(dev, new->q_size,
604						 &new->q_base_pa, GFP_KERNEL);
605		if (!new->q_base) {
606			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
607			err = -ENOMEM;
608			goto err_out_free_cq_info;
609		}
610		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
611		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
612		ionic_q_map(&new->q, q_base, q_base_pa);
613
614		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
615		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
616		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
617		ionic_cq_bind(&new->cq, &new->q);
618	} else {
619		/* regular DMA q descriptors */
620		new->q_size = PAGE_SIZE + (num_descs * desc_size);
621		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
622						 GFP_KERNEL);
623		if (!new->q_base) {
624			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
625			err = -ENOMEM;
626			goto err_out_free_cq_info;
627		}
628		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
629		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
630		ionic_q_map(&new->q, q_base, q_base_pa);
631
632		if (flags & IONIC_QCQ_F_CMB_RINGS) {
633			/* on-chip CMB q descriptors */
634			new->cmb_q_size = num_descs * desc_size;
635			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
636
637			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
638					    new->cmb_order);
639			if (err) {
640				netdev_err(lif->netdev,
641					   "Cannot allocate queue order %d from cmb: err %d\n",
642					   new->cmb_order, err);
643				goto err_out_free_q;
644			}
645
646			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
647			if (!new->cmb_q_base) {
648				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
649				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
650				err = -ENOMEM;
651				goto err_out_free_q;
652			}
653
654			new->cmb_q_base_pa -= idev->phy_cmb_pages;
655			ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
656		}
657
658		/* cq DMA descriptors */
659		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
660		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
661						  GFP_KERNEL);
662		if (!new->cq_base) {
663			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
664			err = -ENOMEM;
665			goto err_out_free_q;
666		}
667		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
668		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
669		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
670		ionic_cq_bind(&new->cq, &new->q);
671	}
672
673	if (flags & IONIC_QCQ_F_SG) {
674		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
675		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
676						  GFP_KERNEL);
677		if (!new->sg_base) {
678			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
679			err = -ENOMEM;
680			goto err_out_free_cq;
681		}
682		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
683		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
684		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
685	}
686
687	INIT_WORK(&new->dim.work, ionic_dim_work);
688	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
689
690	*qcq = new;
691
692	return 0;
693
694err_out_free_cq:
695	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
696err_out_free_q:
697	if (new->cmb_q_base) {
698		iounmap(new->cmb_q_base);
699		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
700	}
701	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
702err_out_free_cq_info:
703	vfree(new->cq.info);
704err_out_free_irq:
705	if (flags & IONIC_QCQ_F_INTR) {
706		devm_free_irq(dev, new->intr.vector, &new->napi);
707		ionic_intr_free(lif->ionic, new->intr.index);
708	}
709err_out_free_q_info:
710	vfree(new->q.info);
711err_out_free_qcq:
712	devm_kfree(dev, new);
713err_out:
714	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
715	return err;
716}
717
718static int ionic_qcqs_alloc(struct ionic_lif *lif)
719{
720	struct device *dev = lif->ionic->dev;
721	unsigned int flags;
722	int err;
723
724	flags = IONIC_QCQ_F_INTR;
725	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
726			      IONIC_ADMINQ_LENGTH,
727			      sizeof(struct ionic_admin_cmd),
728			      sizeof(struct ionic_admin_comp),
729			      0, lif->kern_pid, &lif->adminqcq);
730	if (err)
731		return err;
732	ionic_debugfs_add_qcq(lif, lif->adminqcq);
733
734	if (lif->ionic->nnqs_per_lif) {
735		flags = IONIC_QCQ_F_NOTIFYQ;
736		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
737				      flags, IONIC_NOTIFYQ_LENGTH,
738				      sizeof(struct ionic_notifyq_cmd),
739				      sizeof(union ionic_notifyq_comp),
740				      0, lif->kern_pid, &lif->notifyqcq);
741		if (err)
742			goto err_out;
743		ionic_debugfs_add_qcq(lif, lif->notifyqcq);
744
745		/* Let the notifyq ride on the adminq interrupt */
746		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
747	}
748
749	err = -ENOMEM;
750	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
751				   sizeof(*lif->txqcqs), GFP_KERNEL);
752	if (!lif->txqcqs)
753		goto err_out;
754	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
755				   sizeof(*lif->rxqcqs), GFP_KERNEL);
756	if (!lif->rxqcqs)
757		goto err_out;
758
759	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
760				     sizeof(*lif->txqstats), GFP_KERNEL);
761	if (!lif->txqstats)
762		goto err_out;
763	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
764				     sizeof(*lif->rxqstats), GFP_KERNEL);
765	if (!lif->rxqstats)
766		goto err_out;
767
768	return 0;
769
770err_out:
771	ionic_qcqs_free(lif);
772	return err;
773}
774
775static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
776{
777	qcq->q.tail_idx = 0;
778	qcq->q.head_idx = 0;
779	qcq->cq.tail_idx = 0;
780	qcq->cq.done_color = 1;
781	memset(qcq->q_base, 0, qcq->q_size);
782	if (qcq->cmb_q_base)
783		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
784	memset(qcq->cq_base, 0, qcq->cq_size);
785	memset(qcq->sg_base, 0, qcq->sg_size);
786}
787
788static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
789{
790	struct device *dev = lif->ionic->dev;
791	struct ionic_queue *q = &qcq->q;
792	struct ionic_cq *cq = &qcq->cq;
793	struct ionic_admin_ctx ctx = {
794		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
795		.cmd.q_init = {
796			.opcode = IONIC_CMD_Q_INIT,
797			.lif_index = cpu_to_le16(lif->index),
798			.type = q->type,
799			.ver = lif->qtype_info[q->type].version,
800			.index = cpu_to_le32(q->index),
801			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
802					     IONIC_QINIT_F_SG),
803			.intr_index = cpu_to_le16(qcq->intr.index),
804			.pid = cpu_to_le16(q->pid),
805			.ring_size = ilog2(q->num_descs),
806			.ring_base = cpu_to_le64(q->base_pa),
807			.cq_ring_base = cpu_to_le64(cq->base_pa),
808			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
809			.features = cpu_to_le64(q->features),
810		},
811	};
812	int err;
813
814	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
815		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
816		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
817	}
818
819	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
820	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
821	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
822	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
823	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
824	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
825	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
826	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
827	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
828
829	ionic_qcq_sanitize(qcq);
830
831	err = ionic_adminq_post_wait(lif, &ctx);
832	if (err)
833		return err;
834
835	q->hw_type = ctx.comp.q_init.hw_type;
836	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
837	q->dbval = IONIC_DBELL_QID(q->hw_index);
838
839	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
840	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
841
842	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
843	q->dbell_jiffies = jiffies;
844
845	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
846		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
847		qcq->napi_qcq = qcq;
848		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
849	}
850
851	qcq->flags |= IONIC_QCQ_F_INITED;
852
853	return 0;
854}
855
856static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
857{
858	struct device *dev = lif->ionic->dev;
859	struct ionic_queue *q = &qcq->q;
860	struct ionic_cq *cq = &qcq->cq;
861	struct ionic_admin_ctx ctx = {
862		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
863		.cmd.q_init = {
864			.opcode = IONIC_CMD_Q_INIT,
865			.lif_index = cpu_to_le16(lif->index),
866			.type = q->type,
867			.ver = lif->qtype_info[q->type].version,
868			.index = cpu_to_le32(q->index),
869			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
870					     IONIC_QINIT_F_SG),
871			.intr_index = cpu_to_le16(cq->bound_intr->index),
872			.pid = cpu_to_le16(q->pid),
873			.ring_size = ilog2(q->num_descs),
874			.ring_base = cpu_to_le64(q->base_pa),
875			.cq_ring_base = cpu_to_le64(cq->base_pa),
876			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
877			.features = cpu_to_le64(q->features),
878		},
879	};
880	int err;
881
882	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
883		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
884		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
885	}
886
887	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
888	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
889	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
890	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
891	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
892	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
893	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
894
895	ionic_qcq_sanitize(qcq);
896
897	err = ionic_adminq_post_wait(lif, &ctx);
898	if (err)
899		return err;
900
901	q->hw_type = ctx.comp.q_init.hw_type;
902	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
903	q->dbval = IONIC_DBELL_QID(q->hw_index);
904
905	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
906	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
907
908	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
909	q->dbell_jiffies = jiffies;
910
911	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
912		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
913	else
914		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
915
916	qcq->napi_qcq = qcq;
917	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
918
919	qcq->flags |= IONIC_QCQ_F_INITED;
920
921	return 0;
922}
923
924int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
925{
926	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
927	unsigned int txq_i, flags;
928	struct ionic_qcq *txq;
929	u64 features;
930	int err;
931
932	if (lif->hwstamp_txq)
933		return 0;
934
935	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;
936
937	num_desc = IONIC_MIN_TXRX_DESC;
938	desc_sz = sizeof(struct ionic_txq_desc);
939	comp_sz = 2 * sizeof(struct ionic_txq_comp);
940
941	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
942	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
943		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
944	else
945		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
946
947	txq_i = lif->ionic->ntxqs_per_lif;
948	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
949
950	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
951			      num_desc, desc_sz, comp_sz, sg_desc_sz,
952			      lif->kern_pid, &txq);
953	if (err)
954		goto err_qcq_alloc;
955
956	txq->q.features = features;
957
958	ionic_link_qcq_interrupts(lif->adminqcq, txq);
959	ionic_debugfs_add_qcq(lif, txq);
960
961	lif->hwstamp_txq = txq;
962
963	if (netif_running(lif->netdev)) {
964		err = ionic_lif_txq_init(lif, txq);
965		if (err)
966			goto err_qcq_init;
967
968		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
969			err = ionic_qcq_enable(txq);
970			if (err)
971				goto err_qcq_enable;
972		}
973	}
974
975	return 0;
976
977err_qcq_enable:
978	ionic_lif_qcq_deinit(lif, txq);
979err_qcq_init:
980	lif->hwstamp_txq = NULL;
981	ionic_debugfs_del_qcq(txq);
982	ionic_qcq_free(lif, txq);
983	devm_kfree(lif->ionic->dev, txq);
984err_qcq_alloc:
985	return err;
986}
987
988int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
989{
990	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
991	unsigned int rxq_i, flags;
992	struct ionic_qcq *rxq;
993	u64 features;
994	int err;
995
996	if (lif->hwstamp_rxq)
997		return 0;
998
999	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
1000
1001	num_desc = IONIC_MIN_TXRX_DESC;
1002	desc_sz = sizeof(struct ionic_rxq_desc);
1003	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
1004	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
1005
1006	rxq_i = lif->ionic->nrxqs_per_lif;
1007	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
1008
1009	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
1010			      num_desc, desc_sz, comp_sz, sg_desc_sz,
1011			      lif->kern_pid, &rxq);
1012	if (err)
1013		goto err_qcq_alloc;
1014
1015	rxq->q.features = features;
1016
1017	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
1018	ionic_debugfs_add_qcq(lif, rxq);
1019
1020	lif->hwstamp_rxq = rxq;
1021
1022	if (netif_running(lif->netdev)) {
1023		err = ionic_lif_rxq_init(lif, rxq);
1024		if (err)
1025			goto err_qcq_init;
1026
1027		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
1028			ionic_rx_fill(&rxq->q);
1029			err = ionic_qcq_enable(rxq);
1030			if (err)
1031				goto err_qcq_enable;
1032		}
1033	}
1034
1035	return 0;
1036
1037err_qcq_enable:
1038	ionic_lif_qcq_deinit(lif, rxq);
1039err_qcq_init:
1040	lif->hwstamp_rxq = NULL;
1041	ionic_debugfs_del_qcq(rxq);
1042	ionic_qcq_free(lif, rxq);
1043	devm_kfree(lif->ionic->dev, rxq);
1044err_qcq_alloc:
1045	return err;
1046}
1047
1048int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
1049{
1050	struct ionic_queue_params qparam;
1051
1052	ionic_init_queue_params(lif, &qparam);
1053
1054	if (rx_all)
1055		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
1056	else
1057		qparam.rxq_features = 0;
1058
1059	/* if we're not running, just set the values and return */
1060	if (!netif_running(lif->netdev)) {
1061		lif->rxq_features = qparam.rxq_features;
1062		return 0;
1063	}
1064
1065	return ionic_reconfigure_queues(lif, &qparam);
1066}
1067
1068int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
1069{
1070	struct ionic_admin_ctx ctx = {
1071		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1072		.cmd.lif_setattr = {
1073			.opcode = IONIC_CMD_LIF_SETATTR,
1074			.index = cpu_to_le16(lif->index),
1075			.attr = IONIC_LIF_ATTR_TXSTAMP,
1076			.txstamp_mode = cpu_to_le16(txstamp_mode),
1077		},
1078	};
1079
1080	return ionic_adminq_post_wait(lif, &ctx);
1081}
1082
1083static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
1084{
1085	struct ionic_admin_ctx ctx = {
1086		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1087		.cmd.rx_filter_del = {
1088			.opcode = IONIC_CMD_RX_FILTER_DEL,
1089			.lif_index = cpu_to_le16(lif->index),
1090		},
1091	};
1092	struct ionic_rx_filter *f;
1093	u32 filter_id;
1094	int err;
1095
1096	spin_lock_bh(&lif->rx_filters.lock);
1097
1098	f = ionic_rx_filter_rxsteer(lif);
1099	if (!f) {
1100		spin_unlock_bh(&lif->rx_filters.lock);
1101		return;
1102	}
1103
1104	filter_id = f->filter_id;
1105	ionic_rx_filter_free(lif, f);
1106
1107	spin_unlock_bh(&lif->rx_filters.lock);
1108
1109	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);
1110
1111	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);
1112
1113	err = ionic_adminq_post_wait(lif, &ctx);
1114	if (err && err != -EEXIST)
1115		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
1116}
1117
1118static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1119{
1120	struct ionic_admin_ctx ctx = {
1121		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1122		.cmd.rx_filter_add = {
1123			.opcode = IONIC_CMD_RX_FILTER_ADD,
1124			.lif_index = cpu_to_le16(lif->index),
1125			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
1126			.pkt_class = cpu_to_le64(pkt_class),
1127		},
1128	};
1129	u8 qtype;
1130	u32 qid;
1131	int err;
1132
1133	if (!lif->hwstamp_rxq)
1134		return -EINVAL;
1135
1136	qtype = lif->hwstamp_rxq->q.type;
1137	ctx.cmd.rx_filter_add.qtype = qtype;
1138
1139	qid = lif->hwstamp_rxq->q.index;
1140	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);
1141
1142	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
1143	err = ionic_adminq_post_wait(lif, &ctx);
1144	if (err && err != -EEXIST)
1145		return err;
1146
1147	spin_lock_bh(&lif->rx_filters.lock);
1148	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
1149	spin_unlock_bh(&lif->rx_filters.lock);
1150
1151	return err;
1152}
1153
1154int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1155{
1156	ionic_lif_del_hwstamp_rxfilt(lif);
1157
1158	if (!pkt_class)
1159		return 0;
1160
1161	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
1162}
1163
1164static bool ionic_notifyq_service(struct ionic_cq *cq,
1165				  struct ionic_cq_info *cq_info)
1166{
1167	union ionic_notifyq_comp *comp = cq_info->cq_desc;
1168	struct ionic_deferred_work *work;
1169	struct net_device *netdev;
1170	struct ionic_queue *q;
1171	struct ionic_lif *lif;
1172	u64 eid;
1173
1174	q = cq->bound_q;
1175	lif = q->info[0].cb_arg;
1176	netdev = lif->netdev;
1177	eid = le64_to_cpu(comp->event.eid);
1178
1179	/* Have we run out of new completions to process? */
1180	if ((s64)(eid - lif->last_eid) <= 0)
1181		return false;
1182
1183	lif->last_eid = eid;
1184
1185	dev_dbg(lif->ionic->dev, "notifyq event:\n");
1186	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
1187			 comp, sizeof(*comp), true);
1188
1189	switch (le16_to_cpu(comp->event.ecode)) {
1190	case IONIC_EVENT_LINK_CHANGE:
1191		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1192		break;
1193	case IONIC_EVENT_RESET:
1194		if (lif->ionic->idev.fw_status_ready &&
1195		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
1196		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
1197			work = kzalloc(sizeof(*work), GFP_ATOMIC);
1198			if (!work) {
1199				netdev_err(lif->netdev, "Reset event dropped\n");
1200				clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
1201			} else {
1202				work->type = IONIC_DW_TYPE_LIF_RESET;
1203				ionic_lif_deferred_enqueue(&lif->deferred, work);
1204			}
1205		}
1206		break;
1207	default:
1208		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
1209			    comp->event.ecode, eid);
1210		break;
1211	}
1212
1213	return true;
1214}
1215
1216static bool ionic_adminq_service(struct ionic_cq *cq,
1217				 struct ionic_cq_info *cq_info)
1218{
1219	struct ionic_admin_comp *comp = cq_info->cq_desc;
1220
1221	if (!color_match(comp->color, cq->done_color))
1222		return false;
1223
1224	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
1225
1226	return true;
1227}
1228
1229static int ionic_adminq_napi(struct napi_struct *napi, int budget)
1230{
1231	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
1232	struct ionic_lif *lif = napi_to_cq(napi)->lif;
1233	struct ionic_dev *idev = &lif->ionic->idev;
1234	unsigned long irqflags;
1235	unsigned int flags = 0;
1236	bool resched = false;
1237	int rx_work = 0;
1238	int tx_work = 0;
1239	int n_work = 0;
1240	int a_work = 0;
1241	int work_done;
1242	int credits;
1243
1244	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
1245		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
1246					  ionic_notifyq_service, NULL, NULL);
1247
1248	spin_lock_irqsave(&lif->adminq_lock, irqflags);
1249	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
1250		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
1251					  ionic_adminq_service, NULL, NULL);
1252	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
1253
1254	if (lif->hwstamp_rxq)
1255		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
1256					   ionic_rx_service, NULL, NULL);
1257
1258	if (lif->hwstamp_txq)
1259		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
1260					   ionic_tx_service, NULL, NULL);
1261
1262	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
1263	if (work_done < budget && napi_complete_done(napi, work_done)) {
1264		flags |= IONIC_INTR_CRED_UNMASK;
1265		intr->rearm_count++;
1266	}
1267
1268	if (work_done || flags) {
1269		flags |= IONIC_INTR_CRED_RESET_COALESCE;
1270		credits = n_work + a_work + rx_work + tx_work;
1271		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
1272	}
1273
1274	if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
1275		resched = true;
1276	if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
1277		resched = true;
1278	if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
1279		resched = true;
1280	if (resched)
1281		mod_timer(&lif->adminqcq->napi_deadline,
1282			  jiffies + IONIC_NAPI_DEADLINE);
1283
1284	return work_done;
1285}
1286
1287void ionic_get_stats64(struct net_device *netdev,
1288		       struct rtnl_link_stats64 *ns)
1289{
1290	struct ionic_lif *lif = netdev_priv(netdev);
1291	struct ionic_lif_stats *ls;
1292
1293	memset(ns, 0, sizeof(*ns));
1294	ls = &lif->info->stats;
1295
1296	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
1297			 le64_to_cpu(ls->rx_mcast_packets) +
1298			 le64_to_cpu(ls->rx_bcast_packets);
1299
1300	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
1301			 le64_to_cpu(ls->tx_mcast_packets) +
1302			 le64_to_cpu(ls->tx_bcast_packets);
1303
1304	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
1305		       le64_to_cpu(ls->rx_mcast_bytes) +
1306		       le64_to_cpu(ls->rx_bcast_bytes);
1307
1308	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
1309		       le64_to_cpu(ls->tx_mcast_bytes) +
1310		       le64_to_cpu(ls->tx_bcast_bytes);
1311
1312	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
1313			 le64_to_cpu(ls->rx_mcast_drop_packets) +
1314			 le64_to_cpu(ls->rx_bcast_drop_packets);
1315
1316	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
1317			 le64_to_cpu(ls->tx_mcast_drop_packets) +
1318			 le64_to_cpu(ls->tx_bcast_drop_packets);
1319
1320	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
1321
1322	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
1323
1324	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
1325			       le64_to_cpu(ls->rx_queue_disabled) +
1326			       le64_to_cpu(ls->rx_desc_fetch_error) +
1327			       le64_to_cpu(ls->rx_desc_data_error);
1328
1329	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
1330				le64_to_cpu(ls->tx_queue_disabled) +
1331				le64_to_cpu(ls->tx_desc_fetch_error) +
1332				le64_to_cpu(ls->tx_desc_data_error);
1333
1334	ns->rx_errors = ns->rx_over_errors +
1335			ns->rx_missed_errors;
1336
1337	ns->tx_errors = ns->tx_aborted_errors;
1338}
1339
1340static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1341{
1342	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
1343}
1344
1345static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1346{
1347	/* Don't delete our own address from the uc list */
1348	if (ether_addr_equal(addr, netdev->dev_addr))
1349		return 0;
1350
1351	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
1352}
1353
1354void ionic_lif_rx_mode(struct ionic_lif *lif)
1355{
1356	struct net_device *netdev = lif->netdev;
1357	unsigned int nfilters;
1358	unsigned int nd_flags;
1359	char buf[128];
1360	u16 rx_mode;
1361	int i;
1362#define REMAIN(__x) (sizeof(buf) - (__x))
1363
1364	mutex_lock(&lif->config_lock);
1365
1366	/* grab the flags once for local use */
1367	nd_flags = netdev->flags;
1368
1369	rx_mode = IONIC_RX_MODE_F_UNICAST;
1370	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1371	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1372	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1373	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1374
1375	/* sync the filters */
1376	ionic_rx_filter_sync(lif);
1377
1378	/* check for overflow state
1379	 *    if so, we track that we overflowed and enable NIC PROMISC
1380	 *    else if the overflow is set and not needed
1381	 *       we remove our overflow flag and check the netdev flags
1382	 *       to see if we can disable NIC PROMISC
1383	 */
1384	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
1385
1386	if (((lif->nucast + lif->nmcast) >= nfilters) ||
1387	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
1388		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1389		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1390	} else {
1391		if (!(nd_flags & IFF_PROMISC))
1392			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1393		if (!(nd_flags & IFF_ALLMULTI))
1394			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1395	}
1396
1397	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1398		      lif->rx_mode, rx_mode);
1399	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
1400		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
1401	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
1402		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
1403	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
1404		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
1405	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
1406		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
1407	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
1408		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1409	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
1410		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
1411	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
1412
1413	if (lif->rx_mode != rx_mode) {
1414		struct ionic_admin_ctx ctx = {
1415			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1416			.cmd.rx_mode_set = {
1417				.opcode = IONIC_CMD_RX_MODE_SET,
1418				.lif_index = cpu_to_le16(lif->index),
1419			},
1420		};
1421		int err;
1422
1423		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
1424		err = ionic_adminq_post_wait(lif, &ctx);
1425		if (err)
1426			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
1427				    rx_mode, err);
1428		else
1429			lif->rx_mode = rx_mode;
1430	}
1431
1432	mutex_unlock(&lif->config_lock);
1433}
1434
1435static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1436{
1437	struct ionic_lif *lif = netdev_priv(netdev);
1438	struct ionic_deferred_work *work;
1439
1440	/* Sync the kernel filter list with the driver filter list */
1441	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1442	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1443
1444	/* Shove off the rest of the rxmode work to the work task
1445	 * which will include syncing the filters to the firmware.
1446	 */
1447	work = kzalloc(sizeof(*work), GFP_ATOMIC);
1448	if (!work) {
1449		netdev_err(lif->netdev, "rxmode change dropped\n");
1450		return;
1451	}
1452	work->type = IONIC_DW_TYPE_RX_MODE;
1453	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1454	ionic_lif_deferred_enqueue(&lif->deferred, work);
1455}
1456
1457static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1458{
1459	u64 wanted = 0;
1460
1461	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1462		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1463	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1464		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1465	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1466		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1467	if (features & NETIF_F_RXHASH)
1468		wanted |= IONIC_ETH_HW_RX_HASH;
1469	if (features & NETIF_F_RXCSUM)
1470		wanted |= IONIC_ETH_HW_RX_CSUM;
1471	if (features & NETIF_F_SG)
1472		wanted |= IONIC_ETH_HW_TX_SG;
1473	if (features & NETIF_F_HW_CSUM)
1474		wanted |= IONIC_ETH_HW_TX_CSUM;
1475	if (features & NETIF_F_TSO)
1476		wanted |= IONIC_ETH_HW_TSO;
1477	if (features & NETIF_F_TSO6)
1478		wanted |= IONIC_ETH_HW_TSO_IPV6;
1479	if (features & NETIF_F_TSO_ECN)
1480		wanted |= IONIC_ETH_HW_TSO_ECN;
1481	if (features & NETIF_F_GSO_GRE)
1482		wanted |= IONIC_ETH_HW_TSO_GRE;
1483	if (features & NETIF_F_GSO_GRE_CSUM)
1484		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1485	if (features & NETIF_F_GSO_IPXIP4)
1486		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1487	if (features & NETIF_F_GSO_IPXIP6)
1488		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1489	if (features & NETIF_F_GSO_UDP_TUNNEL)
1490		wanted |= IONIC_ETH_HW_TSO_UDP;
1491	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1492		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1493
1494	return cpu_to_le64(wanted);
1495}
1496
1497static int ionic_set_nic_features(struct ionic_lif *lif,
1498				  netdev_features_t features)
1499{
1500	struct device *dev = lif->ionic->dev;
1501	struct ionic_admin_ctx ctx = {
1502		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1503		.cmd.lif_setattr = {
1504			.opcode = IONIC_CMD_LIF_SETATTR,
1505			.index = cpu_to_le16(lif->index),
1506			.attr = IONIC_LIF_ATTR_FEATURES,
1507		},
1508	};
1509	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1510			 IONIC_ETH_HW_VLAN_RX_STRIP |
1511			 IONIC_ETH_HW_VLAN_RX_FILTER;
1512	u64 old_hw_features;
1513	int err;
1514
1515	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1516
1517	if (lif->phc)
1518		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);
1519
1520	err = ionic_adminq_post_wait(lif, &ctx);
1521	if (err)
1522		return err;
1523
1524	old_hw_features = lif->hw_features;
1525	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1526				       ctx.comp.lif_setattr.features);
1527
1528	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1529		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1530
1531	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
1532	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1533		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1534
1535	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1536		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1537	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1538		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1539	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1540		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1541	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1542		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1543	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1544		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1545	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1546		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1547	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1548		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1549	if (lif->hw_features & IONIC_ETH_HW_TSO)
1550		dev_dbg(dev, "feature ETH_HW_TSO\n");
1551	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1552		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1553	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1554		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1555	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1556		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1557	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1558		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1559	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1560		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1561	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1562		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1563	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1564		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1565	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1566		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1567	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
1568		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");
1569
1570	return 0;
1571}
1572
1573static int ionic_init_nic_features(struct ionic_lif *lif)
1574{
1575	struct net_device *netdev = lif->netdev;
1576	netdev_features_t features;
1577	int err;
1578
1579	/* set up what we expect to support by default */
1580	features = NETIF_F_HW_VLAN_CTAG_TX |
1581		   NETIF_F_HW_VLAN_CTAG_RX |
1582		   NETIF_F_HW_VLAN_CTAG_FILTER |
1583		   NETIF_F_SG |
1584		   NETIF_F_HW_CSUM |
1585		   NETIF_F_RXCSUM |
1586		   NETIF_F_TSO |
1587		   NETIF_F_TSO6 |
1588		   NETIF_F_TSO_ECN |
1589		   NETIF_F_GSO_GRE |
1590		   NETIF_F_GSO_GRE_CSUM |
1591		   NETIF_F_GSO_IPXIP4 |
1592		   NETIF_F_GSO_IPXIP6 |
1593		   NETIF_F_GSO_UDP_TUNNEL |
1594		   NETIF_F_GSO_UDP_TUNNEL_CSUM;
1595
1596	if (lif->nxqs > 1)
1597		features |= NETIF_F_RXHASH;
1598
1599	err = ionic_set_nic_features(lif, features);
1600	if (err)
1601		return err;
1602
1603	/* tell the netdev what we actually can support */
1604	netdev->features |= NETIF_F_HIGHDMA;
1605
1606	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1607		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1608	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1609		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1610	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1611		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1612	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1613		netdev->hw_features |= NETIF_F_RXHASH;
1614	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1615		netdev->hw_features |= NETIF_F_SG;
1616
1617	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1618		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1619	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1620		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1621	if (lif->hw_features & IONIC_ETH_HW_TSO)
1622		netdev->hw_enc_features |= NETIF_F_TSO;
1623	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1624		netdev->hw_enc_features |= NETIF_F_TSO6;
1625	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1626		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1627	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1628		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1629	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1630		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1631	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1632		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1633	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1634		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1635	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1636		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1637	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1638		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1639
1640	netdev->hw_features |= netdev->hw_enc_features;
1641	netdev->features |= netdev->hw_features;
1642	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1643
1644	netdev->priv_flags |= IFF_UNICAST_FLT |
1645			      IFF_LIVE_ADDR_CHANGE;
1646
1647	return 0;
1648}
1649
1650static int ionic_set_features(struct net_device *netdev,
1651			      netdev_features_t features)
1652{
1653	struct ionic_lif *lif = netdev_priv(netdev);
1654	int err;
1655
1656	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1657		   __func__, (u64)lif->netdev->features, (u64)features);
1658
1659	err = ionic_set_nic_features(lif, features);
1660
1661	return err;
1662}
1663
1664static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
1665{
1666	struct ionic_admin_ctx ctx = {
1667		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1668		.cmd.lif_setattr = {
1669			.opcode = IONIC_CMD_LIF_SETATTR,
1670			.index = cpu_to_le16(lif->index),
1671			.attr = IONIC_LIF_ATTR_MAC,
1672		},
1673	};
1674
1675	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
1676	return ionic_adminq_post_wait(lif, &ctx);
1677}
1678
1679static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
1680{
1681	struct ionic_admin_ctx ctx = {
1682		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1683		.cmd.lif_getattr = {
1684			.opcode = IONIC_CMD_LIF_GETATTR,
1685			.index = cpu_to_le16(lif->index),
1686			.attr = IONIC_LIF_ATTR_MAC,
1687		},
1688	};
1689	int err;
1690
1691	err = ionic_adminq_post_wait(lif, &ctx);
1692	if (err)
1693		return err;
1694
1695	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
1696	return 0;
1697}
1698
1699static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
1700{
1701	u8  get_mac[ETH_ALEN];
1702	int err;
1703
1704	err = ionic_set_attr_mac(lif, mac);
1705	if (err)
1706		return err;
1707
1708	err = ionic_get_attr_mac(lif, get_mac);
1709	if (err)
1710		return err;
1711
1712	/* To deal with older firmware that silently ignores the set attr mac:
1713	 * doesn't actually change the mac and doesn't return an error, so we
1714	 * do the get attr to verify whether or not the set actually happened
1715	 */
1716	if (!ether_addr_equal(get_mac, mac))
1717		return 1;
1718
1719	return 0;
1720}
1721
1722static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1723{
1724	struct ionic_lif *lif = netdev_priv(netdev);
1725	struct sockaddr *addr = sa;
1726	u8 *mac;
1727	int err;
1728
1729	mac = (u8 *)addr->sa_data;
1730	if (ether_addr_equal(netdev->dev_addr, mac))
1731		return 0;
1732
1733	err = ionic_program_mac(lif, mac);
1734	if (err < 0)
1735		return err;
1736
1737	if (err > 0)
1738		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
1739			   __func__);
1740
1741	err = eth_prepare_mac_addr_change(netdev, addr);
1742	if (err)
1743		return err;
1744
1745	if (!is_zero_ether_addr(netdev->dev_addr)) {
1746		netdev_info(netdev, "deleting mac addr %pM\n",
1747			    netdev->dev_addr);
1748		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
1749	}
1750
1751	eth_commit_mac_addr_change(netdev, addr);
1752	netdev_info(netdev, "updating mac addr %pM\n", mac);
1753
1754	return ionic_lif_addr_add(netdev_priv(netdev), mac);
1755}
1756
1757void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1758{
1759	/* Stop and clean the queues before reconfiguration */
1760	netif_device_detach(lif->netdev);
1761	ionic_stop_queues(lif);
1762	ionic_txrx_deinit(lif);
1763}
1764
1765static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1766{
1767	int err;
1768
1769	/* Re-init the queues after reconfiguration */
1770
1771	/* The only way txrx_init can fail here is if communication
1772	 * with FW is suddenly broken.  There's not much we can do
1773	 * at this point - error messages have already been printed,
1774	 * so we can continue on and the user can eventually do a
1775	 * DOWN and UP to try to reset and clear the issue.
1776	 */
1777	err = ionic_txrx_init(lif);
1778	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1779	netif_device_attach(lif->netdev);
1780
1781	return err;
1782}
1783
1784static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1785{
1786	struct ionic_lif *lif = netdev_priv(netdev);
1787	struct ionic_admin_ctx ctx = {
1788		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1789		.cmd.lif_setattr = {
1790			.opcode = IONIC_CMD_LIF_SETATTR,
1791			.index = cpu_to_le16(lif->index),
1792			.attr = IONIC_LIF_ATTR_MTU,
1793			.mtu = cpu_to_le32(new_mtu),
1794		},
1795	};
1796	int err;
1797
1798	err = ionic_adminq_post_wait(lif, &ctx);
1799	if (err)
1800		return err;
1801
1802	/* if we're not running, nothing more to do */
1803	if (!netif_running(netdev)) {
1804		netdev->mtu = new_mtu;
1805		return 0;
1806	}
1807
1808	mutex_lock(&lif->queue_lock);
1809	ionic_stop_queues_reconfig(lif);
1810	netdev->mtu = new_mtu;
1811	err = ionic_start_queues_reconfig(lif);
1812	mutex_unlock(&lif->queue_lock);
1813
1814	return err;
1815}
1816
1817static void ionic_tx_timeout_work(struct work_struct *ws)
1818{
1819	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1820	int err;
1821
1822	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1823		return;
1824
1825	/* if we were stopped before this scheduled job was launched,
1826	 * don't bother the queues as they are already stopped.
1827	 */
1828	if (!netif_running(lif->netdev))
1829		return;
1830
1831	mutex_lock(&lif->queue_lock);
1832	ionic_stop_queues_reconfig(lif);
1833	err = ionic_start_queues_reconfig(lif);
1834	mutex_unlock(&lif->queue_lock);
1835
1836	if (err)
1837		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
1838}
1839
1840static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1841{
1842	struct ionic_lif *lif = netdev_priv(netdev);
1843
1844	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1845	schedule_work(&lif->tx_timeout_work);
1846}
1847
1848static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1849				 u16 vid)
1850{
1851	struct ionic_lif *lif = netdev_priv(netdev);
1852	int err;
1853
1854	err = ionic_lif_vlan_add(lif, vid);
1855	if (err)
1856		return err;
1857
1858	ionic_lif_rx_mode(lif);
1859
1860	return 0;
1861}
1862
1863static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1864				  u16 vid)
1865{
1866	struct ionic_lif *lif = netdev_priv(netdev);
1867	int err;
1868
1869	err = ionic_lif_vlan_del(lif, vid);
1870	if (err)
1871		return err;
1872
1873	ionic_lif_rx_mode(lif);
1874
1875	return 0;
1876}
1877
1878int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1879			 const u8 *key, const u32 *indir)
1880{
1881	struct ionic_admin_ctx ctx = {
1882		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1883		.cmd.lif_setattr = {
1884			.opcode = IONIC_CMD_LIF_SETATTR,
1885			.attr = IONIC_LIF_ATTR_RSS,
1886			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1887		},
1888	};
1889	unsigned int i, tbl_sz;
1890
1891	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1892		lif->rss_types = types;
1893		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1894	}
1895
1896	if (key)
1897		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1898
1899	if (indir) {
1900		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1901		for (i = 0; i < tbl_sz; i++)
1902			lif->rss_ind_tbl[i] = indir[i];
1903	}
1904
1905	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1906	       IONIC_RSS_HASH_KEY_SIZE);
1907
1908	return ionic_adminq_post_wait(lif, &ctx);
1909}
1910
1911static int ionic_lif_rss_init(struct ionic_lif *lif)
1912{
1913	unsigned int tbl_sz;
1914	unsigned int i;
1915
1916	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1917			 IONIC_RSS_TYPE_IPV4_TCP |
1918			 IONIC_RSS_TYPE_IPV4_UDP |
1919			 IONIC_RSS_TYPE_IPV6     |
1920			 IONIC_RSS_TYPE_IPV6_TCP |
1921			 IONIC_RSS_TYPE_IPV6_UDP;
1922
1923	/* Fill indirection table with 'default' values */
1924	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1925	for (i = 0; i < tbl_sz; i++)
1926		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1927
1928	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1929}
1930
1931static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1932{
1933	int tbl_sz;
1934
1935	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1936	memset(lif->rss_ind_tbl, 0, tbl_sz);
1937	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1938
1939	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1940}
1941
1942static void ionic_lif_quiesce(struct ionic_lif *lif)
1943{
1944	struct ionic_admin_ctx ctx = {
1945		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1946		.cmd.lif_setattr = {
1947			.opcode = IONIC_CMD_LIF_SETATTR,
1948			.index = cpu_to_le16(lif->index),
1949			.attr = IONIC_LIF_ATTR_STATE,
1950			.state = IONIC_LIF_QUIESCE,
1951		},
1952	};
1953	int err;
1954
1955	err = ionic_adminq_post_wait(lif, &ctx);
1956	if (err)
1957		netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
1958}
1959
1960static void ionic_txrx_disable(struct ionic_lif *lif)
1961{
1962	unsigned int i;
1963	int err = 0;
1964
1965	if (lif->txqcqs) {
1966		for (i = 0; i < lif->nxqs; i++)
1967			err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
1968	}
1969
1970	if (lif->hwstamp_txq)
1971		err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
1972
1973	if (lif->rxqcqs) {
1974		for (i = 0; i < lif->nxqs; i++)
1975			err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
1976	}
1977
1978	if (lif->hwstamp_rxq)
1979		err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
1980
1981	ionic_lif_quiesce(lif);
1982}
1983
1984static void ionic_txrx_deinit(struct ionic_lif *lif)
1985{
1986	unsigned int i;
1987
1988	if (lif->txqcqs) {
1989		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1990			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1991			ionic_tx_flush(&lif->txqcqs[i]->cq);
1992			ionic_tx_empty(&lif->txqcqs[i]->q);
1993		}
1994	}
1995
1996	if (lif->rxqcqs) {
1997		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1998			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1999			ionic_rx_empty(&lif->rxqcqs[i]->q);
2000		}
2001	}
2002	lif->rx_mode = 0;
2003
2004	if (lif->hwstamp_txq) {
2005		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
2006		ionic_tx_flush(&lif->hwstamp_txq->cq);
2007		ionic_tx_empty(&lif->hwstamp_txq->q);
2008	}
2009
2010	if (lif->hwstamp_rxq) {
2011		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
2012		ionic_rx_empty(&lif->hwstamp_rxq->q);
2013	}
2014}
2015
2016void ionic_txrx_free(struct ionic_lif *lif)
2017{
2018	unsigned int i;
2019
2020	if (lif->txqcqs) {
2021		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
2022			ionic_qcq_free(lif, lif->txqcqs[i]);
2023			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
2024			lif->txqcqs[i] = NULL;
2025		}
2026	}
2027
2028	if (lif->rxqcqs) {
2029		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2030			ionic_qcq_free(lif, lif->rxqcqs[i]);
2031			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
2032			lif->rxqcqs[i] = NULL;
2033		}
2034	}
2035
2036	if (lif->hwstamp_txq) {
2037		ionic_qcq_free(lif, lif->hwstamp_txq);
2038		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
2039		lif->hwstamp_txq = NULL;
2040	}
2041
2042	if (lif->hwstamp_rxq) {
2043		ionic_qcq_free(lif, lif->hwstamp_rxq);
2044		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2045		lif->hwstamp_rxq = NULL;
2046	}
2047}
2048
2049static int ionic_txrx_alloc(struct ionic_lif *lif)
2050{
2051	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2052	unsigned int flags, i;
2053	int err = 0;
2054
2055	num_desc = lif->ntxq_descs;
2056	desc_sz = sizeof(struct ionic_txq_desc);
2057	comp_sz = sizeof(struct ionic_txq_comp);
2058
2059	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2060	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2061					  sizeof(struct ionic_txq_sg_desc_v1))
2062		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2063	else
2064		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2065
2066	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2067
2068	if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
2069		flags |= IONIC_QCQ_F_CMB_RINGS;
2070
2071	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2072		flags |= IONIC_QCQ_F_INTR;
2073
2074	for (i = 0; i < lif->nxqs; i++) {
2075		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2076				      num_desc, desc_sz, comp_sz, sg_desc_sz,
2077				      lif->kern_pid, &lif->txqcqs[i]);
2078		if (err)
2079			goto err_out;
2080
2081		if (flags & IONIC_QCQ_F_INTR) {
2082			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2083					     lif->txqcqs[i]->intr.index,
2084					     lif->tx_coalesce_hw);
2085			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2086				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2087		}
2088
2089		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2090	}
2091
2092	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
2093
2094	if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
2095		flags |= IONIC_QCQ_F_CMB_RINGS;
2096
2097	num_desc = lif->nrxq_descs;
2098	desc_sz = sizeof(struct ionic_rxq_desc);
2099	comp_sz = sizeof(struct ionic_rxq_comp);
2100	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2101
2102	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2103		comp_sz *= 2;
2104
2105	for (i = 0; i < lif->nxqs; i++) {
2106		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2107				      num_desc, desc_sz, comp_sz, sg_desc_sz,
2108				      lif->kern_pid, &lif->rxqcqs[i]);
2109		if (err)
2110			goto err_out;
2111
2112		lif->rxqcqs[i]->q.features = lif->rxq_features;
2113
2114		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2115				     lif->rxqcqs[i]->intr.index,
2116				     lif->rx_coalesce_hw);
2117		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
2118			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
2119
2120		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2121			ionic_link_qcq_interrupts(lif->rxqcqs[i],
2122						  lif->txqcqs[i]);
2123
2124		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2125	}
2126
2127	return 0;
2128
2129err_out:
2130	ionic_txrx_free(lif);
2131
2132	return err;
2133}
2134
2135static int ionic_txrx_init(struct ionic_lif *lif)
2136{
2137	unsigned int i;
2138	int err;
2139
2140	for (i = 0; i < lif->nxqs; i++) {
2141		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
2142		if (err)
2143			goto err_out;
2144
2145		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
2146		if (err) {
2147			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2148			goto err_out;
2149		}
2150	}
2151
2152	if (lif->netdev->features & NETIF_F_RXHASH)
2153		ionic_lif_rss_init(lif);
2154
2155	ionic_lif_rx_mode(lif);
2156
2157	return 0;
2158
2159err_out:
2160	while (i--) {
2161		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2162		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
2163	}
2164
2165	return err;
2166}
2167
2168static int ionic_txrx_enable(struct ionic_lif *lif)
2169{
2170	int derr = 0;
2171	int i, err;
2172
2173	for (i = 0; i < lif->nxqs; i++) {
2174		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
2175			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
2176			err = -ENXIO;
2177			goto err_out;
2178		}
2179
2180		ionic_rx_fill(&lif->rxqcqs[i]->q);
2181		err = ionic_qcq_enable(lif->rxqcqs[i]);
2182		if (err)
2183			goto err_out;
2184
2185		err = ionic_qcq_enable(lif->txqcqs[i]);
2186		if (err) {
2187			derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
2188			goto err_out;
2189		}
2190	}
2191
2192	if (lif->hwstamp_rxq) {
2193		ionic_rx_fill(&lif->hwstamp_rxq->q);
2194		err = ionic_qcq_enable(lif->hwstamp_rxq);
2195		if (err)
2196			goto err_out_hwstamp_rx;
2197	}
2198
2199	if (lif->hwstamp_txq) {
2200		err = ionic_qcq_enable(lif->hwstamp_txq);
2201		if (err)
2202			goto err_out_hwstamp_tx;
2203	}
2204
2205	return 0;
2206
2207err_out_hwstamp_tx:
2208	if (lif->hwstamp_rxq)
2209		derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
2210err_out_hwstamp_rx:
2211	i = lif->nxqs;
2212err_out:
2213	while (i--) {
2214		derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
2215		derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
2216	}
2217
2218	return err;
2219}
2220
2221static int ionic_start_queues(struct ionic_lif *lif)
2222{
2223	int err;
2224
2225	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2226		return -EIO;
2227
2228	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2229		return -EBUSY;
2230
2231	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2232		return 0;
2233
2234	err = ionic_txrx_enable(lif);
2235	if (err) {
2236		clear_bit(IONIC_LIF_F_UP, lif->state);
2237		return err;
2238	}
2239	netif_tx_wake_all_queues(lif->netdev);
2240
2241	return 0;
2242}
2243
2244static int ionic_open(struct net_device *netdev)
2245{
2246	struct ionic_lif *lif = netdev_priv(netdev);
2247	int err;
2248
2249	/* If recovering from a broken state, clear the bit and we'll try again */
2250	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
2251		netdev_info(netdev, "clearing broken state\n");
2252
2253	mutex_lock(&lif->queue_lock);
2254
2255	err = ionic_txrx_alloc(lif);
2256	if (err)
2257		goto err_unlock;
2258
2259	err = ionic_txrx_init(lif);
2260	if (err)
2261		goto err_txrx_free;
2262
2263	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
2264	if (err)
2265		goto err_txrx_deinit;
2266
2267	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
2268	if (err)
2269		goto err_txrx_deinit;
2270
2271	/* don't start the queues until we have link */
2272	if (netif_carrier_ok(netdev)) {
2273		err = ionic_start_queues(lif);
2274		if (err)
2275			goto err_txrx_deinit;
2276	}
2277
2278	/* If hardware timestamping is enabled, but the queues were freed by
2279	 * ionic_stop, those need to be reallocated and initialized, too.
2280	 */
2281	ionic_lif_hwstamp_recreate_queues(lif);
2282
2283	mutex_unlock(&lif->queue_lock);
2284
2285	return 0;
2286
2287err_txrx_deinit:
2288	ionic_txrx_deinit(lif);
2289err_txrx_free:
2290	ionic_txrx_free(lif);
2291err_unlock:
2292	mutex_unlock(&lif->queue_lock);
2293	return err;
2294}
2295
2296static void ionic_stop_queues(struct ionic_lif *lif)
2297{
2298	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2299		return;
2300
2301	netif_tx_disable(lif->netdev);
2302	ionic_txrx_disable(lif);
2303}
2304
2305static int ionic_stop(struct net_device *netdev)
2306{
2307	struct ionic_lif *lif = netdev_priv(netdev);
2308
2309	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2310		return 0;
2311
2312	mutex_lock(&lif->queue_lock);
2313	ionic_stop_queues(lif);
2314	ionic_txrx_deinit(lif);
2315	ionic_txrx_free(lif);
2316	mutex_unlock(&lif->queue_lock);
2317
2318	return 0;
2319}
2320
2321static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2322{
2323	struct ionic_lif *lif = netdev_priv(netdev);
2324
2325	switch (cmd) {
2326	case SIOCSHWTSTAMP:
2327		return ionic_lif_hwstamp_set(lif, ifr);
2328	case SIOCGHWTSTAMP:
2329		return ionic_lif_hwstamp_get(lif, ifr);
2330	default:
2331		return -EOPNOTSUPP;
2332	}
2333}
2334
2335static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata)
2336{
2337	struct ionic_vf_getattr_comp comp = { 0 };
2338	int err;
2339	u8 attr;
2340
2341	attr = IONIC_VF_ATTR_VLAN;
2342	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2343	if (err && comp.status != IONIC_RC_ENOSUPP)
2344		goto err_out;
2345	if (!err)
2346		vfdata->vlanid = comp.vlanid;
2347
2348	attr = IONIC_VF_ATTR_SPOOFCHK;
2349	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2350	if (err && comp.status != IONIC_RC_ENOSUPP)
2351		goto err_out;
2352	if (!err)
2353		vfdata->spoofchk = comp.spoofchk;
2354
2355	attr = IONIC_VF_ATTR_LINKSTATE;
2356	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2357	if (err && comp.status != IONIC_RC_ENOSUPP)
2358		goto err_out;
2359	if (!err) {
2360		switch (comp.linkstate) {
2361		case IONIC_VF_LINK_STATUS_UP:
2362			vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2363			break;
2364		case IONIC_VF_LINK_STATUS_DOWN:
2365			vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2366			break;
2367		case IONIC_VF_LINK_STATUS_AUTO:
2368			vfdata->linkstate = IFLA_VF_LINK_STATE_AUTO;
2369			break;
2370		default:
2371			dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
2372			break;
2373		}
2374	}
2375
2376	attr = IONIC_VF_ATTR_RATE;
2377	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2378	if (err && comp.status != IONIC_RC_ENOSUPP)
2379		goto err_out;
2380	if (!err)
2381		vfdata->maxrate = comp.maxrate;
2382
2383	attr = IONIC_VF_ATTR_TRUST;
2384	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2385	if (err && comp.status != IONIC_RC_ENOSUPP)
2386		goto err_out;
2387	if (!err)
2388		vfdata->trusted = comp.trust;
2389
2390	attr = IONIC_VF_ATTR_MAC;
2391	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2392	if (err && comp.status != IONIC_RC_ENOSUPP)
2393		goto err_out;
2394	if (!err)
2395		ether_addr_copy(vfdata->macaddr, comp.macaddr);
2396
2397err_out:
2398	if (err)
2399		dev_err(ionic->dev, "Failed to get %s for VF %d\n",
2400			ionic_vf_attr_to_str(attr), vf);
2401
2402	return err;
2403}
2404
2405static int ionic_get_vf_config(struct net_device *netdev,
2406			       int vf, struct ifla_vf_info *ivf)
2407{
2408	struct ionic_lif *lif = netdev_priv(netdev);
2409	struct ionic *ionic = lif->ionic;
2410	struct ionic_vf vfdata = { 0 };
2411	int ret = 0;
2412
2413	if (!netif_device_present(netdev))
2414		return -EBUSY;
2415
2416	down_read(&ionic->vf_op_lock);
2417
2418	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2419		ret = -EINVAL;
2420	} else {
2421		ivf->vf = vf;
2422		ivf->qos = 0;
2423
2424		ret = ionic_get_fw_vf_config(ionic, vf, &vfdata);
2425		if (!ret) {
2426			ivf->vlan         = le16_to_cpu(vfdata.vlanid);
2427			ivf->spoofchk     = vfdata.spoofchk;
2428			ivf->linkstate    = vfdata.linkstate;
2429			ivf->max_tx_rate  = le32_to_cpu(vfdata.maxrate);
2430			ivf->trusted      = vfdata.trusted;
2431			ether_addr_copy(ivf->mac, vfdata.macaddr);
2432		}
2433	}
2434
2435	up_read(&ionic->vf_op_lock);
2436	return ret;
2437}
2438
2439static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2440			      struct ifla_vf_stats *vf_stats)
2441{
2442	struct ionic_lif *lif = netdev_priv(netdev);
2443	struct ionic *ionic = lif->ionic;
2444	struct ionic_lif_stats *vs;
2445	int ret = 0;
2446
2447	if (!netif_device_present(netdev))
2448		return -EBUSY;
2449
2450	down_read(&ionic->vf_op_lock);
2451
2452	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2453		ret = -EINVAL;
2454	} else {
2455		memset(vf_stats, 0, sizeof(*vf_stats));
2456		vs = &ionic->vfs[vf].stats;
2457
2458		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2459		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2460		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
2461		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
2462		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
2463		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
2464		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2465				       le64_to_cpu(vs->rx_mcast_drop_packets) +
2466				       le64_to_cpu(vs->rx_bcast_drop_packets);
2467		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2468				       le64_to_cpu(vs->tx_mcast_drop_packets) +
2469				       le64_to_cpu(vs->tx_bcast_drop_packets);
2470	}
2471
2472	up_read(&ionic->vf_op_lock);
2473	return ret;
2474}
2475
2476static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2477{
2478	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
2479	struct ionic_lif *lif = netdev_priv(netdev);
2480	struct ionic *ionic = lif->ionic;
2481	int ret;
2482
2483	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
2484		return -EINVAL;
2485
2486	if (!netif_device_present(netdev))
2487		return -EBUSY;
2488
2489	down_write(&ionic->vf_op_lock);
2490
2491	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2492		ret = -EINVAL;
2493	} else {
2494		ether_addr_copy(vfc.macaddr, mac);
2495		dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
2496			__func__, vf, vfc.macaddr);
2497
2498		ret = ionic_set_vf_config(ionic, vf, &vfc);
2499		if (!ret)
2500			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2501	}
2502
2503	up_write(&ionic->vf_op_lock);
2504	return ret;
2505}
2506
2507static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2508			     u8 qos, __be16 proto)
2509{
2510	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
2511	struct ionic_lif *lif = netdev_priv(netdev);
2512	struct ionic *ionic = lif->ionic;
2513	int ret;
2514
2515	/* until someday when we support qos */
2516	if (qos)
2517		return -EINVAL;
2518
2519	if (vlan > 4095)
2520		return -EINVAL;
2521
2522	if (proto != htons(ETH_P_8021Q))
2523		return -EPROTONOSUPPORT;
2524
2525	if (!netif_device_present(netdev))
2526		return -EBUSY;
2527
2528	down_write(&ionic->vf_op_lock);
2529
2530	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2531		ret = -EINVAL;
2532	} else {
2533		vfc.vlanid = cpu_to_le16(vlan);
2534		dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
2535			__func__, vf, le16_to_cpu(vfc.vlanid));
2536
2537		ret = ionic_set_vf_config(ionic, vf, &vfc);
2538		if (!ret)
2539			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
2540	}
2541
2542	up_write(&ionic->vf_op_lock);
2543	return ret;
2544}
2545
2546static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2547			     int tx_min, int tx_max)
2548{
2549	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
2550	struct ionic_lif *lif = netdev_priv(netdev);
2551	struct ionic *ionic = lif->ionic;
2552	int ret;
2553
2554	/* setting the min just seems silly */
2555	if (tx_min)
2556		return -EINVAL;
2557
2558	if (!netif_device_present(netdev))
2559		return -EBUSY;
2560
2561	down_write(&ionic->vf_op_lock);
2562
2563	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2564		ret = -EINVAL;
2565	} else {
2566		vfc.maxrate = cpu_to_le32(tx_max);
2567		dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
2568			__func__, vf, le32_to_cpu(vfc.maxrate));
2569
2570		ret = ionic_set_vf_config(ionic, vf, &vfc);
2571		if (!ret)
2572			ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2573	}
2574
2575	up_write(&ionic->vf_op_lock);
2576	return ret;
2577}
2578
2579static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2580{
2581	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
2582	struct ionic_lif *lif = netdev_priv(netdev);
2583	struct ionic *ionic = lif->ionic;
2584	int ret;
2585
2586	if (!netif_device_present(netdev))
2587		return -EBUSY;
2588
2589	down_write(&ionic->vf_op_lock);
2590
2591	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2592		ret = -EINVAL;
2593	} else {
2594		vfc.spoofchk = set;
2595		dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
2596			__func__, vf, vfc.spoofchk);
2597
2598		ret = ionic_set_vf_config(ionic, vf, &vfc);
2599		if (!ret)
2600			ionic->vfs[vf].spoofchk = set;
2601	}
2602
2603	up_write(&ionic->vf_op_lock);
2604	return ret;
2605}
2606
2607static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2608{
2609	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
2610	struct ionic_lif *lif = netdev_priv(netdev);
2611	struct ionic *ionic = lif->ionic;
2612	int ret;
2613
2614	if (!netif_device_present(netdev))
2615		return -EBUSY;
2616
2617	down_write(&ionic->vf_op_lock);
2618
2619	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2620		ret = -EINVAL;
2621	} else {
2622		vfc.trust = set;
2623		dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
2624			__func__, vf, vfc.trust);
2625
2626		ret = ionic_set_vf_config(ionic, vf, &vfc);
2627		if (!ret)
2628			ionic->vfs[vf].trusted = set;
2629	}
2630
2631	up_write(&ionic->vf_op_lock);
2632	return ret;
2633}
2634
2635static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2636{
2637	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2638	struct ionic_lif *lif = netdev_priv(netdev);
2639	struct ionic *ionic = lif->ionic;
2640	u8 vfls;
2641	int ret;
2642
2643	switch (set) {
2644	case IFLA_VF_LINK_STATE_ENABLE:
2645		vfls = IONIC_VF_LINK_STATUS_UP;
2646		break;
2647	case IFLA_VF_LINK_STATE_DISABLE:
2648		vfls = IONIC_VF_LINK_STATUS_DOWN;
2649		break;
2650	case IFLA_VF_LINK_STATE_AUTO:
2651		vfls = IONIC_VF_LINK_STATUS_AUTO;
2652		break;
2653	default:
2654		return -EINVAL;
2655	}
2656
2657	if (!netif_device_present(netdev))
2658		return -EBUSY;
2659
2660	down_write(&ionic->vf_op_lock);
2661
2662	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2663		ret = -EINVAL;
2664	} else {
2665		vfc.linkstate = vfls;
2666		dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2667			__func__, vf, vfc.linkstate);
2668
2669		ret = ionic_set_vf_config(ionic, vf, &vfc);
2670		if (!ret)
2671			ionic->vfs[vf].linkstate = set;
2672	}
2673
2674	up_write(&ionic->vf_op_lock);
2675	return ret;
2676}
2677
2678static void ionic_vf_attr_replay(struct ionic_lif *lif)
2679{
2680	struct ionic_vf_setattr_cmd vfc = { };
2681	struct ionic *ionic = lif->ionic;
2682	struct ionic_vf *v;
2683	int i;
2684
2685	if (!ionic->vfs)
2686		return;
2687
2688	down_read(&ionic->vf_op_lock);
2689
2690	for (i = 0; i < ionic->num_vfs; i++) {
2691		v = &ionic->vfs[i];
2692
2693		if (v->stats_pa) {
2694			vfc.attr = IONIC_VF_ATTR_STATSADDR;
2695			vfc.stats_pa = cpu_to_le64(v->stats_pa);
2696			ionic_set_vf_config(ionic, i, &vfc);
2697			vfc.stats_pa = 0;
2698		}
2699
2700		if (!is_zero_ether_addr(v->macaddr)) {
2701			vfc.attr = IONIC_VF_ATTR_MAC;
2702			ether_addr_copy(vfc.macaddr, v->macaddr);
2703			ionic_set_vf_config(ionic, i, &vfc);
2704			eth_zero_addr(vfc.macaddr);
2705		}
2706
2707		if (v->vlanid) {
2708			vfc.attr = IONIC_VF_ATTR_VLAN;
2709			vfc.vlanid = v->vlanid;
2710			ionic_set_vf_config(ionic, i, &vfc);
2711			vfc.vlanid = 0;
2712		}
2713
2714		if (v->maxrate) {
2715			vfc.attr = IONIC_VF_ATTR_RATE;
2716			vfc.maxrate = v->maxrate;
2717			ionic_set_vf_config(ionic, i, &vfc);
2718			vfc.maxrate = 0;
2719		}
2720
2721		if (v->spoofchk) {
2722			vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
2723			vfc.spoofchk = v->spoofchk;
2724			ionic_set_vf_config(ionic, i, &vfc);
2725			vfc.spoofchk = 0;
2726		}
2727
2728		if (v->trusted) {
2729			vfc.attr = IONIC_VF_ATTR_TRUST;
2730			vfc.trust = v->trusted;
2731			ionic_set_vf_config(ionic, i, &vfc);
2732			vfc.trust = 0;
2733		}
2734
2735		if (v->linkstate) {
2736			vfc.attr = IONIC_VF_ATTR_LINKSTATE;
2737			vfc.linkstate = v->linkstate;
2738			ionic_set_vf_config(ionic, i, &vfc);
2739			vfc.linkstate = 0;
2740		}
2741	}
2742
2743	up_read(&ionic->vf_op_lock);
2744
2745	ionic_vf_start(ionic);
2746}
2747
2748static const struct net_device_ops ionic_netdev_ops = {
2749	.ndo_open               = ionic_open,
2750	.ndo_stop               = ionic_stop,
2751	.ndo_eth_ioctl		= ionic_eth_ioctl,
2752	.ndo_start_xmit		= ionic_start_xmit,
2753	.ndo_get_stats64	= ionic_get_stats64,
2754	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
2755	.ndo_set_features	= ionic_set_features,
2756	.ndo_set_mac_address	= ionic_set_mac_address,
2757	.ndo_validate_addr	= eth_validate_addr,
2758	.ndo_tx_timeout         = ionic_tx_timeout,
2759	.ndo_change_mtu         = ionic_change_mtu,
2760	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
2761	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
2762	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
2763	.ndo_set_vf_trust	= ionic_set_vf_trust,
2764	.ndo_set_vf_mac		= ionic_set_vf_mac,
2765	.ndo_set_vf_rate	= ionic_set_vf_rate,
2766	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
2767	.ndo_get_vf_config	= ionic_get_vf_config,
2768	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
2769	.ndo_get_vf_stats       = ionic_get_vf_stats,
2770};
2771
2772static int ionic_cmb_reconfig(struct ionic_lif *lif,
2773			      struct ionic_queue_params *qparam)
2774{
2775	struct ionic_queue_params start_qparams;
2776	int err = 0;
2777
2778	/* When changing CMB queue parameters, we're using limited
2779	 * on-device memory and don't have extra memory to use for
2780	 * duplicate allocations, so we free it all first then
2781	 * re-allocate with the new parameters.
2782	 */
2783
2784	/* Checkpoint for possible unwind */
2785	ionic_init_queue_params(lif, &start_qparams);
2786
2787	/* Stop and free the queues */
2788	ionic_stop_queues_reconfig(lif);
2789	ionic_txrx_free(lif);
2790
2791	/* Set up new qparams */
2792	ionic_set_queue_params(lif, qparam);
2793
2794	if (netif_running(lif->netdev)) {
2795		/* Alloc and start the new configuration */
2796		err = ionic_txrx_alloc(lif);
2797		if (err) {
2798			dev_warn(lif->ionic->dev,
2799				 "CMB reconfig failed, restoring values: %d\n", err);
2800
2801			/* Back out the changes */
2802			ionic_set_queue_params(lif, &start_qparams);
2803			err = ionic_txrx_alloc(lif);
2804			if (err) {
2805				dev_err(lif->ionic->dev,
2806					"CMB restore failed: %d\n", err);
2807				goto err_out;
2808			}
2809		}
2810
2811		err = ionic_start_queues_reconfig(lif);
2812		if (err) {
2813			dev_err(lif->ionic->dev,
2814				"CMB reconfig failed: %d\n", err);
2815			goto err_out;
2816		}
2817	}
2818
2819err_out:
2820	/* This was detached in ionic_stop_queues_reconfig() */
2821	netif_device_attach(lif->netdev);
2822
2823	return err;
2824}
2825
2826static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2827{
2828	/* only swapping the queues, not the napi, flags, or other stuff */
2829	swap(a->q.features,   b->q.features);
2830	swap(a->q.num_descs,  b->q.num_descs);
2831	swap(a->q.desc_size,  b->q.desc_size);
2832	swap(a->q.base,       b->q.base);
2833	swap(a->q.base_pa,    b->q.base_pa);
2834	swap(a->q.info,       b->q.info);
2835	swap(a->q_base,       b->q_base);
2836	swap(a->q_base_pa,    b->q_base_pa);
2837	swap(a->q_size,       b->q_size);
2838
2839	swap(a->q.sg_desc_size, b->q.sg_desc_size);
2840	swap(a->q.sg_base,    b->q.sg_base);
2841	swap(a->q.sg_base_pa, b->q.sg_base_pa);
2842	swap(a->sg_base,      b->sg_base);
2843	swap(a->sg_base_pa,   b->sg_base_pa);
2844	swap(a->sg_size,      b->sg_size);
2845
2846	swap(a->cq.num_descs, b->cq.num_descs);
2847	swap(a->cq.desc_size, b->cq.desc_size);
2848	swap(a->cq.base,      b->cq.base);
2849	swap(a->cq.base_pa,   b->cq.base_pa);
2850	swap(a->cq.info,      b->cq.info);
2851	swap(a->cq_base,      b->cq_base);
2852	swap(a->cq_base_pa,   b->cq_base_pa);
2853	swap(a->cq_size,      b->cq_size);
2854
2855	ionic_debugfs_del_qcq(a);
2856	ionic_debugfs_add_qcq(a->q.lif, a);
2857}
2858
2859int ionic_reconfigure_queues(struct ionic_lif *lif,
2860			     struct ionic_queue_params *qparam)
2861{
2862	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2863	struct ionic_qcq **tx_qcqs = NULL;
2864	struct ionic_qcq **rx_qcqs = NULL;
2865	unsigned int flags, i;
2866	int err = 0;
2867
2868	/* Are we changing q params while CMB is on */
2869	if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
2870	    (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
2871		return ionic_cmb_reconfig(lif, qparam);
2872
2873	/* allocate temporary qcq arrays to hold new queue structs */
2874	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2875		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2876				       sizeof(struct ionic_qcq *), GFP_KERNEL);
2877		if (!tx_qcqs) {
2878			err = -ENOMEM;
2879			goto err_out;
2880		}
2881	}
2882	if (qparam->nxqs != lif->nxqs ||
2883	    qparam->nrxq_descs != lif->nrxq_descs ||
2884	    qparam->rxq_features != lif->rxq_features) {
2885		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2886				       sizeof(struct ionic_qcq *), GFP_KERNEL);
2887		if (!rx_qcqs) {
2888			err = -ENOMEM;
2889			goto err_out;
2890		}
2891	}
2892
2893	/* allocate new desc_info and rings, but leave the interrupt setup
2894	 * until later so as to not mess with the still-running queues
2895	 */
2896	if (tx_qcqs) {
2897		num_desc = qparam->ntxq_descs;
2898		desc_sz = sizeof(struct ionic_txq_desc);
2899		comp_sz = sizeof(struct ionic_txq_comp);
2900
2901		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2902		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2903		    sizeof(struct ionic_txq_sg_desc_v1))
2904			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2905		else
2906			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2907
2908		for (i = 0; i < qparam->nxqs; i++) {
2909			/* If missing, short placeholder qcq needed for swap */
2910			if (!lif->txqcqs[i]) {
2911				flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2912				err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2913						      4, desc_sz, comp_sz, sg_desc_sz,
2914						      lif->kern_pid, &lif->txqcqs[i]);
2915				if (err)
2916					goto err_out;
2917			}
2918
2919			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2920			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2921					      num_desc, desc_sz, comp_sz, sg_desc_sz,
2922					      lif->kern_pid, &tx_qcqs[i]);
2923			if (err)
2924				goto err_out;
2925		}
2926	}
2927
2928	if (rx_qcqs) {
2929		num_desc = qparam->nrxq_descs;
2930		desc_sz = sizeof(struct ionic_rxq_desc);
2931		comp_sz = sizeof(struct ionic_rxq_comp);
2932		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2933
2934		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2935			comp_sz *= 2;
2936
2937		for (i = 0; i < qparam->nxqs; i++) {
2938			/* If missing, short placeholder qcq needed for swap */
2939			if (!lif->rxqcqs[i]) {
2940				flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
2941				err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2942						      4, desc_sz, comp_sz, sg_desc_sz,
2943						      lif->kern_pid, &lif->rxqcqs[i]);
2944				if (err)
2945					goto err_out;
2946			}
2947
2948			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2949			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2950					      num_desc, desc_sz, comp_sz, sg_desc_sz,
2951					      lif->kern_pid, &rx_qcqs[i]);
2952			if (err)
2953				goto err_out;
2954
2955			rx_qcqs[i]->q.features = qparam->rxq_features;
2956		}
2957	}
2958
2959	/* stop and clean the queues */
2960	ionic_stop_queues_reconfig(lif);
2961
2962	if (qparam->nxqs != lif->nxqs) {
2963		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2964		if (err)
2965			goto err_out_reinit_unlock;
2966		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2967		if (err) {
2968			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2969			goto err_out_reinit_unlock;
2970		}
2971	}
2972
2973	/* swap new desc_info and rings, keeping existing interrupt config */
2974	if (tx_qcqs) {
2975		lif->ntxq_descs = qparam->ntxq_descs;
2976		for (i = 0; i < qparam->nxqs; i++)
2977			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2978	}
2979
2980	if (rx_qcqs) {
2981		lif->nrxq_descs = qparam->nrxq_descs;
2982		for (i = 0; i < qparam->nxqs; i++)
2983			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2984	}
2985
2986	/* if we need to change the interrupt layout, this is the time */
2987	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2988	    qparam->nxqs != lif->nxqs) {
2989		if (qparam->intr_split) {
2990			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2991		} else {
2992			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2993			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2994			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2995		}
2996
2997		/* Clear existing interrupt assignments.  We check for NULL here
2998		 * because we're checking the whole array for potential qcqs, not
2999		 * just those qcqs that have just been set up.
3000		 */
3001		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
3002			if (lif->txqcqs[i])
3003				ionic_qcq_intr_free(lif, lif->txqcqs[i]);
3004			if (lif->rxqcqs[i])
3005				ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
3006		}
3007
3008		/* re-assign the interrupts */
3009		for (i = 0; i < qparam->nxqs; i++) {
3010			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3011			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
3012			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3013					     lif->rxqcqs[i]->intr.index,
3014					     lif->rx_coalesce_hw);
3015
3016			if (qparam->intr_split) {
3017				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3018				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
3019				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3020						     lif->txqcqs[i]->intr.index,
3021						     lif->tx_coalesce_hw);
3022				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
3023					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
3024			} else {
3025				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3026				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
3027			}
3028		}
3029	}
3030
3031	/* now we can rework the debugfs mappings */
3032	if (tx_qcqs) {
3033		for (i = 0; i < qparam->nxqs; i++) {
3034			ionic_debugfs_del_qcq(lif->txqcqs[i]);
3035			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
3036		}
3037	}
3038
3039	if (rx_qcqs) {
3040		for (i = 0; i < qparam->nxqs; i++) {
3041			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
3042			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
3043		}
3044	}
3045
3046	swap(lif->nxqs, qparam->nxqs);
3047	swap(lif->rxq_features, qparam->rxq_features);
3048
3049err_out_reinit_unlock:
3050	/* re-init the queues, but don't lose an error code */
3051	if (err)
3052		ionic_start_queues_reconfig(lif);
3053	else
3054		err = ionic_start_queues_reconfig(lif);
3055
3056err_out:
3057	/* free old allocs without cleaning intr */
3058	for (i = 0; i < qparam->nxqs; i++) {
3059		if (tx_qcqs && tx_qcqs[i]) {
3060			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3061			ionic_qcq_free(lif, tx_qcqs[i]);
3062			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
3063			tx_qcqs[i] = NULL;
3064		}
3065		if (rx_qcqs && rx_qcqs[i]) {
3066			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3067			ionic_qcq_free(lif, rx_qcqs[i]);
3068			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
3069			rx_qcqs[i] = NULL;
3070		}
3071	}
3072
3073	/* free q array */
3074	if (rx_qcqs) {
3075		devm_kfree(lif->ionic->dev, rx_qcqs);
3076		rx_qcqs = NULL;
3077	}
3078	if (tx_qcqs) {
3079		devm_kfree(lif->ionic->dev, tx_qcqs);
3080		tx_qcqs = NULL;
3081	}
3082
3083	/* clean the unused dma and info allocations when new set is smaller
3084	 * than the full array, but leave the qcq shells in place
3085	 */
3086	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
3087		if (lif->txqcqs && lif->txqcqs[i]) {
3088			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3089			ionic_qcq_free(lif, lif->txqcqs[i]);
3090		}
3091
3092		if (lif->rxqcqs && lif->rxqcqs[i]) {
3093			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3094			ionic_qcq_free(lif, lif->rxqcqs[i]);
3095		}
3096	}
3097
3098	if (err)
3099		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
3100
3101	return err;
3102}
3103
3104int ionic_lif_alloc(struct ionic *ionic)
3105{
3106	struct device *dev = ionic->dev;
3107	union ionic_lif_identity *lid;
3108	struct net_device *netdev;
3109	struct ionic_lif *lif;
3110	int tbl_sz;
3111	int err;
3112
3113	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
3114	if (!lid)
3115		return -ENOMEM;
3116
3117	netdev = alloc_etherdev_mqs(sizeof(*lif),
3118				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
3119	if (!netdev) {
3120		dev_err(dev, "Cannot allocate netdev, aborting\n");
3121		err = -ENOMEM;
3122		goto err_out_free_lid;
3123	}
3124
3125	SET_NETDEV_DEV(netdev, dev);
3126
3127	lif = netdev_priv(netdev);
3128	lif->netdev = netdev;
3129	ionic->lif = lif;
3130	netdev->netdev_ops = &ionic_netdev_ops;
3131	ionic_ethtool_set_ops(netdev);
3132
3133	netdev->watchdog_timeo = 2 * HZ;
3134	netif_carrier_off(netdev);
3135
3136	lif->identity = lid;
3137	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
3138	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
3139	if (err) {
3140		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
3141			lif->lif_type, err);
3142		goto err_out_free_netdev;
3143	}
3144	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
3145				     le32_to_cpu(lif->identity->eth.min_frame_size));
3146	lif->netdev->max_mtu =
3147		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
3148
3149	lif->neqs = ionic->neqs_per_lif;
3150	lif->nxqs = ionic->ntxqs_per_lif;
3151
3152	lif->ionic = ionic;
3153	lif->index = 0;
3154
3155	if (is_kdump_kernel()) {
3156		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
3157		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
3158	} else {
3159		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
3160		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
3161	}
3162
3163	/* Convert the default coalesce value to actual hw resolution */
3164	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
3165	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
3166						    lif->rx_coalesce_usecs);
3167	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3168	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3169	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
3170	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
3171
3172	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
3173
3174	mutex_init(&lif->queue_lock);
3175	mutex_init(&lif->config_lock);
3176
3177	spin_lock_init(&lif->adminq_lock);
3178
3179	spin_lock_init(&lif->deferred.lock);
3180	INIT_LIST_HEAD(&lif->deferred.list);
3181	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
3182
3183	/* allocate lif info */
3184	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
3185	lif->info = dma_alloc_coherent(dev, lif->info_sz,
3186				       &lif->info_pa, GFP_KERNEL);
3187	if (!lif->info) {
3188		dev_err(dev, "Failed to allocate lif info, aborting\n");
3189		err = -ENOMEM;
3190		goto err_out_free_mutex;
3191	}
3192
3193	ionic_debugfs_add_lif(lif);
3194
3195	/* allocate control queues and txrx queue arrays */
3196	ionic_lif_queue_identify(lif);
3197	err = ionic_qcqs_alloc(lif);
3198	if (err)
3199		goto err_out_free_lif_info;
3200
3201	/* allocate rss indirection table */
3202	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
3203	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
3204	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
3205					      &lif->rss_ind_tbl_pa,
3206					      GFP_KERNEL);
3207
3208	if (!lif->rss_ind_tbl) {
3209		err = -ENOMEM;
3210		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
3211		goto err_out_free_qcqs;
3212	}
3213	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
3214
3215	ionic_lif_alloc_phc(lif);
3216
3217	return 0;
3218
3219err_out_free_qcqs:
3220	ionic_qcqs_free(lif);
3221err_out_free_lif_info:
3222	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3223	lif->info = NULL;
3224	lif->info_pa = 0;
3225err_out_free_mutex:
3226	mutex_destroy(&lif->config_lock);
3227	mutex_destroy(&lif->queue_lock);
3228err_out_free_netdev:
3229	free_netdev(lif->netdev);
3230	lif = NULL;
3231err_out_free_lid:
3232	kfree(lid);
3233
3234	return err;
3235}
3236
3237static void ionic_lif_reset(struct ionic_lif *lif)
3238{
3239	struct ionic_dev *idev = &lif->ionic->idev;
3240
3241	if (!ionic_is_fw_running(idev))
3242		return;
3243
3244	mutex_lock(&lif->ionic->dev_cmd_lock);
3245	ionic_dev_cmd_lif_reset(idev, lif->index);
3246	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3247	mutex_unlock(&lif->ionic->dev_cmd_lock);
3248}
3249
3250static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3251{
3252	struct ionic *ionic = lif->ionic;
3253
3254	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
3255		return;
3256
3257	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
3258
3259	netif_device_detach(lif->netdev);
3260
3261	mutex_lock(&lif->queue_lock);
3262	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3263		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3264		ionic_stop_queues(lif);
3265	}
3266
3267	if (netif_running(lif->netdev)) {
3268		ionic_txrx_deinit(lif);
3269		ionic_txrx_free(lif);
3270	}
3271	ionic_lif_deinit(lif);
3272	ionic_reset(ionic);
3273	ionic_qcqs_free(lif);
3274
3275	mutex_unlock(&lif->queue_lock);
3276
3277	clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
3278	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
3279}
3280
3281int ionic_restart_lif(struct ionic_lif *lif)
3282{
3283	struct ionic *ionic = lif->ionic;
3284	int err;
3285
3286	mutex_lock(&lif->queue_lock);
3287
3288	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3289		dev_info(ionic->dev, "FW Up: clearing broken state\n");
3290
3291	err = ionic_qcqs_alloc(lif);
3292	if (err)
3293		goto err_unlock;
3294
3295	err = ionic_lif_init(lif);
3296	if (err)
3297		goto err_qcqs_free;
3298
3299	ionic_vf_attr_replay(lif);
3300
3301	if (lif->registered)
3302		ionic_lif_set_netdev_info(lif);
3303
3304	ionic_rx_filter_replay(lif);
3305
3306	if (netif_running(lif->netdev)) {
3307		err = ionic_txrx_alloc(lif);
3308		if (err)
3309			goto err_lifs_deinit;
3310
3311		err = ionic_txrx_init(lif);
3312		if (err)
3313			goto err_txrx_free;
3314	}
3315
3316	mutex_unlock(&lif->queue_lock);
3317
3318	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3319	ionic_link_status_check_request(lif, CAN_SLEEP);
3320	netif_device_attach(lif->netdev);
3321
3322	return 0;
3323
3324err_txrx_free:
3325	ionic_txrx_free(lif);
3326err_lifs_deinit:
3327	ionic_lif_deinit(lif);
3328err_qcqs_free:
3329	ionic_qcqs_free(lif);
3330err_unlock:
3331	mutex_unlock(&lif->queue_lock);
3332
3333	return err;
3334}
3335
3336static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3337{
3338	struct ionic *ionic = lif->ionic;
3339	int err;
3340
3341	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3342		return;
3343
3344	dev_info(ionic->dev, "FW Up: restarting LIFs\n");
3345
3346	/* This is a little different from what happens at
3347	 * probe time because the LIF already exists so we
3348	 * just need to reanimate it.
3349	 */
3350	ionic_init_devinfo(ionic);
3351	err = ionic_identify(ionic);
3352	if (err)
3353		goto err_out;
3354	err = ionic_port_identify(ionic);
3355	if (err)
3356		goto err_out;
3357	err = ionic_port_init(ionic);
3358	if (err)
3359		goto err_out;
3360
3361	err = ionic_restart_lif(lif);
3362	if (err)
3363		goto err_out;
3364
3365	dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3366
3367	/* restore the hardware timestamping queues */
3368	ionic_lif_hwstamp_replay(lif);
3369
3370	return;
3371
3372err_out:
3373	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3374}
3375
3376void ionic_lif_free(struct ionic_lif *lif)
3377{
3378	struct device *dev = lif->ionic->dev;
3379
3380	ionic_lif_free_phc(lif);
3381
3382	/* free rss indirection table */
3383	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3384			  lif->rss_ind_tbl_pa);
3385	lif->rss_ind_tbl = NULL;
3386	lif->rss_ind_tbl_pa = 0;
3387
3388	/* free queues */
3389	ionic_qcqs_free(lif);
3390	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3391		ionic_lif_reset(lif);
3392
3393	/* free lif info */
3394	kfree(lif->identity);
3395	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3396	lif->info = NULL;
3397	lif->info_pa = 0;
3398
3399	/* unmap doorbell page */
3400	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3401	lif->kern_dbpage = NULL;
3402
3403	mutex_destroy(&lif->config_lock);
3404	mutex_destroy(&lif->queue_lock);
3405
3406	/* free netdev & lif */
3407	ionic_debugfs_del_lif(lif);
3408	free_netdev(lif->netdev);
3409}
3410
3411void ionic_lif_deinit(struct ionic_lif *lif)
3412{
3413	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3414		return;
3415
3416	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3417		cancel_work_sync(&lif->deferred.work);
3418		cancel_work_sync(&lif->tx_timeout_work);
3419		ionic_rx_filters_deinit(lif);
3420		if (lif->netdev->features & NETIF_F_RXHASH)
3421			ionic_lif_rss_deinit(lif);
3422	}
3423
3424	napi_disable(&lif->adminqcq->napi);
3425	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3426	ionic_lif_qcq_deinit(lif, lif->adminqcq);
3427
3428	ionic_lif_reset(lif);
3429}
3430
3431static int ionic_lif_adminq_init(struct ionic_lif *lif)
3432{
3433	struct device *dev = lif->ionic->dev;
3434	struct ionic_q_init_comp comp;
3435	struct ionic_dev *idev;
3436	struct ionic_qcq *qcq;
3437	struct ionic_queue *q;
3438	int err;
3439
3440	idev = &lif->ionic->idev;
3441	qcq = lif->adminqcq;
3442	q = &qcq->q;
3443
3444	mutex_lock(&lif->ionic->dev_cmd_lock);
3445	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3446	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3447	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3448	mutex_unlock(&lif->ionic->dev_cmd_lock);
3449	if (err) {
3450		netdev_err(lif->netdev, "adminq init failed %d\n", err);
3451		return err;
3452	}
3453
3454	q->hw_type = comp.hw_type;
3455	q->hw_index = le32_to_cpu(comp.hw_index);
3456	q->dbval = IONIC_DBELL_QID(q->hw_index);
3457
3458	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3459	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3460
3461	q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
3462	q->dbell_jiffies = jiffies;
3463
3464	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
3465
3466	qcq->napi_qcq = qcq;
3467	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
3468
3469	napi_enable(&qcq->napi);
3470
3471	if (qcq->flags & IONIC_QCQ_F_INTR)
3472		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3473				IONIC_INTR_MASK_CLEAR);
3474
3475	qcq->flags |= IONIC_QCQ_F_INITED;
3476
3477	return 0;
3478}
3479
3480static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3481{
3482	struct ionic_qcq *qcq = lif->notifyqcq;
3483	struct device *dev = lif->ionic->dev;
3484	struct ionic_queue *q = &qcq->q;
3485	int err;
3486
3487	struct ionic_admin_ctx ctx = {
3488		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3489		.cmd.q_init = {
3490			.opcode = IONIC_CMD_Q_INIT,
3491			.lif_index = cpu_to_le16(lif->index),
3492			.type = q->type,
3493			.ver = lif->qtype_info[q->type].version,
3494			.index = cpu_to_le32(q->index),
3495			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3496					     IONIC_QINIT_F_ENA),
3497			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3498			.pid = cpu_to_le16(q->pid),
3499			.ring_size = ilog2(q->num_descs),
3500			.ring_base = cpu_to_le64(q->base_pa),
3501		}
3502	};
3503
3504	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3505	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3506	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3507	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3508
3509	err = ionic_adminq_post_wait(lif, &ctx);
3510	if (err)
3511		return err;
3512
3513	lif->last_eid = 0;
3514	q->hw_type = ctx.comp.q_init.hw_type;
3515	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3516	q->dbval = IONIC_DBELL_QID(q->hw_index);
3517
3518	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3519	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3520
3521	/* preset the callback info */
3522	q->info[0].cb_arg = lif;
3523
3524	qcq->flags |= IONIC_QCQ_F_INITED;
3525
3526	return 0;
3527}
3528
3529static int ionic_station_set(struct ionic_lif *lif)
3530{
3531	struct net_device *netdev = lif->netdev;
3532	struct ionic_admin_ctx ctx = {
3533		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3534		.cmd.lif_getattr = {
3535			.opcode = IONIC_CMD_LIF_GETATTR,
3536			.index = cpu_to_le16(lif->index),
3537			.attr = IONIC_LIF_ATTR_MAC,
3538		},
3539	};
3540	u8 mac_address[ETH_ALEN];
3541	struct sockaddr addr;
3542	int err;
3543
3544	err = ionic_adminq_post_wait(lif, &ctx);
3545	if (err)
3546		return err;
3547	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3548		   ctx.comp.lif_getattr.mac);
3549	ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3550
3551	if (is_zero_ether_addr(mac_address)) {
3552		eth_hw_addr_random(netdev);
3553		netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3554		ether_addr_copy(mac_address, netdev->dev_addr);
3555
3556		err = ionic_program_mac(lif, mac_address);
3557		if (err < 0)
3558			return err;
3559
3560		if (err > 0) {
3561			netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
3562				   __func__);
3563			return 0;
3564		}
3565	}
3566
3567	if (!is_zero_ether_addr(netdev->dev_addr)) {
3568		/* If the netdev mac is non-zero and doesn't match the default
3569		 * device address, it was set by something earlier and we're
3570		 * likely here again after a fw-upgrade reset.  We need to be
3571		 * sure the netdev mac is in our filter list.
3572		 */
3573		if (!ether_addr_equal(mac_address, netdev->dev_addr))
3574			ionic_lif_addr_add(lif, netdev->dev_addr);
3575	} else {
3576		/* Update the netdev mac with the device's mac */
3577		ether_addr_copy(addr.sa_data, mac_address);
3578		addr.sa_family = AF_INET;
3579		err = eth_prepare_mac_addr_change(netdev, &addr);
3580		if (err) {
3581			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3582				    addr.sa_data, err);
3583			return 0;
3584		}
3585
3586		eth_commit_mac_addr_change(netdev, &addr);
3587	}
3588
3589	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3590		   netdev->dev_addr);
3591	ionic_lif_addr_add(lif, netdev->dev_addr);
3592
3593	return 0;
3594}
3595
3596int ionic_lif_init(struct ionic_lif *lif)
3597{
3598	struct ionic_dev *idev = &lif->ionic->idev;
3599	struct device *dev = lif->ionic->dev;
3600	struct ionic_lif_init_comp comp;
3601	int dbpage_num;
3602	int err;
3603
3604	mutex_lock(&lif->ionic->dev_cmd_lock);
3605	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3606	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3607	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3608	mutex_unlock(&lif->ionic->dev_cmd_lock);
3609	if (err)
3610		return err;
3611
3612	lif->hw_index = le16_to_cpu(comp.hw_index);
3613
3614	/* now that we have the hw_index we can figure out our doorbell page */
3615	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3616	if (!lif->dbid_count) {
3617		dev_err(dev, "No doorbell pages, aborting\n");
3618		return -EINVAL;
3619	}
3620
3621	lif->kern_pid = 0;
3622	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3623	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3624	if (!lif->kern_dbpage) {
3625		dev_err(dev, "Cannot map dbpage, aborting\n");
3626		return -ENOMEM;
3627	}
3628
3629	err = ionic_lif_adminq_init(lif);
3630	if (err)
3631		goto err_out_adminq_deinit;
3632
3633	if (lif->ionic->nnqs_per_lif) {
3634		err = ionic_lif_notifyq_init(lif);
3635		if (err)
3636			goto err_out_notifyq_deinit;
3637	}
3638
3639	err = ionic_init_nic_features(lif);
3640	if (err)
3641		goto err_out_notifyq_deinit;
3642
3643	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3644		err = ionic_rx_filters_init(lif);
3645		if (err)
3646			goto err_out_notifyq_deinit;
3647	}
3648
3649	err = ionic_station_set(lif);
3650	if (err)
3651		goto err_out_notifyq_deinit;
3652
3653	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3654
3655	set_bit(IONIC_LIF_F_INITED, lif->state);
3656
3657	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
3658
3659	return 0;
3660
3661err_out_notifyq_deinit:
3662	napi_disable(&lif->adminqcq->napi);
3663	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3664err_out_adminq_deinit:
3665	ionic_lif_qcq_deinit(lif, lif->adminqcq);
3666	ionic_lif_reset(lif);
3667	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3668	lif->kern_dbpage = NULL;
3669
3670	return err;
3671}
3672
3673static void ionic_lif_notify_work(struct work_struct *ws)
3674{
3675}
3676
3677static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
3678{
3679	struct ionic_admin_ctx ctx = {
3680		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3681		.cmd.lif_setattr = {
3682			.opcode = IONIC_CMD_LIF_SETATTR,
3683			.index = cpu_to_le16(lif->index),
3684			.attr = IONIC_LIF_ATTR_NAME,
3685		},
3686	};
3687
3688	strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
3689		sizeof(ctx.cmd.lif_setattr.name));
3690
3691	ionic_adminq_post_wait(lif, &ctx);
3692}
3693
3694static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3695{
3696	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3697		return NULL;
3698
3699	return netdev_priv(netdev);
3700}
3701
3702static int ionic_lif_notify(struct notifier_block *nb,
3703			    unsigned long event, void *info)
3704{
3705	struct net_device *ndev = netdev_notifier_info_to_dev(info);
3706	struct ionic *ionic = container_of(nb, struct ionic, nb);
3707	struct ionic_lif *lif = ionic_netdev_lif(ndev);
3708
3709	if (!lif || lif->ionic != ionic)
3710		return NOTIFY_DONE;
3711
3712	switch (event) {
3713	case NETDEV_CHANGENAME:
3714		ionic_lif_set_netdev_info(lif);
3715		break;
3716	}
3717
3718	return NOTIFY_DONE;
3719}
3720
3721int ionic_lif_register(struct ionic_lif *lif)
3722{
3723	int err;
3724
3725	ionic_lif_register_phc(lif);
3726
3727	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3728
3729	lif->ionic->nb.notifier_call = ionic_lif_notify;
3730
3731	err = register_netdevice_notifier(&lif->ionic->nb);
3732	if (err)
3733		lif->ionic->nb.notifier_call = NULL;
3734
3735	/* only register LIF0 for now */
3736	err = register_netdev(lif->netdev);
3737	if (err) {
3738		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3739		ionic_lif_unregister_phc(lif);
3740		return err;
3741	}
3742
3743	ionic_link_status_check_request(lif, CAN_SLEEP);
3744	lif->registered = true;
3745	ionic_lif_set_netdev_info(lif);
3746
3747	return 0;
3748}
3749
3750void ionic_lif_unregister(struct ionic_lif *lif)
3751{
3752	if (lif->ionic->nb.notifier_call) {
3753		unregister_netdevice_notifier(&lif->ionic->nb);
3754		cancel_work_sync(&lif->ionic->nb_work);
3755		lif->ionic->nb.notifier_call = NULL;
3756	}
3757
3758	if (lif->netdev->reg_state == NETREG_REGISTERED)
3759		unregister_netdev(lif->netdev);
3760
3761	ionic_lif_unregister_phc(lif);
3762
3763	lif->registered = false;
3764}
3765
3766static void ionic_lif_queue_identify(struct ionic_lif *lif)
3767{
3768	union ionic_q_identity __iomem *q_ident;
3769	struct ionic *ionic = lif->ionic;
3770	struct ionic_dev *idev;
3771	int qtype;
3772	int err;
3773
3774	idev = &lif->ionic->idev;
3775	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
3776
3777	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
3778		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
3779
3780		/* filter out the ones we know about */
3781		switch (qtype) {
3782		case IONIC_QTYPE_ADMINQ:
3783		case IONIC_QTYPE_NOTIFYQ:
3784		case IONIC_QTYPE_RXQ:
3785		case IONIC_QTYPE_TXQ:
3786			break;
3787		default:
3788			continue;
3789		}
3790
3791		memset(qti, 0, sizeof(*qti));
3792
3793		mutex_lock(&ionic->dev_cmd_lock);
3794		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
3795					     ionic_qtype_versions[qtype]);
3796		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3797		if (!err) {
3798			qti->version   = readb(&q_ident->version);
3799			qti->supported = readb(&q_ident->supported);
3800			qti->features  = readq(&q_ident->features);
3801			qti->desc_sz   = readw(&q_ident->desc_sz);
3802			qti->comp_sz   = readw(&q_ident->comp_sz);
3803			qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
3804			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
3805			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
3806		}
3807		mutex_unlock(&ionic->dev_cmd_lock);
3808
3809		if (err == -EINVAL) {
3810			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3811			continue;
3812		} else if (err == -EIO) {
3813			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3814			return;
3815		} else if (err) {
3816			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3817				qtype, err);
3818			return;
3819		}
3820
3821		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3822			qtype, qti->version);
3823		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3824			qtype, qti->supported);
3825		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3826			qtype, qti->features);
3827		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3828			qtype, qti->desc_sz);
3829		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3830			qtype, qti->comp_sz);
3831		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3832			qtype, qti->sg_desc_sz);
3833		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3834			qtype, qti->max_sg_elems);
3835		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3836			qtype, qti->sg_desc_stride);
3837	}
3838}
3839
3840int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3841		       union ionic_lif_identity *lid)
3842{
3843	struct ionic_dev *idev = &ionic->idev;
3844	size_t sz;
3845	int err;
3846
3847	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3848
3849	mutex_lock(&ionic->dev_cmd_lock);
3850	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3851	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3852	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3853	mutex_unlock(&ionic->dev_cmd_lock);
3854	if (err)
3855		return (err);
3856
3857	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3858		le64_to_cpu(lid->capabilities));
3859
3860	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3861		le32_to_cpu(lid->eth.max_ucast_filters));
3862	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3863		le32_to_cpu(lid->eth.max_mcast_filters));
3864	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3865		le64_to_cpu(lid->eth.config.features));
3866	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3867		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3868	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3869		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3870	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3871		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3872	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3873		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3874	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3875	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3876	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3877		le32_to_cpu(lid->eth.config.mtu));
3878
3879	return 0;
3880}
3881
3882int ionic_lif_size(struct ionic *ionic)
3883{
3884	struct ionic_identity *ident = &ionic->ident;
3885	unsigned int nintrs, dev_nintrs;
3886	union ionic_lif_config *lc;
3887	unsigned int ntxqs_per_lif;
3888	unsigned int nrxqs_per_lif;
3889	unsigned int neqs_per_lif;
3890	unsigned int nnqs_per_lif;
3891	unsigned int nxqs, neqs;
3892	unsigned int min_intrs;
3893	int err;
3894
3895	/* retrieve basic values from FW */
3896	lc = &ident->lif.eth.config;
3897	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
3898	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
3899	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
3900	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
3901	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
3902
3903	/* limit values to play nice with kdump */
3904	if (is_kdump_kernel()) {
3905		dev_nintrs = 2;
3906		neqs_per_lif = 0;
3907		nnqs_per_lif = 0;
3908		ntxqs_per_lif = 1;
3909		nrxqs_per_lif = 1;
3910	}
3911
3912	/* reserve last queue id for hardware timestamping */
3913	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
3914		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
3915			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
3916		} else {
3917			ntxqs_per_lif -= 1;
3918			nrxqs_per_lif -= 1;
3919		}
3920	}
3921
3922	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
3923	nxqs = min(nxqs, num_online_cpus());
3924	neqs = min(neqs_per_lif, num_online_cpus());
3925
3926try_again:
3927	/* interrupt usage:
3928	 *    1 for master lif adminq/notifyq
3929	 *    1 for each CPU for master lif TxRx queue pairs
3930	 *    whatever's left is for RDMA queues
3931	 */
3932	nintrs = 1 + nxqs + neqs;
3933	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
3934
3935	if (nintrs > dev_nintrs)
3936		goto try_fewer;
3937
3938	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
3939	if (err < 0 && err != -ENOSPC) {
3940		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
3941		return err;
3942	}
3943	if (err == -ENOSPC)
3944		goto try_fewer;
3945
3946	if (err != nintrs) {
3947		ionic_bus_free_irq_vectors(ionic);
3948		goto try_fewer;
3949	}
3950
3951	ionic->nnqs_per_lif = nnqs_per_lif;
3952	ionic->neqs_per_lif = neqs;
3953	ionic->ntxqs_per_lif = nxqs;
3954	ionic->nrxqs_per_lif = nxqs;
3955	ionic->nintrs = nintrs;
3956
3957	ionic_debugfs_add_sizes(ionic);
3958
3959	return 0;
3960
3961try_fewer:
3962	if (nnqs_per_lif > 1) {
3963		nnqs_per_lif >>= 1;
3964		goto try_again;
3965	}
3966	if (neqs > 1) {
3967		neqs >>= 1;
3968		goto try_again;
3969	}
3970	if (nxqs > 1) {
3971		nxqs >>= 1;
3972		goto try_again;
3973	}
3974	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
3975	return -ENOSPC;
3976}
3977