// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_vec.c: Definition of the common structure for a vector of Rx and
 * Tx rings, and of the functions that operate on it. Companion module to
 * aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

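/* Per-vector context: the NAPI instance that services this interrupt
 * vector and, for each traffic class handled by it, one Tx/Rx ring pair
 * (ring[tc][AQ_VEC_TX_ID] / ring[tc][AQ_VEC_RX_ID]).
 */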
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

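/* NAPI poll handler. For each traffic class serviced by this vector:
 * refresh the Tx head pointer, clean completed Tx descriptors, pass
 * received Rx descriptors up the stack (bounded by @budget), then refill
 * the Rx ring in software and in hardware. If Tx cleaning was cut short,
 * the full budget is reported so NAPI keeps polling; otherwise, once
 * work_done < budget, the poll completes and the vector's interrupt is
 * re-enabled.
 */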
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U; self->tx_rings > i; ++i) {
			ring = self->ring[i];
			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			ring[AQ_VEC_RX_ID].stats.rx.polls++;
			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					    &ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
				ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

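/* Allocate a vector object: record its index, derive its CPU affinity
 * from the RSS base CPU number, and register the NAPI poll handler.
 * Returns NULL if the allocation fails.
 */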
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

err_exit:
	return self;
}

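/* Allocate one Tx and one Rx ring per configured traffic class, mapping
 * each to its hardware ring index via AQ_NIC_CFG_TCVEC2RING(). On error,
 * all rings allocated so far are freed again.
 */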
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_ring_free(self);
		self = NULL;
	}

	return err;
}

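/* Bind the vector to the hardware ops and initialize each Tx/Rx ring
 * pair, first in software and then in hardware; Rx rings are pre-filled
 * with buffers before the hardware tail is advanced.
 */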
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

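/* Start all Tx and Rx rings in hardware, then enable NAPI polling. */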
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

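/* Stop all Tx and Rx rings in hardware, then disable NAPI polling. */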
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

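/* Drain the rings: complete outstanding Tx descriptors and release the
 * buffers still held by the Rx rings.
 */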
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

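/* Unregister the NAPI instance and free the vector object itself; the
 * rings are released separately via aq_vec_ring_free().
 */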
void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

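/* Free the rings allocated by aq_vec_ring_alloc(). The rx_rings check
 * covers a partially built pair where the Tx ring was allocated but the
 * matching Rx ring was not.
 */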
void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings)
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}

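/* Per-vector interrupt handler: hand the actual work off to NAPI. */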
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

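/* Legacy interrupt handler: read the interrupt cause and claim the IRQ
 * only if this vector raised it, in which case the vector's interrupt is
 * masked and NAPI is scheduled; otherwise the line is re-enabled and
 * IRQ_NONE is returned.
 */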
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
			      1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
	return tc < self->rx_rings && tc < self->tx_rings;
}

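/* Copy the software counters for the given traffic class into @data,
 * Rx ring first, then Tx; returns the number of u64 entries written
 * (0 for an out-of-range tc).
 */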
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
	unsigned int count;

	if (!aq_vec_is_valid_tc(self, tc))
		return 0;

	count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
	count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);

	return count;
}