// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

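/* Register accessors for the WO (WED offload) MCU CCIF region, reached
 * through the syscon regmap looked up in mtk_wed_wo_hardware_init(). A
 * failed regmap read yields all-ones, mimicking a faulted bus read.
 */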
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

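/* Interrupt plumbing: CCIF_RCHNUM reports the pending RX channel bits,
 * CCIF_IRQ0_MASK arms them and CCIF_ACK clears them.
 */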
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

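/* Update the software irq mask under mmio.lock. @set selects whether the
 * new mask is written out immediately; mtk_wed_wo_irq_enable() defers the
 * hardware update to the tasklet instead.
 */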
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

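/* Mark the TX channel busy and ring the CCIF doorbell so the WO MCU picks
 * up the freshly queued descriptor.
 */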
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

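/* Publish a new cpu index to the device; the wmb() ensures the descriptor
 * writes are visible before the doorbell.
 */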
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

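/* Reclaim the oldest buffer in @q. Returns NULL when the ring is empty or
 * the next descriptor is not completed yet; @flush forces reclaim even for
 * descriptors the device has not marked DMA_DONE.
 */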
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

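/* Top up @q with page_frag buffers. RX buffers are also advertised to the
 * device via the descriptor ring; TX slots are only mapped, since their
 * descriptors are filled in at transmission time.
 */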
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		entry->buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}

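/* Ack and re-arm the RX channel interrupt once the ring has been drained. */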
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

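/* Drain the RX ring: wrap each completed buffer into an skb and dispatch
 * it to the MCU layer as either a command response or an unsolicited
 * event, then replenish the ring and advertise the new slots.
 */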
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

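/* Hard irq handler: mask everything and defer the real work to the
 * tasklet.
 */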
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

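/* Tasklet counterpart of the hard irq handler: process RX completions with
 * the source masked and let mtk_wed_wo_rx_complete() re-arm it.
 */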
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

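/* Quiesce the ring by clearing the cpu index, then release the descriptor
 * ring allocated in mtk_wed_wo_queue_alloc().
 */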
static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dmam_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			   q->desc_dma);
}

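/* Unmap and free any buffer still attached to a TX slot, then drain the
 * page_frag cache backing the queue.
 */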
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		if (!entry->buf)
			continue;

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

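/* Program ring base, size and initial cpu index into the device-visible
 * queue registers.
 */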
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

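/* Copy @skb into a preallocated TX slot, publish the descriptor and kick
 * the WO MCU. The skb is always consumed, even when the ring is full or
 * the frame does not fit the slot.
 */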
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	dev_kfree_skb(skb);

	return ret;
}

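/* WO exception handling is not implemented yet; keep the hook so the init
 * sequence in mtk_wed_wo_init() is complete.
 */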
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

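/* Bring up the CCIF transport towards the WO MCU: map the syscon region,
 * request the interrupt and set up one TX and one RX ring. The CCIF
 * DUMMY/SHADOW registers carry the ring base, size and index values
 * exchanged with the firmware.
 */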
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error_put;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	of_node_put(np);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}

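/* Tear down in reverse order of mtk_wed_wo_hardware_init(): silence and
 * release the interrupt before draining and freeing the rings.
 */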
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

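/* Entry point used by the WED core: allocate the WO context, bring up the
 * CCIF transport and initialise the MCU messaging layer.
 */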
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}