1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/bits.h>
7#include <linux/clk.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/platform_device.h>
13#include <linux/property.h>
14#include <linux/regulator/consumer.h>
15#include <linux/of_address.h>
16#include <linux/iommu.h>
17
18#include "ce.h"
19#include "coredump.h"
20#include "debug.h"
21#include "hif.h"
22#include "htc.h"
23#include "snoc.h"
24
25#define ATH10K_SNOC_RX_POST_RETRY_MS 50
26#define CE_POLL_PIPE 4
27#define ATH10K_SNOC_WAKE_IRQ 2
28
/* Human-readable names for the per-CE interrupt lines (passed to
 * request_irq()).  Fully const: neither the pointers nor the strings
 * are ever modified, matching the style of ath10k_regulators below.
 */
static const char * const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
43
/* Regulator supply names acquired for the WCN3990 power rails. */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
	"vdd-3.3-ch1",
};
51
/* Clock names acquired for the WCN3990 (reference clock and QDSS). */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin", "qdss",
};
55
56static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
57static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
58static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
59static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
60static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
61static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
62
/* Per-device driver data for the WCN3990: hardware revision, 35-bit
 * DMA addressing and a 0x100000-byte MSA region.
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
68
69#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
70#define WCN3990_DST_WR_IDX_OFFSET 0x40
71
/* Shadow register map handed to the target: for each CE, which write
 * index register (source vs destination ring) is shadowed.  CE7 appears
 * in both lists — per host_ce_config_wlan it has both a source and a
 * destination ring (diag window).
 */
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
		{
			.ce_id = __cpu_to_le16(0),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(3),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(4),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(5),
			.reg_offset =  __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(1),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(2),
			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(8),
			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(9),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(10),
			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(11),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},
};
133
/* Host-side copy engine attributes, indexed by CE id.  src_nentries == 0
 * means the CE is RX-only from the host's view; dest_nentries == 0 means
 * TX-only.  Entries with CE_ATTR_DIS_INTR are polled (see CE_POLL_PIPE).
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
240
/* Target-side copy engine pipe configuration, sent to the firmware via
 * QMI in ath10k_snoc_wlan_enable().  All fields are little-endian as
 * required by the wire format.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		/* NOTE(review): raw value 4, not a named PIPEDIR_* constant
		 * visible in this file — presumably a host-only direction
		 * encoding; confirm against the firmware interface.
		 */
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
362
/* HTC service id -> CE pipe mapping, also sent to firmware via QMI.
 * Each entry is {service_id, pipedir, pipenum}; the list is terminated
 * by an all-zero entry.  ath10k_snoc_hif_map_service_to_pipe() walks
 * this table at runtime.
 */
static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
472
473static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
474{
475	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
476
477	iowrite32(value, ar_snoc->mem + offset);
478}
479
480static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
481{
482	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
483	u32 val;
484
485	val = ioread32(ar_snoc->mem + offset);
486
487	return val;
488}
489
/* Allocate one RX skb, DMA-map it and post it to the pipe's CE
 * destination ring.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on DMA
 * mapping failure, or the CE post error (the skb is unmapped and freed
 * on every error path).
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* The CE hardware presumably needs 4-byte-aligned buffers;
	 * warn once if the allocator ever hands us an unaligned one.
	 */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* Map the whole usable buffer (current data + tailroom), since
	 * the target may fill up to that size.
	 */
	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the DMA address so the completion path can unmap it. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
528
/* Top up a single RX pipe: post as many buffers as the CE destination
 * ring has free slots.  On transient failure (other than ring-full) a
 * retry timer is armed to try again later.
 */
static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	/* TX-only or unused pipes have no RX buffers to post. */
	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}
558
559static void ath10k_snoc_rx_post(struct ath10k *ar)
560{
561	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
562	int i;
563
564	for (i = 0; i < CE_COUNT; i++)
565		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
566}
567
/* Common RX completion path: drain all completed receive descriptors
 * from the CE, unmap and length-check each skb, then hand the batch to
 * @callback and finally replenish the pipe's RX ring.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* max_nbytes mirrors the size used at dma_map_single()
		 * time in __ath10k_snoc_rx_post_buf().
		 */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Drop frames the target claims are larger than the
		 * buffer we posted — the data cannot be trusted.
		 */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	/* Deliver outside the CE completion loop. */
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}
608
/* CE recv callback for HTC control pipes (e.g. CE2/WMI). */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
613
/* CE recv callback for pipes carrying mixed HTT + HTC traffic
 * (CE1/CE9/CE10 per host_ce_config_wlan).
 */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
623
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 * Pktlog frames are delivered through the normal HTC RX handler.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
631
/* Deliver an HTT frame to the HTT layer, stripping the HTC header the
 * target prepends on this pipe first.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
637
/* CE recv callback for the HTT-only RX pipe (CE5); also polls CE4 as in
 * ath10k_snoc_htt_htc_rx_cb().
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
643
/* Timer callback armed by ath10k_snoc_rx_post_pipe() when posting an RX
 * buffer failed; retries replenishing all pipes.
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
651
/* CE send-completion callback for HTC control pipes: collect all
 * completed skbs, then run the HTC completion handler on each outside
 * the CE completion loop.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* NULL transfer contexts (e.g. from gather fragments)
		 * have no skb to complete.
		 */
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
669
/* CE send-completion callback for the HTT TX pipe (CE4): unmap each
 * completed skb and notify the HTT layer.
 */
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
684
/* HIF scatter-gather transmit: queue @n_items fragments on @pipe_id as
 * one gather chain.  All fragments but the last are sent with
 * CE_SEND_FLAG_GATHER; the final one without, which terminates the
 * chain.  On failure, already-queued descriptors are reverted so the
 * ring is left unchanged.
 *
 * Returns 0 on success or the CE send error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	/* The whole chain must be queued atomically w.r.t. the CE ring. */
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Last fragment: no GATHER flag, closing the chain. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Unwind the i descriptors queued before the failure. */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
737
/* Report fixed target identification for WCN3990 — there is no BMI
 * query on this bus, so version and type are hard-coded.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
746
/* Return the number of free TX (source ring) slots on @pipe. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
755
/* Reap TX completions on @pipe.  Unless @force, skip the (relatively
 * expensive) CE servicing while more than half of the source ring is
 * still free.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		/* >>1 == half the configured source ring entries. */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
771
/* Resolve @service_id to its uplink and downlink CE pipe numbers by
 * scanning target_service_to_ce_map_wlan.
 *
 * Returns 0 with *ul_pipe/*dl_pipe set, or -ENOENT if the service has
 * no mapping for one of the directions.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct ce_service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			/* A service should map each direction once. */
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
817
/* Default (control) pipes are those mapped to the reserved control
 * service.  The lookup cannot fail for RSVD_CTRL, hence the ignored
 * return value.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
827
/* Disable every per-CE interrupt line at the interrupt controller. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int id;

	for (id = 0; id < CE_COUNT_MAX; id++)
		disable_irq(ar_snoc->ce_irqs[id].irq_line);
}
836
/* Enable every per-CE interrupt line at the interrupt controller. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int id;

	for (id = 0; id < CE_COUNT_MAX; id++)
		enable_irq(ar_snoc->ce_irqs[id].irq_line);
}
845
/* Free all RX buffers still posted on a pipe's CE destination ring:
 * unmap each skb's DMA mapping and release it.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	/* buf_sz == 0 pipes never had RX buffers posted. */
	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
877
/* Complete (as if sent) all TX skbs still queued on a pipe's CE source
 * ring, so upper layers can reclaim them.
 */
static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
906
/* Stop the RX replenish timer and drain every pipe's RX and TX rings.
 * Called on shutdown after interrupts are disabled.
 */
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}
920
/* HIF stop: quiesce interrupts, NAPI and buffers. */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	/* In the crash-flush path the IRQ lines are left untouched —
	 * presumably already disabled by the recovery path; confirm.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
931
/* HIF start: clear stale pending-IRQ state, enable NAPI and the per-CE
 * interrupts, prime the RX rings, and leave recovery mode.
 *
 * Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
947
/* Initialize every copy engine pipe from host_ce_config_wlan.
 *
 * Returns 0 on success or the first CE init error.
 */
static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}
963
964static int ath10k_snoc_wlan_enable(struct ath10k *ar,
965				   enum ath10k_firmware_mode fw_mode)
966{
967	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
968	struct ath10k_qmi_wlan_enable_cfg cfg;
969	enum wlfw_driver_mode_enum_v01 mode;
970	int pipe_num;
971
972	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
973		tgt_cfg[pipe_num].pipe_num =
974				target_ce_config_wlan[pipe_num].pipenum;
975		tgt_cfg[pipe_num].pipe_dir =
976				target_ce_config_wlan[pipe_num].pipedir;
977		tgt_cfg[pipe_num].nentries =
978				target_ce_config_wlan[pipe_num].nentries;
979		tgt_cfg[pipe_num].nbytes_max =
980				target_ce_config_wlan[pipe_num].nbytes_max;
981		tgt_cfg[pipe_num].flags =
982				target_ce_config_wlan[pipe_num].flags;
983		tgt_cfg[pipe_num].reserved = 0;
984	}
985
986	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
987				sizeof(struct ath10k_tgt_pipe_cfg);
988	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
989		&tgt_cfg;
990	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
991				  sizeof(struct ath10k_svc_pipe_cfg);
992	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
993		&target_service_to_ce_map_wlan;
994	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
995	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
996		&target_shadow_reg_cfg_map;
997
998	switch (fw_mode) {
999	case ATH10K_FIRMWARE_MODE_NORMAL:
1000		mode = QMI_WLFW_MISSION_V01;
1001		break;
1002	case ATH10K_FIRMWARE_MODE_UTF:
1003		mode = QMI_WLFW_FTM_V01;
1004		break;
1005	default:
1006		ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
1007		return -EINVAL;
1008	}
1009
1010	return ath10k_qmi_wlan_enable(ar, &cfg, mode,
1011				       NULL);
1012}
1013
/* Send qmi wlan disable unless this is a normal crash-recovery restart. */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
	 * flags are not set, it means that the driver has restarted
	 * due to a crash inject via debugfs. In this case, the driver
	 * needs to restart the firmware and hence send qmi wlan disable,
	 * during the driver restart sequence.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
1028
/* HIF power down: disable the firmware via QMI and release the RRI
 * (ring index) memory — mirror image of ath10k_snoc_hif_power_up().
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}
1036
/* HIF power up: enable the firmware via QMI, allocate the RRI memory,
 * bring up all CE pipes and enable CE interrupts.
 *
 * Returns 0 on success; on CE init failure the RRI and firmware enable
 * are rolled back.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_free_rri;
	}

	ath10k_ce_enable_interrupts(ar);

	return 0;

err_free_rri:
	ath10k_ce_free_rri(ar);
	ath10k_snoc_wlan_disable(ar);

	return ret;
}
1069
1070static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1071					       u8 fw_log_mode)
1072{
1073	u8 fw_dbg_mode;
1074
1075	if (fw_log_mode)
1076		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1077	else
1078		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1079
1080	return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1081}
1082
#ifdef CONFIG_PM
/* System suspend: arm the designated CE IRQ as a wakeup source.
 *
 * Returns -EPERM when the device is not wakeup-capable, otherwise the
 * enable_irq_wake() result.
 */
static int ath10k_snoc_hif_suspend(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");

	return ret;
}
1102
/* System resume: disarm the wakeup IRQ armed in suspend.
 *
 * Returns -EPERM when the device is not wakeup-capable, otherwise the
 * disable_irq_wake() result.
 */
static int ath10k_snoc_hif_resume(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");

	return ret;
}
1121#endif
1122
/* HIF operations exposed to the ath10k core for the SNoC bus. */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
	.set_target_log_mode    = ath10k_snoc_hif_set_target_log_mode,

#ifdef CONFIG_PM
	.suspend                = ath10k_snoc_hif_suspend,
	.resume                 = ath10k_snoc_hif_resume,
#endif
};
1143
/* Low-level register access ops used by the shared CE layer. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};
1148
/* Reverse-map a Linux IRQ number to its CE id.
 *
 * Returns the CE id, or -EINVAL if @irq is not one of ours.
 */
static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (ar_snoc->ce_irqs[i].irq_line == irq)
			return i;
	}
	ath10k_err(ar, "No matching CE id for irq %d\n", irq);

	return -EINVAL;
}
1162
/* Per-CE hard IRQ handler: mask the CE's interrupt, mark it pending and
 * defer the actual servicing to NAPI (ath10k_snoc_napi_poll()).
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/* Keep this CE quiet until NAPI has serviced it. */
	ath10k_ce_disable_interrupt(ar, ce_id);
	set_bit(ce_id, ar_snoc->pending_ce_irqs);

	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
1182
1183static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1184{
1185	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1186	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1187	int done = 0;
1188	int ce_id;
1189
1190	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
1191		napi_complete(ctx);
1192		return done;
1193	}
1194
1195	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1196		if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
1197			ath10k_ce_per_engine_service(ar, ce_id);
1198			ath10k_ce_enable_interrupt(ar, ce_id);
1199		}
1200
1201	done = ath10k_htt_txrx_compl_task(ar, budget);
1202
1203	if (done < budget)
1204		napi_complete(ctx);
1205
1206	return done;
1207}
1208
/* Register the NAPI poll handler on the driver's dummy netdev. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
1214
1215static int ath10k_snoc_request_irq(struct ath10k *ar)
1216{
1217	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1218	int ret, id;
1219
1220	for (id = 0; id < CE_COUNT_MAX; id++) {
1221		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1222				  ath10k_snoc_per_engine_handler,
1223				  IRQF_NO_AUTOEN, ce_name[id], ar);
1224		if (ret) {
1225			ath10k_err(ar,
1226				   "failed to register IRQ handler for CE %d: %d\n",
1227				   id, ret);
1228			goto err_irq;
1229		}
1230	}
1231
1232	return 0;
1233
1234err_irq:
1235	for (id -= 1; id >= 0; id--)
1236		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1237
1238	return ret;
1239}
1240
1241static void ath10k_snoc_free_irq(struct ath10k *ar)
1242{
1243	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1244	int id;
1245
1246	for (id = 0; id < CE_COUNT_MAX; id++)
1247		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1248}
1249
1250static int ath10k_snoc_resource_init(struct ath10k *ar)
1251{
1252	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1253	struct platform_device *pdev;
1254	struct resource *res;
1255	int i, ret = 0;
1256
1257	pdev = ar_snoc->dev;
1258	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1259	if (!res) {
1260		ath10k_err(ar, "Memory base not found in DT\n");
1261		return -EINVAL;
1262	}
1263
1264	ar_snoc->mem_pa = res->start;
1265	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1266				    resource_size(res));
1267	if (!ar_snoc->mem) {
1268		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1269			   &ar_snoc->mem_pa);
1270		return -EINVAL;
1271	}
1272
1273	for (i = 0; i < CE_COUNT; i++) {
1274		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1275		if (!res) {
1276			ath10k_err(ar, "failed to get IRQ%d\n", i);
1277			ret = -ENODEV;
1278			goto out;
1279		}
1280		ar_snoc->ce_irqs[i].irq_line = res->start;
1281	}
1282
1283	ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
1284				       &ar_snoc->xo_cal_data);
1285	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
1286	if (ret == 0) {
1287		ar_snoc->xo_cal_supported = true;
1288		ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
1289			   ar_snoc->xo_cal_data);
1290	}
1291	ret = 0;
1292
1293out:
1294	return ret;
1295}
1296
1297static void ath10k_snoc_quirks_init(struct ath10k *ar)
1298{
1299	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1300	struct device *dev = &ar_snoc->dev->dev;
1301
1302	if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1303		set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1304}
1305
/* Handle QMI firmware state indications.
 *
 * FW_READY registers the driver core on the first indication after
 * probe, or queues a restart when the core is already registered
 * (i.e. firmware came back after a crash).  FW_DOWN flags recovery and
 * crash-flush so in-flight work is drained.
 *
 * Returns 0 on success (or when teardown is in progress), -EINVAL for
 * an unknown indication, or the ath10k_core_register() error.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params = {};
	int ret;

	/* Ignore late indications once the driver is being removed. */
	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			/* Core already up: this ready follows a crash. */
			queue_work(ar->workqueue, &ar->restart_work);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
1343
1344static int ath10k_snoc_setup_resource(struct ath10k *ar)
1345{
1346	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1347	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1348	struct ath10k_snoc_pipe *pipe;
1349	int i, ret;
1350
1351	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1352	spin_lock_init(&ce->ce_lock);
1353	for (i = 0; i < CE_COUNT; i++) {
1354		pipe = &ar_snoc->pipe_info[i];
1355		pipe->ce_hdl = &ce->ce_states[i];
1356		pipe->pipe_num = i;
1357		pipe->hif_ce_state = ar;
1358
1359		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1360		if (ret) {
1361			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1362				   i, ret);
1363			return ret;
1364		}
1365
1366		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1367	}
1368	ath10k_snoc_init_napi(ar);
1369
1370	return 0;
1371}
1372
1373static void ath10k_snoc_release_resource(struct ath10k *ar)
1374{
1375	int i;
1376
1377	netif_napi_del(&ar->napi);
1378	for (i = 0; i < CE_COUNT; i++)
1379		ath10k_ce_free_pipe(ar, i);
1380}
1381
1382static int ath10k_hw_power_on(struct ath10k *ar)
1383{
1384	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1385	int ret;
1386
1387	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1388
1389	ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
1390	if (ret)
1391		return ret;
1392
1393	ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
1394	if (ret)
1395		goto vreg_off;
1396
1397	return ret;
1398
1399vreg_off:
1400	regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1401	return ret;
1402}
1403
1404static int ath10k_hw_power_off(struct ath10k *ar)
1405{
1406	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1407
1408	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1409
1410	clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
1411
1412	return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1413}
1414
/* Copy the MSA firmware region into the coredump ramdump buffer,
 * prefixed with an ath10k_dump_ram_data_hdr describing the region.
 *
 * Quietly returns when there is no crash data / ramdump buffer or no
 * memory layout for this hardware.
 */
static void ath10k_msa_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	size_t buf_len;
	u8 *buf;

	if (!crash_data || !crash_data->ramdump_buf)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	/* Only the first region of the layout table is dumped here. */
	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	/* Reserve space for the header. */
	hdr = (void *)buf;
	buf += sizeof(*hdr);
	buf_len -= sizeof(*hdr);

	hdr->region_type = cpu_to_le32(current_region->type);
	hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
	hdr->length = cpu_to_le32(ar->msa.mem_size);

	/* Copy at most min(region len, msa size); warn if the region is
	 * smaller than the MSA so the dump is known to be truncated.
	 * NOTE(review): buf_len itself is not re-checked before the
	 * memcpy — presumably the ramdump buffer is sized for the full
	 * region by the coredump layer; verify against its allocator.
	 */
	if (current_region->len < ar->msa.mem_size) {
		memcpy(buf, ar->msa.vaddr, current_region->len);
		ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
			    current_region->len, ar->msa.mem_size);
	} else {
		memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
	}
}
1454
/* Record a firmware crash: bump the crash counter, log the coredump
 * GUID and driver info, and capture the MSA region into the coredump
 * buffer.  Serialized against other dumpers via ar->dump_mutex.
 */
void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	mutex_lock(&ar->dump_mutex);

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* crash_data may be NULL (e.g. coredump collection disabled). */
	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_msa_dump_memory(ar, crash_data);
	mutex_unlock(&ar->dump_mutex);
}
1478
1479static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
1480{
1481	struct device *dev = ar->dev;
1482	struct device_node *node;
1483	struct resource r;
1484	int ret;
1485
1486	node = of_parse_phandle(dev->of_node, "memory-region", 0);
1487	if (node) {
1488		ret = of_address_to_resource(node, 0, &r);
1489		of_node_put(node);
1490		if (ret) {
1491			dev_err(dev, "failed to resolve msa fixed region\n");
1492			return ret;
1493		}
1494
1495		ar->msa.paddr = r.start;
1496		ar->msa.mem_size = resource_size(&r);
1497		ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
1498					      ar->msa.mem_size,
1499					      MEMREMAP_WT);
1500		if (IS_ERR(ar->msa.vaddr)) {
1501			dev_err(dev, "failed to map memory region: %pa\n",
1502				&r.start);
1503			return PTR_ERR(ar->msa.vaddr);
1504		}
1505	} else {
1506		ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
1507						    &ar->msa.paddr,
1508						    GFP_KERNEL);
1509		if (!ar->msa.vaddr) {
1510			ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1511			return -ENOMEM;
1512		}
1513		ar->msa.mem_size = msa_size;
1514	}
1515
1516	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
1517		   &ar->msa.paddr,
1518		   ar->msa.vaddr);
1519
1520	return 0;
1521}
1522
1523static int ath10k_fw_init(struct ath10k *ar)
1524{
1525	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1526	struct device *host_dev = &ar_snoc->dev->dev;
1527	struct platform_device_info info;
1528	struct iommu_domain *iommu_dom;
1529	struct platform_device *pdev;
1530	struct device_node *node;
1531	int ret;
1532
1533	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
1534	if (!node) {
1535		ar_snoc->use_tz = true;
1536		return 0;
1537	}
1538
1539	memset(&info, 0, sizeof(info));
1540	info.fwnode = &node->fwnode;
1541	info.parent = host_dev;
1542	info.name = node->name;
1543	info.dma_mask = DMA_BIT_MASK(32);
1544
1545	pdev = platform_device_register_full(&info);
1546	if (IS_ERR(pdev)) {
1547		of_node_put(node);
1548		return PTR_ERR(pdev);
1549	}
1550
1551	pdev->dev.of_node = node;
1552
1553	ret = of_dma_configure(&pdev->dev, node, true);
1554	if (ret) {
1555		ath10k_err(ar, "dma configure fail: %d\n", ret);
1556		goto err_unregister;
1557	}
1558
1559	ar_snoc->fw.dev = &pdev->dev;
1560
1561	iommu_dom = iommu_domain_alloc(&platform_bus_type);
1562	if (!iommu_dom) {
1563		ath10k_err(ar, "failed to allocate iommu domain\n");
1564		ret = -ENOMEM;
1565		goto err_unregister;
1566	}
1567
1568	ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
1569	if (ret) {
1570		ath10k_err(ar, "could not attach device: %d\n", ret);
1571		goto err_iommu_free;
1572	}
1573
1574	ar_snoc->fw.iommu_domain = iommu_dom;
1575	ar_snoc->fw.fw_start_addr = ar->msa.paddr;
1576
1577	ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
1578			ar->msa.paddr, ar->msa.mem_size,
1579			IOMMU_READ | IOMMU_WRITE);
1580	if (ret) {
1581		ath10k_err(ar, "failed to map firmware region: %d\n", ret);
1582		goto err_iommu_detach;
1583	}
1584
1585	of_node_put(node);
1586
1587	return 0;
1588
1589err_iommu_detach:
1590	iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
1591
1592err_iommu_free:
1593	iommu_domain_free(iommu_dom);
1594
1595err_unregister:
1596	platform_device_unregister(pdev);
1597	of_node_put(node);
1598
1599	return ret;
1600}
1601
/* Undo ath10k_fw_init(): unmap the firmware region, detach and free the
 * IOMMU domain, and unregister the "wifi-firmware" sub-device.
 *
 * No-op (returns 0) when firmware is TrustZone managed.
 */
static int ath10k_fw_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	/* NOTE(review): fw.mapped_mem_size is read here but is never
	 * assigned in this file's ath10k_fw_init() — it appears to stay
	 * zero, making the unmap below a no-op; verify where it is set.
	 */
	const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
	struct iommu_domain *iommu;
	size_t unmapped_size;

	if (ar_snoc->use_tz)
		return 0;

	iommu = ar_snoc->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
				    mapped_size);
	if (unmapped_size != mapped_size)
		ath10k_err(ar, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ar_snoc->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ar_snoc->fw.dev));

	return 0;
}
1627
/* Device tree match table: WCN3990 is the only supported SNOC target. */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1635
/* Platform probe: create the ath10k core, acquire DT resources, IRQs,
 * regulators and clocks, power the SoC on, reserve the MSA region,
 * initialize the firmware sub-device and start the QMI client.
 *
 * Core registration itself happens later, from
 * ath10k_snoc_fw_indication() once firmware reports ready.
 *
 * Returns 0 on success or a negative errno; each error label unwinds
 * everything acquired before it.
 */
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	dev = &pdev->dev;
	drv_data = device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	/* WCN3990 addresses 35 bits of DMA (see drv_priv.dma_mask). */
	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d\n", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ath10k_snoc_quirks_init(ar);

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	/* Regulator and clock tables are devm-managed; only the bulk
	 * get calls can fail here.
	 */
	ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
	ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
				      sizeof(*ar_snoc->vregs), GFP_KERNEL);
	if (!ar_snoc->vregs) {
		ret = -ENOMEM;
		goto err_free_irq;
	}
	for (i = 0; i < ar_snoc->num_vregs; i++)
		ar_snoc->vregs[i].supply = ath10k_regulators[i];

	ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
				      ar_snoc->vregs);
	if (ret < 0)
		goto err_free_irq;

	ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
	ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
				     sizeof(*ar_snoc->clks), GFP_KERNEL);
	if (!ar_snoc->clks) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	for (i = 0; i < ar_snoc->num_clks; i++)
		ar_snoc->clks[i].id = ath10k_clocks[i];

	/* Clocks are optional: boards may not wire all of them. */
	ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
					 ar_snoc->clks);
	if (ret)
		goto err_free_irq;

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_setup_msa_resources(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
		goto err_power_off;
	}

	ret = ath10k_fw_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
		goto err_power_off;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_fw_deinit;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");

	return 0;

err_fw_deinit:
	ath10k_fw_deinit(ar);

err_power_off:
	ath10k_hw_power_off(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
1769
/* Platform remove: wait briefly for an in-flight recovery, mark the
 * device as unregistering (so fw indications are ignored), then unwind
 * everything probe set up, in reverse order.
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	/* Give a pending recovery up to three seconds to settle. */
	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_fw_deinit(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1794
/* Shutdown is a full remove so firmware and power are cleanly torn
 * down before reboot/poweroff.
 */
static void ath10k_snoc_shutdown(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
	ath10k_snoc_remove(pdev);
}
1802
1803static struct platform_driver ath10k_snoc_driver = {
1804	.probe  = ath10k_snoc_probe,
1805	.remove = ath10k_snoc_remove,
1806	.shutdown =  ath10k_snoc_shutdown,
1807	.driver = {
1808		.name   = "ath10k_snoc",
1809		.of_match_table = ath10k_snoc_dt_match,
1810	},
1811};
1812module_platform_driver(ath10k_snoc_driver);
1813
1814MODULE_AUTHOR("Qualcomm");
1815MODULE_LICENSE("Dual BSD/GPL");
1816MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
1817