/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low priority channels,
 * management packets through the high priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

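/*
 * Allocate the per-channel control blocks and link them into a circular
 * singly linked list: the last block points back to the head so the ring
 * can be walked indefinitely.
 */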
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize the SMSM state: clear TX Enable, set TX Rings Empty */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

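/*
 * Allocate one coherent DMA area holding all hardware descriptors for the
 * channel, attach each descriptor to its control block and chain the
 * descriptors into a ring through their phy_next_l pointers.
 */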
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((i > 0) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

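/*
 * Distribute buffer descriptor (BD) chunks from the preallocated pool over
 * the TX ring: every second control block gets a BD slot, the blocks in
 * between carry the skb payload and need none.
 */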
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the others point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data &= ~wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
}

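/*
 * Allocate a receive skb and map it for DMA so the hardware can fill it;
 * the mapped address is written straight into the RX descriptor.
 */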
static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (!skb)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

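/*
 * Complete the skb that is waiting for a TX ack indication: hand its status
 * to mac80211, stop the ack timer and wake the queues.
 */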
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	del_timer(&wcn->tx_ack_timer);
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

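/*
 * Ack timeout handler: if no TX ack indication arrived in time, give the
 * pending skb back to mac80211 without the ACK flags and wake the queues.
 */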
static void wcn36xx_dxe_tx_timer(struct timer_list *t)
{
	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff *skb;

	/* TX Timeout */
	wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->flags &= ~IEEE80211_TX_STAT_ACK;
	info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

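/*
 * Walk the TX ring from the tail towards the head and reclaim every
 * descriptor the hardware has released (VLD bit cleared): unmap the frame,
 * report or free the skb and restart stopped queues.
 */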
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one pass of the do-while loop: when the ring is
	 * completely full, head and tail point to the same element, so a
	 * while loop would not run at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
				if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
					info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
					ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
				} else {
					/* Wait for the TX ack indication or timeout... */
					spin_lock(&wcn->dxe_lock);
					if (WARN_ON(wcn->tx_ack_skb))
						ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
					wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
					mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
					spin_unlock(&wcn->dxe_lock);
				}
				/* do not free, ownership transferred to mac80211 status cb */
			} else {
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

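/*
 * TX interrupt handler: acknowledge the high and low priority TX channel
 * interrupts and reap the completed descriptors on each channel.
 */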
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
					int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK)) {
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
		}
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
					int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK)) {
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

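/*
 * Drain one RX channel: every descriptor whose VLD bit the hardware has
 * cleared holds a received frame. Replace its skb, pass the old one up the
 * stack and hand the descriptor back to the hardware marked valid again.
 */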
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		/* do not read until we own the DMA descriptor */
		dma_rmb();

		/* read/modify the DMA descriptor */
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (!ret) {
			/* New skb allocation succeeded. Use the new one and
			 * hand the old one up to the network stack.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		}
		/* else keep the old skb (it was never submitted) and reuse it
		 * for RX DMA, dropping the packet it contained.
		 */

		/* flush descriptor changes before re-marking it as valid */
		dma_wmb();
		dxe->ctrl = ctrl;

		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}

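/* Dispatch pending RX work to the low and high priority RX channels. */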
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

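/*
 * Allocate the coherent pools that back the TX buffer descriptors: one pool
 * for management frames (high priority channel) and one for data frames
 * (low priority channel).
 */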
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

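/*
 * Queue one frame for transmission. Each frame occupies a pair of ring
 * entries: the first descriptor carries the buffer descriptor (BD), the
 * second one the skb payload. The BD descriptor is marked valid last so the
 * hardware only ever sees a fully populated pair.
 */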
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If the skb is not NULL we have reached the tail of the ring and the
	 * ring is full. Stop the queues to let mac80211 back off until the
	 * ring has an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When connected and trying to send a data frame, the chip can be in
	 * sleep mode and writing to the register will not wake it up. Instead,
	 * notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

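/*
 * Bring the DXE engine up: reset the block, route the channel interrupts,
 * set up descriptor rings and preallocated buffers for all four channels,
 * request the TX/RX IRQs and finally unmask the per-channel interrupts.
 */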
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/***************************************/
	/* Init descriptors for TX HIGH channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/***************************************/
	/* Init descriptors for RX HIGH channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}

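/*
 * Tear the DXE engine down: mask the channel interrupts, release the IRQs,
 * complete any skb still waiting for a TX ack, put the block back into
 * reset and free the RX buffers and descriptor rings.
 */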
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	int reg_data = 0;

	/* Disable channel interrupts */
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
	wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);
	del_timer(&wcn->tx_ack_timer);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	/* Put the DXE block into reset before freeing memory */
	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);

	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}