// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

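/* Fill one 32-bit DMA descriptor. The DMA address is split: the bits covered
 * by SSB_DMA_TRANSLATION_MASK go into the ADDREXT field of the control word
 * and are replaced in the address word by the core's address translation
 * value. The control word also carries the byte count, the
 * FRAMESTART/FRAMEEND/IRQ flags and DTABLEEND on the last slot of the ring.
 */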
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

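/* The descriptor ring is circular: next_slot()/prev_slot() wrap around at the
 * ring boundaries. A slot value of -1 (the initial current_slot of an empty
 * TX ring) is accepted by next_slot() and maps to slot 0. */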
static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX",
		       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		fallthrough;
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

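	/* Clear RXCTL to disable the engine, then poll RXSTATUS until the
	 * state machine reports DISABLED. i == -1 marks that this happened
	 * before the timeout. */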
	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 dma_addr_t addr,
					 size_t buffersize,
					 bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

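	/* Even a successful mapping is unusable if the buffer does not lie
	 * entirely below the engine's addressing limit: 1 GiB for 30-bit and
	 * 4 GiB for 32-bit DMA. */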
	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

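	/* Clear the fields that the RX path polls to detect that the device
	 * has finished writing into this buffer (dma_rx() waits for a
	 * non-zero frame_len, or a non-zero cookie on the TX-status ring). */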
	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			& B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

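/* Probe the width of the DMA engine: write the address-extension mask to
 * TXCTL and read it back. If the ADDREXT bits stick, the core has a 32-bit
 * engine; otherwise only 30-bit addressing is available. */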
static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return B43legacy_DMA_32BIT;
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to worry about concurrency with the RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type = b43legacy_engine_type(dev);
	int err;

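	/* The b43legacy_dmatype values are the address widths (30 or 32),
	 * so the type can be fed directly to DMA_BIT_MASK(). */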
	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
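	/* Example: slot 10 on TX ring 2 yields the cookie 0xC00A. */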
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
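/* Each frame occupies two ring slots: one descriptor for the hardware TX
 * header and one for the actual 802.11 frame data. */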
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
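	/* Example: current_slot == 5 with used_slots == 3 means slots 3, 4
	 * and 5 are in flight, so the status must be for slot 3. */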
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing the status.
			 * The xmit function has overwritten the rate-control value with the
			 * number of retries actually performed by the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

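		/* The device may still be in the middle of DMA-ing the status
		 * into this buffer; poll briefly for the cookie to become
		 * non-zero (it was cleared in setup_rx_descbuffer()). */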
		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}