Lines matching refs:ring
32 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
38 *meta = &(ring->meta[slot]);
39 desc = ring->descbase;
45 static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
50 struct b43legacy_dmadesc32 *descbase = ring->descbase;
57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
62 addr |= ring->dev->dma.translation;
63 ctl = (bufsize - ring->frameoffset)
65 if (slot == ring->nr_slots - 1)
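The fragments above show op32_fill_descriptor() building the two 32-bit words of a descriptor: the bus address with the SSB translation bits ORed in, and a control word packing the byte count plus frame flags. A minimal standalone sketch of that packing; the flag values are approximations of the driver's dma.h constants, the addrext handling is omitted, and the real driver additionally byte-swaps with cpu_to_le32():

    #include <stdint.h>

    #define DCTL_BYTECNT    0x00001FFFu  /* buffer byte count */
    #define DCTL_DTABLEEND  0x10000000u  /* last descriptor in the table */
    #define DCTL_IRQ        0x20000000u  /* interrupt on completion */
    #define DCTL_FRAMEEND   0x40000000u
    #define DCTL_FRAMESTART 0x80000000u

    struct dmadesc32 {
        uint32_t control;
        uint32_t address;
    };

    static void fill_descriptor(struct dmadesc32 *desc, uint32_t busaddr,
                                uint32_t translation, uint32_t bufsize,
                                uint32_t frameoffset, int is_last_slot,
                                int start, int end, int irq)
    {
        uint32_t ctl;

        /* Device-visible address, as in "addr |= ring->dev->dma.translation" */
        desc->address = busaddr | translation;

        /* Advertise the usable area: buffer minus the header space the
         * core prepends, as in "ctl = (bufsize - ring->frameoffset)". */
        ctl = (bufsize - frameoffset) & DCTL_BYTECNT;
        if (is_last_slot)
            ctl |= DCTL_DTABLEEND;   /* hardware wraps back to slot 0 */
        if (start)
            ctl |= DCTL_FRAMESTART;
        if (end)
            ctl |= DCTL_FRAMEEND;
        if (irq)
            ctl |= DCTL_IRQ;
        desc->control = ctl;
    }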
80 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
82 b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
86 static void op32_tx_suspend(struct b43legacy_dmaring *ring)
88 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
89 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
93 static void op32_tx_resume(struct b43legacy_dmaring *ring)
95 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
96 b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
100 static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
104 val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
110 static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
113 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
117 static inline int free_slots(struct b43legacy_dmaring *ring)
119 return (ring->nr_slots - ring->used_slots);
122 static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
124 B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
125 if (slot == ring->nr_slots - 1)
130 static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
132 B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
134 return ring->nr_slots - 1;
139 static void update_max_used_slots(struct b43legacy_dmaring *ring,
142 if (current_used_slots <= ring->max_used_slots)
144 ring->max_used_slots = current_used_slots;
145 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
146 b43legacydbg(ring->dev->wl,
147 "max_used_slots increased to %d on %s ring %d\n",
148 ring->max_used_slots,
149 ring->tx ? "TX" : "RX",
150 ring->index);
154 void update_max_used_slots(struct b43legacy_dmaring *ring,
161 int request_slot(struct b43legacy_dmaring *ring)
165 B43legacy_WARN_ON(!ring->tx);
166 B43legacy_WARN_ON(ring->stopped);
167 B43legacy_WARN_ON(free_slots(ring) == 0);
169 slot = next_slot(ring, ring->current_slot);
170 ring->current_slot = slot;
171 ring->used_slots++;
173 update_max_used_slots(ring, ring->used_slots);
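Taken together, free_slots(), next_slot(), prev_slot() and request_slot() implement a circular-buffer cursor: current_slot is the last slot handed out (-1 while the ring is empty) and used_slots counts outstanding descriptors. A standalone model of that bookkeeping:

    #include <assert.h>

    struct ring {
        int nr_slots;
        int current_slot;   /* last slot handed out; -1 when empty */
        int used_slots;
    };

    static int free_slots(const struct ring *r)
    {
        return r->nr_slots - r->used_slots;
    }

    static int next_slot(const struct ring *r, int slot)
    {
        assert(slot >= -1 && slot <= r->nr_slots - 1);
        return (slot == r->nr_slots - 1) ? 0 : slot + 1;  /* wrap to 0 */
    }

    static int request_slot(struct ring *r)
    {
        assert(free_slots(r) > 0);
        r->current_slot = next_slot(r, r->current_slot);
        r->used_slots++;
        return r->current_slot;
    }

    int main(void)
    {
        struct ring r = { .nr_slots = 4, .current_slot = -1, .used_slots = 0 };

        while (free_slots(&r) > 0)
            request_slot(&r);
        assert(r.current_slot == 3 && r.used_slots == 4);
        return 0;
    }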
178 /* Mac80211-queue to b43legacy-ring mapping */
183 struct b43legacy_dmaring *ring;
185 /*FIXME: For now we always run on TX-ring-1 */
194 ring = dev->dma.tx_ring3;
197 ring = dev->dma.tx_ring2;
200 ring = dev->dma.tx_ring1;
203 ring = dev->dma.tx_ring0;
206 ring = dev->dma.tx_ring4;
209 ring = dev->dma.tx_ring5;
213 return ring;
216 /* Bcm4301-ring to mac80211-queue mapping */
217 static inline int txring_to_priority(struct b43legacy_dmaring *ring)
225 return idx_to_prio[ring->index];
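priority_to_txring() and txring_to_priority() are inverses of each other. Reading the switch cases above (queue 0 maps to tx_ring3, down to queue 3 mapping to tx_ring0, with 4 and 5 passing straight through) gives a mapping that swaps 0 with 3 and 1 with 2, so it is its own inverse. A standalone round-trip check:

    #include <assert.h>

    /* Queue -> ring index, inferred from the switch above; ring index ->
     * queue is the idx_to_prio table. The mapping is an involution. */
    static const int prio_to_idx[6] = { 3, 2, 1, 0, 4, 5 };
    static const int idx_to_prio[6] = { 3, 2, 1, 0, 4, 5 };

    int main(void)
    {
        for (int q = 0; q < 6; q++)
            assert(idx_to_prio[prio_to_idx[q]] == q);  /* round-trips */
        return 0;
    }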
247 dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
255 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
259 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
267 void unmap_descbuffer(struct b43legacy_dmaring *ring,
273 dma_unmap_single(ring->dev->dev->dma_dev,
277 dma_unmap_single(ring->dev->dev->dma_dev,
283 void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
287 B43legacy_WARN_ON(ring->tx);
289 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
294 void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
298 B43legacy_WARN_ON(ring->tx);
300 dma_sync_single_for_device(ring->dev->dev->dma_dev,
305 void free_descriptor_buffer(struct b43legacy_dmaring *ring,
318 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
321 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
323 &(ring->dmabase), GFP_KERNEL);
324 if (!ring->descbase)
330 static void free_ringmemory(struct b43legacy_dmaring *ring)
332 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
333 ring->descbase, ring->dmabase);
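These two fragments are the whole lifetime of the descriptor table: a single coherent block whose CPU pointer (descbase) and bus address (dmabase) stay paired from allocation to free. A kernel-style sketch reconstructed from the lines shown (the error-return detail is an assumption):

    static int alloc_ringmemory(struct b43legacy_dmaring *ring)
    {
        /* One coherent block holds the whole descriptor table; dmabase
         * is the bus address later programmed into TXRING/RXRING. */
        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
                                            B43legacy_DMA_RINGMEMSIZE,
                                            &ring->dmabase, GFP_KERNEL);
        if (!ring->descbase)
            return -ENOMEM;
        return 0;
    }

    static void free_ringmemory(struct b43legacy_dmaring *ring)
    {
        dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
    }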
411 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
416 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
419 switch (ring->type) {
435 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
440 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
450 B43legacy_WARN_ON(ring->tx);
452 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
455 dmaaddr = map_descbuffer(ring, skb->data,
456 ring->rx_buffersize, 0);
457 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
463 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
466 dmaaddr = map_descbuffer(ring, skb->data,
467 ring->rx_buffersize, 0);
470 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
477 op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
488 * This is used for an RX ring only.
490 static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
497 for (i = 0; i < ring->nr_slots; i++) {
498 desc = op32_idx2desc(ring, i, &meta);
500 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
502 b43legacyerr(ring->dev->wl,
508 ring->used_slots = ring->nr_slots;
515 desc = op32_idx2desc(ring, i, &meta);
517 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
524 * Reset the controller, write the ring busaddress
527 static int dmacontroller_setup(struct b43legacy_dmaring *ring)
532 u32 trans = ring->dev->dma.translation;
533 u32 ringbase = (u32)(ring->dmabase);
535 if (ring->tx) {
541 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
542 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
546 err = alloc_initial_descbuffers(ring);
552 value = (ring->frameoffset <<
557 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
558 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
561 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
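dmacontroller_setup() programs the engine differently per direction: TX only needs the control enable and the ring base address (with the SSB translation folded in), while RX must first own real buffers, then advertises the frame offset in RXCTL and seeds RXINDEX. A hedged sketch of that sequence; the enable/mask/shift names are approximations of the driver's dma.h, only the RXINDEX value of 200 is taken verbatim from the listing:

    static int dmacontroller_setup(struct b43legacy_dmaring *ring)
    {
        u32 trans = ring->dev->dma.translation;
        u32 ringbase = (u32)ring->dmabase;
        u32 value;
        int err;

        if (ring->tx) {
            b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
                                B43legacy_DMA32_TXENABLE);
            b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
                                (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans);
        } else {
            err = alloc_initial_descbuffers(ring);  /* RX needs buffers first */
            if (err)
                return err;
            value = ring->frameoffset << B43legacy_DMA32_RXFROFF_SHIFT;
            value |= B43legacy_DMA32_RXENABLE;
            b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
            b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
                                (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans);
            b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
        }
        return 0;
    }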
569 static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
571 if (ring->tx) {
572 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
573 ring->type);
574 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
576 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
577 ring->type);
578 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
582 static void free_all_descbuffers(struct b43legacy_dmaring *ring)
587 if (!ring->used_slots)
589 for (i = 0; i < ring->nr_slots; i++) {
590 op32_idx2desc(ring, i, &meta);
593 B43legacy_WARN_ON(!ring->tx);
596 if (ring->tx)
597 unmap_descbuffer(ring, meta->dmaaddr,
600 unmap_descbuffer(ring, meta->dmaaddr,
601 ring->rx_buffersize, 0);
602 free_descriptor_buffer(ring, meta, 0);
629 struct b43legacy_dmaring *ring;
634 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
635 if (!ring)
637 ring->type = type;
638 ring->dev = dev;
644 ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
646 if (!ring->meta)
649 ring->txhdr_cache = kcalloc(nr_slots,
652 if (!ring->txhdr_cache)
656 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
660 if (b43legacy_dma_mapping_error(ring, dma_test,
663 kfree(ring->txhdr_cache);
664 ring->txhdr_cache = kcalloc(nr_slots,
667 if (!ring->txhdr_cache)
671 ring->txhdr_cache,
675 if (b43legacy_dma_mapping_error(ring, dma_test,
685 ring->nr_slots = nr_slots;
686 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
687 ring->index = controller_index;
689 ring->tx = true;
690 ring->current_slot = -1;
692 if (ring->index == 0) {
693 ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
694 ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
695 } else if (ring->index == 3) {
696 ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
697 ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
702 ring->last_injected_overflow = jiffies;
705 err = alloc_ringmemory(ring);
708 err = dmacontroller_setup(ring);
713 return ring;
716 free_ringmemory(ring);
718 kfree(ring->txhdr_cache);
720 kfree(ring->meta);
722 kfree(ring);
723 ring = NULL;
728 static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
730 if (!ring)
733 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
734 " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
735 (ring->tx) ? "TX" : "RX", ring->max_used_slots,
736 ring->nr_slots);
740 dmacontroller_cleanup(ring);
741 free_all_descbuffers(ring);
742 free_ringmemory(ring);
744 kfree(ring->txhdr_cache);
745 kfree(ring->meta);
746 kfree(ring);
779 struct b43legacy_dmaring *ring;
800 ring = b43legacy_setup_dmaring(dev, 0, 1, type);
801 if (!ring)
803 dma->tx_ring0 = ring;
805 ring = b43legacy_setup_dmaring(dev, 1, 1, type);
806 if (!ring)
808 dma->tx_ring1 = ring;
810 ring = b43legacy_setup_dmaring(dev, 2, 1, type);
811 if (!ring)
813 dma->tx_ring2 = ring;
815 ring = b43legacy_setup_dmaring(dev, 3, 1, type);
816 if (!ring)
818 dma->tx_ring3 = ring;
820 ring = b43legacy_setup_dmaring(dev, 4, 1, type);
821 if (!ring)
823 dma->tx_ring4 = ring;
825 ring = b43legacy_setup_dmaring(dev, 5, 1, type);
826 if (!ring)
828 dma->tx_ring5 = ring;
831 ring = b43legacy_setup_dmaring(dev, 0, 0, type);
832 if (!ring)
834 dma->rx_ring0 = ring;
837 ring = b43legacy_setup_dmaring(dev, 3, 0, type);
838 if (!ring)
840 dma->rx_ring3 = ring;
873 static u16 generate_cookie(struct b43legacy_dmaring *ring,
884 switch (ring->index) {
916 struct b43legacy_dmaring *ring = NULL;
920 ring = dma->tx_ring0;
923 ring = dma->tx_ring1;
926 ring = dma->tx_ring2;
929 ring = dma->tx_ring3;
932 ring = dma->tx_ring4;
935 ring = dma->tx_ring5;
941 B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
943 return ring;
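generate_cookie() and parse_cookie() round-trip a (ring, slot) pair through the 16-bit cookie the firmware echoes back in TX status: a controller ID in the upper bits selects the ring, the lower bits carry the slot. The exact packing below is a hypothetical stand-in for illustration (the driver switches on ring->index to pick the ID):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical packing: controller ID in the top 4 bits,
     * slot number in the low 12. */
    static uint16_t generate_cookie(int ring_index, int slot)
    {
        assert(ring_index >= 0 && ring_index <= 5);
        assert(slot >= 0 && slot <= 0x0FFF);
        return (uint16_t)(((ring_index + 1) << 12) | slot);
    }

    static void parse_cookie(uint16_t cookie, int *ring_index, int *slot)
    {
        *ring_index = (cookie >> 12) - 1;
        *slot = cookie & 0x0FFF;
    }

    int main(void)
    {
        int idx, slot;

        parse_cookie(generate_cookie(3, 42), &idx, &slot);
        assert(idx == 3 && slot == 42);
        return 0;
    }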
946 static int dma_tx_fragment(struct b43legacy_dmaring *ring,
962 old_top_slot = ring->current_slot;
963 old_used_slots = ring->used_slots;
966 slot = request_slot(ring);
967 desc = op32_idx2desc(ring, slot, &meta_hdr);
970 header = &(ring->txhdr_cache[slot * sizeof(
972 err = b43legacy_generate_txhdr(ring->dev, header,
974 generate_cookie(ring, slot));
976 ring->current_slot = old_top_slot;
977 ring->used_slots = old_used_slots;
981 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
983 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
985 ring->current_slot = old_top_slot;
986 ring->used_slots = old_used_slots;
989 op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
993 slot = request_slot(ring);
994 desc = op32_idx2desc(ring, slot, &meta);
1000 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1002 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1005 ring->current_slot = old_top_slot;
1006 ring->used_slots = old_used_slots;
1021 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1022 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1023 ring->current_slot = old_top_slot;
1024 ring->used_slots = old_used_slots;
1030 op32_fill_descriptor(ring, desc, meta->dmaaddr,
1035 op32_poke_tx(ring, next_slot(ring, slot));
1041 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1047 int should_inject_overflow(struct b43legacy_dmaring *ring)
1050 if (unlikely(b43legacy_debug(ring->dev,
1056 next_overflow = ring->last_injected_overflow + HZ;
1058 ring->last_injected_overflow = jiffies;
1059 b43legacydbg(ring->dev->wl,
1060 "Injecting TX ring overflow on "
1061 "DMA controller %d\n", ring->index);
1072 struct b43legacy_dmaring *ring;
1075 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1076 B43legacy_WARN_ON(!ring->tx);
1078 if (unlikely(ring->stopped)) {
1088 if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
1097 err = dma_tx_fragment(ring, &skb);
1108 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1109 should_inject_overflow(ring)) {
1110 /* This TX ring is full. */
1114 ring->stopped = true;
1116 b43legacydbg(dev->wl, "Stopped TX ring %d\n",
1117 ring->index);
1125 struct b43legacy_dmaring *ring;
1131 ring = parse_cookie(dev, status->cookie, &slot);
1132 if (unlikely(!ring))
1134 B43legacy_WARN_ON(!ring->tx);
1136 /* Sanity check: TX packets are processed in-order on one ring.
1139 firstused = ring->current_slot - ring->used_slots + 1;
1141 firstused = ring->nr_slots + firstused;
1147 "ring %d. Expected %d, but got %d\n",
1148 ring->index, firstused, slot);
1153 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1154 op32_idx2desc(ring, slot, &meta);
1157 unmap_descbuffer(ring, meta->dmaaddr,
1160 unmap_descbuffer(ring, meta->dmaaddr,
1215 ring->used_slots--;
1219 slot = next_slot(ring, slot);
1222 if (ring->stopped) {
1223 B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1224 ring->stopped = false;
1227 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1228 dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1232 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1234 b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1235 ring->index);
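The status handler frees slots strictly in order, so the oldest in-flight slot must equal current_slot - used_slots + 1, wrapped modulo nr_slots; a cookie pointing anywhere else means the firmware reported out of sequence. A standalone version of that wraparound arithmetic:

    #include <assert.h>

    /* Oldest outstanding slot, wrapped into [0, nr_slots). */
    static int first_used_slot(int current_slot, int used_slots, int nr_slots)
    {
        int firstused = current_slot - used_slots + 1;

        if (firstused < 0)
            firstused += nr_slots;   /* wrapped backwards across slot 0 */
        return firstused;
    }

    int main(void)
    {
        /* current_slot 1 with 4 packets in flight on a 64-slot ring:
         * the oldest sits at slot 62, wrapped across slot 0. */
        assert(first_used_slot(1, 4, 64) == 62);
        return 0;
    }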
1241 static void dma_rx(struct b43legacy_dmaring *ring,
1252 desc = op32_idx2desc(ring, *slot, &meta);
1254 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1257 if (ring->index == 3) {
1270 b43legacy_handle_hwtxstatus(ring->dev, hw);
1272 sync_descbuffer_for_device(ring, meta->dmaaddr,
1273 ring->rx_buffersize);
1289 sync_descbuffer_for_device(ring, meta->dmaaddr,
1290 ring->rx_buffersize);
1294 if (unlikely(len > ring->rx_buffersize)) {
1304 desc = op32_idx2desc(ring, *slot, &meta);
1306 sync_descbuffer_for_device(ring, meta->dmaaddr,
1307 ring->rx_buffersize);
1308 *slot = next_slot(ring, *slot);
1310 tmp -= ring->rx_buffersize;
1314 b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1316 len, ring->rx_buffersize, cnt);
1321 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1323 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1325 sync_descbuffer_for_device(ring, dmaaddr,
1326 ring->rx_buffersize);
1330 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1331 skb_put(skb, len + ring->frameoffset);
1332 skb_pull(skb, ring->frameoffset);
1334 b43legacy_rx(ring->dev, skb, rxhdr);
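Before handing the skb to mac80211, dma_rx() must skip the ring->frameoffset bytes of hardware RX header that precede the actual frame; that is what the skb_put()/skb_pull() pair above does. A user-space model of the same pointer arithmetic:

    #include <assert.h>

    struct buf {
        unsigned char *data;   /* start of valid data, like skb->data */
        unsigned int len;      /* valid length, like skb->len */
    };

    /* skb_put(skb, len + frameoffset) grows the buffer over header + frame;
     * skb_pull(skb, frameoffset) then drops the header, leaving the frame. */
    static void expose_frame(struct buf *b, unsigned int frame_len,
                             unsigned int frameoffset)
    {
        b->len += frame_len + frameoffset;
        b->data += frameoffset;
        b->len -= frameoffset;
    }

    int main(void)
    {
        unsigned char raw[256];
        struct buf b = { .data = raw, .len = 0 };

        expose_frame(&b, 100, 30);
        assert(b.data == raw + 30 && b.len == 100);
        return 0;
    }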
1339 void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1345 B43legacy_WARN_ON(ring->tx);
1346 current_slot = op32_get_current_rxslot(ring);
1348 ring->nr_slots));
1350 slot = ring->current_slot;
1351 for (; slot != current_slot; slot = next_slot(ring, slot)) {
1352 dma_rx(ring, &slot);
1353 update_max_used_slots(ring, ++used_slots);
1355 op32_set_current_rxslot(ring, slot);
1356 ring->current_slot = slot;
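b43legacy_dma_rx() is the RX poll loop: it reads the slot the hardware is currently on via op32_get_current_rxslot(), consumes every slot between the driver's cursor and that point, then writes the new cursor back. A sketch building on the struct ring / next_slot() model after request_slot above:

    /* Consume every filled slot. process() takes the cursor by pointer,
     * like dma_rx(ring, &slot), because one oversized frame can span and
     * discard several buffers. */
    static void rx_poll(struct ring *r, int hw_slot,
                        void (*process)(struct ring *, int *))
    {
        int slot = r->current_slot;

        while (slot != hw_slot) {
            process(r, &slot);
            slot = next_slot(r, slot);
        }
        r->current_slot = slot;  /* written back via op32_set_current_rxslot() */
    }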
1359 static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1361 B43legacy_WARN_ON(!ring->tx);
1362 op32_tx_suspend(ring);
1365 static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1367 B43legacy_WARN_ON(!ring->tx);
1368 op32_tx_resume(ring);