Lines matching refs:dev (every reference to dev in the b43legacy DMA code)

62 addr |= ring->dev->dma.translation;
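
The match at line 62 is the SSB address-translation fixup: the core sees host memory through a translation window, so the routing bits cached in dev->dma.translation must be OR'd into every address written to a hardware descriptor. A minimal sketch of that pattern (the helper name is hypothetical):

    /* Sketch: fold the SSB translation bits into a bus address before
     * it is written to a descriptor.  ring->dev->dma.translation is
     * cached from ssb_dma_translation() at init time (line 796). */
    static u32 txring_to_devaddr(struct b43legacy_dmaring *ring,
                                 dma_addr_t dmaaddr)
    {
            u32 addr = (u32)dmaaddr;

            addr |= ring->dev->dma.translation;
            return addr;
    }
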
145 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
146 b43legacydbg(ring->dev->wl,
180 struct b43legacy_wldev *dev,
186 return dev->dma.tx_ring1;
194 ring = dev->dma.tx_ring3;
197 ring = dev->dma.tx_ring2;
200 ring = dev->dma.tx_ring1;
203 ring = dev->dma.tx_ring0;
206 ring = dev->dma.tx_ring4;
209 ring = dev->dma.tx_ring5;
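
Lines 180-209 come from the priority-to-ring lookup. The driver currently forces all traffic onto tx_ring1 (line 186); the switch that follows is the intended QoS mapping. A hedged reconstruction (the exact case labels are inferred from the match order above):

    /* Sketch of priority_to_txring() as implied by lines 180-209. */
    static struct b43legacy_dmaring *priority_to_txring(
                    struct b43legacy_wldev *dev, int queue_priority)
    {
            struct b43legacy_dmaring *ring;

            /* QoS is not used yet: everything runs on ring 1, so the
             * switch below is currently unreachable. */
            return dev->dma.tx_ring1;

            switch (queue_priority) {       /* 0 = highest priority */
            default:
            case 0:
                    ring = dev->dma.tx_ring3;
                    break;
            case 1:
                    ring = dev->dma.tx_ring2;
                    break;
            case 2:
                    ring = dev->dma.tx_ring1;
                    break;
            case 3:
                    ring = dev->dma.tx_ring0;
                    break;
            case 4:
                    ring = dev->dma.tx_ring4;
                    break;
            case 5:
                    ring = dev->dma.tx_ring5;
                    break;
            }
            return ring;
    }
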
255 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
259 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
273 dma_unmap_single(ring->dev->dev->dma_dev,
277 dma_unmap_single(ring->dev->dev->dma_dev,
289 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
300 dma_sync_single_for_device(ring->dev->dev->dma_dev,
321 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
332 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
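
Lines 255-332 are the streaming-DMA plumbing: per-buffer mappings via dma_map_single()/dma_unmap_single(), CPU/device syncs around buffer access, and one coherent allocation for the descriptor ring itself. A minimal sketch of the map wrapper; the caller validates the result with dma_mapping_error() as seen at line 416:

    /* Sketch: map one buffer for the ring, choosing the direction from
     * whether this is a TX or an RX ring (mirrors lines 255-259). */
    static dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
                                     unsigned char *buf, size_t len, int tx)
    {
            if (tx)
                    return dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len, DMA_TO_DEVICE);
            return dma_map_single(ring->dev->dev->dma_dev,
                                  buf, len, DMA_FROM_DEVICE);
    }
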
337 static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
348 b43legacy_write32(dev, mmio_base + offset, 0);
351 value = b43legacy_read32(dev, mmio_base + offset);
360 b43legacyerr(dev->wl, "DMA RX reset timed out\n");
368 static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
380 value = b43legacy_read32(dev, mmio_base + offset);
389 b43legacy_write32(dev, mmio_base + offset, 0);
392 value = b43legacy_read32(dev, mmio_base + offset);
401 b43legacyerr(dev->wl, "DMA TX reset timed out\n");
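
Lines 337-401 are the RX and TX controller resets. Both follow the same idiom: clear the control register, then poll the status register until the engine reports itself stopped, bailing out with an error after a bounded number of reads. A sketch of that loop (iteration count, delay, and the status mask are illustrative placeholders):

    /* Sketch of the reset-poll idiom shared by the RX reset (line 337)
     * and the TX reset (line 368). */
    static int dmacontroller_reset_poll(struct b43legacy_wldev *dev,
                                        u16 mmio_base, u16 offset,
                                        u32 stop_mask, u32 stop_value)
    {
            u32 value;
            int i;

            b43legacy_write32(dev, mmio_base + offset, 0);
            for (i = 0; i < 10; i++) {
                    value = b43legacy_read32(dev, mmio_base + offset);
                    if ((value & stop_mask) == stop_value)
                            return 0;       /* engine confirmed stopped */
                    msleep(1);
            }
            b43legacyerr(dev->wl, "DMA reset timed out\n");
            return -ENODEV;
    }
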
416 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
502 b43legacyerr(ring->dev->wl,
532 u32 trans = ring->dev->dma.translation;
572 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
576 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
606 static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
612 b43legacy_write32(dev,
615 tmp = b43legacy_read32(dev, mmio_base +
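
b43legacy_engine_type() (lines 606-615) probes the engine width by writing the address-extension mask to a TX control register and reading it back: if the bits stick, the core supports 32-bit addressing, otherwise it is limited to 30 bits. A sketch under that reading (register and enum names follow the b43legacy convention):

    /* Sketch of the DMA width probe at lines 606-615. */
    static enum b43legacy_dmatype b43legacy_engine_type(
                    struct b43legacy_wldev *dev)
    {
            u32 tmp;
            u16 mmio_base = b43legacy_dmacontroller_base(0, 0);

            b43legacy_write32(dev, mmio_base + B43legacy_DMA32_TXCTL,
                              B43legacy_DMA32_TXADDREXT_MASK);
            tmp = b43legacy_read32(dev, mmio_base + B43legacy_DMA32_TXCTL);
            if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
                    return B43legacy_DMA_32BIT;
            return B43legacy_DMA_30BIT;
    }
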
624 struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
638 ring->dev = dev;
656 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
670 dma_test = dma_map_single(dev->dev->dma_dev,
680 dma_unmap_single(dev->dev->dma_dev, dma_test,
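
Lines 656-680 are a mapping self-test inside b43legacy_setup_dmaring(): the preallocated TX-header cache is mapped once (and retried at line 670 after a re-allocation) to prove that buffers of that kind actually land inside the engine's addressable range, then unmapped again. A condensed sketch; the error label, the header struct name, and the use of plain dma_mapping_error() instead of the driver's range-checking wrapper are assumptions:

    /* Sketch of the txhdr_cache DMA self-test at lines 656-680. */
    dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
                              sizeof(struct b43legacy_txhdr_fw3),
                              DMA_TO_DEVICE);
    if (dma_mapping_error(dev->dev->dma_dev, dma_test))
            goto err_kfree_txhdr_cache;     /* hypothetical label */
    dma_unmap_single(dev->dev->dma_dev, dma_test,
                     sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE);
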
733 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
749 void b43legacy_dma_free(struct b43legacy_wldev *dev)
753 if (b43legacy_using_pio(dev))
755 dma = &dev->dma;
776 int b43legacy_dma_init(struct b43legacy_wldev *dev)
778 struct b43legacy_dma *dma = &dev->dma;
780 enum b43legacy_dmatype type = b43legacy_engine_type(dev);
783 err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
786 b43legacywarn(dev->wl, "DMA for this device not supported. "
788 dev->__using_pio = true;
791 b43legacyerr(dev->wl, "DMA for this device not supported and "
796 dma->translation = ssb_dma_translation(dev->dev);
800 ring = b43legacy_setup_dmaring(dev, 0, 1, type);
805 ring = b43legacy_setup_dmaring(dev, 1, 1, type);
810 ring = b43legacy_setup_dmaring(dev, 2, 1, type);
815 ring = b43legacy_setup_dmaring(dev, 3, 1, type);
820 ring = b43legacy_setup_dmaring(dev, 4, 1, type);
825 ring = b43legacy_setup_dmaring(dev, 5, 1, type);
831 ring = b43legacy_setup_dmaring(dev, 0, 0, type);
836 if (dev->dev->id.revision < 5) {
837 ring = b43legacy_setup_dmaring(dev, 3, 0, type);
843 b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
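
Lines 776-843 are b43legacy_dma_init(): the DMA mask is negotiated from the detected engine type, with a fall-back to PIO when the platform rejects it, then the six TX rings and the RX ring (plus a second RX ring on core revisions below 5) are brought up in order. A condensed sketch of the negotiation; the Kconfig guard is assumed to be CONFIG_B43LEGACY_PIO:

    /* Sketch of the mask negotiation at lines 780-793.  DMA_BIT_MASK(type)
     * works because the b43legacy_dmatype values are bit widths (30/32). */
    err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
    if (err) {
    #ifdef CONFIG_B43LEGACY_PIO
            b43legacywarn(dev->wl, "DMA for this device not supported. "
                          "Falling back to PIO\n");
            dev->__using_pio = true;
            return -EAGAIN;
    #else
            b43legacyerr(dev->wl, "DMA for this device not supported and "
                         "no PIO fallback available\n");
            return -EOPNOTSUPP;
    #endif
    }
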
912 struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
915 struct b43legacy_dma *dma = &dev->dma;
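
parse_cookie() (line 912) reverses the cookie the TX path stamps into each frame, so a TX-status report can be routed back to the exact ring and slot it came from. In this driver family the 16-bit cookie packs the ring index into the high bits and the slot into the low bits; the 4/12 split in this sketch is an assumption:

    /* Sketch of the cookie scheme that parse_cookie() decodes. */
    static u16 generate_cookie(struct b43legacy_dmaring *ring, int slot)
    {
            u16 cookie;

            cookie = (u16)(ring->index << 12);      /* which TX ring */
            B43legacy_WARN_ON(slot & ~0x0FFF);      /* slot fits 12 bits */
            cookie |= (u16)slot;                    /* which descriptor */
            return cookie;
    }
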
972 err = b43legacy_generate_txhdr(ring->dev, header,
1013 bounce_skb->dev = skb->dev;
1050 if (unlikely(b43legacy_debug(ring->dev,
1059 b43legacydbg(ring->dev->wl,
1069 int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1075 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1083 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1084 b43legacyerr(dev->wl, "Packet after queue stopped\n");
1091 b43legacyerr(dev->wl, "DMA queue overflow\n");
1105 b43legacyerr(dev->wl, "DMA tx mapping failure\n");
1112 ieee80211_stop_queue(dev->wl->hw, skb_mapping);
1113 dev->wl->tx_queue_stopped[skb_mapping] = 1;
1115 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1116 b43legacydbg(dev->wl, "Stopped TX ring %d\n",
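
Lines 1069-1116 are the hot TX path: the skb's queue mapping selects a ring, a full ring is reported as "DMA queue overflow" (a driver bug, since the queue should have been stopped before that point), and after queueing a frame the driver stops the corresponding mac80211 queue as soon as the ring can no longer hold another worst-case frame. A sketch of that queue-stop tail; free_slots() and SLOTS_PER_PACKET are assumptions about the ring bookkeeping:

    /* Sketch of the tail of b43legacy_dma_tx() (lines 1108-1116). */
    if (free_slots(ring) < SLOTS_PER_PACKET) {
            /* Ring cannot take another frame: throttle mac80211 now,
             * and record why, so the TX-status path can undo it. */
            unsigned int skb_mapping = skb_get_queue_mapping(skb);

            ieee80211_stop_queue(dev->wl->hw, skb_mapping);
            dev->wl->tx_queue_stopped[skb_mapping] = 1;
            ring->stopped = true;
            if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                    b43legacydbg(dev->wl, "Stopped TX ring %d\n",
                                 ring->index);
    }
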
1122 void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1131 ring = parse_cookie(dev, status->cookie, &slot);
1146 b43legacydbg(dev->wl, "Out of order TX status report on DMA "
1178 if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
1204 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1221 dev->stats.last_tx = jiffies;
1227 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1228 dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1232 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1233 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1234 b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1238 ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
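
b43legacy_dma_handle_txstatus() (lines 1122-1238) looks up the ring and slot from the status cookie, unmaps each completed buffer, reports the skb back through ieee80211_tx_status_irqsafe(), and finally re-opens the queue it stopped in the TX path; the tx_queue_stopped[] bookkeeping mirrors lines 1112-1113. A sketch of the wake-up tail:

    /* Sketch of the wake-up tail (lines 1227-1238). */
    if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
            /* Frames were queued internally while we were stopped;
             * the worker below retries them before reopening. */
            dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
    } else {
            ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
            if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
                    b43legacydbg(dev->wl, "Woke up TX ring %d\n",
                                 ring->index);
    }
    ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
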
1270 b43legacy_handle_hwtxstatus(ring->dev, hw);
1314 b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1323 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1334 b43legacy_rx(ring->dev, skb, rxhdr);
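
Lines 1270-1334 are the RX poll path. A frame longer than the preallocated buffer is dropped with an error, and a failed attempt to re-arm the slot with a fresh buffer (setup_rx_descbuffer()) also drops the frame so the ring never loses a slot; only then is the skb handed to b43legacy_rx(). A condensed sketch; the rxhdr field and local names are assumptions:

    /* Sketch of the RX handoff around lines 1314-1334. */
    len = le16_to_cpu(rxhdr->frame_len);
    if (unlikely(len > ring->rx_buffersize)) {
            b43legacyerr(ring->dev->wl, "DMA RX buffer too small\n");
            return;                 /* drop the oversized frame */
    }
    err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
    if (unlikely(err)) {
            /* Could not re-arm the slot: recycle the old buffer and
             * drop this frame rather than lose the descriptor. */
            b43legacydbg(ring->dev->wl,
                         "DMA RX: setup_rx_descbuffer() failed\n");
            return;
    }
    b43legacy_rx(ring->dev, skb, rxhdr);
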
1371 void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1373 b43legacy_power_saving_ctl_bits(dev, -1, 1);
1374 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1375 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1376 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1377 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1378 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1379 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1382 void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1384 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1385 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1386 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1387 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1388 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1389 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1390 b43legacy_power_saving_ctl_bits(dev, -1, -1);
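
The suspend/resume matches (lines 1371-1390) show the bracketing: the power-saving control bits are asserted before the six TX rings are suspended and released only after all of them are resumed, and the rings are resumed in the reverse of the suspend order (0 through 5 down, 5 through 0 up). A sketch of the per-ring helper those loops call; the register and bit names are placeholder assumptions:

    /* Sketch of b43legacy_dma_tx_suspend_ring() as used at line 1374. */
    static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
    {
            u32 ctl;

            B43legacy_WARN_ON(!ring->tx);   /* only TX rings suspend */
            ctl = b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL);
            ctl |= B43legacy_DMA32_TXSUSPEND;
            b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, ctl);
    }
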