Lines Matching refs:dev

62 addr |= ring->dev->dma.translation;
137 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
138 b43legacydbg(ring->dev->wl,
172 struct b43legacy_wldev *dev,
178 return dev->dma.tx_ring1;
186 ring = dev->dma.tx_ring3;
189 ring = dev->dma.tx_ring2;
192 ring = dev->dma.tx_ring1;
195 ring = dev->dma.tx_ring0;
198 ring = dev->dma.tx_ring4;
201 ring = dev->dma.tx_ring5;
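
The cluster at lines 172-201 is the driver's priority-to-TX-ring lookup; the return at line 178 reads as an early short-circuit that pins all traffic to tx_ring1, with the full mapping below it kept for when the QoS queues are used. A minimal sketch consistent with the fragments (the case labels are inferred from the order of the matches and are an assumption):

	static struct b43legacy_dmaring *priority_to_txring(
					struct b43legacy_wldev *dev,
					int queue_priority)
	{
		/* Line 178: everything currently rides on ring 1. */
		return dev->dma.tx_ring1;

		/* Unreached QoS mapping, 0 = highest priority
		 * (reconstructed from lines 186-201). */
		switch (queue_priority) {
		case 0:
			return dev->dma.tx_ring3;
		case 1:
			return dev->dma.tx_ring2;
		case 2:
			return dev->dma.tx_ring1;
		case 3:
			return dev->dma.tx_ring0;
		case 4:
			return dev->dma.tx_ring4;
		default:
			return dev->dma.tx_ring5;
		}
	}
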
234 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
238 dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
252 dma_unmap_single(ring->dev->dev->dma_dev,
256 dma_unmap_single(ring->dev->dev->dma_dev,
268 dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
279 dma_sync_single_for_device(ring->dev->dev->dma_dev,
300 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
311 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
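
Lines 234-311 all go through ring->dev->dev->dma_dev, the underlying bus device: streaming maps and unmaps for packet buffers, for-cpu/for-device syncs on the RX path, and a single coherent allocation for the descriptor ring itself. A condensed sketch of the coherent pair, assuming a ring->dmabase field holds the bus address (that field name is an assumption; B43legacy_DMA_RINGMEMSIZE is taken from line 311):

	static int alloc_ringmemory(struct b43legacy_dmaring *ring)
	{
		/* Descriptors are read by the device directly, so they
		 * live in coherent memory and never need explicit syncs. */
		ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
						    B43legacy_DMA_RINGMEMSIZE,
						    &ring->dmabase, GFP_KERNEL);
		if (!ring->descbase)
			return -ENOMEM;
		return 0;
	}

	static void free_ringmemory(struct b43legacy_dmaring *ring)
	{
		dma_free_coherent(ring->dev->dev->dma_dev,
				  B43legacy_DMA_RINGMEMSIZE,
				  ring->descbase, ring->dmabase);
	}
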
316 static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
327 b43legacy_write32(dev, mmio_base + offset, 0);
330 value = b43legacy_read32(dev, mmio_base + offset);
339 b43legacyerr(dev->wl, "DMA RX reset timed out\n");
347 static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
359 value = b43legacy_read32(dev, mmio_base + offset);
368 b43legacy_write32(dev, mmio_base + offset, 0);
371 value = b43legacy_read32(dev, mmio_base + offset);
380 b43legacyerr(dev->wl, "DMA TX reset timed out\n");
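
The RX and TX reset helpers at lines 316-380 share one shape: write 0 to the ring's control register, then poll a status register until the engine reports itself stopped, and log the "reset timed out" error if it never does. A folded-together sketch of that loop; the iteration bound, the delay, and the status-field names are assumptions, and the real code selects the register offsets per engine type:

	static int b43legacy_wait_engine_off(struct b43legacy_wldev *dev,
					     u16 mmio_base, u16 offset)
	{
		u32 value;
		int i;

		b43legacy_write32(dev, mmio_base + offset, 0);
		for (i = 0; i < 1000; i++) {
			value = b43legacy_read32(dev, mmio_base + offset);
			if ((value & B43legacy_DMA32_RXSTATE) ==
			    B43legacy_DMA32_RXSTAT_DISABLED)
				return 0;	/* engine has stopped */
			udelay(10);
		}
		b43legacyerr(dev->wl, "DMA reset timed out\n");
		return -ENODEV;
	}
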
395 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
481 b43legacyerr(ring->dev->wl,
511 u32 trans = ring->dev->dma.translation;
551 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
555 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
585 static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
591 b43legacy_write32(dev,
594 tmp = b43legacy_read32(dev, mmio_base +
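
b43legacy_engine_type() (lines 585-594) probes the hardware instead of consulting a table: it writes a known bit pattern into a DMA control register and reads it back, and whether the address-extension bits stick tells it which enum b43legacy_dmatype (and hence which DMA mask width) the core supports. Roughly, with the register, mask, and helper names as assumptions, and mmio_base passed in rather than derived as the real function does:

	static enum b43legacy_dmatype probe_dma_type(struct b43legacy_wldev *dev,
						     u16 mmio_base)
	{
		u32 tmp;

		/* Try to set the address-extension bits... */
		b43legacy_write32(dev, mmio_base + B43legacy_DMA32_TXCTL,
				  B43legacy_DMA32_TXADDREXT_MASK);
		tmp = b43legacy_read32(dev, mmio_base +
					    B43legacy_DMA32_TXCTL);
		/* ...and see whether the hardware kept them. */
		if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
			return B43legacy_DMA_32BIT;
		return B43legacy_DMA_30BIT;
	}
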
603 struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
617 ring->dev = dev;
635 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
649 dma_test = dma_map_single(dev->dev->dma_dev,
659 dma_unmap_single(dev->dev->dma_dev, dma_test,
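
Inside b43legacy_setup_dmaring() (lines 603-659) the freshly allocated TX-header cache is test-mapped before the ring goes live: if dma_mapping_error() flags the first attempt, the second dma_map_single at line 649 suggests a retry with a more constrained allocation before failing the setup. The probe pattern, with the header size and the error label as assumptions:

	/* Verify the txhdr cache is reachable under the mask chosen
	 * in b43legacy_dma_init(). */
	dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
				  sizeof(struct b43legacy_txhdr_fw3),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev->dma_dev, dma_test))
		goto err_kfree_txhdr_cache;	/* or retry from GFP_DMA */
	/* The mapping was only a probe; release it again. */
	dma_unmap_single(dev->dev->dma_dev, dma_test,
			 sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE);
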
712 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
728 void b43legacy_dma_free(struct b43legacy_wldev *dev)
732 if (b43legacy_using_pio(dev))
734 dma = &dev->dma;
755 int b43legacy_dma_init(struct b43legacy_wldev *dev)
757 struct b43legacy_dma *dma = &dev->dma;
759 enum b43legacy_dmatype type = b43legacy_engine_type(dev);
762 err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
765 b43legacywarn(dev->wl, "DMA for this device not supported. "
767 dev->__using_pio = true;
770 b43legacyerr(dev->wl, "DMA for this device not supported and "
775 dma->translation = ssb_dma_translation(dev->dev);
779 ring = b43legacy_setup_dmaring(dev, 0, 1, type);
784 ring = b43legacy_setup_dmaring(dev, 1, 1, type);
789 ring = b43legacy_setup_dmaring(dev, 2, 1, type);
794 ring = b43legacy_setup_dmaring(dev, 3, 1, type);
799 ring = b43legacy_setup_dmaring(dev, 4, 1, type);
804 ring = b43legacy_setup_dmaring(dev, 5, 1, type);
810 ring = b43legacy_setup_dmaring(dev, 0, 0, type);
815 if (dev->dev->id.revision < 5) {
816 ring = b43legacy_setup_dmaring(dev, 3, 0, type);
822 b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
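
Taken together, the b43legacy_dma_init() matches (lines 755-822) give the bring-up order: probe the engine width, negotiate a DMA mask for it (falling back to PIO when the mask cannot be satisfied), cache the SSB address translation, then build six TX rings, the RX ring, and, on core revisions below 5, a second RX ring at index 3. A skeleton with the error unwinding elided (the rx_ring field names are assumptions):

	int b43legacy_dma_init(struct b43legacy_wldev *dev)
	{
		struct b43legacy_dma *dma = &dev->dma;
		enum b43legacy_dmatype type = b43legacy_engine_type(dev);
		int err;

		err = dma_set_mask_and_coherent(dev->dev->dma_dev,
						DMA_BIT_MASK(type));
		if (err) {
			/* Lines 765-770: warn and fall back to PIO if the
			 * device can do it, otherwise fail hard. */
			dev->__using_pio = true;
			return err;
		}
		dma->translation = ssb_dma_translation(dev->dev);

		dma->tx_ring0 = b43legacy_setup_dmaring(dev, 0, 1, type);
		/* ... tx_ring1 through tx_ring5 follow, indices 1-5 ... */
		dma->rx_ring0 = b43legacy_setup_dmaring(dev, 0, 0, type);
		if (dev->dev->id.revision < 5)
			dma->rx_ring3 = b43legacy_setup_dmaring(dev, 3, 0,
								type);

		b43legacydbg(dev->wl, "%u-bit DMA initialized\n",
			     (unsigned int)type);
		return 0;
	}
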
891 struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
894 struct b43legacy_dma *dma = &dev->dma;
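
parse_cookie() (lines 891-894) undoes the cookie the TX path stamps into each frame, so that a later hardware TX-status report can be routed back to the ring and slot that sent it. A sketch assuming the usual b43-family encoding, where the top nibble picks the ring and the low 12 bits carry the slot (the exact layout is an assumption here):

	static struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
						      u16 cookie, int *slot)
	{
		struct b43legacy_dma *dma = &dev->dma;
		struct b43legacy_dmaring *ring = NULL;

		switch (cookie & 0xF000) {
		case 0x0000:
			ring = dma->tx_ring0;
			break;
		case 0x1000:
			ring = dma->tx_ring1;
			break;
		/* ... 0x2000 through 0x5000 select tx_ring2-tx_ring5 ... */
		}
		*slot = (cookie & 0x0FFF);
		return ring;
	}
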
951 err = b43legacy_generate_txhdr(ring->dev, header,
992 bounce_skb->dev = skb->dev;
1029 if (unlikely(b43legacy_debug(ring->dev,
1038 b43legacydbg(ring->dev->wl,
1048 int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1054 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1062 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1063 b43legacyerr(dev->wl, "Packet after queue stopped\n");
1070 b43legacyerr(dev->wl, "DMA queue overflow\n");
1084 b43legacyerr(dev->wl, "DMA tx mapping failure\n");
1091 ieee80211_stop_queue(dev->wl->hw, skb_mapping);
1092 dev->wl->tx_queue_stopped[skb_mapping] = 1;
1094 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1095 b43legacydbg(dev->wl, "Stopped TX ring %d\n",
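
The b43legacy_dma_tx() matches (lines 1048-1095) outline the TX fast path: map the skb's queue to a ring, complain ("Packet after queue stopped") if the ring was already stopped, reject the frame on ring overflow, transmit, and finally stop the mac80211 queue once the ring is nearly full so the stack stops feeding it. The throttle at lines 1091-1095 corresponds to this sketch, where free_slots(), SLOTS_PER_PACKET, and ring->index are assumed names:

	if (free_slots(ring) < SLOTS_PER_PACKET) {
		/* No room for the next frame: throttle mac80211 and
		 * record it, so the tx-status path can restart us. */
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
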
1101 void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1110 ring = parse_cookie(dev, status->cookie, &slot);
1125 b43legacydbg(dev->wl, "Out of order TX status report on DMA "
1157 if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
1183 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1200 dev->stats.last_tx = jiffies;
1206 if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
1207 dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
1211 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1212 if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
1213 b43legacydbg(dev->wl, "Woke up TX ring %d\n",
1217 ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
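
b43legacy_dma_handle_txstatus() (lines 1101-1217) closes the loop: parse_cookie() locates the ring and slot, completed buffers are unmapped and handed back through ieee80211_tx_status_irqsafe(), and the stopped queue is restarted once slots are free again. The wake-up tail, reconstructed from lines 1206-1217 (ring->index is an assumed field):

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		/* Frames were queued in the driver while we were
		 * stopped; let the worker below drain them first. */
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Kick the TX worker so anything queued meanwhile goes out. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
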
1249 b43legacy_handle_hwtxstatus(ring->dev, hw);
1293 b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1302 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1313 b43legacy_rx(ring->dev, skb, rxhdr);
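
The RX-side matches (lines 1249-1313) come from the per-descriptor receive handler: a frame carrying a hardware TX-status report is diverted to b43legacy_handle_hwtxstatus(), a frame larger than its buffer or one whose replacement buffer cannot be set up is dropped with the corresponding message, and everything else is passed up via b43legacy_rx(). In condensed sketch form (ring->rx_buffersize, the drop label, and the setup_rx_descbuffer() arguments are assumptions; the helper name itself appears at line 1302):

	if (len > ring->rx_buffersize) {
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small\n");
		goto drop;
	}
	/* Replace the buffer before handing the old one upward. */
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl,
			     "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop;
	}
	b43legacy_rx(ring->dev, skb, rxhdr);
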
1350 void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1352 b43legacy_power_saving_ctl_bits(dev, -1, 1);
1353 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1354 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1355 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1356 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1357 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1358 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1361 void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1363 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1364 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1365 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1366 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1367 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1368 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1369 b43legacy_power_saving_ctl_bits(dev, -1, -1);
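
One detail worth noting in the suspend/resume pair (lines 1350-1369): suspend raises the power-saving control bits first and then quiesces rings 0 through 5, while resume brings the rings back in the reverse order, 5 down to 0, and only then releases the control bits, so bring-up mirrors teardown with the power-saving override held for the whole window.
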