Lines matching references to dma

65 	struct ltq_dma_channel dma;
135 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
142 ch->dma.desc++;
143 ch->dma.desc %= LTQ_DESC_NUM;
152 ltq_dma_open(&priv->chan_tx.dma);
153 ltq_dma_enable_irq(&priv->chan_tx.dma);
156 ltq_dma_open(&priv->chan_rx.dma);
165 ltq_dma_enable_irq(&priv->chan_rx.dma);
179 ltq_dma_close(&priv->chan_rx.dma);
182 ltq_dma_close(&priv->chan_tx.dma);
189 void *buf = ch->rx_buff[ch->dma.desc];
194 ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
195 if (!ch->rx_buff[ch->dma.desc]) {
196 ch->rx_buff[ch->dma.desc] = buf;
201 mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
204 skb_free_frag(ch->rx_buff[ch->dma.desc]);
205 ch->rx_buff[ch->dma.desc] = buf;
210 ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
214 ch->dma.desc_base[ch->dma.desc].ctl =
223 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
224 void *buf = ch->rx_buff[ch->dma.desc];
233 ch->dma.desc++;
234 ch->dma.desc %= LTQ_DESC_NUM;
291 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
307 ltq_dma_enable_irq(&ch->dma);
323 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
332 memset(&ch->dma.desc_base[ch->tx_free], 0,
351 ltq_dma_enable_irq(&ch->dma);
362 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
375 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
381 ch->skb[ch->dma.desc] = skb;
387 /* dma needs to start on a burst length value aligned address */
395 ch->dma.desc++;
396 ch->dma.desc %= LTQ_DESC_NUM;
397 if (ch->dma.desc == ch->tx_free)
432 ltq_dma_close(&ch_rx->dma);
436 curr_desc = ch_rx->dma.desc;
438 for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
439 ch_rx->dma.desc++) {
440 buff = ch_rx->rx_buff[ch_rx->dma.desc];
451 ch_rx->dma.desc = curr_desc;
454 ltq_dma_open(&ch_rx->dma);
455 ltq_dma_enable_irq(&ch_rx->dma);
475 ltq_dma_disable_irq(&ch->dma);
479 ltq_dma_ack_irq(&ch->dma);
494 ch_rx->dma.nr = XRX200_DMA_RX;
495 ch_rx->dma.dev = priv->dev;
498 ltq_dma_alloc_rx(&ch_rx->dma);
499 for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
500 ch_rx->dma.desc++) {
505 ch_rx->dma.desc = 0;
506 ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
510 ch_rx->dma.irq);
514 ch_tx->dma.nr = XRX200_DMA_TX;
515 ch_tx->dma.dev = priv->dev;
518 ltq_dma_alloc_tx(&ch_tx->dma);
519 ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
523 ch_tx->dma.irq);
530 ltq_dma_free(&ch_tx->dma);
540 ltq_dma_free(&ch_rx->dma);
548 ltq_dma_free(&priv->chan_tx.dma);
549 ltq_dma_free(&priv->chan_rx.dma);
585 priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
586 if (priv->chan_rx.dma.irq < 0)
588 priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
589 if (priv->chan_tx.dma.irq < 0)
603 /* bring up the dma engine and IP core */
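The pattern that recurs throughout these matches is the software descriptor-ring index: ch->dma.desc is advanced after each descriptor is consumed and wrapped modulo LTQ_DESC_NUM (file lines 142-143, 233-234, 395-396), and the TX path treats the ring as full when the advanced index catches up with ch->tx_free (file line 397). The following is a minimal, self-contained sketch of that ring arithmetic only; the struct layout, the value of LTQ_DESC_NUM, and the demo values are assumptions for illustration, not the kernel's definitions.

	/*
	 * Standalone model of the descriptor-ring index handling seen in the
	 * listing above: advance + wrap, plus the TX "ring full" test.
	 * LTQ_DESC_NUM and struct demo_chan are simplified assumptions here,
	 * not the driver's types.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define LTQ_DESC_NUM 64	/* assumed ring size for the demo */

	struct demo_chan {
		int desc;	/* next descriptor to use (models ch->dma.desc) */
		int tx_free;	/* oldest unreclaimed descriptor (models ch->tx_free) */
	};

	/* Mirrors "ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM;". */
	static void ring_advance(int *idx)
	{
		*idx = (*idx + 1) % LTQ_DESC_NUM;
	}

	/* Mirrors the check "if (ch->dma.desc == ch->tx_free)": after advancing,
	 * the ring is full once the producer index reaches the reclaim index. */
	static bool tx_ring_full(const struct demo_chan *ch)
	{
		return ch->desc == ch->tx_free;
	}

	int main(void)
	{
		struct demo_chan ch = { .desc = 0, .tx_free = 0 };
		int i;

		/* Queue LTQ_DESC_NUM - 1 packets without reclaiming any. */
		for (i = 0; i < LTQ_DESC_NUM - 1; i++)
			ring_advance(&ch.desc);
		printf("ring full: %s\n", tx_ring_full(&ch) ? "yes" : "no");

		/* One more advance wraps the index onto tx_free: ring is full. */
		ring_advance(&ch.desc);
		printf("ring full: %s\n", tx_ring_full(&ch) ? "yes" : "no");

		return 0;
	}

In the driver itself this condition is what gates stopping the transmit queue; the sketch only demonstrates the index arithmetic, not the netdev queue handling or the DMA descriptor ownership bits.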