Lines matching refs:ctlr (references to the identifier ctlr); a brief lifecycle sketch follows the listing.
112 struct cpdma_ctlr *ctlr;
173 #define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs))
176 #define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
190 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
192 struct cpdma_desc_pool *pool = ctlr->pool;
202 dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
212 static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
214 struct cpdma_params *cpdma_params = &ctlr->params;
218 pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
221 ctlr->pool = pool;
240 pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
244 dev_err(ctlr->dev, "pool create failed %d\n", ret);
250 pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
254 pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
266 dev_err(ctlr->dev, "pool add failed %d\n", ret);
273 cpdma_desc_pool_destroy(ctlr);
275 ctlr->pool = NULL;
306 static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
311 if (!ctlr->params.has_ext_regs)
314 if (ctlr->state != CPDMA_STATE_ACTIVE)
323 val = dma_reg_read(ctlr, info->reg);
326 dma_reg_write(ctlr, info->reg, val);
331 static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
336 if (!ctlr->params.has_ext_regs)
339 if (ctlr->state != CPDMA_STATE_ACTIVE)
348 ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
353 * Has to be called under ctlr lock
357 struct cpdma_ctlr *ctlr = chan->ctlr;
366 dma_reg_write(ctlr, rate_reg, chan->rate_factor);
368 rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
371 ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
377 struct cpdma_ctlr *ctlr = chan->ctlr;
378 struct cpdma_desc_pool *pool = ctlr->pool;
386 if (ctlr->state != CPDMA_STATE_ACTIVE) {
390 dma_reg_write(ctlr, chan->int_set, chan->mask);
409 struct cpdma_ctlr *ctlr = ch->ctlr;
417 chan = ctlr->channels[i];
440 dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
445 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
460 freq = ctlr->params.bus_freq_mhz * 1000 * 32;
462 dev_err(ctlr->dev, "The bus frequency is not set\n");
506 dma_reg_write(ctlr, rate_reg, ch->rate_factor);
512 struct cpdma_ctlr *ctlr;
514 ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
515 if (!ctlr)
518 ctlr->state = CPDMA_STATE_IDLE;
519 ctlr->params = *params;
520 ctlr->dev = params->dev;
521 ctlr->chan_num = 0;
522 spin_lock_init(&ctlr->lock);
524 if (cpdma_desc_pool_create(ctlr))
527 ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
528 ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
530 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
531 ctlr->num_chan = CPDMA_MAX_CHANNELS;
532 return ctlr;
535 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
541 spin_lock_irqsave(&ctlr->lock, flags);
542 if (ctlr->state != CPDMA_STATE_IDLE) {
543 spin_unlock_irqrestore(&ctlr->lock, flags);
547 if (ctlr->params.has_soft_reset) {
550 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
552 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
560 for (i = 0; i < ctlr->num_chan; i++) {
561 writel(0, ctlr->params.txhdp + 4 * i);
562 writel(0, ctlr->params.rxhdp + 4 * i);
563 writel(0, ctlr->params.txcp + 4 * i);
564 writel(0, ctlr->params.rxcp + 4 * i);
567 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
568 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
570 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
571 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
573 ctlr->state = CPDMA_STATE_ACTIVE;
576 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
577 chan = ctlr->channels[i];
588 _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
589 _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
591 spin_unlock_irqrestore(&ctlr->lock, flags);
595 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
600 spin_lock_irqsave(&ctlr->lock, flags);
601 if (ctlr->state != CPDMA_STATE_ACTIVE) {
602 spin_unlock_irqrestore(&ctlr->lock, flags);
606 ctlr->state = CPDMA_STATE_TEARDOWN;
607 spin_unlock_irqrestore(&ctlr->lock, flags);
609 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
610 if (ctlr->channels[i])
611 cpdma_chan_stop(ctlr->channels[i]);
614 spin_lock_irqsave(&ctlr->lock, flags);
615 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
616 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
618 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
619 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
621 ctlr->state = CPDMA_STATE_IDLE;
623 spin_unlock_irqrestore(&ctlr->lock, flags);
627 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
631 if (!ctlr)
634 if (ctlr->state != CPDMA_STATE_IDLE)
635 cpdma_ctlr_stop(ctlr);
637 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
638 cpdma_chan_destroy(ctlr->channels[i]);
640 cpdma_desc_pool_destroy(ctlr);
644 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
649 spin_lock_irqsave(&ctlr->lock, flags);
650 if (ctlr->state != CPDMA_STATE_ACTIVE) {
651 spin_unlock_irqrestore(&ctlr->lock, flags);
655 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
656 if (ctlr->channels[i])
657 cpdma_chan_int_ctrl(ctlr->channels[i], enable);
660 spin_unlock_irqrestore(&ctlr->lock, flags);
664 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
666 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
669 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
671 return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
674 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
676 return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
679 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
700 chan = ctlr->channels[i];
723 * Has to be called under ctlr lock
725 static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
734 if (!ctlr->chan_num)
737 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
738 chan = ctlr->channels[i];
756 tx_desc_num = ctlr->num_tx_desc;
757 rx_desc_num = ctlr->num_rx_desc;
768 cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
769 cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
786 struct cpdma_ctlr *ctlr = ch->ctlr;
790 spin_lock_irqsave(&ctlr->lock, flags);
794 spin_unlock_irqrestore(&ctlr->lock, flags);
801 ret = cpdma_chan_split_pool(ctlr);
802 spin_unlock_irqrestore(&ctlr->lock, flags);
810 u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
814 divident = ctlr->params.bus_freq_mhz * 32 * 1000;
828 struct cpdma_ctlr *ctlr;
838 ctlr = ch->ctlr;
839 spin_lock_irqsave(&ctlr->lock, flags);
846 ret = cpdma_chan_set_factors(ctlr, ch);
853 _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
854 _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
855 spin_unlock_irqrestore(&ctlr->lock, flags);
860 spin_unlock_irqrestore(&ctlr->lock, flags);
876 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
885 if (__chan_linear(chan_num) >= ctlr->num_chan)
888 chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
892 spin_lock_irqsave(&ctlr->lock, flags);
893 if (ctlr->channels[chan_num]) {
894 spin_unlock_irqrestore(&ctlr->lock, flags);
895 devm_kfree(ctlr->dev, chan);
899 chan->ctlr = ctlr;
907 chan->hdp = ctlr->params.rxhdp + offset;
908 chan->cp = ctlr->params.rxcp + offset;
909 chan->rxfree = ctlr->params.rxfree + offset;
915 chan->hdp = ctlr->params.txhdp + offset;
916 chan->cp = ctlr->params.txcp + offset;
926 ctlr->channels[chan_num] = chan;
927 ctlr->chan_num++;
929 cpdma_chan_split_pool(ctlr);
931 spin_unlock_irqrestore(&ctlr->lock, flags);
949 struct cpdma_ctlr *ctlr;
954 ctlr = chan->ctlr;
956 spin_lock_irqsave(&ctlr->lock, flags);
959 ctlr->channels[chan->chan_num] = NULL;
960 ctlr->chan_num--;
961 devm_kfree(ctlr->dev, chan);
962 cpdma_chan_split_pool(ctlr);
964 spin_unlock_irqrestore(&ctlr->lock, flags);
983 struct cpdma_ctlr *ctlr = chan->ctlr;
985 struct cpdma_desc_pool *pool = ctlr->pool;
1019 struct cpdma_ctlr *ctlr = chan->ctlr;
1031 desc = cpdma_desc_alloc(ctlr->pool);
1037 if (len < ctlr->params.min_packet_size) {
1038 len = ctlr->params.min_packet_size;
1047 dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
1049 buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
1050 ret = dma_mapping_error(ctlr->dev, buffer);
1052 cpdma_desc_free(ctlr->pool, desc, 1);
1181 struct cpdma_ctlr *ctlr = chan->ctlr;
1182 struct cpdma_desc_pool *pool = ctlr->pool;
1197 struct cpdma_ctlr *ctlr = chan->ctlr;
1198 struct cpdma_desc_pool *pool = ctlr->pool;
1209 dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
1212 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
1221 struct cpdma_ctlr *ctlr = chan->ctlr;
1225 struct cpdma_desc_pool *pool = ctlr->pool;
1295 struct cpdma_ctlr *ctlr = chan->ctlr;
1299 spin_lock_irqsave(&ctlr->lock, flags);
1301 spin_unlock_irqrestore(&ctlr->lock, flags);
1314 struct cpdma_ctlr *ctlr = chan->ctlr;
1315 struct cpdma_desc_pool *pool = ctlr->pool;
1327 dma_reg_write(ctlr, chan->int_clear, chan->mask);
1330 dma_reg_write(ctlr, chan->td, chan_linear(chan));
1384 dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
1391 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
1396 spin_lock_irqsave(&ctlr->lock, flags);
1397 ret = _cpdma_control_get(ctlr, control);
1398 spin_unlock_irqrestore(&ctlr->lock, flags);
1403 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
1408 spin_lock_irqsave(&ctlr->lock, flags);
1409 ret = _cpdma_control_set(ctlr, control, value);
1410 spin_unlock_irqrestore(&ctlr->lock, flags);
1415 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
1417 return ctlr->num_rx_desc;
1420 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
1422 return ctlr->num_tx_desc;
1425 int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
1430 spin_lock_irqsave(&ctlr->lock, flags);
1432 temp = ctlr->num_rx_desc;
1433 ctlr->num_rx_desc = num_rx_desc;
1434 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1435 ret = cpdma_chan_split_pool(ctlr);
1437 ctlr->num_rx_desc = temp;
1438 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1441 spin_unlock_irqrestore(&ctlr->lock, flags);
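For orientation, the matches above trace out the usual controller/channel lifecycle: a driver fills in a struct cpdma_params, creates the controller (lines 512-532), creates channels against it, starts it (lines 535-591), and tears everything down in reverse order (lines 595-640). The sketch below is illustrative only, not code from any in-tree driver: the wrapper function names are hypothetical, the header name "davinci_cpdma.h" and the exact cpdma_ctlr_create() prototype are assumed from the usage visible in the listing, and the cpdma_chan_create() arguments beyond ctlr and chan_num are not shown above, so channel creation is left as a comment.

/*
 * Illustrative bring-up/teardown sketch built only from the calls visible
 * in the listing; assumes "davinci_cpdma.h" declares them.
 */
#include <linux/errno.h>
#include "davinci_cpdma.h"

static int example_cpdma_bringup(struct cpdma_params *params,
				 struct cpdma_ctlr **out)
{
	struct cpdma_ctlr *ctlr;
	int ret;

	/* Allocates the controller and its descriptor pool (lines 512-532). */
	ctlr = cpdma_ctlr_create(params);
	if (!ctlr)
		return -ENOMEM;

	/*
	 * Channels would normally be created here with
	 * cpdma_chan_create(ctlr, chan_num, ...); the remaining arguments
	 * (completion handler, direction) are driver-specific and omitted.
	 */

	/* Soft-resets the engine and enables TX/RX (lines 535-591). */
	ret = cpdma_ctlr_start(ctlr);
	if (ret) {
		cpdma_ctlr_destroy(ctlr);
		return ret;
	}

	/* Unmasks interrupts on every created channel (lines 644-660). */
	cpdma_ctlr_int_ctrl(ctlr, true);

	*out = ctlr;
	return 0;
}

static void example_cpdma_teardown(struct cpdma_ctlr *ctlr)
{
	cpdma_ctlr_int_ctrl(ctlr, false);	/* mask channel interrupts */
	cpdma_ctlr_stop(ctlr);			/* lines 595-623 */
	cpdma_ctlr_destroy(ctlr);		/* lines 627-640 */
}

The ordering mirrors what the listed code enforces: cpdma_ctlr_start() refuses to run unless the controller is idle, cpdma_ctlr_stop() stops every channel before clearing the interrupt masks, and cpdma_ctlr_destroy() calls cpdma_ctlr_stop() itself if the controller is still active before freeing the channels and the descriptor pool.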