Lines Matching defs:mhi_chan

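The matches below appear to come from the MHI host core (drivers/bus/mhi/host/main.c, or drivers/bus/mhi/core/main.c in older trees). As a legend for reading them, here is a partial sketch of struct mhi_chan reconstructed only from the members that the matching lines actually touch; types and ordering are approximations, and the authoritative definition (with additional members) lives in the driver's internal header, not here.

/*
 * Partial sketch of struct mhi_chan, limited to the members referenced in
 * the matches below. Approximate types; not the authoritative definition.
 */
struct mhi_chan {
	const char *name;
	u32 chan;			/* channel number */
	struct mhi_ring buf_ring;	/* client buffer bookkeeping */
	struct mhi_ring tre_ring;	/* transfer ring elements (TREs) */
	u32 er_index;			/* index of the associated event ring */
	u32 intmod;			/* interrupt moderation, gates the BEI bit */
	enum dma_data_direction dir;	/* DMA_TO_DEVICE or DMA_FROM_DEVICE */
	struct db_cfg db_cfg;		/* doorbell mode and ring helper */
	u32 ee_mask;			/* execution environments allowed to use the channel */
	enum mhi_ch_state ch_state;	/* DISABLED / ENABLED / STOP / SUSPENDED */
	enum mhi_ev_ccs ccs;		/* last command completion code */
	struct mhi_device *mhi_dev;	/* client device bound to the channel */
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;		/* serializes prepare/unprepare */
	struct completion completion;	/* command completion from the device */
	rwlock_t lock;			/* protects ring and state accesses */
	bool configured;		/* channel described by the controller config */
	bool offload_ch;		/* ring managed outside the core */
	bool pre_alloc;			/* core pre-allocates inbound buffers */
};
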
133 		      struct mhi_chan *mhi_chan)
135 struct mhi_ring *ring = &mhi_chan->tre_ring;
147 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
277 struct mhi_chan *ul_chan, *dl_chan;
338 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
340 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
363 struct mhi_chan *mhi_chan;
368 mhi_chan = mhi_cntrl->mhi_chan;
369 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
370 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
371 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
378 switch (mhi_chan->dir) {
380 mhi_dev->ul_chan = mhi_chan;
381 mhi_dev->ul_chan_id = mhi_chan->chan;
385 mhi_dev->dl_chan = mhi_chan;
386 mhi_dev->dl_chan_id = mhi_chan->chan;
395 mhi_chan->mhi_dev = mhi_dev;
398 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
399 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
401 mhi_chan++;
402 if (mhi_chan->dir == DMA_TO_DEVICE) {
403 mhi_dev->ul_chan = mhi_chan;
404 mhi_dev->ul_chan_id = mhi_chan->chan;
406 mhi_dev->dl_chan = mhi_chan;
407 mhi_dev->dl_chan_id = mhi_chan->chan;
410 mhi_chan->mhi_dev = mhi_dev;
415 mhi_dev->name = mhi_chan->name;
467 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
468 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
570 struct mhi_chan *mhi_chan)
579 buf_ring = &mhi_chan->buf_ring;
580 tre_ring = &mhi_chan->tre_ring;
592 write_lock_irqsave(&mhi_chan->lock, flags);
594 read_lock_bh(&mhi_chan->lock);
596 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
622 result.dir = mhi_chan->dir;
646 read_unlock_bh(&mhi_chan->lock);
649 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
651 if (mhi_chan->dir == DMA_TO_DEVICE) {
662 if (mhi_chan->pre_alloc) {
663 if (mhi_queue_buf(mhi_chan->mhi_dev,
664 mhi_chan->dir,
669 mhi_chan->chan);
674 read_lock_bh(&mhi_chan->lock);
683 mhi_chan->db_cfg.db_mode = 1;
687 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
700 write_unlock_irqrestore(&mhi_chan->lock, flags);
702 read_unlock_bh(&mhi_chan->lock);
709 struct mhi_chan *mhi_chan)
718 buf_ring = &mhi_chan->buf_ring;
719 tre_ring = &mhi_chan->tre_ring;
736 result.dir = mhi_chan->dir;
738 read_lock_bh(&mhi_chan->lock);
740 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
746 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
766 read_unlock_bh(&mhi_chan->lock);
778 struct mhi_chan *mhi_chan;
792 mhi_cntrl->mhi_chan[chan].configured) {
793 mhi_chan = &mhi_cntrl->mhi_chan[chan];
794 write_lock_bh(&mhi_chan->lock);
795 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
796 complete(&mhi_chan->completion);
797 write_unlock_bh(&mhi_chan->lock);
814 struct mhi_chan *mhi_chan;
942 mhi_chan = &mhi_cntrl->mhi_chan[chan];
943 if (!mhi_chan->configured)
945 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
987 struct mhi_chan *mhi_chan;
1014 mhi_cntrl->mhi_chan[chan].configured) {
1015 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1018 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1021 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1121 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1123 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1134 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1149 if (mhi_chan->dir == DMA_TO_DEVICE)
1153 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1166 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1174 if (unlikely(mhi_chan->pre_alloc))
1184 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1193 if (unlikely(mhi_chan->pre_alloc))
1200 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1210 write_lock_bh(&mhi_chan->lock);
1212 buf_ring = &mhi_chan->buf_ring;
1213 tre_ring = &mhi_chan->tre_ring;
1224 buf_info->dir = mhi_chan->dir;
1230 write_unlock_bh(&mhi_chan->lock);
1238 bei = !!(mhi_chan->intmod);
1249 write_unlock_bh(&mhi_chan->lock);
1270 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1272 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1279 struct mhi_chan *mhi_chan,
1288 if (mhi_chan)
1289 chan = mhi_chan->chan;
1332 struct mhi_chan *mhi_chan,
1335 struct device *dev = &mhi_chan->mhi_dev->dev;
1339 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
1344 write_lock_irq(&mhi_chan->lock);
1345 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1346 mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1347 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1348 write_unlock_irq(&mhi_chan->lock);
1351 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1352 write_unlock_irq(&mhi_chan->lock);
1357 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1363 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1364 mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1371 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1381 reinit_completion(&mhi_chan->completion);
1382 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1385 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1389 ret = wait_for_completion_timeout(&mhi_chan->completion,
1391 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1394 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1402 write_lock_irq(&mhi_chan->lock);
1403 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1405 write_unlock_irq(&mhi_chan->lock);
1409 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1419 struct mhi_chan *mhi_chan)
1422 struct device *dev = &mhi_chan->mhi_dev->dev;
1424 mutex_lock(&mhi_chan->mutex);
1426 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1428 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1433 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1437 mhi_chan->chan);
1440 write_lock_irq(&mhi_chan->lock);
1441 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1442 write_unlock_irq(&mhi_chan->lock);
1444 if (!mhi_chan->offload_ch) {
1445 mhi_reset_chan(mhi_cntrl, mhi_chan);
1446 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1448 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1450 mutex_unlock(&mhi_chan->mutex);
1454 struct mhi_chan *mhi_chan, unsigned int flags)
1457 struct device *dev = &mhi_chan->mhi_dev->dev;
1459 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1461 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1465 mutex_lock(&mhi_chan->mutex);
1468 if (!mhi_chan->offload_ch) {
1469 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1474 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1479 if (mhi_chan->dir == DMA_FROM_DEVICE)
1480 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
1483 if (mhi_chan->pre_alloc) {
1485 &mhi_chan->tre_ring);
1502 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1511 read_lock_irq(&mhi_chan->lock);
1512 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1513 read_unlock_irq(&mhi_chan->lock);
1518 mutex_unlock(&mhi_chan->mutex);
1523 if (!mhi_chan->offload_ch)
1524 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1527 mutex_unlock(&mhi_chan->mutex);
1532 mutex_unlock(&mhi_chan->mutex);
1533 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1582 struct mhi_chan *mhi_chan)
1588 buf_ring = &mhi_chan->buf_ring;
1589 tre_ring = &mhi_chan->tre_ring;
1595 if (mhi_chan->dir == DMA_TO_DEVICE) {
1607 if (mhi_chan->pre_alloc) {
1611 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1616 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1620 int chan = mhi_chan->chan;
1623 if (mhi_chan->offload_ch)
1627 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1628 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1632 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1641 struct mhi_chan *mhi_chan;
1644 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1645 if (!mhi_chan)
1648 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1657 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1658 if (!mhi_chan)
1661 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1682 struct mhi_chan *mhi_chan;
1686 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1687 if (!mhi_chan)
1690 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
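For context on how the channel machinery matched above is exercised from outside the core, here is a minimal, hypothetical MHI client sketch. The channel name "SAMPLE" and all my_* identifiers are made up; the mhi_* calls are the public client API from <linux/mhi.h>. mhi_prepare_for_transfer() lands in mhi_prepare_channel()/mhi_update_channel_state(), mhi_queue_buf() in mhi_gen_tre() and mhi_ring_chan_db(), inbound completions come back through mhi_chan->xfer_cb from parse_xfer_event(), and mhi_unprepare_from_transfer() tears the channels down via mhi_unprepare_channel(). Depending on kernel version, the prepare call may take an extra flags argument (e.g. MHI_CH_INBOUND_ALLOC_BUFS, matched above) or have an _autoqueue variant.

/* Minimal, hypothetical MHI client driver sketch; channel name is made up. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mhi.h>

#define MY_BUF_LEN 4096

/* Inbound completion: invoked from parse_xfer_event() via mhi_chan->xfer_cb */
static void my_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *result)
{
	if (result->transaction_status)
		return;

	dev_dbg(&mhi_dev->dev, "received %zu bytes\n", result->bytes_xferd);

	/* Re-queue the same buffer; goes through mhi_gen_tre()/mhi_ring_chan_db() */
	if (mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, result->buf_addr,
			  MY_BUF_LEN, MHI_EOT))
		dev_err(&mhi_dev->dev, "failed to re-queue inbound buffer\n");
}

/* Outbound completion: buffer ownership returns to the client here */
static void my_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *result)
{
	kfree(result->buf_addr);
}

static int my_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	void *buf;
	int ret;

	/* Moves both channels to running state via mhi_prepare_channel() */
	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	buf = kmalloc(MY_BUF_LEN, GFP_KERNEL);
	if (!buf) {
		mhi_unprepare_from_transfer(mhi_dev);
		return -ENOMEM;
	}
	dev_set_drvdata(&mhi_dev->dev, buf);

	/* Post one inbound buffer so the device has somewhere to write */
	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, MY_BUF_LEN, MHI_EOT);
	if (ret) {
		kfree(buf);
		mhi_unprepare_from_transfer(mhi_dev);
		return ret;
	}

	return 0;
}

static void my_remove(struct mhi_device *mhi_dev)
{
	/* Resets both channels via mhi_unprepare_channel()/mhi_reset_chan() */
	mhi_unprepare_from_transfer(mhi_dev);
	kfree(dev_get_drvdata(&mhi_dev->dev));
}

static const struct mhi_device_id my_id_table[] = {
	{ .chan = "SAMPLE" },	/* hypothetical channel name */
	{}
};
MODULE_DEVICE_TABLE(mhi, my_id_table);

static struct mhi_driver my_driver = {
	.id_table = my_id_table,
	.probe = my_probe,
	.remove = my_remove,
	.ul_xfer_cb = my_ul_xfer_cb,
	.dl_xfer_cb = my_dl_xfer_cb,
	.driver = {
		.name = "my_mhi_client",
	},
};
module_mhi_driver(my_driver);

MODULE_LICENSE("GPL");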