Lines Matching defs:gpii
473 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
474 u32 gpii_mask; /* gpii instances available for apps */
476 struct gpii *gpiis;
490 struct gpii *gpii;
501 struct gpii {
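The match above shows only the opening of struct gpii (source line 501). A simplified reconstruction of the per-instance state, inferred entirely from the members referenced later in this listing, might look as follows; the member order, the enum/struct type names, and anything not referenced below are assumptions, not the driver's actual layout:

    struct gpii {
            u32 gpii_id;                      /* instance index within the GPI block */
            void __iomem *regs;               /* per-EE register window */
            void __iomem *ev_cntxt_base_reg;  /* event-ring context registers */
            void __iomem *ev_cntxt_db_reg;    /* event-ring doorbell */
            void __iomem *ev_ring_rp_lsb_reg; /* hardware read pointer, low 32 bits */
            void __iomem *ev_cmd_reg;         /* event-ring command register */
            void __iomem *ieob_clr_reg;       /* IEOB interrupt clear register */
            int irq;
            bool configured_irq;
            bool ieob_set;
            u32 cntxt_type_irq_msk;           /* cached TYPE_IRQ mask */
            enum gpi_cmd gpi_cmd;             /* command currently in flight */
            enum gpi_ev_state ev_state;
            enum gpi_pm_state pm_state;
            rwlock_t pm_lock;                 /* guards pm_state against the IRQ path */
            struct mutex ctrl_lock;           /* serializes channel control operations */
            struct completion cmd_completion; /* completed from the IRQ handler */
            struct tasklet_struct ev_task;    /* bottom half for event processing */
            struct gchan gchan[MAX_CHANNELS_PER_GPII]; /* TX + RX channel pair */
            struct gpi_ring ev_ring;          /* event ring shared by both channels */
            struct gpi_dev *gpi_dev;          /* parent controller */
    };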
543 static void gpi_process_events(struct gpii *gpii);
566 static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
571 static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
577 static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
580 u32 tmp = gpi_read_reg(gpii, addr);
584 gpi_write_reg(gpii, addr, val);
588 gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
590 void __iomem *addr = gpii->regs + offset;
591 u32 tmp = gpi_read_reg(gpii, addr);
596 gpi_write_reg(gpii, addr, tmp);
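Reassembled from the fragments at source lines 566 to 596, the register helpers follow the usual relaxed-MMIO plus read-modify-write pattern. The accessor bodies here are a sketch, assuming readl_relaxed()/writel_relaxed() semantics; the signatures are taken verbatim from the matches above:

    #include <linux/io.h>

    static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
    {
            return readl_relaxed(addr);
    }

    static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
    {
            writel_relaxed(val, addr);
    }

    /* Read-modify-write: clear the bits in @mask, then OR in @val (masked). */
    static void gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
    {
            void __iomem *addr = gpii->regs + offset;
            u32 tmp = gpi_read_reg(gpii, addr);

            tmp &= ~mask;
            tmp |= (val & mask);
            gpi_write_reg(gpii, addr, tmp);
    }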
599 static void gpi_disable_interrupts(struct gpii *gpii)
601 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
603 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
605 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
607 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
609 gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
611 gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
613 gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
616 gpii->cntxt_type_irq_msk = 0;
617 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
618 gpii->configured_irq = false;
622 static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
631 if (!gpii->configured_irq) {
632 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
634 "gpi-dma", gpii);
636 dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
637 gpii->irq, ret);
644 * GPII only uses one EV ring per gpii so we can globally
648 gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
650 gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
651 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
652 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
654 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
656 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
659 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
662 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
665 gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
668 gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
670 gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
671 gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
672 gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
673 gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
674 gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
676 gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);
678 gpii->cntxt_type_irq_msk = enable;
681 gpii->configured_irq = true;
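Source lines 622 to 681 show the interrupt setup doing two distinct jobs: a one-time devm_request_irq() plus full default programming (including zeroing the MSI base, scratch, and error-log registers), and a fast path that only flips the IEOB bit in a cached copy of the TYPE_IRQ mask. A sketch of that fast path, with gpi_mask_ieob() as a hypothetical helper name; the two mask updates are taken from lines 648 to 652:

    /* Toggle only the IEOB bit; the cached mask (cntxt_type_irq_msk)
     * avoids reading the TYPE_IRQ mask register back on every toggle.
     */
    static void gpi_mask_ieob(struct gpii *gpii, bool enable)
    {
            if (enable)
                    gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
            else
                    gpii->cntxt_type_irq_msk &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;

            gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
                           GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
                           gpii->cntxt_type_irq_msk);
    }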
685 /* Sends gpii event or channel command */
686 static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
699 dev_dbg(gpii->gpi_dev->dev,
703 reinit_completion(&gpii->cmd_completion);
704 gpii->gpi_cmd = gpi_cmd;
706 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
709 gpi_write_reg(gpii, cmd_reg, cmd);
710 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
713 dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
725 if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
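The command path at source lines 686 to 725 pairs a register write with a completion that the IRQ handler fires (lines 798 and 901). A condensed sketch of that handshake; the CMD_TIMEOUT_MS value and the error return are illustrative, and the post-timeout state verification at lines 713 to 725 is omitted:

    #define CMD_TIMEOUT_MS 250      /* assumed value, for illustration only */

    static int gpi_send_cmd_sketch(struct gpii *gpii, struct gchan *gchan, u32 cmd)
    {
            /* Channel commands target the channel's command register,
             * event-ring commands the gpii-wide one (line 706).
             */
            void __iomem *cmd_reg = gchan ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
            unsigned long left;

            reinit_completion(&gpii->cmd_completion);
            gpi_write_reg(gpii, cmd_reg, cmd);

            /* The IRQ handler calls complete_all() once the hardware
             * reports the new channel or event-ring state.
             */
            left = wait_for_completion_timeout(&gpii->cmd_completion,
                                               msecs_to_jiffies(CMD_TIMEOUT_MS));
            return left ? 0 : -EIO;
    }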
735 struct gpii *gpii = gchan->gpii;
739 gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
743 static inline void gpi_write_ev_db(struct gpii *gpii,
749 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
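Both doorbell writers (source lines 735 to 749) convert the software write pointer into the DMA address the hardware was given for the ring and write it to the doorbell register. A sketch of the channel variant, with to_physical() assumed to be the driver's vaddr-to-DMA-address translation helper:

    static void gpi_write_ch_db_sketch(struct gchan *gchan,
                                       struct gpi_ring *ring, void *wp)
    {
            struct gpii *gpii = gchan->gpii;
            /* Publish how far software has advanced the ring. */
            u32 p_wp = lower_32_bits(to_physical(ring, wp)); /* assumed helper */

            gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
    }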
753 static void gpi_process_ieob(struct gpii *gpii)
755 gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
757 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
758 tasklet_hi_schedule(&gpii->ev_task);
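Reassembled from source lines 753 to 758, the IEOB top half is a classic ack/mask/defer sequence; the tasklet unmasks IEOB again when it finishes (line 1165):

    static void gpi_process_ieob(struct gpii *gpii)
    {
            gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));    /* ack */
            gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0); /* mask */
            tasklet_hi_schedule(&gpii->ev_task);                /* defer */
    }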
762 static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
764 u32 gpii_id = gpii->gpii_id;
766 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
772 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
778 gchan = &gpii->gchan[chid];
779 state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
788 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
798 complete_all(&gpii->cmd_completion);
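Source lines 762 to 798 read the channel-control IRQ status, ack it, and refresh each flagged channel's cached state before completing the pending command. A sketch of that loop; the status-register offset macro, the CH_STATE_MASK field mask, and the gchan::ch_state member are assumed names:

    #include <linux/bitfield.h>

    static void gpi_process_ch_ctrl_irq_sketch(struct gpii *gpii)
    {
            u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii->gpii_id);
            unsigned long ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
            u32 chid;

            /* Write-one-to-clear ack of every pending channel bit (line 772). */
            gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);

            for_each_set_bit(chid, &ch_irq, MAX_CHANNELS_PER_GPII) {
                    struct gchan *gchan = &gpii->gchan[chid];
                    u32 state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
                                             CNTXT_0_CONFIG);

                    /* CH_STATE_MASK stands in for the CHSTATE field mask. */
                    gchan->ch_state = FIELD_GET(CH_STATE_MASK, state);
            }

            /* Wake whoever issued the channel command (line 798). */
            complete_all(&gpii->cmd_completion);
    }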
803 static void gpi_process_gen_err_irq(struct gpii *gpii)
805 u32 gpii_id = gpii->gpii_id;
807 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
810 dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);
814 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
818 static void gpi_process_glob_err_irq(struct gpii *gpii)
820 u32 gpii_id = gpii->gpii_id;
822 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
825 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
829 dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
834 gpi_write_reg(gpii, gpii->regs + offset, 0);
837 /* gpii interrupt handler */
840 struct gpii *gpii = data;
841 u32 gpii_id = gpii->gpii_id;
845 read_lock_irqsave(&gpii->pm_lock, flags);
851 if (!REG_ACCESS_VALID(gpii->pm_state)) {
852 dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
853 TO_GPI_PM_STR(gpii->pm_state));
857 offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
858 type = gpi_read_reg(gpii, gpii->regs + offset);
861 /* global gpii error */
863 gpi_process_glob_err_irq(gpii);
869 gpi_process_ieob(gpii);
878 dev_dbg(gpii->gpi_dev->dev,
881 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
885 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
886 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
895 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
898 gpii->ev_state = ev_state;
899 dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
900 TO_GPI_EV_STATE_STR(gpii->ev_state));
901 complete_all(&gpii->cmd_completion);
907 dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
908 gpi_process_ch_ctrl_irq(gpii);
913 dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
914 gpi_process_gen_err_irq(gpii);
918 offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
919 type = gpi_read_reg(gpii, gpii->regs + offset);
923 read_unlock_irqrestore(&gpii->pm_lock, flags);
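The handler at source lines 837 to 923 loops on the TYPE_IRQ register under the pm read lock, dispatching each category and re-reading until the register is clear. A skeleton of that structure; TYPE_IRQ bit names other than the IEOB one (line 648) are assumed:

    #include <linux/interrupt.h>

    static irqreturn_t gpi_handle_irq_sketch(int irq, void *data)
    {
            struct gpii *gpii = data;
            u32 offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
            unsigned long flags;
            u32 type;

            read_lock_irqsave(&gpii->pm_lock, flags);

            /* Registers may no longer be accessible once torn down (line 851). */
            if (!REG_ACCESS_VALID(gpii->pm_state))
                    goto done;

            type = gpi_read_reg(gpii, gpii->regs + offset);
            while (type) {
                    if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB)
                            gpi_process_glob_err_irq(gpii);
                    if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB)
                            gpi_process_ieob(gpii);
                    if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL)
                            gpi_process_ch_ctrl_irq(gpii);
                    /* EV_CTRL state updates and the general-error fallback
                     * (lines 878 to 914) are omitted from this sketch.
                     */
                    type = gpi_read_reg(gpii, gpii->regs + offset);
            }
    done:
            read_unlock_irqrestore(&gpii->pm_lock, flags);
            return IRQ_HANDLED;
    }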
932 struct gpii *gpii = gchan->gpii;
945 dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
957 dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
959 dev_dbg(gpii->gpi_dev->dev,
964 dev_dbg(gpii->gpi_dev->dev,
986 if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
1014 struct gpii *gpii = gchan->gpii;
1025 dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
1036 dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
1038 dev_err(gpii->gpi_dev->dev,
1061 if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
1069 dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
1072 dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
1076 dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);
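Source lines 1014 to 1076 turn a transfer-completion event into a dmaengine result, distinguishing EOT success from error codes and reporting residue. A condensed sketch of how such an event could be surfaced through the cookie/callback helpers the driver's includes provide; the helper name and the residue argument are simplifications:

    #include "../dmaengine.h"   /* dma_cookie_complete(), callback helpers */
    #include "../virt-dma.h"

    static void gpi_report_completion_sketch(struct virt_dma_desc *vd,
                                             u32 code, u32 residue)
    {
            struct dmaengine_result result;

            /* EOT means the transfer finished cleanly; anything else is
             * surfaced as an aborted transaction (cf. lines 1061 to 1076).
             */
            result.result = (code == MSM_GPI_TCE_EOT) ? DMA_TRANS_NOERROR
                                                      : DMA_TRANS_ABORTED;
            result.residue = residue;   /* bytes left untransferred */

            dma_cookie_complete(&vd->tx);
            dmaengine_desc_get_callback_invoke(&vd->tx, &result);
    }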
1090 static void gpi_process_events(struct gpii *gpii)
1092 struct gpi_ring *ev_ring = &gpii->ev_ring;
1099 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1108 dev_dbg(gpii->gpi_dev->dev,
1116 gchan = &gpii->gchan[chid];
1121 dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
1124 gchan = &gpii->gchan[chid];
1129 dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
1132 dev_dbg(gpii->gpi_dev->dev,
1137 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1140 gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
1142 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
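The event loop at source lines 1090 to 1143 chases the hardware read pointer: process everything up to it, ring the event doorbell, clear IEOB, then re-read the pointer to catch events that landed in the meantime. Structurally, with to_virtual() as an assumed DMA-address-to-vaddr translation and two assumed helpers for dispatch and recycling:

    static void gpi_process_events_sketch(struct gpii *gpii)
    {
            struct gpi_ring *ev_ring = &gpii->ev_ring;
            u32 cntxt_rp;
            void *rp;

            cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
            rp = to_virtual(ev_ring, cntxt_rp);         /* assumed helper */

            do {
                    while (rp != ev_ring->rp) {
                            union gpi_event *event = ev_ring->rp;

                            gpi_dispatch_event(gpii, event);      /* assumed */
                            gpi_ring_recycle_ev_element(ev_ring); /* assumed */
                    }

                    /* Return the consumed elements to the hardware ... */
                    gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
                    /* ... clear IEOB (line 1140), then re-check the pointer. */
                    gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));

                    cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
                    rp = to_virtual(ev_ring, cntxt_rp);
            } while (rp != ev_ring->rp);
    }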
1151 struct gpii *gpii = (struct gpii *)data;
1153 read_lock(&gpii->pm_lock);
1154 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1155 read_unlock(&gpii->pm_lock);
1156 dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
1157 TO_GPI_PM_STR(gpii->pm_state));
1162 gpi_process_events(gpii);
1165 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1166 read_unlock(&gpii->pm_lock);
1172 struct gpii *gpii = gchan->gpii;
1173 struct gpi_ring *ev_ring = &gpii->ev_ring;
1177 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1190 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1198 struct gpii *gpii = gchan->gpii;
1204 ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
1206 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1219 write_lock_irq(&gpii->pm_lock);
1226 write_unlock_irq(&gpii->pm_lock);
1234 struct gpii *gpii = gchan->gpii;
1237 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
1239 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1244 /* gpii CH is active now */
1245 write_lock_irq(&gpii->pm_lock);
1247 write_unlock_irq(&gpii->pm_lock);
1254 struct gpii *gpii = gchan->gpii;
1257 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
1259 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1270 struct gpii *gpii = chan->gpii;
1273 u32 id = gpii->gpii_id;
1278 ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
1280 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1286 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
1288 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
1289 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
1290 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
1292 gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1294 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
1296 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
1297 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
1298 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
1299 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);
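After GPI_CH_CMD_ALLOCATE succeeds, source lines 1286 to 1299 program the channel context (ring geometry, read pointer, scratch, QOS); the event ring at lines 1321 to 1327 is programmed the same way. A hypothetical helper condensing the shared geometry writes; the MSB base value is taken by analogy with line 1325, and the RP MSB operand falls on unmatched lines (1293/1327), so zero is an assumption:

    /* Hypothetical helper for the ring-context programming shared by the
     * channel and event-ring setup paths.
     */
    static void gpi_program_ring_ctxt(struct gpii *gpii, void __iomem *base,
                                      void __iomem *db_reg, struct gpi_ring *ring)
    {
            gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
            gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB,
                          lower_32_bits(ring->phys_addr));
            gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB,
                          upper_32_bits(ring->phys_addr));
            /* Zero the RP MSB through the doorbell register pair; the real
             * operand is not visible in this listing.
             */
            gpi_write_reg(gpii, db_reg + CNTXT_5_RING_RP_MSB -
                          CNTXT_4_RING_RP_LSB, 0);
    }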
1307 static int gpi_alloc_ev_chan(struct gpii *gpii)
1309 struct gpi_ring *ring = &gpii->ev_ring;
1310 void __iomem *base = gpii->ev_cntxt_base_reg;
1313 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1315 dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
1321 gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
1323 gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
1324 gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
1325 gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
1326 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1328 gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1329 gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
1330 gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
1331 gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1332 gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
1333 gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);
1341 /* gpii is active now */
1342 write_lock_irq(&gpii->pm_lock);
1343 gpii->pm_state = ACTIVE_STATE;
1344 write_unlock_irq(&gpii->pm_lock);
1345 gpi_write_ev_db(gpii, ring, ring->wp);
1398 struct gpii *gpii)
1400 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1407 u32 el_size, struct gpii *gpii)
1418 dev_dbg(gpii->gpi_dev->dev,
1423 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1427 dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
1446 dev_dbg(gpii->gpi_dev->dev,
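Source lines 1398 to 1446 imply a ring allocator that over-allocates (pre_aligned/alloc_size) so the ring base can be aligned to its own power-of-two length, which the hardware context registers appear to require. A sketch under those assumptions; the struct gpi_ring field names follow the fragments above, but the exact layout and rounding are inferred:

    #include <linux/dma-mapping.h>
    #include <linux/log2.h>

    static int gpi_alloc_ring_sketch(struct gpi_ring *ring, u32 elements,
                                     u32 el_size, struct gpii *gpii)
    {
            size_t len = roundup_pow_of_two(elements * el_size);

            /* Over-allocate by len - 1 so the base can be aligned to len. */
            ring->alloc_size = len + (len - 1);
            ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
                                                   ring->alloc_size,
                                                   &ring->dma_handle, GFP_KERNEL);
            if (!ring->pre_aligned)
                    return -ENOMEM;

            /* Align the DMA address, then offset the vaddr to match. */
            ring->phys_addr = ALIGN(ring->dma_handle, len);
            ring->base = ring->pre_aligned +
                         (ring->phys_addr - ring->dma_handle);
            ring->len = len;
            ring->el_size = el_size;
            ring->elements = len / el_size;
            return 0;
    }

The matching teardown at line 1400 frees alloc_size from pre_aligned/dma_handle, which is consistent with this over-allocate-and-align scheme.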
1455 static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
1464 dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
1477 struct gpii *gpii = gchan->gpii;
1481 mutex_lock(&gpii->ctrl_lock);
1492 gchan = &gpii->gchan[i];
1495 write_lock_irq(&gpii->pm_lock);
1497 write_unlock_irq(&gpii->pm_lock);
1505 gchan = &gpii->gchan[i];
1509 dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret);
1516 dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret);
1523 gchan = &gpii->gchan[i];
1527 dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret);
1533 mutex_unlock(&gpii->ctrl_lock);
1541 struct gpii *gpii = gchan->gpii;
1544 mutex_lock(&gpii->ctrl_lock);
1547 * pause/resume are per gpii not per channel, so
1550 if (gpii->pm_state == PAUSE_STATE) {
1551 dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n");
1552 mutex_unlock(&gpii->ctrl_lock);
1558 ret = gpi_stop_chan(&gpii->gchan[i]);
1560 mutex_unlock(&gpii->ctrl_lock);
1565 disable_irq(gpii->irq);
1568 tasklet_kill(&gpii->ev_task);
1570 write_lock_irq(&gpii->pm_lock);
1571 gpii->pm_state = PAUSE_STATE;
1572 write_unlock_irq(&gpii->pm_lock);
1573 mutex_unlock(&gpii->ctrl_lock);
1582 struct gpii *gpii = gchan->gpii;
1585 mutex_lock(&gpii->ctrl_lock);
1586 if (gpii->pm_state == ACTIVE_STATE) {
1587 dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
1588 mutex_unlock(&gpii->ctrl_lock);
1592 enable_irq(gpii->irq);
1596 ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
1598 dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
1599 mutex_unlock(&gpii->ctrl_lock);
1604 write_lock_irq(&gpii->pm_lock);
1605 gpii->pm_state = ACTIVE_STATE;
1606 write_unlock_irq(&gpii->pm_lock);
1607 mutex_unlock(&gpii->ctrl_lock);
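The pause path at source lines 1541 to 1573 quiesces in a strict order: stop both channels, disable the gpii IRQ, kill the tasklet, and only then flip pm_state under the write lock, so no top or bottom half can run against a paused instance. Resume (lines 1582 to 1607) mirrors it. A condensed sketch:

    static int gpi_pause_sketch(struct gpii *gpii)
    {
            int i, ret;

            mutex_lock(&gpii->ctrl_lock);
            if (gpii->pm_state == PAUSE_STATE) {
                    mutex_unlock(&gpii->ctrl_lock);
                    return 0;       /* already paused (line 1550) */
            }

            for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
                    ret = gpi_stop_chan(&gpii->gchan[i]);
                    if (ret) {
                            mutex_unlock(&gpii->ctrl_lock);
                            return ret;
                    }
            }

            disable_irq(gpii->irq);         /* no new top halves */
            tasklet_kill(&gpii->ev_task);   /* drain the bottom half */

            write_lock_irq(&gpii->pm_lock);
            gpii->pm_state = PAUSE_STATE;
            write_unlock_irq(&gpii->pm_lock);
            mutex_unlock(&gpii->ctrl_lock);
            return 0;
    }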
1641 struct device *dev = chan->gpii->gpi_dev->dev;
1716 struct device *dev = chan->gpii->gpi_dev->dev;
1796 struct gpii *gpii = gchan->gpii;
1797 struct device *dev = gpii->gpi_dev->dev;
1804 gpii->ieob_set = false;
1806 dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction);
1856 struct gpii *gpii = gchan->gpii;
1864 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
1875 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1882 gpi_queue_xfer(gpii, gchan, tre, &wp);
1887 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1892 struct gpii *gpii = gchan->gpii;
1893 const int ev_factor = gpii->gpi_dev->ev_factor;
1901 if (gpii->gchan[i].pm_state != CONFIG_STATE)
1905 if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) {
1906 dev_err(gpii->gpi_dev->dev, "protocol did not match protocol %u != %u\n",
1907 gpii->gchan[0].protocol, gpii->gchan[1].protocol);
1914 ret = gpi_alloc_ring(&gpii->ev_ring, elements,
1915 sizeof(union gpi_event), gpii);
1920 write_lock_irq(&gpii->pm_lock);
1921 gpii->pm_state = PREPARE_HARDWARE;
1922 write_unlock_irq(&gpii->pm_lock);
1923 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
1925 dev_err(gpii->gpi_dev->dev, "error config. interrupts, ret:%d\n", ret);
1930 ret = gpi_alloc_ev_chan(gpii);
1932 dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
1938 ret = gpi_alloc_chan(&gpii->gchan[i], true);
1940 dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
1947 ret = gpi_start_chan(&gpii->gchan[i]);
1949 dev_err(gpii->gpi_dev->dev, "Error start chan:%d\n", ret);
1957 gpi_stop_chan(&gpii->gchan[i]);
1958 gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
1965 gpi_disable_interrupts(gpii);
1967 gpi_free_ring(&gpii->ev_ring, gpii);
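gpi_ch_init (source lines 1892 to 1967) only brings up the shared side once both channels reach CONFIG_STATE and agree on a protocol, then proceeds event-ring alloc, PREPARE_HARDWARE, IRQ config, event-ring command, and per-channel alloc and start, unwinding in reverse on failure. A skeleton of that ordering; the label names and the exact unwind steps are assumptions fitted to lines 1957 to 1967:

    static int gpi_ch_init_sketch(struct gpii *gpii, u32 elements)
    {
            int i, ret;

            ret = gpi_alloc_ring(&gpii->ev_ring, elements,
                                 sizeof(union gpi_event), gpii);
            if (ret)
                    return ret;

            write_lock_irq(&gpii->pm_lock);
            gpii->pm_state = PREPARE_HARDWARE;
            write_unlock_irq(&gpii->pm_lock);

            ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
            if (ret)
                    goto error_config_int;

            ret = gpi_alloc_ev_chan(gpii);          /* GPI_EV_CMD_ALLOCATE */
            if (ret)
                    goto error_alloc_ev_ring;

            for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
                    ret = gpi_alloc_chan(&gpii->gchan[i], true);
                    if (ret)
                            goto error_alloc_chan;
            }

            for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
                    ret = gpi_start_chan(&gpii->gchan[i]);
                    if (ret)
                            goto error_start_chan;
            }
            return 0;

    error_start_chan:
            for (i = i - 1; i >= 0; i--) {
                    gpi_stop_chan(&gpii->gchan[i]);
                    gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET);
            }
    error_alloc_chan:
            gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
    error_alloc_ev_ring:
            gpi_disable_interrupts(gpii);
    error_config_int:
            gpi_free_ring(&gpii->ev_ring, gpii);
            return ret;
    }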
1976 struct gpii *gpii = gchan->gpii;
1980 mutex_lock(&gpii->ctrl_lock);
1985 write_lock_irq(&gpii->pm_lock);
1987 write_unlock_irq(&gpii->pm_lock);
1993 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
1995 dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
2001 gpi_free_ring(&gchan->ch_ring, gpii);
2005 write_lock_irq(&gpii->pm_lock);
2007 write_unlock_irq(&gpii->pm_lock);
2011 if (gpii->gchan[i].ch_ring.configured)
2015 cur_state = gpii->pm_state;
2016 write_lock_irq(&gpii->pm_lock);
2017 gpii->pm_state = PREPARE_TERMINATE;
2018 write_unlock_irq(&gpii->pm_lock);
2021 tasklet_kill(&gpii->ev_task);
2025 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2027 gpi_free_ring(&gpii->ev_ring, gpii);
2031 gpi_disable_interrupts(gpii);
2034 write_lock_irq(&gpii->pm_lock);
2035 gpii->pm_state = DISABLE_STATE;
2036 write_unlock_irq(&gpii->pm_lock);
2039 mutex_unlock(&gpii->ctrl_lock);
2046 struct gpii *gpii = gchan->gpii;
2049 mutex_lock(&gpii->ctrl_lock);
2053 sizeof(struct gpi_tre), gpii);
2059 mutex_unlock(&gpii->ctrl_lock);
2063 mutex_unlock(&gpii->ctrl_lock);
2071 unsigned int gpii;
2074 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2075 if (!((1 << gpii) & gpi_dev->gpii_mask))
2078 tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2079 rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2082 return gpii;
2084 return gpii;
2087 /* no channels configured with same seid, return next avail gpii */
2088 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2089 if (!((1 << gpii) & gpi_dev->gpii_mask))
2092 tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2093 rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2095 /* check if gpii is configured */
2100 /* found a free gpii */
2101 return gpii;
2104 /* no gpii instance available to use */
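The allocation policy at source lines 2071 to 2104 does two passes over gpii_mask: first prefer an instance whose channels already use the requesting SEID, then fall back to any instance with both channels unconfigured. A sketch of that policy; using ch_ring.configured (line 2011) as the "is configured" test and the error code are assumptions:

    static int gpi_find_avail_gpii_sketch(struct gpi_dev *gpi_dev, u32 seid)
    {
            struct gchan *tx, *rx;
            unsigned int gpii;

            /* Pass 1: reuse a gpii already serving this SEID. */
            for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
                    if (!((1 << gpii) & gpi_dev->gpii_mask))
                            continue;
                    tx = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
                    rx = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
                    if ((tx->ch_ring.configured && tx->seid == seid) ||
                        (rx->ch_ring.configured && rx->seid == seid))
                            return gpii;
            }

            /* Pass 2: any gpii with both channels still unconfigured. */
            for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
                    if (!((1 << gpii) & gpi_dev->gpii_mask))
                            continue;
                    tx = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
                    rx = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
                    if (!tx->ch_ring.configured && !rx->ch_ring.configured)
                            return gpii;
            }

            return -EIO;    /* no gpii instance available; code illustrative */
    }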
2114 int gpii;
2118 dev_err(gpi_dev->dev, "gpii require minimum 2 args, client passed:%d args\n",
2125 dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
2131 /* find next available gpii to use */
2132 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2133 if (gpii < 0) {
2134 dev_err(gpi_dev->dev, "no available gpii instances\n");
2138 gchan = &gpi_dev->gpiis[gpii].gchan[chid];
2140 dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
2141 gpii, chid, gchan->seid);
2171 dev_err(gpi_dev->dev, "missing 'max-no-gpii' DT node\n");
2178 dev_err(gpi_dev->dev, "missing 'gpii-mask' DT node\n");
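The two error strings above imply that probe reads 'max-no-gpii' and 'gpii-mask' from the device tree into the fields at lines 473 and 474. A sketch with of_property_read_u32(); the helper name is hypothetical and the property names are taken from those strings:

    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int gpi_parse_dt_sketch(struct platform_device *pdev,
                                   struct gpi_dev *gpi_dev)
    {
            struct device_node *np = pdev->dev.of_node;
            int ret;

            ret = of_property_read_u32(np, "max-no-gpii", &gpi_dev->max_gpii);
            if (ret) {
                    dev_err(gpi_dev->dev, "missing 'max-no-gpii' DT node\n");
                    return ret;
            }

            ret = of_property_read_u32(np, "gpii-mask", &gpi_dev->gpii_mask);
            if (ret) {
                    dev_err(gpi_dev->dev, "missing 'gpii-mask' DT node\n");
                    return ret;
            }

            return 0;
    }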
2198 /* setup all the supported gpii */
2201 struct gpii *gpii = &gpi_dev->gpiis[i];
2208 gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2209 gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2210 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
2211 gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
2212 gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2218 gpii->irq = ret;
2222 struct gchan *gchan = &gpii->gchan[chan];
2235 gchan->gpii = gpii;
2238 mutex_init(&gpii->ctrl_lock);
2239 rwlock_init(&gpii->pm_lock);
2240 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2241 (unsigned long)gpii);
2242 init_completion(&gpii->cmd_completion);
2243 gpii->gpii_id = i;
2244 gpii->regs = gpi_dev->ee_base;
2245 gpii->gpi_dev = gpi_dev;
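The probe loop at source lines 2198 to 2245 precomputes each instance's register shortcuts from the shared ee_base and initializes the software state used throughout this listing. Reduced to its essentials, with the mask check assumed from the pattern at line 2075 and the per-channel gchan setup elided:

    for (i = 0; i < gpi_dev->max_gpii; i++) {
            struct gpii *gpii = &gpi_dev->gpiis[i];

            if (!((1 << i) & gpi_dev->gpii_mask))
                    continue;

            /* event-ring context, doorbell, RP mirror, command, IEOB clear */
            gpii->ev_cntxt_base_reg = gpi_dev->ee_base +
                    GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
            gpii->ev_cntxt_db_reg = gpi_dev->ee_base +
                    GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
            gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
                    CNTXT_4_RING_RP_LSB;
            gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
            gpii->ieob_clr_reg = gpi_dev->ee_base +
                    GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);

            /* The per-instance IRQ stored at line 2218 comes from a
             * platform lookup whose call is not matched by this search.
             */

            /* software state: locks, tasklet, command completion */
            mutex_init(&gpii->ctrl_lock);
            rwlock_init(&gpii->pm_lock);
            tasklet_init(&gpii->ev_task, gpi_ev_tasklet, (unsigned long)gpii);
            init_completion(&gpii->cmd_completion);
            gpii->gpii_id = i;
            gpii->regs = gpi_dev->ee_base;
            gpii->gpi_dev = gpi_dev;
    }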