Lines Matching refs:cq_host
35 static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
37 return cq_host->desc_base + (tag * cq_host->slot_sz);
40 static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
42 u8 *desc = get_desc(cq_host, tag);
44 return desc + cq_host->task_desc_len;
47 static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
49 return cq_host->trans_desc_dma_base +
50 (cq_host->mmc->max_segs * tag *
51 cq_host->trans_desc_len);
54 static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
56 return cq_host->trans_desc_base +
57 (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
60 static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
65 link_temp = get_link_desc(cq_host, tag);
66 trans_temp = get_trans_desc_dma(cq_host, tag);
68 memset(link_temp, 0, cq_host->link_desc_len);
69 if (cq_host->link_desc_len > 8)
72 if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
79 if (cq_host->dma64) {
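setup_trans_desc() zeroes the slot's link descriptor and, except for the DCMD slot, points it at that tag's chunk of transfer descriptors. A hedged sketch of how the DMA address could be stored after the attribute bytes; the helper name, the 4-byte attribute offset, and the use of memcpy (instead of the kernel's cpu_to_le32/64 stores) are illustrative assumptions:

	#include <stdint.h>
	#include <string.h>

	/*
	 * Hypothetical helper: write the transfer-descriptor DMA address into a
	 * link descriptor whose attribute word occupies the first 4 bytes.  The
	 * address is 64-bit when the controller runs in 64-bit DMA mode.
	 */
	void link_desc_set_addr(uint8_t *link_desc, uint64_t dma_addr, int dma64)
	{
		if (dma64) {
			memcpy(link_desc + 4, &dma_addr, sizeof(uint64_t));
		} else {
			uint32_t lo = (uint32_t)dma_addr;

			memcpy(link_desc + 4, &lo, sizeof(uint32_t));
		}
	}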
90 static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
92 cqhci_writel(cq_host, set, CQHCI_ISTE);
93 cqhci_writel(cq_host, set, CQHCI_ISGE);
101 static void cqhci_dumpregs(struct cqhci_host *cq_host)
103 struct mmc_host *mmc = cq_host->mmc;
108 cqhci_readl(cq_host, CQHCI_CAP),
109 cqhci_readl(cq_host, CQHCI_VER));
111 cqhci_readl(cq_host, CQHCI_CFG),
112 cqhci_readl(cq_host, CQHCI_CTL));
114 cqhci_readl(cq_host, CQHCI_IS),
115 cqhci_readl(cq_host, CQHCI_ISTE));
117 cqhci_readl(cq_host, CQHCI_ISGE),
118 cqhci_readl(cq_host, CQHCI_IC));
120 cqhci_readl(cq_host, CQHCI_TDLBA),
121 cqhci_readl(cq_host, CQHCI_TDLBAU));
123 cqhci_readl(cq_host, CQHCI_TDBR),
124 cqhci_readl(cq_host, CQHCI_TCN));
126 cqhci_readl(cq_host, CQHCI_DQS),
127 cqhci_readl(cq_host, CQHCI_DPT));
129 cqhci_readl(cq_host, CQHCI_TCLR),
130 cqhci_readl(cq_host, CQHCI_SSC1));
132 cqhci_readl(cq_host, CQHCI_SSC2),
133 cqhci_readl(cq_host, CQHCI_CRDCT));
135 cqhci_readl(cq_host, CQHCI_RMEM),
136 cqhci_readl(cq_host, CQHCI_TERRI));
138 cqhci_readl(cq_host, CQHCI_CRI),
139 cqhci_readl(cq_host, CQHCI_CRA));
141 if (cq_host->ops->dumpregs)
142 cq_host->ops->dumpregs(mmc);
162 static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
167 if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
168 cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
170 cq_host->task_desc_len = 16;
172 cq_host->task_desc_len = 8;
180 if (cq_host->dma64) {
181 if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
182 cq_host->trans_desc_len = 12;
184 cq_host->trans_desc_len = 16;
185 cq_host->link_desc_len = 16;
187 cq_host->trans_desc_len = 8;
188 cq_host->link_desc_len = 8;
192 cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
194 cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
196 cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
197 cq_host->mmc->cqe_qdepth;
200 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
201 cq_host->slot_sz);
209 cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
210 cq_host->desc_size,
211 &cq_host->desc_dma_base,
213 if (!cq_host->desc_base)
216 cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
217 cq_host->data_size,
218 &cq_host->trans_desc_dma_base,
220 if (!cq_host->trans_desc_base) {
221 dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
222 cq_host->desc_base,
223 cq_host->desc_dma_base);
224 cq_host->desc_base = NULL;
225 cq_host->desc_dma_base = 0;
230 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
231 (unsigned long long)cq_host->desc_dma_base,
232 (unsigned long long)cq_host->trans_desc_dma_base);
234 for (; i < (cq_host->num_slots); i++)
235 setup_trans_desc(cq_host, i);
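cqhci_host_alloc_tdl() sizes the two coherent DMA buffers from the descriptor lengths chosen above: the descriptor list is slot_sz bytes per slot, and the transfer-descriptor buffer is trans_desc_len * max_segs bytes per queue-depth entry. A standalone sketch of that sizing; the numeric values are illustrative (a 64-bit controller without quirks), not mandated:

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t task_desc_len = 8;	/* 16 if CQHCI_TASK_DESC_SZ_128 */
		size_t link_desc_len = 16;	/* 8 on a 32-bit DMA controller */
		size_t trans_desc_len = 16;	/* 12 with the short-descriptor quirk */
		unsigned int num_slots = 32;	/* data tags plus the DCMD slot */
		unsigned int max_segs = 128;	/* host-dependent */
		unsigned int cqe_qdepth = 32;	/* host-dependent */

		size_t slot_sz = task_desc_len + link_desc_len;
		size_t desc_size = slot_sz * num_slots;
		size_t data_size = trans_desc_len * max_segs * cqe_qdepth;

		printf("slot_sz=%zu desc_size=%zu data_size=%zu\n",
		       slot_sz, desc_size, data_size);
		return 0;
	}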
240 static void __cqhci_enable(struct cqhci_host *cq_host)
242 struct mmc_host *mmc = cq_host->mmc;
245 cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
250 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
258 if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
261 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
263 cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
265 cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
268 cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
270 cqhci_set_irqs(cq_host, 0);
274 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
276 if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
277 cqhci_writel(cq_host, 0, CQHCI_CTL);
281 if (cq_host->ops->enable)
282 cq_host->ops->enable(mmc);
287 cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
289 cq_host->activated = true;
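__cqhci_enable() programs the base address of the task descriptor list, brings the engine out of halt, and only then unmasks interrupts. A hedged sketch of the core register sequence, assuming the cqhci_readl()/cqhci_writel() accessors and register macros from cqhci.h:

	/* Point the controller at the task descriptor list (64-bit split). */
	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	/* Leave halt state if the controller is currently halted. */
	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	/* Unmask the command-queue interrupt sources. */
	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);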
292 static void __cqhci_disable(struct cqhci_host *cq_host)
296 cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
298 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
300 cq_host->mmc->cqe_on = false;
302 cq_host->activated = false;
307 struct cqhci_host *cq_host = mmc->cqe_private;
309 if (cq_host->enabled && cq_host->activated)
310 __cqhci_disable(cq_host);
325 struct cqhci_host *cq_host = mmc->cqe_private;
331 if (cq_host->enabled)
334 cq_host->rca = card->rca;
336 err = cqhci_host_alloc_tdl(cq_host);
343 __cqhci_enable(cq_host);
345 cq_host->enabled = true;
348 cqhci_dumpregs(cq_host);
356 static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
358 return cqhci_readl(cq_host, CQHCI_CTL);
363 struct cqhci_host *cq_host = mmc->cqe_private;
367 if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
370 if (cq_host->ops->disable)
371 cq_host->ops->disable(mmc, false);
373 cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
375 err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
382 if (cq_host->ops->post_disable)
383 cq_host->ops->post_disable(mmc);
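cqhci_off() halts the engine by writing CQHCI_HALT to CTL and then polling until the bit reads back, using readx_poll_timeout() with cqhci_read_ctl() as the accessor. A hedged sketch of that polling idiom; the CQHCI_OFF_TIMEOUT value itself is not shown in this listing:

	u32 reg;
	int err;

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	/* Poll CTL until the HALT bit is observed or the timeout expires. */
	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0) {
		/* the engine did not halt in time; report and carry on */
	}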
390 struct cqhci_host *cq_host = mmc->cqe_private;
392 if (!cq_host->enabled)
397 __cqhci_disable(cq_host);
399 dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
400 cq_host->trans_desc_base,
401 cq_host->trans_desc_dma_base);
403 dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
404 cq_host->desc_base,
405 cq_host->desc_dma_base);
407 cq_host->trans_desc_base = NULL;
408 cq_host->desc_base = NULL;
410 cq_host->enabled = false;
478 struct cqhci_host *cq_host, int tag)
483 bool dma64 = cq_host->dma64;
495 desc = get_trans_desc(cq_host, tag);
504 desc += cq_host->trans_desc_len;
518 struct cqhci_host *cq_host = mmc->cqe_private;
534 task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
535 memset(task_desc, 0, cq_host->task_desc_len);
543 if (cq_host->ops->update_dcmd_desc)
544 cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
576 struct cqhci_host *cq_host = mmc->cqe_private;
579 if (!cq_host->enabled) {
585 if (!cq_host->activated)
586 __cqhci_enable(cq_host);
589 if (cq_host->ops->pre_enable)
590 cq_host->ops->pre_enable(mmc);
592 cqhci_writel(cq_host, 0, CQHCI_CTL);
595 if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
599 if (cq_host->ops->enable)
600 cq_host->ops->enable(mmc);
604 task_desc = (__le64 __force *)get_desc(cq_host, tag);
607 err = cqhci_prep_tran_desc(mrq, cq_host, tag);
617 spin_lock_irqsave(&cq_host->lock, flags);
619 if (cq_host->recovery_halt) {
624 cq_host->slot[tag].mrq = mrq;
625 cq_host->slot[tag].flags = 0;
627 cq_host->qcnt += 1;
630 cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
631 if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
635 spin_unlock_irqrestore(&cq_host->lock, flags);
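The issue path in cqhci_request() records the request in its slot, bumps the queued count, and rings the task doorbell, all under cq_host->lock. A condensed sketch of that critical section as listed above; the readback guards against a doorbell write that did not latch:

	unsigned long flags;

	spin_lock_irqsave(&cq_host->lock, flags);

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;
	cq_host->qcnt += 1;

	/* Ring the doorbell for this tag and verify that the bit latched. */
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) {
		/* doorbell bit not set; the request was not queued */
	}

	spin_unlock_irqrestore(&cq_host->lock, flags);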
646 struct cqhci_host *cq_host = mmc->cqe_private;
648 if (!cq_host->recovery_halt) {
649 cq_host->recovery_halt = true;
651 wake_up(&cq_host->wait_queue);
674 struct cqhci_host *cq_host = mmc->cqe_private;
679 spin_lock(&cq_host->lock);
681 terri = cqhci_readl(cq_host, CQHCI_TERRI);
687 if (cq_host->recovery_halt)
690 if (!cq_host->qcnt) {
699 slot = &cq_host->slot[tag];
708 slot = &cq_host->slot[tag];
715 if (!cq_host->recovery_halt) {
721 slot = &cq_host->slot[tag];
731 spin_unlock(&cq_host->lock);
736 struct cqhci_host *cq_host = mmc->cqe_private;
737 struct cqhci_slot *slot = &cq_host->slot[tag];
748 if (cq_host->recovery_halt) {
755 cq_host->qcnt -= 1;
773 struct cqhci_host *cq_host = mmc->cqe_private;
775 status = cqhci_readl(cq_host, CQHCI_IS);
776 cqhci_writel(cq_host, status, CQHCI_IS);
785 comp_status = cqhci_readl(cq_host, CQHCI_TCN);
786 cqhci_writel(cq_host, comp_status, CQHCI_TCN);
790 spin_lock(&cq_host->lock);
792 for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
799 if (cq_host->waiting_for_idle && !cq_host->qcnt) {
800 cq_host->waiting_for_idle = false;
801 wake_up(&cq_host->wait_queue);
804 spin_unlock(&cq_host->lock);
808 wake_up(&cq_host->wait_queue);
811 wake_up(&cq_host->wait_queue);
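In cqhci_irq(), task completions arrive as a bitmap: the handler reads CQHCI_TCN, writes the value back to clear it, then walks the set bits and finishes the request in each corresponding slot before waking any idle waiter. A condensed sketch of that loop as listed above, with the per-tag completion abbreviated to a comment:

	unsigned long comp_status;
	unsigned long tag;

	comp_status = cqhci_readl(cq_host, CQHCI_TCN);
	cqhci_writel(cq_host, comp_status, CQHCI_TCN);	/* write-to-clear */

	spin_lock(&cq_host->lock);

	for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
		/* complete cq_host->slot[tag].mrq and decrement qcnt */
	}

	/* Wake anyone waiting for the queue to drain. */
	if (cq_host->waiting_for_idle && !cq_host->qcnt) {
		cq_host->waiting_for_idle = false;
		wake_up(&cq_host->wait_queue);
	}

	spin_unlock(&cq_host->lock);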
817 static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
822 spin_lock_irqsave(&cq_host->lock, flags);
823 is_idle = !cq_host->qcnt || cq_host->recovery_halt;
824 *ret = cq_host->recovery_halt ? -EBUSY : 0;
825 cq_host->waiting_for_idle = !is_idle;
826 spin_unlock_irqrestore(&cq_host->lock, flags);
833 struct cqhci_host *cq_host = mmc->cqe_private;
836 wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
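cqhci_wait_for_idle() sleeps on the host wait queue until cqhci_is_idle() reports that no tasks are outstanding (or that a recovery halt is in progress). A hedged sketch of the predicate-plus-wait_event() pairing shown above; the IRQ handler provides the wake-up:

	int ret = 0;

	/* Sleep until the queue drains or recovery is requested. */
	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;	/* -EBUSY if a recovery halt cut the wait short */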
844 struct cqhci_host *cq_host = mmc->cqe_private;
846 struct cqhci_slot *slot = &cq_host->slot[tag];
850 spin_lock_irqsave(&cq_host->lock, flags);
855 *recovery_needed = cq_host->recovery_halt;
857 spin_unlock_irqrestore(&cq_host->lock, flags);
862 cqhci_dumpregs(cq_host);
868 static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
870 return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
875 struct cqhci_host *cq_host = mmc->cqe_private;
879 cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
881 ctl = cqhci_readl(cq_host, CQHCI_CTL);
883 cqhci_writel(cq_host, ctl, CQHCI_CTL);
885 wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
888 cqhci_set_irqs(cq_host, 0);
890 ret = cqhci_tasks_cleared(cq_host);
899 static bool cqhci_halted(struct cqhci_host *cq_host)
901 return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
906 struct cqhci_host *cq_host = mmc->cqe_private;
910 if (cqhci_halted(cq_host))
913 cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
915 ctl = cqhci_readl(cq_host, CQHCI_CTL);
917 cqhci_writel(cq_host, ctl, CQHCI_CTL);
919 wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
922 cqhci_set_irqs(cq_host, 0);
924 ret = cqhci_halted(cq_host);
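cqhci_halt() follows the same pattern as the task-clear path above: unmask only the relevant interrupt (halt acknowledge), set the control bit, wait on the queue until cqhci_halted() observes CQHCI_HALT, then mask interrupts again. A hedged sketch; the timeout value is passed in by the caller in the driver, so CQHCI_HALT_TIMEOUT_MS below is an illustrative stand-in:

	u32 ctl;
	bool halted;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);	/* halt-acknowledge IRQ only */

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(CQHCI_HALT_TIMEOUT_MS));

	cqhci_set_irqs(cq_host, 0);		/* back to masked */

	halted = cqhci_halted(cq_host);		/* true if the halt took effect */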
942 struct cqhci_host *cq_host = mmc->cqe_private;
946 WARN_ON(!cq_host->recovery_halt);
950 if (cq_host->ops->disable)
951 cq_host->ops->disable(mmc, true);
971 static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
973 struct cqhci_slot *slot = &cq_host->slot[tag];
982 cq_host->qcnt -= 1;
992 mmc_cqe_request_done(cq_host->mmc, mrq);
995 static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
999 for (i = 0; i < cq_host->num_slots; i++)
1000 cqhci_recover_mrq(cq_host, i);
1015 struct cqhci_host *cq_host = mmc->cqe_private;
1022 WARN_ON(!cq_host->recovery_halt);
1036 cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1038 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1040 cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1042 cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1049 cqhci_recover_mrqs(cq_host);
1051 WARN_ON(cq_host->qcnt);
1053 spin_lock_irqsave(&cq_host->lock, flags);
1054 cq_host->qcnt = 0;
1055 cq_host->recovery_halt = false;
1057 spin_unlock_irqrestore(&cq_host->lock, flags);
1062 cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1064 cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1083 struct cqhci_host *cq_host;
1094 cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1095 if (!cq_host)
1097 cq_host->mmio = devm_ioremap(&pdev->dev,
1100 if (!cq_host->mmio) {
1106 return cq_host;
1110 static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1112 return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1115 static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1117 u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1122 int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1127 cq_host->dma64 = dma64;
1128 cq_host->mmc = mmc;
1129 cq_host->mmc->cqe_private = cq_host;
1131 cq_host->num_slots = NUM_SLOTS;
1132 cq_host->dcmd_slot = DCMD_SLOT;
1140 cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1141 sizeof(*cq_host->slot), GFP_KERNEL);
1142 if (!cq_host->slot) {
1147 spin_lock_init(&cq_host->lock);
1149 init_completion(&cq_host->halt_comp);
1150 init_waitqueue_head(&cq_host->wait_queue);
1153 mmc_hostname(mmc), cqhci_ver_major(cq_host),
1154 cqhci_ver_minor(cq_host));
1160 mmc_hostname(mmc), cqhci_ver_major(cq_host),
1161 cqhci_ver_minor(cq_host), err);
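cqhci_pltfm_init() maps the CQE register window from the platform device, and cqhci_init() wires the resulting cqhci_host into the mmc_host (slot array, lock, wait queue, version probe). A hedged sketch of how a host driver would typically call this pair during probe; my_cqhci_ops is a hypothetical ops table, and the surrounding sdhci specifics and error paths are omitted:

	struct cqhci_host *cq_host;
	bool dma64 = true;		/* host supports 64-bit descriptors */
	int ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	cq_host->ops = &my_cqhci_ops;	/* hypothetical host ops */

	ret = cqhci_init(cq_host, mmc, dma64);
	if (ret)
		return ret;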