Lines matching defs:host — drivers/mmc/host/cavium.c (Cavium MMC host driver)

44 * The Cavium MMC host hardware assumes that all commands have fixed
47 * response types that are unexpected by the host hardware.
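
File lines 44 and 47 are fragments of the comment above the driver's command/response-type table: the controller assumes fixed, MMC-style command and response types per opcode, and SD-style commands must override them with an XOR value written alongside the command. A minimal sketch of that idea follows; the table contents and helper shape are reconstructions (not quotations), and only the XOR concept is confirmed by the comment.

/* Sketch only: per-opcode types the silicon assumes (values illustrative,
 * the full 64-entry table is not part of the matched lines). */
static const struct { u8 ctype, rtype; } cr_table_sketch[64] = {
	[0] = { 0, 0 },			/* CMD0: no data, no response */
	[2] = { 0, 2 },			/* CMD2: long (R2) response */
	/* ... remaining opcodes elided ... */
};

/* Compute the XOR correction written with the command. */
static void get_cr_mods_sketch(struct mmc_command *cmd,
			       u8 *ctype_xor, u8 *rtype_xor)
{
	u8 want_rtype;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		want_rtype = 0;
		break;
	case MMC_RSP_R2:
		want_rtype = 2;
		break;
	case MMC_RSP_R3:
		want_rtype = 3;
		break;
	default:			/* R1/R1B and friends */
		want_rtype = 1;
		break;
	}

	/* XOR of desired and assumed types yields the per-command fix-up */
	*rtype_xor = want_rtype ^ cr_table_sketch[cmd->opcode & 0x3f].rtype;
	*ctype_xor = 0;			/* ctype handling elided in sketch */
}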
165 static void check_switch_errors(struct cvm_mmc_host *host)
169 emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
171 dev_err(host->dev, "Switch power class error\n");
173 dev_err(host->dev, "Switch hs timing error\n");
175 dev_err(host->dev, "Switch bus width error\n");
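
File lines 165-175 give check_switch_errors() almost verbatim; only the three bit tests fall outside the match. A reconstruction, assuming the MIO_EMM_SWITCH_ERR0/1/2 masks from cavium.h:

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)	/* assumed mask names */
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}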
200 static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
212 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
215 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
219 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
225 check_switch_errors(host);
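
do_switch() (file lines 200-225) writes MIO_EMM_SWITCH twice (lines 212 and 215), which fits the known quirk that mode bits are only latched from bus 0; it then polls MIO_EMM_RSP_STS until the switch completes and re-checks for errors. A sketch under those assumptions; set_bus_id()/get_bus_id(), the SWITCH_VAL mask and the retry budget are inferred context, not matched lines:

static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;		/* retry budget is an assumption */
	int bus_id = get_bus_id(emm_switch);
	u64 rsp_sts;

	/* quirk: mode bits are only latched from bus 0, so program the
	 * switch via bus 0 first, then again with the real bus id */
	if (bus_id) {
		set_bus_id(&emm_switch, 0);
		writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
		set_bus_id(&emm_switch, bus_id);
	}
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the hardware to finish executing the switch */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}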
247 writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
252 struct cvm_mmc_host *host = slot->host;
255 emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
260 wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
261 do_switch(slot->host, emm_switch);
267 writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
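
File line 247 is the watchdog write in the timeout helper; lines 252-267 belong to the bus-reset path, which saves MIO_EMM_WDOG, re-executes the current switch value and then restores the watchdog. A sketch of that sequence; the masking of error/execute bits and the settle delay are assumptions, since those lines did not match:

static void cvm_mmc_reset_bus_sketch(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	/* presumably the ERR/EXE bits are cleared here before re-use */
	wdog = readq(host->base + MIO_EMM_WDOG(host));

	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;

	msleep(20);			/* settle time is an assumption */
	writeq(wdog, host->base + MIO_EMM_WDOG(host));
}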
273 struct cvm_mmc_host *host = slot->host;
277 if (slot->bus_id == host->last_slot)
280 if (host->last_slot >= 0 && host->slot[host->last_slot]) {
281 old_slot = host->slot[host->last_slot];
282 old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
283 old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
286 writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
289 do_switch(host, emm_switch);
293 writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
295 host->last_slot = slot->bus_id;
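
File lines 273-295 outline the slot multiplexing: all slots share one controller, so before touching a different slot the driver caches the outgoing slot's MIO_EMM_SWITCH and MIO_EMM_RCA and restores the incoming slot's state, then reprograms the sample points. A reconstruction; the MIO_EMM_SAMPLE field macros are assumed from cavium.h:

static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_switch, emm_sample;

	if (slot->bus_id == host->last_slot)
		return;		/* already selected, nothing to do */

	/* save the registers of the slot we are leaving */
	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	/* restore the registers of the slot we are entering */
	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}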
298 static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
301 struct sg_mapping_iter *smi = &host->smi;
307 writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
317 dat = readq(host->base + MIO_EMM_BUF_DAT(host));
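
do_read() (file lines 298-317) is the PIO read path: line 307 selects the data buffer the controller filled (the 0x10000 bit enables index auto-increment, dbuf << 6 picks the buffer), and line 317 pulls data out 64 bits at a time. A sketch of the unpack loop; the MSB-first byte order and the bookkeeping around the matched lines are assumptions:

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;   /* started in the read-request path */
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* 0x10000 enables index auto-increment; dbuf selects the buffer */
	writeq(0x10000 | (dbuf << 6), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		/* unpack MSB-first, mirroring the write path */
		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}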
340 static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
348 rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
361 rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
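
set_cmd_response() (file lines 340-361) copies the command response out of MIO_EMM_RSP_LO and, for long R2-style responses, also out of MIO_EMM_RSP_HI. A sketch; the RSP_VAL/RSP_TYPE field names and the type encoding are assumptions based on cavium.h:

static void set_cmd_response(struct cvm_mmc_host *host,
			     struct mmc_request *req, u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;			/* no response captured */

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:				/* short (48-bit) response */
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = req->cmd->resp[2] = req->cmd->resp[3] = 0;
		break;
	case 2:				/* long (136-bit) response */
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}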
373 static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
377 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
381 static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
387 fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
390 dev_err(host->dev, "%u requests still pending\n", count);
396 writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
397 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
401 static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
403 if (host->use_sg && data->sg_len > 1)
404 return finish_dma_sg(host, data);
406 return finish_dma_single(host, data);
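
File lines 373-406 show the DMA completion split: finish_dma() dispatches to the scatter-gather variant when the FIFO was used (host->use_sg and more than one segment), otherwise to the single-buffer variant. The sg variant additionally checks the FIFO's pending count (line 390) and clears/disables the FIFO by writing bit 16 of MIO_EMM_DMA_FIFO_CFG (line 396). A sketch; MIO_EMM_DMA_FIFO_CFG_COUNT is the assumed name of the count field:

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* all queued FIFO entries should have been consumed by now */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* clear and disable the FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}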
424 static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
428 emm_dma = readq(host->base + MIO_EMM_DMA(host));
432 writeq(emm_dma, host->base + MIO_EMM_DMA(host));
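
cleanup_dma() (file lines 424-432) is the error path's read-modify-write of MIO_EMM_DMA, re-arming the engine so a faulted transfer terminates cleanly. A rough sketch; the VAL/DAT_NULL field names are assumptions:

static void cleanup_dma_sketch(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	/* re-arm with dat_null set so the pending DMA terminates;
	 * MIO_EMM_DMA_VAL / MIO_EMM_DMA_DAT_NULL are assumed names */
	emm_dma |= MIO_EMM_DMA_VAL | MIO_EMM_DMA_DAT_NULL;
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}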
437 struct cvm_mmc_host *host = dev_id;
442 if (host->need_irq_handler_lock)
443 spin_lock(&host->irq_handler_lock);
445 __acquire(&host->irq_handler_lock);
448 emm_int = readq(host->base + MIO_EMM_INT(host));
449 writeq(emm_int, host->base + MIO_EMM_INT(host));
452 check_switch_errors(host);
454 req = host->current_req;
458 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
464 if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
467 if (!host->dma_active && req->data &&
472 do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
487 if (host->dma_active && req->data)
488 if (!finish_dma(host, req->data))
491 set_cmd_response(host, req, rsp_sts);
494 cleanup_dma(host, rsp_sts);
496 host->current_req = NULL;
500 if (host->dmar_fixup_done)
501 host->dmar_fixup_done(host);
503 host->release_bus(host);
505 if (host->need_irq_handler_lock)
506 spin_unlock(&host->irq_handler_lock);
508 __release(&host->irq_handler_lock);
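
File lines 437-508 trace the interrupt handler end to end: take the (optional) handler lock, acknowledge MIO_EMM_INT by writing back what was read (write-one-to-clear, lines 448-449), ignore completions while a DMA is still in flight (line 464), run the PIO read path for buffer-done events (line 472), then on command/DMA completion finish the DMA mapping, capture the response, clean up a faulted DMA, complete the request and release the bus. A condensed reconstruction; check_status(), do_write(), the BUF_DONE/SWITCH_ERR interrupt bits and the final return value are assumptions filled in around the matched lines:

static irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock(&host->irq_handler_lock);
	else
		__acquire(&host->irq_handler_lock);

	/* read and acknowledge: MIO_EMM_INT is write-one-to-clear */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)	/* assumed bit name */
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/* a DMA still in flight: wait for its completion interrupt */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	/* PIO transfers: move the data on buffer-done events */
	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {	/* assumed bit name */
		unsigned int type = (rsp_sts >> 7) & 3;	/* cmd_type field */

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);		/* assumed helper */
	}

	host_done = emm_int & (MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_DMA_DONE |
			       MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_ERR);
	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);	/* assumed helper */

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock(&host->irq_handler_lock);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}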
516 static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
521 count = dma_map_sg(host->dev, data->sg, data->sg_len,
536 if (!host->big_dma_addr)
538 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
543 if (host->big_dma_addr)
544 writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
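
prepare_dma_single() (file lines 516-544) maps the scatterlist, programs MIO_EMM_DMA_CFG and, on controllers with a wide address register (host->big_dma_addr), writes the bus address to the separate MIO_EMM_DMA_ADR register instead of packing it into the config word. A sketch; the DMA_CFG field macros are assumed from cavium.h:

static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
	/* size is in 64-bit words, minus one */
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	/* wide-address controllers take the address separately */
	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));

	return addr;
}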
552 static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
558 count = dma_map_sg(host->dev, data->sg, data->sg_len,
566 writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
573 writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
578 * host->big_dma_addr here.
596 writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
611 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
613 writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
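
prepare_dma_sg() (file lines 552-613) drives the scatter-gather FIFO: writing 0 to MIO_EMM_DMA_FIFO_CFG enables the FIFO (line 566), each segment's address goes to MIO_EMM_DMA_FIFO_ADR (line 573), and the matching command word written to MIO_EMM_DMA_FIFO_CMD latches the pair and bumps the FIFO count (line 596); the error path unmaps and disables the FIFO again via bit 16 (lines 611-613). The comment at line 578 notes that the separate address register makes host->big_dma_addr irrelevant here. A condensed sketch; the FIFO_CMD field names and the 16-entry depth are assumptions:

static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	if (count > 16)			/* assumed FIFO depth */
		goto error;

	/* enable the FIFO by clearing the CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		addr = sg_dma_address(sg);
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
		/* only the last entry raises an interrupt */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/* this write latches address+command and increments COUNT */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
	}
	return 1;	/* no single address to return for scatter-gather */

error:
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}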
617 static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
619 if (host->use_sg && data->sg_len > 1)
620 return prepare_dma_sg(host, data);
622 return prepare_dma_single(host, data);
653 struct cvm_mmc_host *host = slot->host;
671 WARN_ON(host->current_req);
672 host->current_req = mrq;
675 addr = prepare_dma(host, data);
677 dev_err(host->dev, "prepare_dma failed\n");
681 host->dma_active = true;
682 host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
685 if (host->dmar_fixup)
686 host->dmar_fixup(host, mrq->cmd, data, addr);
694 writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
696 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
697 writeq(emm_dma, host->base + MIO_EMM_DMA(host));
704 host->release_bus(host);
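
The DMA request path (file lines 653-704) switches to the issuing slot, records the request, programs the engine via prepare_dma(), enables the DMA interrupts, applies the optional DMA fixup hook, selects one of two MIO_EMM_STS_MASK presets (lines 694/696: SD cards check only CRC/timeout bits, otherwise the 0xe4390080 default is kept), and finally kicks MIO_EMM_DMA. A condensed sketch; prepare_ext_dma() and the error code are assumptions:

static void cvm_mmc_dma_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data = mrq->data;
	u64 emm_dma, addr;

	cvm_mmc_switch_to(slot);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);	/* assumed helper */
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/* SD cards only need CRC/timeout checking, eMMC keeps the default */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;	/* assumed error code */
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}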
707 static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
709 sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
713 static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
716 struct sg_mapping_iter *smi = &host->smi;
725 writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
742 writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
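
do_read_request() (file line 709) only has to start the sg_miter; the data is pulled out of the hardware buffer after the command completes. do_write_request() (lines 713-742) is its mirror image and must fill the transmit buffer before the command is issued: line 725 resets MIO_EMM_BUF_IDX to offset zero with auto-increment, and line 742 pushes each packed 64-bit word. A sketch of the pack loop; byte order assumed to mirror the read path:

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* the transmit buffer must be filled before the command is issued */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* auto-increment from offset zero, data buffer 0 */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		/* pack bytes MSB-first into a 64-bit word */
		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}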
753 struct cvm_mmc_host *host = slot->host;
768 host->acquire_bus(host);
778 WARN_ON(host->current_req);
779 host->current_req = mrq;
783 do_read_request(host, mrq);
785 do_write_request(host, mrq);
792 host->dma_active = false;
793 host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
805 writeq(0, host->base + MIO_EMM_STS_MASK(host));
808 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
818 dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
819 writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
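
cvm_mmc_request() (file lines 753-819) is the non-DMA command path: acquire the shared bus (released only from the completion interrupt), stage PIO data if any, enable the command interrupts, unmask all status bits (line 805, writing 0 to MIO_EMM_STS_MASK), and verify via MIO_EMM_RSP_STS that no command, switch or DMA is still pending before writing MIO_EMM_CMD (lines 808-819). A condensed sketch; the MIO_EMM_CMD field packing, the retry bound and the omitted ctype/rtype XOR correction are assumptions:

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/* only one slot may own the controller; released in the IRQ */
	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);
	}

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	/* ctype/rtype XOR correction omitted in this sketch */
	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

	/* wait until no command/switch/DMA is pending */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & (MIO_EMM_RSP_STS_DMA_VAL |
				 MIO_EMM_RSP_STS_CMD_VAL |
				 MIO_EMM_RSP_STS_SWITCH_VAL |
				 MIO_EMM_RSP_STS_DMA_PEND)))
			break;
		udelay(10);
	} while (--retries);

	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n",
			rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}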
825 struct cvm_mmc_host *host = slot->host;
829 host->acquire_bus(host);
839 if (host->global_pwr_gpiod)
840 host->set_shared_power(host, 0);
846 if (host->global_pwr_gpiod)
847 host->set_shared_power(host, 1);
877 clk_period = (host->sys_freq + clock - 1) / (2 * clock);
891 do_switch(host, emm_switch);
894 host->release_bus(host);
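
The set_ios path (file lines 825-894) handles power (via the shared power GPIO hook or a regulator), bus width, timing and clock. The divisor math at line 877 rounds the half-period up in sys_freq cycles: with sys_freq = 800 MHz and a requested 52 MHz clock, clk_period = ceil(800e6 / (2 * 52e6)) = 8, and programming 8 into both CLK_HI and CLK_LO yields 800 MHz / 16 = 50 MHz, never exceeding the request. A sketch of just the clock derivation; the 52 MHz clamp and field names are assumed context:

/* Sketch: derive the MIO_EMM_SWITCH clock fields for a requested rate. */
static u64 clk_fields_sketch(struct cvm_mmc_host *host, unsigned int hz)
{
	int clk_period = 0;

	if (hz > 52000000)		/* assumed controller limit */
		hz = 52000000;
	if (hz)
		/* half period in sys_freq cycles, rounded up so the real
		 * frequency never exceeds the requested one */
		clk_period = (host->sys_freq + hz - 1) / (2 * hz);

	return FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
	       FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
}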
915 struct cvm_mmc_host *host = slot->host;
919 host->emm_cfg |= (1ull << slot->bus_id);
920 writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
927 (host->sys_freq / slot->clock) / 2);
929 (host->sys_freq / slot->clock) / 2);
933 do_switch(host, emm_switch);
944 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
945 writeq(1, host->base + MIO_EMM_RCA(host));
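
Low-level init (file lines 915-945) enables the slot's bit in MIO_EMM_CFG, programs a conservative initial clock (the divisor at lines 927/929 is the same half-period formula, here truncating rather than rounding up), executes the switch, then seeds MIO_EMM_STS_MASK with the 0xe4390080 default and MIO_EMM_RCA with 1 so relative card addressing works for CMD7. A sketch; the power-class value and the settle delay are assumptions:

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* enable this bus slot */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* program initial clock speed and power class */
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				(host->sys_freq / slot->clock) / 2) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				(host->sys_freq / slot->clock) / 2);
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;

	/* default status mask; RCA=1 enables relative addressing for CMD7 */
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}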
963 if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
1000 clock_period = 1000000000000ull / slot->host->sys_freq;
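
File line 963 rejects out-of-range or already-claimed slot ids during OF parsing; line 1000 converts the controller clock into a period in picoseconds (1e12 ps/s divided by sys_freq) so the DT-provided sample skews can be rounded to whole clock taps. For example, at sys_freq = 700 MHz the period is about 1428 ps, so a 2500 ps skew rounds to (2500 + 714) / 1428 = 2 taps. A sketch of that conversion:

/* Sketch: round a DT-provided sample skew (in ps) to clock taps. */
static u32 ps_to_taps_sketch(u64 sys_freq, u32 skew_ps)
{
	u64 clock_period = 1000000000000ull / sys_freq;	/* period in ps */

	return (skew_ps + clock_period / 2) / clock_period; /* round to nearest */
}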
1009 int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1021 slot->host = host;
1028 /* Set up host parameters */
1041 if (host->use_sg)
1048 dma_get_max_seg_size(host->dev));
1059 host->acquire_bus(host);
1060 host->slot[id] = slot;
1063 host->release_bus(host);
1068 slot->host->slot[id] = NULL;
1081 slot->host->slot[slot->bus_id] = NULL;
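
Probe and teardown (file lines 1009-1081) follow the usual pattern: allocate the mmc_host with the slot as private data, fill in host parameters (capping the segment size by dma_get_max_seg_size() when scatter-gather is available, line 1048), publish the slot in host->slot[] and bring the hardware up under the bus lock, then mmc_add_host(); on failure or removal the host->slot[] entry is cleared again (lines 1068/1081). A condensed sketch; the OF-parse helper, ops table name and abbreviated parameter setup are assumptions:

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	id = parse_slot_of_node(dev, slot);	/* assumed OF-parse helper */
	if (id < 0) {
		ret = id;
		goto err_free;
	}

	/* Set up host parameters (abbreviated) */
	mmc->ops = &cvm_mmc_ops;		/* assumed ops table name */
	if (host->use_sg)
		mmc->max_seg_size = dma_get_max_seg_size(host->dev);

	/* publish the slot and bring the hardware up under the bus lock */
	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		slot->host->slot[id] = NULL;
		goto err_free;
	}
	return 0;

err_free:
	mmc_free_host(mmc);
	return ret;
}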